Merge remote-tracking branch 'origin/master' into llvm16

commit fab9b7110e
Andrew Kelley, 2023-02-03 12:49:40 -07:00
215 changed files with 12978 additions and 11402 deletions

View File

@ -19,6 +19,7 @@ jobs:
- name: Build and Test
run: sh ci/x86_64-linux-debug.sh
x86_64-linux-release:
timeout-minutes: 420
runs-on: [self-hosted, Linux, x86_64]
steps:
- name: Checkout

View File

@ -513,7 +513,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/zig/Ast.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/Ast.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/CrossTarget.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/CrossTarget.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/parse.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/Parse.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/render.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/render.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/string_literal.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/string_literal.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig" "${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig"
@ -654,47 +654,20 @@ include_directories(
"${CMAKE_SOURCE_DIR}/src" "${CMAKE_SOURCE_DIR}/src"
) )
# These have to go before the -Wno- flags
if(MSVC) if(MSVC)
set(EXE_CXX_FLAGS "/std:c++17") set(EXE_CXX_FLAGS "/std:c++17")
else(MSVC) set(EXE_LDFLAGS "/STACK:16777216")
set(EXE_CXX_FLAGS "-std=c++17")
endif(MSVC)
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
if(MSVC)
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS} /w")
else()
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS} -Werror -Wall")
# fallthrough support was added in GCC 7.0
if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 7.0)
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS} -Werror=implicit-fallthrough")
endif()
# GCC 9.2 and older are unable to detect valid variable initialization in some cases
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS_EQUAL 9.2)
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS} -Wno-maybe-uninitialized")
endif()
endif()
endif()
if(MSVC)
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS}")
else()
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS} -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -D_GNU_SOURCE -fvisibility-inlines-hidden -fno-exceptions -fno-rtti -Werror=type-limits -Wno-missing-braces -Wno-comment")
if(MINGW)
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS} -Wno-format")
endif()
endif()
set(EXE_LDFLAGS " ")
if(MSVC)
set(EXE_LDFLAGS "${EXE_LDFLAGS} /STACK:16777216")
if(NOT "${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND NOT "${CMAKE_BUILD_TYPE}" STREQUAL "MinSizeRel") if(NOT "${CMAKE_BUILD_TYPE}" STREQUAL "Release" AND NOT "${CMAKE_BUILD_TYPE}" STREQUAL "MinSizeRel")
set(EXE_LDFLAGS "${EXE_LDFLAGS} /debug:fastlink") set(EXE_LDFLAGS "${EXE_LDFLAGS} /debug:fastlink")
endif() endif()
elseif(MINGW) else()
set(EXE_CXX_FLAGS "-std=c++17 -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -D_GNU_SOURCE -fvisibility-inlines-hidden -fno-exceptions -fno-rtti -Werror=type-limits -Wno-missing-braces -Wno-comment")
set(EXE_LDFLAGS " ")
if(MINGW)
set(EXE_CXX_FLAGS "${EXE_CXX_FLAGS} -Wno-format")
set(EXE_LDFLAGS "${EXE_LDFLAGS} -Wl,--stack,16777216") set(EXE_LDFLAGS "${EXE_LDFLAGS} -Wl,--stack,16777216")
endif() endif()
endif()
if(ZIG_STATIC) if(ZIG_STATIC)
if(APPLE) if(APPLE)

build.zig (259 lines changed)
View File

@ -1,19 +1,18 @@
const std = @import("std"); const std = @import("std");
const builtin = std.builtin; const builtin = std.builtin;
const Builder = std.build.Builder;
const tests = @import("test/tests.zig"); const tests = @import("test/tests.zig");
const BufMap = std.BufMap; const BufMap = std.BufMap;
const mem = std.mem; const mem = std.mem;
const ArrayList = std.ArrayList; const ArrayList = std.ArrayList;
const io = std.io; const io = std.io;
const fs = std.fs; const fs = std.fs;
const InstallDirectoryOptions = std.build.InstallDirectoryOptions; const InstallDirectoryOptions = std.Build.InstallDirectoryOptions;
const assert = std.debug.assert; const assert = std.debug.assert;
const zig_version = std.builtin.Version{ .major = 0, .minor = 11, .patch = 0 }; const zig_version = std.builtin.Version{ .major = 0, .minor = 11, .patch = 0 };
const stack_size = 32 * 1024 * 1024; const stack_size = 32 * 1024 * 1024;
pub fn build(b: *Builder) !void { pub fn build(b: *std.Build) !void {
const release = b.option(bool, "release", "Build in release mode") orelse false; const release = b.option(bool, "release", "Build in release mode") orelse false;
const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false; const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false;
const target = t: { const target = t: {
@ -23,7 +22,7 @@ pub fn build(b: *Builder) !void {
}
break :t b.standardTargetOptions(.{ .default_target = default_target });
};
- const mode: std.builtin.Mode = if (release) switch (target.getCpuArch()) {
+ const optimize: std.builtin.OptimizeMode = if (release) switch (target.getCpuArch()) {
.wasm32 => .ReleaseSmall,
else => .ReleaseFast,
} else .Debug;
@ -33,7 +32,12 @@ pub fn build(b: *Builder) !void {
const test_step = b.step("test", "Run all the tests");
- const docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
+ const docgen_exe = b.addExecutable(.{
.name = "docgen",
.root_source_file = .{ .path = "doc/docgen.zig" },
.target = .{},
.optimize = .Debug,
});
docgen_exe.single_threaded = single_threaded;
const rel_zig_exe = try fs.path.relative(b.allocator, b.build_root, b.zig_exe);
@ -53,10 +57,12 @@ pub fn build(b: *Builder) !void {
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
- const test_cases = b.addTest("src/test.zig");
+ const test_cases = b.addTest(.{
.root_source_file = .{ .path = "src/test.zig" },
.optimize = optimize,
});
test_cases.main_pkg_path = ".";
test_cases.stack_size = stack_size;
test_cases.setBuildMode(mode);
test_cases.single_threaded = single_threaded;
const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});
@ -154,17 +160,15 @@ pub fn build(b: *Builder) !void {
const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
if (strip == true) break :blk @as(u32, 0);
- if (mode != .Debug) break :blk 0;
+ if (optimize != .Debug) break :blk 0;
break :blk 4;
};
- const exe = addCompilerStep(b);
+ const exe = addCompilerStep(b, optimize, target);
exe.strip = strip;
exe.sanitize_thread = sanitize_thread;
exe.build_id = b.option(bool, "build-id", "Include a build id note") orelse false;
exe.install();
exe.setBuildMode(mode);
exe.setTarget(target);
const compile_step = b.step("compile", "Build the self-hosted compiler");
compile_step.dependOn(&exe.step);
@ -201,7 +205,7 @@ pub fn build(b: *Builder) !void {
test_cases.linkLibC();
}
- const is_debug = mode == .Debug;
+ const is_debug = optimize == .Debug;
const enable_logging = b.option(bool, "log", "Enable debug logging with --debug-log") orelse is_debug;
const enable_link_snapshots = b.option(bool, "link-snapshot", "Whether to enable linker state snapshots") orelse false;
@ -367,25 +371,25 @@ pub fn build(b: *Builder) !void {
test_step.dependOn(test_cases_step);
}
- var chosen_modes: [4]builtin.Mode = undefined;
+ var chosen_opt_modes_buf: [4]builtin.Mode = undefined;
var chosen_mode_index: usize = 0;
if (!skip_debug) {
- chosen_modes[chosen_mode_index] = builtin.Mode.Debug;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.Debug;
chosen_mode_index += 1;
}
if (!skip_release_safe) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSafe;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSafe;
chosen_mode_index += 1;
}
if (!skip_release_fast) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseFast;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseFast;
chosen_mode_index += 1;
}
if (!skip_release_small) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSmall;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSmall;
chosen_mode_index += 1;
}
- const modes = chosen_modes[0..chosen_mode_index];
+ const optimization_modes = chosen_opt_modes_buf[0..chosen_mode_index];
// run stage1 `zig fmt` on this build.zig file just to make sure it works
test_step.dependOn(&fmt_build_zig.step);
@ -398,7 +402,7 @@ pub fn build(b: *Builder) !void {
"test/behavior.zig", "test/behavior.zig",
"behavior", "behavior",
"Run the behavior tests", "Run the behavior tests",
modes, optimization_modes,
skip_single_threaded, skip_single_threaded,
skip_non_native, skip_non_native,
skip_libc, skip_libc,
@ -412,7 +416,7 @@ pub fn build(b: *Builder) !void {
"lib/compiler_rt.zig", "lib/compiler_rt.zig",
"compiler-rt", "compiler-rt",
"Run the compiler_rt tests", "Run the compiler_rt tests",
modes, optimization_modes,
true, // skip_single_threaded true, // skip_single_threaded
skip_non_native, skip_non_native,
true, // skip_libc true, // skip_libc
@ -426,7 +430,7 @@ pub fn build(b: *Builder) !void {
"lib/c.zig", "lib/c.zig",
"universal-libc", "universal-libc",
"Run the universal libc tests", "Run the universal libc tests",
modes, optimization_modes,
true, // skip_single_threaded true, // skip_single_threaded
skip_non_native, skip_non_native,
true, // skip_libc true, // skip_libc
@ -434,11 +438,11 @@ pub fn build(b: *Builder) !void {
skip_stage2_tests or true, // TODO get these all passing
));
- test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
+ test_step.dependOn(tests.addCompareOutputTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addStandaloneTests(
b,
test_filter,
- modes,
+ optimization_modes,
skip_non_native,
enable_macos_sdk,
target,
@ -451,10 +455,10 @@ pub fn build(b: *Builder) !void {
enable_symlinks_windows,
));
test_step.dependOn(tests.addCAbiTests(b, skip_non_native, skip_release));
- test_step.dependOn(tests.addLinkTests(b, test_filter, modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
+ test_step.dependOn(tests.addLinkTests(b, test_filter, optimization_modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
- test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
+ test_step.dependOn(tests.addStackTraceTests(b, test_filter, optimization_modes));
- test_step.dependOn(tests.addCliTests(b, test_filter, modes));
+ test_step.dependOn(tests.addCliTests(b, test_filter, optimization_modes));
- test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
+ test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
if (!skip_run_translated_c) {
test_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
@ -468,7 +472,7 @@ pub fn build(b: *Builder) !void {
"lib/std/std.zig", "lib/std/std.zig",
"std", "std",
"Run the standard library tests", "Run the standard library tests",
modes, optimization_modes,
skip_single_threaded, skip_single_threaded,
skip_non_native, skip_non_native,
skip_libc, skip_libc,
@ -479,7 +483,7 @@ pub fn build(b: *Builder) !void {
try addWasiUpdateStep(b, version);
}
- fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
+ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
const semver = try std.SemanticVersion.parse(version);
var target: std.zig.CrossTarget = .{
@ -488,9 +492,7 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
};
target.cpu_features_add.addFeature(@enumToInt(std.Target.wasm.Feature.bulk_memory));
- const exe = addCompilerStep(b);
+ const exe = addCompilerStep(b, .ReleaseSmall, target);
exe.setBuildMode(.ReleaseSmall);
exe.setTarget(target);
const exe_options = b.addOptions();
exe.addOptions("build_options", exe_options);
@ -517,8 +519,17 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
update_zig1_step.dependOn(&run_opt.step);
}
- fn addCompilerStep(b: *Builder) *std.build.LibExeObjStep {
+ fn addCompilerStep(
- const exe = b.addExecutable("zig", "src/main.zig");
+ b: *std.Build,
optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
) *std.Build.CompileStep {
const exe = b.addExecutable(.{
.name = "zig",
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
exe.stack_size = stack_size;
return exe;
}
@ -538,9 +549,9 @@ const exe_cflags = [_][]const u8{
};
fn addCmakeCfgOptionsToExe(
- b: *Builder,
+ b: *std.Build,
cfg: CMakeConfig,
- exe: *std.build.LibExeObjStep,
+ exe: *std.Build.CompileStep,
use_zig_libcxx: bool,
) !void {
if (exe.target.isDarwin()) {
@ -619,7 +630,7 @@ fn addCmakeCfgOptionsToExe(
}
}
- fn addStaticLlvmOptionsToExe(exe: *std.build.LibExeObjStep) !void {
+ fn addStaticLlvmOptionsToExe(exe: *std.Build.CompileStep) !void {
// Adds the Zig C++ sources which both stage1 and stage2 need.
//
// We need this because otherwise zig_clang_cc1_main.cpp ends up pulling
@ -656,9 +667,9 @@ fn addStaticLlvmOptionsToExe(exe: *std.build.LibExeObjStep) !void {
}
fn addCxxKnownPath(
- b: *Builder,
+ b: *std.Build,
ctx: CMakeConfig,
- exe: *std.build.LibExeObjStep,
+ exe: *std.Build.CompileStep,
objname: []const u8,
errtxt: ?[]const u8,
need_cpp_includes: bool,
@ -691,7 +702,7 @@ fn addCxxKnownPath(
}
}
- fn addCMakeLibraryList(exe: *std.build.LibExeObjStep, list: []const u8) void {
+ fn addCMakeLibraryList(exe: *std.Build.CompileStep, list: []const u8) void {
var it = mem.tokenize(u8, list, ";");
while (it.next()) |lib| {
if (mem.startsWith(u8, lib, "-l")) {
@ -705,7 +716,7 @@ fn addCMakeLibraryList(exe: *std.build.LibExeObjStep, list: []const u8) void {
}
const CMakeConfig = struct {
- llvm_linkage: std.build.LibExeObjStep.Linkage,
+ llvm_linkage: std.Build.CompileStep.Linkage,
cmake_binary_dir: []const u8,
cmake_prefix_path: []const u8,
cmake_static_library_prefix: []const u8,
@ -722,7 +733,7 @@ const CMakeConfig = struct {
const max_config_h_bytes = 1 * 1024 * 1024;
- fn findConfigH(b: *Builder, config_h_path_option: ?[]const u8) ?[]const u8 {
+ fn findConfigH(b: *std.Build, config_h_path_option: ?[]const u8) ?[]const u8 {
if (config_h_path_option) |path| {
var config_h_or_err = fs.cwd().openFile(path, .{});
if (config_h_or_err) |*file| {
@ -768,7 +779,7 @@ fn findConfigH(b: *Builder, config_h_path_option: ?[]const u8) ?[]const u8 {
} else unreachable; // TODO should not need `else unreachable`.
}
- fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
+ fn parseConfigH(b: *std.Build, config_h_text: []const u8) ?CMakeConfig {
var ctx: CMakeConfig = .{
.llvm_linkage = undefined,
.cmake_binary_dir = undefined,
@ -857,7 +868,7 @@ fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
return ctx;
}
- fn toNativePathSep(b: *Builder, s: []const u8) []u8 {
+ fn toNativePathSep(b: *std.Build, s: []const u8) []u8 {
const duplicated = b.allocator.dupe(u8, s) catch unreachable;
for (duplicated) |*byte| switch (byte.*) {
'/' => byte.* = fs.path.sep,
@ -866,166 +877,6 @@ fn toNativePathSep(b: *Builder, s: []const u8) []u8 {
return duplicated;
}
const softfloat_sources = [_][]const u8{
"deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c",
"deps/SoftFloat-3e/source/8086/extF80M_isSignalingNaN.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToExtF80M.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c",
"deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c",
"deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_extF80MToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c",
"deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c",
"deps/SoftFloat-3e/source/8086/s_propagateNaNExtF80M.c",
"deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c",
"deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c",
"deps/SoftFloat-3e/source/f128M_add.c",
"deps/SoftFloat-3e/source/f128M_div.c",
"deps/SoftFloat-3e/source/f128M_eq.c",
"deps/SoftFloat-3e/source/f128M_eq_signaling.c",
"deps/SoftFloat-3e/source/f128M_le.c",
"deps/SoftFloat-3e/source/f128M_le_quiet.c",
"deps/SoftFloat-3e/source/f128M_lt.c",
"deps/SoftFloat-3e/source/f128M_lt_quiet.c",
"deps/SoftFloat-3e/source/f128M_mul.c",
"deps/SoftFloat-3e/source/f128M_mulAdd.c",
"deps/SoftFloat-3e/source/f128M_rem.c",
"deps/SoftFloat-3e/source/f128M_roundToInt.c",
"deps/SoftFloat-3e/source/f128M_sqrt.c",
"deps/SoftFloat-3e/source/f128M_sub.c",
"deps/SoftFloat-3e/source/f128M_to_f16.c",
"deps/SoftFloat-3e/source/f128M_to_f32.c",
"deps/SoftFloat-3e/source/f128M_to_f64.c",
"deps/SoftFloat-3e/source/f128M_to_extF80M.c",
"deps/SoftFloat-3e/source/f128M_to_i32.c",
"deps/SoftFloat-3e/source/f128M_to_i32_r_minMag.c",
"deps/SoftFloat-3e/source/f128M_to_i64.c",
"deps/SoftFloat-3e/source/f128M_to_i64_r_minMag.c",
"deps/SoftFloat-3e/source/f128M_to_ui32.c",
"deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c",
"deps/SoftFloat-3e/source/f128M_to_ui64.c",
"deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c",
"deps/SoftFloat-3e/source/extF80M_add.c",
"deps/SoftFloat-3e/source/extF80M_div.c",
"deps/SoftFloat-3e/source/extF80M_eq.c",
"deps/SoftFloat-3e/source/extF80M_le.c",
"deps/SoftFloat-3e/source/extF80M_lt.c",
"deps/SoftFloat-3e/source/extF80M_mul.c",
"deps/SoftFloat-3e/source/extF80M_rem.c",
"deps/SoftFloat-3e/source/extF80M_roundToInt.c",
"deps/SoftFloat-3e/source/extF80M_sqrt.c",
"deps/SoftFloat-3e/source/extF80M_sub.c",
"deps/SoftFloat-3e/source/extF80M_to_f16.c",
"deps/SoftFloat-3e/source/extF80M_to_f32.c",
"deps/SoftFloat-3e/source/extF80M_to_f64.c",
"deps/SoftFloat-3e/source/extF80M_to_f128M.c",
"deps/SoftFloat-3e/source/f16_add.c",
"deps/SoftFloat-3e/source/f16_div.c",
"deps/SoftFloat-3e/source/f16_eq.c",
"deps/SoftFloat-3e/source/f16_isSignalingNaN.c",
"deps/SoftFloat-3e/source/f16_lt.c",
"deps/SoftFloat-3e/source/f16_mul.c",
"deps/SoftFloat-3e/source/f16_mulAdd.c",
"deps/SoftFloat-3e/source/f16_rem.c",
"deps/SoftFloat-3e/source/f16_roundToInt.c",
"deps/SoftFloat-3e/source/f16_sqrt.c",
"deps/SoftFloat-3e/source/f16_sub.c",
"deps/SoftFloat-3e/source/f16_to_extF80M.c",
"deps/SoftFloat-3e/source/f16_to_f128M.c",
"deps/SoftFloat-3e/source/f16_to_f64.c",
"deps/SoftFloat-3e/source/f32_to_extF80M.c",
"deps/SoftFloat-3e/source/f32_to_f128M.c",
"deps/SoftFloat-3e/source/f64_to_extF80M.c",
"deps/SoftFloat-3e/source/f64_to_f128M.c",
"deps/SoftFloat-3e/source/f64_to_f16.c",
"deps/SoftFloat-3e/source/i32_to_f128M.c",
"deps/SoftFloat-3e/source/s_add256M.c",
"deps/SoftFloat-3e/source/s_addCarryM.c",
"deps/SoftFloat-3e/source/s_addComplCarryM.c",
"deps/SoftFloat-3e/source/s_addF128M.c",
"deps/SoftFloat-3e/source/s_addExtF80M.c",
"deps/SoftFloat-3e/source/s_addM.c",
"deps/SoftFloat-3e/source/s_addMagsF16.c",
"deps/SoftFloat-3e/source/s_addMagsF32.c",
"deps/SoftFloat-3e/source/s_addMagsF64.c",
"deps/SoftFloat-3e/source/s_approxRecip32_1.c",
"deps/SoftFloat-3e/source/s_approxRecipSqrt32_1.c",
"deps/SoftFloat-3e/source/s_approxRecipSqrt_1Ks.c",
"deps/SoftFloat-3e/source/s_approxRecip_1Ks.c",
"deps/SoftFloat-3e/source/s_compare128M.c",
"deps/SoftFloat-3e/source/s_compare96M.c",
"deps/SoftFloat-3e/source/s_compareNonnormExtF80M.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros16.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros32.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros64.c",
"deps/SoftFloat-3e/source/s_countLeadingZeros8.c",
"deps/SoftFloat-3e/source/s_eq128.c",
"deps/SoftFloat-3e/source/s_invalidF128M.c",
"deps/SoftFloat-3e/source/s_invalidExtF80M.c",
"deps/SoftFloat-3e/source/s_isNaNF128M.c",
"deps/SoftFloat-3e/source/s_le128.c",
"deps/SoftFloat-3e/source/s_lt128.c",
"deps/SoftFloat-3e/source/s_mul128MTo256M.c",
"deps/SoftFloat-3e/source/s_mul64To128M.c",
"deps/SoftFloat-3e/source/s_mulAddF128M.c",
"deps/SoftFloat-3e/source/s_mulAddF16.c",
"deps/SoftFloat-3e/source/s_mulAddF32.c",
"deps/SoftFloat-3e/source/s_mulAddF64.c",
"deps/SoftFloat-3e/source/s_negXM.c",
"deps/SoftFloat-3e/source/s_normExtF80SigM.c",
"deps/SoftFloat-3e/source/s_normRoundPackMToF128M.c",
"deps/SoftFloat-3e/source/s_normRoundPackMToExtF80M.c",
"deps/SoftFloat-3e/source/s_normRoundPackToF16.c",
"deps/SoftFloat-3e/source/s_normRoundPackToF32.c",
"deps/SoftFloat-3e/source/s_normRoundPackToF64.c",
"deps/SoftFloat-3e/source/s_normSubnormalF128SigM.c",
"deps/SoftFloat-3e/source/s_normSubnormalF16Sig.c",
"deps/SoftFloat-3e/source/s_normSubnormalF32Sig.c",
"deps/SoftFloat-3e/source/s_normSubnormalF64Sig.c",
"deps/SoftFloat-3e/source/s_remStepMBy32.c",
"deps/SoftFloat-3e/source/s_roundMToI64.c",
"deps/SoftFloat-3e/source/s_roundMToUI64.c",
"deps/SoftFloat-3e/source/s_roundPackMToExtF80M.c",
"deps/SoftFloat-3e/source/s_roundPackMToF128M.c",
"deps/SoftFloat-3e/source/s_roundPackToF16.c",
"deps/SoftFloat-3e/source/s_roundPackToF32.c",
"deps/SoftFloat-3e/source/s_roundPackToF64.c",
"deps/SoftFloat-3e/source/s_roundToI32.c",
"deps/SoftFloat-3e/source/s_roundToI64.c",
"deps/SoftFloat-3e/source/s_roundToUI32.c",
"deps/SoftFloat-3e/source/s_roundToUI64.c",
"deps/SoftFloat-3e/source/s_shiftLeftM.c",
"deps/SoftFloat-3e/source/s_shiftNormSigF128M.c",
"deps/SoftFloat-3e/source/s_shiftRightJam256M.c",
"deps/SoftFloat-3e/source/s_shiftRightJam32.c",
"deps/SoftFloat-3e/source/s_shiftRightJam64.c",
"deps/SoftFloat-3e/source/s_shiftRightJamM.c",
"deps/SoftFloat-3e/source/s_shiftRightM.c",
"deps/SoftFloat-3e/source/s_shortShiftLeft64To96M.c",
"deps/SoftFloat-3e/source/s_shortShiftLeftM.c",
"deps/SoftFloat-3e/source/s_shortShiftRightExtendM.c",
"deps/SoftFloat-3e/source/s_shortShiftRightJam64.c",
"deps/SoftFloat-3e/source/s_shortShiftRightJamM.c",
"deps/SoftFloat-3e/source/s_shortShiftRightM.c",
"deps/SoftFloat-3e/source/s_sub1XM.c",
"deps/SoftFloat-3e/source/s_sub256M.c",
"deps/SoftFloat-3e/source/s_subM.c",
"deps/SoftFloat-3e/source/s_subMagsF16.c",
"deps/SoftFloat-3e/source/s_subMagsF32.c",
"deps/SoftFloat-3e/source/s_subMagsF64.c",
"deps/SoftFloat-3e/source/s_tryPropagateNaNF128M.c",
"deps/SoftFloat-3e/source/s_tryPropagateNaNExtF80M.c",
"deps/SoftFloat-3e/source/softfloat_state.c",
"deps/SoftFloat-3e/source/ui32_to_f128M.c",
"deps/SoftFloat-3e/source/ui64_to_f128M.c",
"deps/SoftFloat-3e/source/ui32_to_extF80M.c",
"deps/SoftFloat-3e/source/ui64_to_extF80M.c",
};
const zig_cpp_sources = [_][]const u8{
// These are planned to stay even when we are self-hosted.
"src/zig_llvm.cpp",

View File

@ -76,7 +76,8 @@ Write-Output "Build x86_64-windows-msvc behavior tests using the C backend..."
-ofmt=c `
-femit-bin="test-x86_64-windows-msvc.c" `
--test-no-exec `
- -target x86_64-windows-msvc
+ -target x86_64-windows-msvc `
-lc
CheckLastExitCode
& "stage3-debug\bin\zig.exe" build-obj `
@ -99,7 +100,7 @@ Enter-VsDevShell -VsInstallPath "C:\Program Files\Microsoft Visual Studio\2022\E
CheckLastExitCode
Write-Output "Build and run behavior tests with msvc..."
- & cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console -entry:wWinMainCRTStartup kernel32.lib ntdll.lib vcruntime.lib libucrt.lib
+ & cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib
CheckLastExitCode
& .\test-x86_64-windows-msvc.exe

View File

@ -76,7 +76,8 @@ Write-Output "Build x86_64-windows-msvc behavior tests using the C backend..."
-ofmt=c `
-femit-bin="test-x86_64-windows-msvc.c" `
--test-no-exec `
- -target x86_64-windows-msvc
+ -target x86_64-windows-msvc `
-lc
CheckLastExitCode
& "stage3-release\bin\zig.exe" build-obj `
@ -99,7 +100,7 @@ Enter-VsDevShell -VsInstallPath "C:\Program Files\Microsoft Visual Studio\2022\E
CheckLastExitCode
Write-Output "Build and run behavior tests with msvc..."
- & cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console -entry:wWinMainCRTStartup kernel32.lib ntdll.lib vcruntime.lib libucrt.lib
+ & cl.exe -I..\lib test-x86_64-windows-msvc.c compiler_rt-x86_64-windows-msvc.c /W3 /Z7 -link -nologo -debug -subsystem:console kernel32.lib ntdll.lib libcmt.lib
CheckLastExitCode
& .\test-x86_64-windows-msvc.exe

View File

@ -871,6 +871,13 @@ pub fn main() void {
However, it is possible to embed non-UTF-8 bytes into a string literal using <code>\xNN</code> notation.
</p>
<p>
Indexing into a string containing non-ASCII bytes will return individual bytes, whether valid
UTF-8 or not.
The {#link|Zig Standard Library#} provides routines for checking the validity of UTF-8 encoded
strings, accessing their code points and other encoding/decoding related tasks in
{#syntax#}std.unicode{#endsyntax#}.
</p>
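(Editor's note: a hedged sketch of the std.unicode validity check mentioned above, not part of this commit; utf8ValidateSlice is assumed to behave as its name suggests.)

    const std = @import("std");

    test "checking UTF-8 validity" {
        // utf8ValidateSlice reports whether a byte slice is well-formed UTF-8.
        try std.testing.expect(std.unicode.utf8ValidateSlice("h\x65llo"));
        try std.testing.expect(!std.unicode.utf8ValidateSlice("\xff\xfe"));
    }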
<p>
Unicode code point literals have type {#syntax#}comptime_int{#endsyntax#}, the same as Unicode code point literals have type {#syntax#}comptime_int{#endsyntax#}, the same as
{#link|Integer Literals#}. All {#link|Escape Sequences#} are valid in both string literals {#link|Integer Literals#}. All {#link|Escape Sequences#} are valid in both string literals
and Unicode code point literals. and Unicode code point literals.
@ -894,9 +901,12 @@ pub fn main() void {
print("{}\n", .{'e' == '\x65'}); // true print("{}\n", .{'e' == '\x65'}); // true
print("{d}\n", .{'\u{1f4a9}'}); // 128169 print("{d}\n", .{'\u{1f4a9}'}); // 128169
print("{d}\n", .{'💯'}); // 128175 print("{d}\n", .{'💯'}); // 128175
print("{}\n", .{mem.eql(u8, "hello", "h\x65llo")}); // true
print("0x{x}\n", .{"\xff"[0]}); // non-UTF-8 strings are possible with \xNN notation.
print("{u}\n", .{'⚡'}); print("{u}\n", .{'⚡'});
print("{}\n", .{mem.eql(u8, "hello", "h\x65llo")}); // true
print("{}\n", .{mem.eql(u8, "💯", "\xf0\x9f\x92\xaf")}); // also true
const invalid_utf8 = "\xff\xfe"; // non-UTF-8 strings are possible with \xNN notation.
print("0x{x}\n", .{invalid_utf8[1]}); // indexing them returns individual bytes...
print("0x{x}\n", .{"💯"[1]}); // ...as does indexing part-way through non-ASCII characters
}
{#code_end#}
{#see_also|Arrays|Source Encoding#}
@ -8799,6 +8809,15 @@ pub const PrefetchOptions = struct {
{#link|Optional Pointers#} are allowed. Casting an optional pointer which is {#link|null#}
to a non-optional pointer invokes safety-checked {#link|Undefined Behavior#}.
</p>
<p>
{#syntax#}@ptrCast{#endsyntax#} cannot be used for:
</p>
<ul>
<li>Removing {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier, use {#link|@qualCast#}.</li>
<li>Changing pointer address space, use {#link|@addrSpaceCast#}.</li>
<li>Increasing pointer alignment, use {#link|@alignCast#}.</li>
<li>Casting a non-slice pointer to a slice, use slicing syntax {#syntax#}ptr[start..end]{#endsyntax#}.</li>
</ul>
{#header_close#}
{#header_open|@ptrToInt#}
@ -8811,6 +8830,13 @@ pub const PrefetchOptions = struct {
{#header_close#}
{#header_open|@qualCast#}
<pre>{#syntax#}@qualCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}</pre>
<p>
Remove {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier from a pointer.
</p>
{#header_close#}
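(Editor's note: a minimal sketch of the signature documented above, assumed usage rather than code from this commit.)

    const std = @import("std");

    test "dropping const with @qualCast" {
        var x: u8 = 42;
        const const_ptr: *const u8 = &x;
        // @qualCast(comptime DestType: type, value: anytype) DestType
        const mutable_ptr = @qualCast(*u8, const_ptr);
        mutable_ptr.* += 1;
        try std.testing.expect(x == 43);
    }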
{#header_open|@rem#}
<pre>{#syntax#}@rem(numerator: T, denominator: T) T{#endsyntax#}</pre>
<p>
@ -9180,8 +9206,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@sin#}
@ -9191,8 +9216,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
@ -9203,8 +9227,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
@ -9215,8 +9238,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
@ -9227,8 +9249,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@exp2#}
@ -9238,8 +9259,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@log#}
@ -9249,8 +9269,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@log2#}
@ -9260,8 +9279,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@log10#}
@ -9271,8 +9289,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@fabs#}
@ -9282,8 +9299,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@floor#}
@ -9293,8 +9309,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@ceil#}
@ -9304,8 +9319,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@trunc#}
@ -9315,8 +9329,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
{#header_open|@round#}
@ -9326,8 +9339,7 @@ fn doTheTest() !void {
when available.
</p>
<p>
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
</p>
{#header_close#}
@ -9528,11 +9540,15 @@ fn foo(comptime T: type, ptr: *T) T {
To add standard build options to a <code class="file">build.zig</code> file:
</p>
{#code_begin|syntax|build#}
- const Builder = @import("std").build.Builder;
+ const std = @import("std");
- pub fn build(b: *Builder) void {
+ pub fn build(b: *std.Build) void {
- const exe = b.addExecutable("example", "example.zig");
+ const optimize = b.standardOptimizeOption(.{});
- exe.setBuildMode(b.standardReleaseOptions());
+ const exe = b.addExecutable(.{
.name = "example",
.root_source_file = .{ .path = "example.zig" },
.optimize = optimize,
});
b.default_step.dependOn(&exe.step);
}
{#code_end#}
@ -10547,22 +10563,26 @@ const separator = if (builtin.os.tag == .windows) '\\' else '/';
<p>This <code class="file">build.zig</code> file is automatically generated
by <kbd>zig init-exe</kbd>.</p>
{#code_begin|syntax|build_executable#}
- const Builder = @import("std").build.Builder;
+ const std = @import("std");
- pub fn build(b: *Builder) void {
+ pub fn build(b: *std.Build) void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
- // Standard release options allow the person running `zig build` to select
+ // Standard optimization options allow the person running `zig build` to select
- // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
+ // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
- const mode = b.standardReleaseOptions();
+ // set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
- const exe = b.addExecutable("example", "src/main.zig");
+ const exe = b.addExecutable(.{
- exe.setTarget(target);
+ .name = "example",
- exe.setBuildMode(mode);
+ .root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
exe.install();
const run_cmd = exe.run();
@ -10581,16 +10601,21 @@ pub fn build(b: *Builder) void {
<p>This <code class="file">build.zig</code> file is automatically generated
by <kbd>zig init-lib</kbd>.</p>
{#code_begin|syntax|build_library#}
- const Builder = @import("std").build.Builder;
+ const std = @import("std");
- pub fn build(b: *Builder) void {
+ pub fn build(b: *std.Build) void {
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const lib = b.addStaticLibrary("example", "src/main.zig");
+ const lib = b.addStaticLibrary(.{
- lib.setBuildMode(mode);
+ .name = "example",
.root_source_file = .{ .path = "src/main.zig" },
.optimize = optimize,
});
lib.install();
- var main_tests = b.addTest("src/main.zig");
+ const main_tests = b.addTest(.{
- main_tests.setBuildMode(mode);
+ .root_source_file = .{ .path = "src/main.zig" },
.optimize = optimize,
});
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&main_tests.step);
@ -10949,12 +10974,17 @@ int main(int argc, char **argv) {
}
{#end_syntax_block#}
{#code_begin|syntax|build_c#}
- const Builder = @import("std").build.Builder;
+ const std = @import("std");
- pub fn build(b: *Builder) void {
+ pub fn build(b: *std.Build) void {
- const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
+ const lib = b.addSharedLibrary(.{
.name = "mathtest",
- const exe = b.addExecutable("test", null);
+ .root_source_file = .{ .path = "mathtest.zig" },
.version = .{ .major = 1, .minor = 0, .patch = 0 },
});
const exe = b.addExecutable(.{
.name = "test",
});
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.linkLibrary(lib);
exe.linkSystemLibrary("c");
@ -11011,12 +11041,17 @@ int main(int argc, char **argv) {
}
{#end_syntax_block#}
{#code_begin|syntax|build_object#}
- const Builder = @import("std").build.Builder;
+ const std = @import("std");
- pub fn build(b: *Builder) void {
+ pub fn build(b: *std.Build) void {
- const obj = b.addObject("base64", "base64.zig");
+ const obj = b.addObject(.{
.name = "base64",
.root_source_file = .{ .path = "base64.zig" },
});
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
.name = "test",
});
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.addObject(obj);
exe.linkSystemLibrary("c");

View File

@ -3,7 +3,6 @@ const std = @import("std");
const builtin = @import("builtin"); const builtin = @import("builtin");
const io = std.io; const io = std.io;
const fmt = std.fmt; const fmt = std.fmt;
const Builder = std.build.Builder;
const mem = std.mem; const mem = std.mem;
const process = std.process; const process = std.process;
const ArrayList = std.ArrayList; const ArrayList = std.ArrayList;
@ -42,12 +41,15 @@ pub fn main() !void {
return error.InvalidArgs;
};
- const builder = try Builder.create(
+ const host = try std.zig.system.NativeTargetInfo.detect(.{});
const builder = try std.Build.create(
allocator,
zig_exe,
build_root,
cache_root,
global_cache_root,
host,
);
defer builder.destroy();
@ -58,7 +60,7 @@ pub fn main() !void {
const stdout_stream = io.getStdOut().writer();
var install_prefix: ?[]const u8 = null;
- var dir_list = Builder.DirList{};
+ var dir_list = std.Build.DirList{};
// before arg parsing, check for the NO_COLOR environment variable
// if it exists, default the color setting to .off
@ -230,7 +232,7 @@ pub fn main() !void {
};
}
- fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void {
+ fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.resolveInstallPrefix(null, .{});
@ -330,7 +332,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
);
}
- fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: anytype) void {
+ fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) void {
usage(builder, already_ran_build, out_stream) catch {};
process.exit(1);
}

File diff suppressed because it is too large

View File

@ -192,6 +192,10 @@ fn __atomic_load_8(src: *u64, model: i32) callconv(.C) u64 {
return atomic_load_N(u64, src, model);
}
fn __atomic_load_16(src: *u128, model: i32) callconv(.C) u128 {
return atomic_load_N(u128, src, model);
}
inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
@ -219,6 +223,10 @@ fn __atomic_store_8(dst: *u64, value: u64, model: i32) callconv(.C) void {
return atomic_store_N(u64, dst, value, model);
}
fn __atomic_store_16(dst: *u128, value: u128, model: i32) callconv(.C) void {
return atomic_store_N(u128, dst, value, model);
}
fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {
const WideAtomic = std.meta.Int(.unsigned, smallest_atomic_fetch_exch_size * 8);
@ -282,6 +290,10 @@ fn __atomic_exchange_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return atomic_exchange_N(u64, ptr, val, model);
}
fn __atomic_exchange_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return atomic_exchange_N(u128, ptr, val, model);
}
inline fn atomic_compare_exchange_N(
comptime T: type,
ptr: *T,
@ -327,6 +339,10 @@ fn __atomic_compare_exchange_8(ptr: *u64, expected: *u64, desired: u64, success:
return atomic_compare_exchange_N(u64, ptr, expected, desired, success, failure);
}
fn __atomic_compare_exchange_16(ptr: *u128, expected: *u128, desired: u128, success: i32, failure: i32) callconv(.C) i32 {
return atomic_compare_exchange_N(u128, ptr, expected, desired, success, failure);
}
inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr: *T, val: T, model: i32) T {
_ = model;
const Updater = struct {
@ -338,6 +354,8 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
.Nand => ~(old & new),
.Or => old | new,
.Xor => old ^ new,
.Max => @max(old, new),
.Min => @min(old, new),
else => @compileError("unsupported atomic op"),
};
}
@ -374,6 +392,10 @@ fn __atomic_fetch_add_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Add, ptr, val, model);
}
fn __atomic_fetch_add_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .Add, ptr, val, model);
}
fn __atomic_fetch_sub_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Sub, ptr, val, model);
}
@ -390,6 +412,10 @@ fn __atomic_fetch_sub_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Sub, ptr, val, model);
}
fn __atomic_fetch_sub_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .Sub, ptr, val, model);
}
fn __atomic_fetch_and_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .And, ptr, val, model);
}
@ -406,6 +432,10 @@ fn __atomic_fetch_and_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .And, ptr, val, model);
}
fn __atomic_fetch_and_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .And, ptr, val, model);
}
fn __atomic_fetch_or_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Or, ptr, val, model);
}
@ -422,6 +452,10 @@ fn __atomic_fetch_or_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Or, ptr, val, model);
}
fn __atomic_fetch_or_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .Or, ptr, val, model);
}
fn __atomic_fetch_xor_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Xor, ptr, val, model);
}
@ -438,6 +472,10 @@ fn __atomic_fetch_xor_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Xor, ptr, val, model);
}
fn __atomic_fetch_xor_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .Xor, ptr, val, model);
}
fn __atomic_fetch_nand_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Nand, ptr, val, model);
}
@ -454,6 +492,50 @@ fn __atomic_fetch_nand_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Nand, ptr, val, model);
}
fn __atomic_fetch_nand_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .Nand, ptr, val, model);
}
fn __atomic_fetch_umax_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Max, ptr, val, model);
}
fn __atomic_fetch_umax_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .Max, ptr, val, model);
}
fn __atomic_fetch_umin_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
return fetch_op_N(u8, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_2(ptr: *u16, val: u16, model: i32) callconv(.C) u16 {
return fetch_op_N(u16, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
return fetch_op_N(u32, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_8(ptr: *u64, val: u64, model: i32) callconv(.C) u64 {
return fetch_op_N(u64, .Min, ptr, val, model);
}
fn __atomic_fetch_umin_16(ptr: *u128, val: u128, model: i32) callconv(.C) u128 {
return fetch_op_N(u128, .Min, ptr, val, model);
}
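(Editor's note: these __atomic_fetch_umax/umin symbols are the libcalls a compiler may emit for unsigned atomic max/min when no native instruction is available; at the language level that operation is expressed with @atomicRmw. A hedged sketch, not part of this commit:)

    const std = @import("std");

    test "atomic unsigned max" {
        var value: u32 = 5;
        // Returns the previous value; afterwards `value` holds the maximum.
        const prev = @atomicRmw(u32, &value, .Max, 9, .SeqCst);
        try std.testing.expect(prev == 5 and value == 9);
    }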
comptime {
if (supports_atomic_ops and builtin.object_format != .c) {
@export(__atomic_load, .{ .name = "__atomic_load", .linkage = linkage, .visibility = visibility });
@ -465,50 +547,72 @@ comptime {
@export(__atomic_fetch_add_2, .{ .name = "__atomic_fetch_add_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_add_4, .{ .name = "__atomic_fetch_add_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_add_8, .{ .name = "__atomic_fetch_add_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_add_16, .{ .name = "__atomic_fetch_add_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_sub_1, .{ .name = "__atomic_fetch_sub_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_sub_2, .{ .name = "__atomic_fetch_sub_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_sub_4, .{ .name = "__atomic_fetch_sub_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_sub_8, .{ .name = "__atomic_fetch_sub_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_sub_16, .{ .name = "__atomic_fetch_sub_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_and_1, .{ .name = "__atomic_fetch_and_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_and_2, .{ .name = "__atomic_fetch_and_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_and_4, .{ .name = "__atomic_fetch_and_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_and_8, .{ .name = "__atomic_fetch_and_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_and_16, .{ .name = "__atomic_fetch_and_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_or_1, .{ .name = "__atomic_fetch_or_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_or_2, .{ .name = "__atomic_fetch_or_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_or_4, .{ .name = "__atomic_fetch_or_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_or_8, .{ .name = "__atomic_fetch_or_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_or_16, .{ .name = "__atomic_fetch_or_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_xor_1, .{ .name = "__atomic_fetch_xor_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_xor_2, .{ .name = "__atomic_fetch_xor_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_xor_4, .{ .name = "__atomic_fetch_xor_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_xor_8, .{ .name = "__atomic_fetch_xor_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_xor_16, .{ .name = "__atomic_fetch_xor_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_nand_1, .{ .name = "__atomic_fetch_nand_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_nand_2, .{ .name = "__atomic_fetch_nand_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_nand_4, .{ .name = "__atomic_fetch_nand_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage, .visibility = visibility }); @export(__atomic_fetch_nand_8, .{ .name = "__atomic_fetch_nand_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_nand_16, .{ .name = "__atomic_fetch_nand_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umax_1, .{ .name = "__atomic_fetch_umax_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umax_2, .{ .name = "__atomic_fetch_umax_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umax_4, .{ .name = "__atomic_fetch_umax_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umax_8, .{ .name = "__atomic_fetch_umax_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umax_16, .{ .name = "__atomic_fetch_umax_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umin_1, .{ .name = "__atomic_fetch_umin_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umin_2, .{ .name = "__atomic_fetch_umin_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umin_4, .{ .name = "__atomic_fetch_umin_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umin_8, .{ .name = "__atomic_fetch_umin_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_fetch_umin_16, .{ .name = "__atomic_fetch_umin_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage, .visibility = visibility }); @export(__atomic_load_1, .{ .name = "__atomic_load_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage, .visibility = visibility }); @export(__atomic_load_2, .{ .name = "__atomic_load_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage, .visibility = visibility }); @export(__atomic_load_4, .{ .name = "__atomic_load_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage, .visibility = visibility }); @export(__atomic_load_8, .{ .name = "__atomic_load_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_load_16, .{ .name = "__atomic_load_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage, .visibility = visibility }); @export(__atomic_store_1, .{ .name = "__atomic_store_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage, .visibility = visibility }); @export(__atomic_store_2, .{ .name = "__atomic_store_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage, .visibility = visibility }); @export(__atomic_store_4, .{ .name = "__atomic_store_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage, .visibility = visibility }); @export(__atomic_store_8, .{ .name = "__atomic_store_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_store_16, .{ .name = "__atomic_store_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage, .visibility = visibility }); @export(__atomic_exchange_1, .{ .name = "__atomic_exchange_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage, .visibility = visibility }); @export(__atomic_exchange_2, .{ .name = "__atomic_exchange_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage, .visibility = visibility }); @export(__atomic_exchange_4, .{ .name = "__atomic_exchange_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage, .visibility = visibility }); @export(__atomic_exchange_8, .{ .name = "__atomic_exchange_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_exchange_16, .{ .name = "__atomic_exchange_16", .linkage = linkage, .visibility = visibility });
@export(__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage, .visibility = visibility }); @export(__atomic_compare_exchange_1, .{ .name = "__atomic_compare_exchange_1", .linkage = linkage, .visibility = visibility });
@export(__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage, .visibility = visibility }); @export(__atomic_compare_exchange_2, .{ .name = "__atomic_compare_exchange_2", .linkage = linkage, .visibility = visibility });
@export(__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage, .visibility = visibility }); @export(__atomic_compare_exchange_4, .{ .name = "__atomic_compare_exchange_4", .linkage = linkage, .visibility = visibility });
@export(__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage, .visibility = visibility }); @export(__atomic_compare_exchange_8, .{ .name = "__atomic_compare_exchange_8", .linkage = linkage, .visibility = visibility });
@export(__atomic_compare_exchange_16, .{ .name = "__atomic_compare_exchange_16", .linkage = linkage, .visibility = visibility });
} }
} }
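
These exports follow libatomic's size-suffixed naming (operation name plus operand size in bytes). As a minimal sketch of the pattern, assuming the conventional libatomic signature (the real implementations live elsewhere in compiler_rt and honor the memory-order argument), one of the newly exported unsigned-max helpers could look like:

    fn __atomic_fetch_umax_4(ptr: *u32, val: u32, model: i32) callconv(.C) u32 {
        _ = model; // this sketch always uses sequential consistency
        return @atomicRmw(u32, ptr, .Max, val, .SeqCst); // unsigned max on u32
    }

    comptime {
        // Same shape as the @export calls above; the real file uses its
        // `linkage` and `visibility` constants, .Strong keeps the sketch self-contained.
        @export(__atomic_fetch_umax_4, .{ .name = "__atomic_fetch_umax_4", .linkage = .Strong });
    }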


@ -1354,6 +1354,10 @@ const NAV_MODES = {
payloadHtml += "ptrCast"; payloadHtml += "ptrCast";
break; break;
} }
case "qual_cast": {
payloadHtml += "qualCast";
break;
}
case "truncate": { case "truncate": {
payloadHtml += "truncate"; payloadHtml += "truncate";
break; break;
@ -3158,7 +3162,6 @@ const NAV_MODES = {
canonTypeDecls = new Array(zigAnalysis.types.length); canonTypeDecls = new Array(zigAnalysis.types.length);
for (let pkgI = 0; pkgI < zigAnalysis.packages.length; pkgI += 1) { for (let pkgI = 0; pkgI < zigAnalysis.packages.length; pkgI += 1) {
if (pkgI === zigAnalysis.rootPkg && rootIsStd) continue;
let pkg = zigAnalysis.packages[pkgI]; let pkg = zigAnalysis.packages[pkgI];
let pkgNames = canonPkgPaths[pkgI]; let pkgNames = canonPkgPaths[pkgI];
if (pkgNames === undefined) continue; if (pkgNames === undefined) continue;


@ -1,34 +1,67 @@
const std = @import("std"); const std = @import("std");
pub fn build(b: *std.build.Builder) void { // Although this function looks imperative, note that its job is to
// declaratively construct a build graph that will be executed by an external
// runner.
pub fn build(b: *std.Build) void {
// Standard target options allows the person running `zig build` to choose // Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which // what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options // means any target is allowed, and the default is native. Other options
// for restricting supported target set are available. // for restricting supported target set are available.
const target = b.standardTargetOptions(.{}); const target = b.standardTargetOptions(.{});
// Standard release options allow the person running `zig build` to select // Standard optimization options allow the person running `zig build` to select
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
const mode = b.standardReleaseOptions(); // set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
const exe = b.addExecutable("$", "src/main.zig"); const exe = b.addExecutable(.{
exe.setTarget(target); .name = "$",
exe.setBuildMode(mode); // In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
// This declares intent for the executable to be installed into the
// standard location when the user invokes the "install" step (the default
// step when running `zig build`).
exe.install(); exe.install();
// This *creates* a RunStep in the build graph, to be executed when another
// step is evaluated that depends on it. The next line below will establish
// such a dependency.
const run_cmd = exe.run(); const run_cmd = exe.run();
// By making the run step depend on the install step, it will be run from the
// installation directory rather than directly from within the cache directory.
// This is not necessary, however, if the application depends on other installed
// files, this ensures they will be present and in the expected location.
run_cmd.step.dependOn(b.getInstallStep()); run_cmd.step.dependOn(b.getInstallStep());
// This allows the user to pass arguments to the application in the build
// command itself, like this: `zig build run -- arg1 arg2 etc`
if (b.args) |args| { if (b.args) |args| {
run_cmd.addArgs(args); run_cmd.addArgs(args);
} }
// This creates a build step. It will be visible in the `zig build --help` menu,
// and can be selected like this: `zig build run`
// This will evaluate the `run` step rather than the default, which is "install".
const run_step = b.step("run", "Run the app"); const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step); run_step.dependOn(&run_cmd.step);
const exe_tests = b.addTest("src/main.zig"); // Creates a step for unit testing.
exe_tests.setTarget(target); const exe_tests = b.addTest(.{
exe_tests.setBuildMode(mode); .root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
// Similar to creating the run step earlier, this exposes a `test` step to
// the `zig build --help` menu, providing a way for the user to request
// running the unit tests.
const test_step = b.step("test", "Run unit tests"); const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&exe_tests.step); test_step.dependOn(&exe_tests.step);
} }
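
Because the old and new columns are interleaved above, here is the new executable-project template read on its own, taken from the right-hand side of the diff with the explanatory comments trimmed (`$` is the placeholder the generator substitutes with the project name):

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        const target = b.standardTargetOptions(.{});
        const optimize = b.standardOptimizeOption(.{});

        const exe = b.addExecutable(.{
            .name = "$",
            .root_source_file = .{ .path = "src/main.zig" },
            .target = target,
            .optimize = optimize,
        });
        exe.install();

        // Run from the install directory and forward `zig build run -- arg1 arg2`.
        const run_cmd = exe.run();
        run_cmd.step.dependOn(b.getInstallStep());
        if (b.args) |args| {
            run_cmd.addArgs(args);
        }

        const run_step = b.step("run", "Run the app");
        run_step.dependOn(&run_cmd.step);

        const exe_tests = b.addTest(.{
            .root_source_file = .{ .path = "src/main.zig" },
            .target = target,
            .optimize = optimize,
        });

        const test_step = b.step("test", "Run unit tests");
        test_step.dependOn(&exe_tests.step);
    }

In short: `std.build.Builder` becomes `std.Build`, `standardReleaseOptions` becomes `standardOptimizeOption`, and the target and optimize mode are passed to `addExecutable`/`addTest` as fields of an options struct instead of via `setTarget` and `setBuildMode`.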


@ -1,17 +1,44 @@
const std = @import("std"); const std = @import("std");
pub fn build(b: *std.build.Builder) void { // Although this function looks imperative, note that its job is to
// Standard release options allow the person running `zig build` to select // declaratively construct a build graph that will be executed by an external
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. // runner.
const mode = b.standardReleaseOptions(); pub fn build(b: *std.Build) void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
const lib = b.addStaticLibrary("$", "src/main.zig"); // Standard optimization options allow the person running `zig build` to select
lib.setBuildMode(mode); // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
// set a preferred release mode, allowing the user to decide how to optimize.
const optimize = b.standardOptimizeOption(.{});
const lib = b.addStaticLibrary(.{
.name = "$",
// In this case the main source file is merely a path, however, in more
// complicated build scripts, this could be a generated file.
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
// This declares intent for the library to be installed into the standard
// location when the user invokes the "install" step (the default step when
// running `zig build`).
lib.install(); lib.install();
const main_tests = b.addTest("src/main.zig"); // Creates a step for unit testing.
main_tests.setBuildMode(mode); const main_tests = b.addTest(.{
.root_source_file = .{ .path = "src/main.zig" },
.target = target,
.optimize = optimize,
});
// This creates a build step. It will be visible in the `zig build --help` menu,
// and can be selected like this: `zig build test`
// This will evaluate the `test` step rather than the default, which is "install".
const test_step = b.step("test", "Run library tests"); const test_step = b.step("test", "Run library tests");
test_step.dependOn(&main_tests.step); test_step.dependOn(&main_tests.step);
} }
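
Likewise, the new static-library template reads as follows when the right-hand column is extracted on its own (`$` again stands for the project-name placeholder):

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        const target = b.standardTargetOptions(.{});
        const optimize = b.standardOptimizeOption(.{});

        const lib = b.addStaticLibrary(.{
            .name = "$",
            .root_source_file = .{ .path = "src/main.zig" },
            .target = target,
            .optimize = optimize,
        });
        lib.install();

        const main_tests = b.addTest(.{
            .root_source_file = .{ .path = "src/main.zig" },
            .target = target,
            .optimize = optimize,
        });

        const test_step = b.step("test", "Run library tests");
        test_step.dependOn(&main_tests.step);
    }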


@ -31,10 +31,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b)) #define valid(n, b) ((n) >= 0 && (n) < (b))
intmax_t intmax_t
strtoimax(nptr, endptr, base) strtoimax(const char * __restrict__ nptr, char ** __restrict__ endptr, int base)
register const char * __restrict__ nptr;
char ** __restrict__ endptr;
register int base;
{ {
register uintmax_t accum; /* accumulates converted value */ register uintmax_t accum; /* accumulates converted value */
register int n; /* numeral from digit character */ register int n; /* numeral from digit character */


@ -31,10 +31,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b)) #define valid(n, b) ((n) >= 0 && (n) < (b))
uintmax_t uintmax_t
strtoumax(nptr, endptr, base) strtoumax(const char * __restrict__ nptr, char ** __restrict__ endptr, int base)
register const char * __restrict__ nptr;
char ** __restrict__ endptr;
register int base;
{ {
register uintmax_t accum; /* accumulates converted value */ register uintmax_t accum; /* accumulates converted value */
register uintmax_t next; /* for computing next value of accum */ register uintmax_t next; /* for computing next value of accum */


@ -33,10 +33,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b)) #define valid(n, b) ((n) >= 0 && (n) < (b))
intmax_t intmax_t
wcstoimax(nptr, endptr, base) wcstoimax(const wchar_t * __restrict__ nptr, wchar_t ** __restrict__ endptr, int base)
register const wchar_t * __restrict__ nptr;
wchar_t ** __restrict__ endptr;
register int base;
{ {
register uintmax_t accum; /* accumulates converted value */ register uintmax_t accum; /* accumulates converted value */
register int n; /* numeral from digit character */ register int n; /* numeral from digit character */


@ -33,10 +33,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b)) #define valid(n, b) ((n) >= 0 && (n) < (b))
uintmax_t uintmax_t
wcstoumax(nptr, endptr, base) wcstoumax(const wchar_t * __restrict__ nptr, wchar_t ** __restrict__ endptr, int base)
register const wchar_t * __restrict__ nptr;
wchar_t ** __restrict__ endptr;
register int base;
{ {
register uintmax_t accum; /* accumulates converted value */ register uintmax_t accum; /* accumulates converted value */
register uintmax_t next; /* for computing next value of accum */ register uintmax_t next; /* for computing next value of accum */

lib/std/Build.zig: new file, 1780 lines (diff suppressed because it is too large)


@ -1,7 +1,5 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const build = std.build; const Step = std.Build.Step;
const Step = build.Step;
const Builder = build.Builder;
const fs = std.fs; const fs = std.fs;
const mem = std.mem; const mem = std.mem;
@ -10,17 +8,17 @@ const CheckFileStep = @This();
pub const base_id = .check_file; pub const base_id = .check_file;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
expected_matches: []const []const u8, expected_matches: []const []const u8,
source: build.FileSource, source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024, max_bytes: usize = 20 * 1024 * 1024,
pub fn create( pub fn create(
builder: *Builder, builder: *std.Build,
source: build.FileSource, source: std.Build.FileSource,
expected_matches: []const []const u8, expected_matches: []const []const u8,
) *CheckFileStep { ) *CheckFileStep {
const self = builder.allocator.create(CheckFileStep) catch unreachable; const self = builder.allocator.create(CheckFileStep) catch @panic("OOM");
self.* = CheckFileStep{ self.* = CheckFileStep{
.builder = builder, .builder = builder,
.step = Step.init(.check_file, "CheckFile", builder.allocator, make), .step = Step.init(.check_file, "CheckFile", builder.allocator, make),


@ -1,6 +1,5 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const assert = std.debug.assert; const assert = std.debug.assert;
const build = std.build;
const fs = std.fs; const fs = std.fs;
const macho = std.macho; const macho = std.macho;
const math = std.math; const math = std.math;
@ -10,23 +9,22 @@ const testing = std.testing;
const CheckObjectStep = @This(); const CheckObjectStep = @This();
const Allocator = mem.Allocator; const Allocator = mem.Allocator;
const Builder = build.Builder; const Step = std.Build.Step;
const Step = build.Step; const EmulatableRunStep = std.Build.EmulatableRunStep;
const EmulatableRunStep = build.EmulatableRunStep;
pub const base_id = .check_object; pub const base_id = .check_object;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
source: build.FileSource, source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024, max_bytes: usize = 20 * 1024 * 1024,
checks: std.ArrayList(Check), checks: std.ArrayList(Check),
dump_symtab: bool = false, dump_symtab: bool = false,
obj_format: std.Target.ObjectFormat, obj_format: std.Target.ObjectFormat,
pub fn create(builder: *Builder, source: build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep { pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
const gpa = builder.allocator; const gpa = builder.allocator;
const self = gpa.create(CheckObjectStep) catch unreachable; const self = gpa.create(CheckObjectStep) catch @panic("OOM");
self.* = .{ self.* = .{
.builder = builder, .builder = builder,
.step = Step.init(.check_file, "CheckObject", gpa, make), .step = Step.init(.check_file, "CheckObject", gpa, make),
@ -44,7 +42,7 @@ pub fn runAndCompare(self: *CheckObjectStep) *EmulatableRunStep {
const dependencies_len = self.step.dependencies.items.len; const dependencies_len = self.step.dependencies.items.len;
assert(dependencies_len > 0); assert(dependencies_len > 0);
const exe_step = self.step.dependencies.items[dependencies_len - 1]; const exe_step = self.step.dependencies.items[dependencies_len - 1];
const exe = exe_step.cast(std.build.LibExeObjStep).?; const exe = exe_step.cast(std.Build.CompileStep).?;
const emulatable_step = EmulatableRunStep.create(self.builder, "EmulatableRun", exe); const emulatable_step = EmulatableRunStep.create(self.builder, "EmulatableRun", exe);
emulatable_step.step.dependOn(&self.step); emulatable_step.step.dependOn(&self.step);
return emulatable_step; return emulatable_step;
@ -216,10 +214,10 @@ const ComputeCompareExpected = struct {
}; };
const Check = struct { const Check = struct {
builder: *Builder, builder: *std.Build,
actions: std.ArrayList(Action), actions: std.ArrayList(Action),
fn create(b: *Builder) Check { fn create(b: *std.Build) Check {
return .{ return .{
.builder = b, .builder = b,
.actions = std.ArrayList(Action).init(b.allocator), .actions = std.ArrayList(Action).init(b.allocator),
@ -230,14 +228,14 @@ const Check = struct {
self.actions.append(.{ self.actions.append(.{
.tag = .match, .tag = .match,
.phrase = self.builder.dupe(phrase), .phrase = self.builder.dupe(phrase),
}) catch unreachable; }) catch @panic("OOM");
} }
fn notPresent(self: *Check, phrase: []const u8) void { fn notPresent(self: *Check, phrase: []const u8) void {
self.actions.append(.{ self.actions.append(.{
.tag = .not_present, .tag = .not_present,
.phrase = self.builder.dupe(phrase), .phrase = self.builder.dupe(phrase),
}) catch unreachable; }) catch @panic("OOM");
} }
fn computeCmp(self: *Check, phrase: []const u8, expected: ComputeCompareExpected) void { fn computeCmp(self: *Check, phrase: []const u8, expected: ComputeCompareExpected) void {
@ -245,7 +243,7 @@ const Check = struct {
.tag = .compute_cmp, .tag = .compute_cmp,
.phrase = self.builder.dupe(phrase), .phrase = self.builder.dupe(phrase),
.expected = expected, .expected = expected,
}) catch unreachable; }) catch @panic("OOM");
} }
}; };
@ -253,7 +251,7 @@ const Check = struct {
pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void { pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void {
var new_check = Check.create(self.builder); var new_check = Check.create(self.builder);
new_check.match(phrase); new_check.match(phrase);
self.checks.append(new_check) catch unreachable; self.checks.append(new_check) catch @panic("OOM");
} }
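
A hypothetical usage sketch of the renamed API (the phrase, object format, and surrounding step names are invented; only `create` and `checkStart` are taken from the code above):

    const check = std.Build.CheckObjectStep.create(b, exe.getOutputSource(), .elf);
    check.checkStart("section headers");
    // Hook the check into an existing top-level step, e.g. a `test` step.
    test_step.dependOn(&check.step);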
/// Adds another searched phrase to the latest created Check with `CheckObjectStep.checkStart(...)`. /// Adds another searched phrase to the latest created Check with `CheckObjectStep.checkStart(...)`.
@ -295,7 +293,7 @@ pub fn checkComputeCompare(
) void { ) void {
var new_check = Check.create(self.builder); var new_check = Check.create(self.builder);
new_check.computeCmp(program, expected); new_check.computeCmp(program, expected);
self.checks.append(new_check) catch unreachable; self.checks.append(new_check) catch @panic("OOM");
} }
fn make(step: *Step) !void { fn make(step: *Step) !void {


@ -1,7 +1,6 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const ConfigHeaderStep = @This(); const ConfigHeaderStep = @This();
const Step = std.build.Step; const Step = std.Build.Step;
const Builder = std.build.Builder;
pub const base_id: Step.Id = .config_header; pub const base_id: Step.Id = .config_header;
@ -24,15 +23,15 @@ pub const Value = union(enum) {
}; };
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
source: std.build.FileSource, source: std.Build.FileSource,
style: Style, style: Style,
values: std.StringHashMap(Value), values: std.StringHashMap(Value),
max_bytes: usize = 2 * 1024 * 1024, max_bytes: usize = 2 * 1024 * 1024,
output_dir: []const u8, output_dir: []const u8,
output_basename: []const u8, output_basename: []const u8,
pub fn create(builder: *Builder, source: std.build.FileSource, style: Style) *ConfigHeaderStep { pub fn create(builder: *std.Build, source: std.Build.FileSource, style: Style) *ConfigHeaderStep {
const self = builder.allocator.create(ConfigHeaderStep) catch @panic("OOM"); const self = builder.allocator.create(ConfigHeaderStep) catch @panic("OOM");
const name = builder.fmt("configure header {s}", .{source.getDisplayName()}); const name = builder.fmt("configure header {s}", .{source.getDisplayName()});
self.* = .{ self.* = .{
@ -62,39 +61,51 @@ pub fn addValues(self: *ConfigHeaderStep, values: anytype) void {
fn addValuesInner(self: *ConfigHeaderStep, values: anytype) !void { fn addValuesInner(self: *ConfigHeaderStep, values: anytype) !void {
inline for (@typeInfo(@TypeOf(values)).Struct.fields) |field| { inline for (@typeInfo(@TypeOf(values)).Struct.fields) |field| {
switch (@typeInfo(field.type)) { try putValue(self, field.name, field.type, @field(values, field.name));
}
}
fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v: T) !void {
switch (@typeInfo(T)) {
.Null => { .Null => {
try self.values.put(field.name, .undef); try self.values.put(field_name, .undef);
}, },
.Void => { .Void => {
try self.values.put(field.name, .defined); try self.values.put(field_name, .defined);
}, },
.Bool => { .Bool => {
try self.values.put(field.name, .{ .boolean = @field(values, field.name) }); try self.values.put(field_name, .{ .boolean = v });
},
.Int => {
try self.values.put(field_name, .{ .int = v });
}, },
.ComptimeInt => { .ComptimeInt => {
try self.values.put(field.name, .{ .int = @field(values, field.name) }); try self.values.put(field_name, .{ .int = v });
}, },
.EnumLiteral => { .EnumLiteral => {
try self.values.put(field.name, .{ .ident = @tagName(@field(values, field.name)) }); try self.values.put(field_name, .{ .ident = @tagName(v) });
},
.Optional => {
if (v) |x| {
return putValue(self, field_name, @TypeOf(x), x);
} else {
try self.values.put(field_name, .undef);
}
}, },
.Pointer => |ptr| { .Pointer => |ptr| {
switch (@typeInfo(ptr.child)) { switch (@typeInfo(ptr.child)) {
.Array => |array| { .Array => |array| {
if (ptr.size == .One and array.child == u8) { if (ptr.size == .One and array.child == u8) {
try self.values.put(field.name, .{ .string = @field(values, field.name) }); try self.values.put(field_name, .{ .string = v });
continue; return;
} }
}, },
else => {}, else => {},
} }
@compileError("unsupported ConfigHeaderStep value type: " ++ @compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T));
@typeName(field.type));
}, },
else => @compileError("unsupported ConfigHeaderStep value type: " ++ else => @compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T)),
@typeName(field.type)),
}
} }
} }
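
A hypothetical usage sketch of the reworked value handling (the macro names and values are invented, and `config_header` is assumed to be a previously created `*ConfigHeaderStep`), showing the kinds of values `putValue` now accepts, including runtime integers and optionals:

    config_header.addValues(.{
        .HAVE_FOO = {},                 // void -> defined
        .HAVE_BAR = null,               // null -> undef
        .USE_BAZ = true,                // bool
        .BUFFER_SIZE = @as(u32, 4096),  // runtime int, newly supported alongside comptime_int
        .VERSION_STRING = "1.2.3",      // pointer to array of u8 -> string
        .RETRY_COUNT = @as(?u32, null), // optional, newly supported -> undef when null
    });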


@ -5,11 +5,9 @@
//! without having to verify if it's possible to be run against. //! without having to verify if it's possible to be run against.
const std = @import("../std.zig"); const std = @import("../std.zig");
const build = std.build; const Step = std.Build.Step;
const Step = std.build.Step; const CompileStep = std.Build.CompileStep;
const Builder = std.build.Builder; const RunStep = std.Build.RunStep;
const LibExeObjStep = std.build.LibExeObjStep;
const RunStep = std.build.RunStep;
const fs = std.fs; const fs = std.fs;
const process = std.process; const process = std.process;
@ -22,10 +20,10 @@ pub const base_id = .emulatable_run;
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
/// The artifact (executable) to be run by this step /// The artifact (executable) to be run by this step
exe: *LibExeObjStep, exe: *CompileStep,
/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution /// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
expected_exit_code: ?u8 = 0, expected_exit_code: ?u8 = 0,
@ -47,9 +45,9 @@ hide_foreign_binaries_warning: bool,
/// binary through emulation when any of the emulation options such as `enable_rosetta` are set to true. /// binary through emulation when any of the emulation options such as `enable_rosetta` are set to true.
/// When set to false, and the binary is foreign, running the executable is skipped. /// When set to false, and the binary is foreign, running the executable is skipped.
/// Asserts given artifact is an executable. /// Asserts given artifact is an executable.
pub fn create(builder: *Builder, name: []const u8, artifact: *LibExeObjStep) *EmulatableRunStep { pub fn create(builder: *std.Build, name: []const u8, artifact: *CompileStep) *EmulatableRunStep {
std.debug.assert(artifact.kind == .exe or artifact.kind == .test_exe); std.debug.assert(artifact.kind == .exe or artifact.kind == .test_exe);
const self = builder.allocator.create(EmulatableRunStep) catch unreachable; const self = builder.allocator.create(EmulatableRunStep) catch @panic("OOM");
const option_name = "hide-foreign-warnings"; const option_name = "hide-foreign-warnings";
const hide_warnings = if (builder.available_options_map.get(option_name) == null) warn: { const hide_warnings = if (builder.available_options_map.get(option_name) == null) warn: {
@ -156,9 +154,9 @@ fn warnAboutForeignBinaries(step: *EmulatableRunStep) void {
const builder = step.builder; const builder = step.builder;
const artifact = step.exe; const artifact = step.exe;
const host_name = builder.host.target.zigTriple(builder.allocator) catch unreachable; const host_name = builder.host.target.zigTriple(builder.allocator) catch @panic("unhandled error");
const foreign_name = artifact.target.zigTriple(builder.allocator) catch unreachable; const foreign_name = artifact.target.zigTriple(builder.allocator) catch @panic("unhandled error");
const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch unreachable; const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch @panic("unhandled error");
const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc; const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc;
switch (builder.host.getExternalExecutor(target_info, .{ switch (builder.host.getExternalExecutor(target_info, .{
.qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null, .qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,


@ -1,25 +1,20 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const build = @import("../build.zig"); const Step = std.Build.Step;
const Step = build.Step;
const Builder = build.Builder;
const BufMap = std.BufMap;
const mem = std.mem;
const FmtStep = @This(); const FmtStep = @This();
pub const base_id = .fmt; pub const base_id = .fmt;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
argv: [][]const u8, argv: [][]const u8,
pub fn create(builder: *Builder, paths: []const []const u8) *FmtStep { pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
const self = builder.allocator.create(FmtStep) catch unreachable; const self = builder.allocator.create(FmtStep) catch @panic("OOM");
const name = "zig fmt"; const name = "zig fmt";
self.* = FmtStep{ self.* = FmtStep{
.step = Step.init(.fmt, name, builder.allocator, make), .step = Step.init(.fmt, name, builder.allocator, make),
.builder = builder, .builder = builder,
.argv = builder.allocator.alloc([]u8, paths.len + 2) catch unreachable, .argv = builder.allocator.alloc([]u8, paths.len + 2) catch @panic("OOM"),
}; };
self.argv[0] = builder.zig_exe; self.argv[0] = builder.zig_exe;


@ -1,32 +1,29 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const build = @import("../build.zig"); const Step = std.Build.Step;
const Step = build.Step; const CompileStep = std.Build.CompileStep;
const Builder = build.Builder; const InstallDir = std.Build.InstallDir;
const LibExeObjStep = std.build.LibExeObjStep; const InstallArtifactStep = @This();
const InstallDir = std.build.InstallDir;
pub const base_id = .install_artifact; pub const base_id = .install_artifact;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
artifact: *LibExeObjStep, artifact: *CompileStep,
dest_dir: InstallDir, dest_dir: InstallDir,
pdb_dir: ?InstallDir, pdb_dir: ?InstallDir,
h_dir: ?InstallDir, h_dir: ?InstallDir,
const Self = @This(); pub fn create(builder: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
if (artifact.install_step) |s| return s; if (artifact.install_step) |s| return s;
const self = builder.allocator.create(Self) catch unreachable; const self = builder.allocator.create(InstallArtifactStep) catch @panic("OOM");
self.* = Self{ self.* = InstallArtifactStep{
.builder = builder, .builder = builder,
.step = Step.init(.install_artifact, builder.fmt("install {s}", .{artifact.step.name}), builder.allocator, make), .step = Step.init(.install_artifact, builder.fmt("install {s}", .{artifact.step.name}), builder.allocator, make),
.artifact = artifact, .artifact = artifact,
.dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) { .dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) {
.obj => @panic("Cannot install a .obj build artifact."), .obj => @panic("Cannot install a .obj build artifact."),
.@"test" => @panic("Cannot install a test build artifact, use addTestExe instead."), .@"test" => @panic("Cannot install a .test build artifact, use .test_exe instead."),
.exe, .test_exe => InstallDir{ .bin = {} }, .exe, .test_exe => InstallDir{ .bin = {} },
.lib => InstallDir{ .lib = {} }, .lib => InstallDir{ .lib = {} },
}, },
@ -64,13 +61,13 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
} }
fn make(step: *Step) !void { fn make(step: *Step) !void {
const self = @fieldParentPtr(Self, "step", step); const self = @fieldParentPtr(InstallArtifactStep, "step", step);
const builder = self.builder; const builder = self.builder;
const full_dest_path = builder.getInstallPath(self.dest_dir, self.artifact.out_filename); const full_dest_path = builder.getInstallPath(self.dest_dir, self.artifact.out_filename);
try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path); try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path);
if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) { if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) {
try LibExeObjStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?); try CompileStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
} }
if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and self.artifact.emit_implib != .no_emit) { if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and self.artifact.emit_implib != .no_emit) {
const full_implib_path = builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename); const full_implib_path = builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);


@ -1,19 +1,17 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const mem = std.mem; const mem = std.mem;
const fs = std.fs; const fs = std.fs;
const build = @import("../build.zig"); const Step = std.Build.Step;
const Step = build.Step; const InstallDir = std.Build.InstallDir;
const Builder = build.Builder;
const InstallDir = std.build.InstallDir;
const InstallDirStep = @This(); const InstallDirStep = @This();
const log = std.log; const log = std.log;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
options: Options, options: Options,
/// This is used by the build system when a file being installed comes from one /// This is used by the build system when a file being installed comes from one
/// package but is being installed by another. /// package but is being installed by another.
override_source_builder: ?*Builder = null, override_source_builder: ?*std.Build = null,
pub const base_id = .install_dir; pub const base_id = .install_dir;
@ -31,7 +29,7 @@ pub const Options = struct {
/// `@import("test.zig")` would be a compile error. /// `@import("test.zig")` would be a compile error.
blank_extensions: []const []const u8 = &.{}, blank_extensions: []const []const u8 = &.{},
fn dupe(self: Options, b: *Builder) Options { fn dupe(self: Options, b: *std.Build) Options {
return .{ return .{
.source_dir = b.dupe(self.source_dir), .source_dir = b.dupe(self.source_dir),
.install_dir = self.install_dir.dupe(b), .install_dir = self.install_dir.dupe(b),
@ -43,7 +41,7 @@ pub const Options = struct {
}; };
pub fn init( pub fn init(
builder: *Builder, builder: *std.Build,
options: Options, options: Options,
) InstallDirStep { ) InstallDirStep {
builder.pushInstalledFile(options.install_dir, options.install_subdir); builder.pushInstalledFile(options.install_dir, options.install_subdir);


@ -1,24 +1,22 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const build = @import("../build.zig"); const Step = std.Build.Step;
const Step = build.Step; const FileSource = std.Build.FileSource;
const Builder = build.Builder; const InstallDir = std.Build.InstallDir;
const FileSource = std.build.FileSource;
const InstallDir = std.build.InstallDir;
const InstallFileStep = @This(); const InstallFileStep = @This();
pub const base_id = .install_file; pub const base_id = .install_file;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
source: FileSource, source: FileSource,
dir: InstallDir, dir: InstallDir,
dest_rel_path: []const u8, dest_rel_path: []const u8,
/// This is used by the build system when a file being installed comes from one /// This is used by the build system when a file being installed comes from one
/// package but is being installed by another. /// package but is being installed by another.
override_source_builder: ?*Builder = null, override_source_builder: ?*std.Build = null,
pub fn init( pub fn init(
builder: *Builder, builder: *std.Build,
source: FileSource, source: FileSource,
dir: InstallDir, dir: InstallDir,
dest_rel_path: []const u8, dest_rel_path: []const u8,


@ -7,11 +7,10 @@ const InstallRawStep = @This();
const Allocator = std.mem.Allocator; const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator; const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayListUnmanaged = std.ArrayListUnmanaged; const ArrayListUnmanaged = std.ArrayListUnmanaged;
const Builder = std.build.Builder;
const File = std.fs.File; const File = std.fs.File;
const InstallDir = std.build.InstallDir; const InstallDir = std.Build.InstallDir;
const LibExeObjStep = std.build.LibExeObjStep; const CompileStep = std.Build.CompileStep;
const Step = std.build.Step; const Step = std.Build.Step;
const elf = std.elf; const elf = std.elf;
const fs = std.fs; const fs = std.fs;
const io = std.io; const io = std.io;
@ -25,12 +24,12 @@ pub const RawFormat = enum {
}; };
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
artifact: *LibExeObjStep, artifact: *CompileStep,
dest_dir: InstallDir, dest_dir: InstallDir,
dest_filename: []const u8, dest_filename: []const u8,
options: CreateOptions, options: CreateOptions,
output_file: std.build.GeneratedFile, output_file: std.Build.GeneratedFile,
pub const CreateOptions = struct { pub const CreateOptions = struct {
format: ?RawFormat = null, format: ?RawFormat = null,
@ -39,8 +38,13 @@ pub const CreateOptions = struct {
pad_to: ?u64 = null, pad_to: ?u64 = null,
}; };
pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8, options: CreateOptions) *InstallRawStep { pub fn create(
const self = builder.allocator.create(InstallRawStep) catch unreachable; builder: *std.Build,
artifact: *CompileStep,
dest_filename: []const u8,
options: CreateOptions,
) *InstallRawStep {
const self = builder.allocator.create(InstallRawStep) catch @panic("OOM");
self.* = InstallRawStep{ self.* = InstallRawStep{
.step = Step.init(.install_raw, builder.fmt("install raw binary {s}", .{artifact.step.name}), builder.allocator, make), .step = Step.init(.install_raw, builder.fmt("install raw binary {s}", .{artifact.step.name}), builder.allocator, make),
.builder = builder, .builder = builder,
@ -53,7 +57,7 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []cons
}, },
.dest_filename = dest_filename, .dest_filename = dest_filename,
.options = options, .options = options,
.output_file = std.build.GeneratedFile{ .step = &self.step }, .output_file = std.Build.GeneratedFile{ .step = &self.step },
}; };
self.step.dependOn(&artifact.step); self.step.dependOn(&artifact.step);
@ -61,8 +65,8 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []cons
return self; return self;
} }
pub fn getOutputSource(self: *const InstallRawStep) std.build.FileSource { pub fn getOutputSource(self: *const InstallRawStep) std.Build.FileSource {
return std.build.FileSource{ .generated = &self.output_file }; return std.Build.FileSource{ .generated = &self.output_file };
} }
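
A hypothetical usage sketch of the updated `create` signature (the artifact, output name, and padding value are invented; `pad_to` is one of the `CreateOptions` fields shown above):

    const raw = std.Build.InstallRawStep.create(b, exe, "firmware.bin", .{ .pad_to = 0x10000 });
    b.getInstallStep().dependOn(&raw.step);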
fn make(step: *Step) !void { fn make(step: *Step) !void {
@ -78,7 +82,7 @@ fn make(step: *Step) !void {
const full_dest_path = b.getInstallPath(self.dest_dir, self.dest_filename); const full_dest_path = b.getInstallPath(self.dest_dir, self.dest_filename);
self.output_file.path = full_dest_path; self.output_file.path = full_dest_path;
fs.cwd().makePath(b.getInstallPath(self.dest_dir, "")) catch unreachable; try fs.cwd().makePath(b.getInstallPath(self.dest_dir, ""));
var argv_list = std.ArrayList([]const u8).init(b.allocator); var argv_list = std.ArrayList([]const u8).init(b.allocator);
try argv_list.appendSlice(&.{ b.zig_exe, "objcopy" }); try argv_list.appendSlice(&.{ b.zig_exe, "objcopy" });


@ -1,17 +1,15 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const log = std.log; const log = std.log;
const build = @import("../build.zig"); const Step = std.Build.Step;
const Step = build.Step;
const Builder = build.Builder;
const LogStep = @This(); const LogStep = @This();
pub const base_id = .log; pub const base_id = .log;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
data: []const u8, data: []const u8,
pub fn init(builder: *Builder, data: []const u8) LogStep { pub fn init(builder: *std.Build, data: []const u8) LogStep {
return LogStep{ return LogStep{
.builder = builder, .builder = builder,
.step = Step.init(.log, builder.fmt("log {s}", .{data}), builder.allocator, make), .step = Step.init(.log, builder.fmt("log {s}", .{data}), builder.allocator, make),


@ -1,12 +1,10 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const builtin = @import("builtin"); const builtin = @import("builtin");
const build = std.build;
const fs = std.fs; const fs = std.fs;
const Step = build.Step; const Step = std.Build.Step;
const Builder = build.Builder; const GeneratedFile = std.Build.GeneratedFile;
const GeneratedFile = build.GeneratedFile; const CompileStep = std.Build.CompileStep;
const LibExeObjStep = build.LibExeObjStep; const FileSource = std.Build.FileSource;
const FileSource = build.FileSource;
const OptionsStep = @This(); const OptionsStep = @This();
@ -14,14 +12,14 @@ pub const base_id = .options;
step: Step, step: Step,
generated_file: GeneratedFile, generated_file: GeneratedFile,
builder: *Builder, builder: *std.Build,
contents: std.ArrayList(u8), contents: std.ArrayList(u8),
artifact_args: std.ArrayList(OptionArtifactArg), artifact_args: std.ArrayList(OptionArtifactArg),
file_source_args: std.ArrayList(OptionFileSourceArg), file_source_args: std.ArrayList(OptionFileSourceArg),
pub fn create(builder: *Builder) *OptionsStep { pub fn create(builder: *std.Build) *OptionsStep {
const self = builder.allocator.create(OptionsStep) catch unreachable; const self = builder.allocator.create(OptionsStep) catch @panic("OOM");
self.* = .{ self.* = .{
.builder = builder, .builder = builder,
.step = Step.init(.options, "options", builder.allocator, make), .step = Step.init(.options, "options", builder.allocator, make),
@ -36,44 +34,48 @@ pub fn create(builder: *Builder) *OptionsStep {
} }
pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value: T) void { pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value: T) void {
return addOptionFallible(self, T, name, value) catch @panic("unhandled error");
}
fn addOptionFallible(self: *OptionsStep, comptime T: type, name: []const u8, value: T) !void {
const out = self.contents.writer(); const out = self.contents.writer();
switch (T) { switch (T) {
[]const []const u8 => { []const []const u8 => {
out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{std.zig.fmtId(name)}) catch unreachable; try out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{std.zig.fmtId(name)});
for (value) |slice| { for (value) |slice| {
out.print(" \"{}\",\n", .{std.zig.fmtEscapes(slice)}) catch unreachable; try out.print(" \"{}\",\n", .{std.zig.fmtEscapes(slice)});
} }
out.writeAll("};\n") catch unreachable; try out.writeAll("};\n");
return; return;
}, },
[:0]const u8 => { [:0]const u8 => {
out.print("pub const {}: [:0]const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) }) catch unreachable; try out.print("pub const {}: [:0]const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) });
return; return;
}, },
[]const u8 => { []const u8 => {
out.print("pub const {}: []const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) }) catch unreachable; try out.print("pub const {}: []const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) });
return; return;
}, },
?[:0]const u8 => { ?[:0]const u8 => {
out.print("pub const {}: ?[:0]const u8 = ", .{std.zig.fmtId(name)}) catch unreachable; try out.print("pub const {}: ?[:0]const u8 = ", .{std.zig.fmtId(name)});
if (value) |payload| { if (value) |payload| {
out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)}) catch unreachable; try out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)});
} else { } else {
out.writeAll("null;\n") catch unreachable; try out.writeAll("null;\n");
} }
return; return;
}, },
?[]const u8 => { ?[]const u8 => {
out.print("pub const {}: ?[]const u8 = ", .{std.zig.fmtId(name)}) catch unreachable; try out.print("pub const {}: ?[]const u8 = ", .{std.zig.fmtId(name)});
if (value) |payload| { if (value) |payload| {
out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)}) catch unreachable; try out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)});
} else { } else {
out.writeAll("null;\n") catch unreachable; try out.writeAll("null;\n");
} }
return; return;
}, },
std.builtin.Version => { std.builtin.Version => {
out.print( try out.print(
\\pub const {}: @import("std").builtin.Version = .{{ \\pub const {}: @import("std").builtin.Version = .{{
\\ .major = {d}, \\ .major = {d},
\\ .minor = {d}, \\ .minor = {d},
@ -86,11 +88,11 @@ pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value:
value.major, value.major,
value.minor, value.minor,
value.patch, value.patch,
}) catch unreachable; });
return; return;
}, },
std.SemanticVersion => { std.SemanticVersion => {
out.print( try out.print(
\\pub const {}: @import("std").SemanticVersion = .{{ \\pub const {}: @import("std").SemanticVersion = .{{
\\ .major = {d}, \\ .major = {d},
\\ .minor = {d}, \\ .minor = {d},
@ -102,38 +104,38 @@ pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value:
value.major, value.major,
value.minor, value.minor,
value.patch, value.patch,
}) catch unreachable; });
if (value.pre) |some| { if (value.pre) |some| {
out.print(" .pre = \"{}\",\n", .{std.zig.fmtEscapes(some)}) catch unreachable; try out.print(" .pre = \"{}\",\n", .{std.zig.fmtEscapes(some)});
} }
if (value.build) |some| { if (value.build) |some| {
out.print(" .build = \"{}\",\n", .{std.zig.fmtEscapes(some)}) catch unreachable; try out.print(" .build = \"{}\",\n", .{std.zig.fmtEscapes(some)});
} }
out.writeAll("};\n") catch unreachable; try out.writeAll("};\n");
return; return;
}, },
else => {}, else => {},
} }
switch (@typeInfo(T)) { switch (@typeInfo(T)) {
.Enum => |enum_info| { .Enum => |enum_info| {
out.print("pub const {} = enum {{\n", .{std.zig.fmtId(@typeName(T))}) catch unreachable; try out.print("pub const {} = enum {{\n", .{std.zig.fmtId(@typeName(T))});
inline for (enum_info.fields) |field| { inline for (enum_info.fields) |field| {
out.print(" {},\n", .{std.zig.fmtId(field.name)}) catch unreachable; try out.print(" {},\n", .{std.zig.fmtId(field.name)});
} }
out.writeAll("};\n") catch unreachable; try out.writeAll("};\n");
out.print("pub const {}: {s} = {s}.{s};\n", .{ try out.print("pub const {}: {s} = {s}.{s};\n", .{
std.zig.fmtId(name), std.zig.fmtId(name),
std.zig.fmtId(@typeName(T)), std.zig.fmtId(@typeName(T)),
std.zig.fmtId(@typeName(T)), std.zig.fmtId(@typeName(T)),
std.zig.fmtId(@tagName(value)), std.zig.fmtId(@tagName(value)),
}) catch unreachable; });
return; return;
}, },
else => {}, else => {},
} }
out.print("pub const {}: {s} = ", .{ std.zig.fmtId(name), @typeName(T) }) catch unreachable; try out.print("pub const {}: {s} = ", .{ std.zig.fmtId(name), @typeName(T) });
printLiteral(out, value, 0) catch unreachable; try printLiteral(out, value, 0);
out.writeAll(";\n") catch unreachable; try out.writeAll(";\n");
} }
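
A hypothetical usage sketch tying the pieces above together (the option names and values are invented; `create`, `addOption`, and `getPackage` are from this file, while `addPackage` on the compile step is assumed from the existing API):

    const options = std.Build.OptionsStep.create(b);
    options.addOption(bool, "enable_tracing", true);
    options.addOption([]const u8, "release_channel", "nightly");
    options.addOption(std.SemanticVersion, "semver", .{ .major = 0, .minor = 11, .patch = 0 });
    exe.addPackage(options.getPackage("build_options"));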
// TODO: non-recursive? // TODO: non-recursive?
@ -191,18 +193,18 @@ pub fn addOptionFileSource(
self.file_source_args.append(.{ self.file_source_args.append(.{
.name = name, .name = name,
.source = source.dupe(self.builder), .source = source.dupe(self.builder),
}) catch unreachable; }) catch @panic("OOM");
source.addStepDependencies(&self.step); source.addStepDependencies(&self.step);
} }
/// The value is the path in the cache dir. /// The value is the path in the cache dir.
/// Adds a dependency automatically. /// Adds a dependency automatically.
pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *LibExeObjStep) void { pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *CompileStep) void {
self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch unreachable; self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step); self.step.dependOn(&artifact.step);
} }
pub fn getPackage(self: *OptionsStep, package_name: []const u8) build.Pkg { pub fn getPackage(self: *OptionsStep, package_name: []const u8) std.Build.Pkg {
return .{ .name = package_name, .source = self.getSource() }; return .{ .name = package_name, .source = self.getSource() };
} }
@ -268,7 +270,7 @@ fn hashContentsToFileName(self: *OptionsStep) [64]u8 {
const OptionArtifactArg = struct { const OptionArtifactArg = struct {
name: []const u8, name: []const u8,
artifact: *LibExeObjStep, artifact: *CompileStep,
}; };
const OptionFileSourceArg = struct { const OptionFileSourceArg = struct {
@ -281,12 +283,16 @@ test "OptionsStep" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator); var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit(); defer arena.deinit();
var builder = try Builder.create(
const host = try std.zig.system.NativeTargetInfo.detect(.{});
var builder = try std.Build.create(
arena.allocator(), arena.allocator(),
"test", "test",
"test", "test",
"test", "test",
"test", "test",
host,
); );
defer builder.destroy(); defer builder.destroy();
@ -361,5 +367,5 @@ test "OptionsStep" {
\\ \\
, options.contents.items); , options.contents.items);
_ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0)); _ = try std.zig.Ast.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0), .zig);
} }


@ -1,18 +1,16 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const log = std.log; const log = std.log;
const fs = std.fs; const fs = std.fs;
const build = @import("../build.zig"); const Step = std.Build.Step;
const Step = build.Step;
const Builder = build.Builder;
const RemoveDirStep = @This(); const RemoveDirStep = @This();
pub const base_id = .remove_dir; pub const base_id = .remove_dir;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
dir_path: []const u8, dir_path: []const u8,
pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep { pub fn init(builder: *std.Build, dir_path: []const u8) RemoveDirStep {
return RemoveDirStep{ return RemoveDirStep{
.builder = builder, .builder = builder,
.step = Step.init(.remove_dir, builder.fmt("RemoveDir {s}", .{dir_path}), builder.allocator, make), .step = Step.init(.remove_dir, builder.fmt("RemoveDir {s}", .{dir_path}), builder.allocator, make),


@ -1,17 +1,15 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const builtin = @import("builtin"); const builtin = @import("builtin");
const build = std.build; const Step = std.Build.Step;
const Step = build.Step; const CompileStep = std.Build.CompileStep;
const Builder = build.Builder; const WriteFileStep = std.Build.WriteFileStep;
const LibExeObjStep = build.LibExeObjStep;
const WriteFileStep = build.WriteFileStep;
const fs = std.fs; const fs = std.fs;
const mem = std.mem; const mem = std.mem;
const process = std.process; const process = std.process;
const ArrayList = std.ArrayList; const ArrayList = std.ArrayList;
const EnvMap = process.EnvMap; const EnvMap = process.EnvMap;
const Allocator = mem.Allocator; const Allocator = mem.Allocator;
const ExecError = build.Builder.ExecError; const ExecError = std.Build.ExecError;
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
@ -20,7 +18,7 @@ const RunStep = @This();
pub const base_id: Step.Id = .run; pub const base_id: Step.Id = .run;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
/// See also addArg and addArgs for modifying this directly /// See also addArg and addArgs for modifying this directly
argv: ArrayList(Arg), argv: ArrayList(Arg),
@ -50,13 +48,13 @@ pub const StdIoAction = union(enum) {
}; };
pub const Arg = union(enum) { pub const Arg = union(enum) {
artifact: *LibExeObjStep, artifact: *CompileStep,
file_source: build.FileSource, file_source: std.Build.FileSource,
bytes: []u8, bytes: []u8,
}; };
pub fn create(builder: *Builder, name: []const u8) *RunStep { pub fn create(builder: *std.Build, name: []const u8) *RunStep {
const self = builder.allocator.create(RunStep) catch unreachable; const self = builder.allocator.create(RunStep) catch @panic("OOM");
self.* = RunStep{ self.* = RunStep{
.builder = builder, .builder = builder,
.step = Step.init(base_id, name, builder.allocator, make), .step = Step.init(base_id, name, builder.allocator, make),
@ -68,20 +66,20 @@ pub fn create(builder: *Builder, name: []const u8) *RunStep {
return self; return self;
} }
pub fn addArtifactArg(self: *RunStep, artifact: *LibExeObjStep) void { pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
self.argv.append(Arg{ .artifact = artifact }) catch unreachable; self.argv.append(Arg{ .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step); self.step.dependOn(&artifact.step);
} }
pub fn addFileSourceArg(self: *RunStep, file_source: build.FileSource) void { pub fn addFileSourceArg(self: *RunStep, file_source: std.Build.FileSource) void {
self.argv.append(Arg{ self.argv.append(Arg{
.file_source = file_source.dupe(self.builder), .file_source = file_source.dupe(self.builder),
}) catch unreachable; }) catch @panic("OOM");
file_source.addStepDependencies(&self.step); file_source.addStepDependencies(&self.step);
} }
pub fn addArg(self: *RunStep, arg: []const u8) void { pub fn addArg(self: *RunStep, arg: []const u8) void {
self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch unreachable; self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch @panic("OOM");
} }
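
A hypothetical sketch of the argument helpers in this file (the step name, file path, and flag are invented):

    const run = std.Build.RunStep.create(b, "run my-tool");
    run.addArtifactArg(exe);
    run.addFileSourceArg(.{ .path = "data/input.txt" });
    run.addArg("--verbose");
    if (b.args) |args| run.addArgs(args);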
pub fn addArgs(self: *RunStep, args: []const []const u8) void { pub fn addArgs(self: *RunStep, args: []const []const u8) void {
@ -91,7 +89,7 @@ pub fn addArgs(self: *RunStep, args: []const []const u8) void {
} }
pub fn clearEnvironment(self: *RunStep) void { pub fn clearEnvironment(self: *RunStep) void {
const new_env_map = self.builder.allocator.create(EnvMap) catch unreachable; const new_env_map = self.builder.allocator.create(EnvMap) catch @panic("OOM");
new_env_map.* = EnvMap.init(self.builder.allocator); new_env_map.* = EnvMap.init(self.builder.allocator);
self.env_map = new_env_map; self.env_map = new_env_map;
} }
@ -101,7 +99,7 @@ pub fn addPathDir(self: *RunStep, search_path: []const u8) void {
} }
/// For internal use only, users of `RunStep` should use `addPathDir` directly. /// For internal use only, users of `RunStep` should use `addPathDir` directly.
pub fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u8) void { pub fn addPathDirInternal(step: *Step, builder: *std.Build, search_path: []const u8) void {
const env_map = getEnvMapInternal(step, builder.allocator); const env_map = getEnvMapInternal(step, builder.allocator);
const key = "PATH"; const key = "PATH";
@ -109,9 +107,9 @@ pub fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u
if (prev_path) |pp| { if (prev_path) |pp| {
const new_path = builder.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path }); const new_path = builder.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
env_map.put(key, new_path) catch unreachable; env_map.put(key, new_path) catch @panic("OOM");
} else { } else {
env_map.put(key, builder.dupePath(search_path)) catch unreachable; env_map.put(key, builder.dupePath(search_path)) catch @panic("OOM");
} }
} }
@ -122,12 +120,12 @@ pub fn getEnvMap(self: *RunStep) *EnvMap {
fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap { fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
const maybe_env_map = switch (step.id) { const maybe_env_map = switch (step.id) {
.run => step.cast(RunStep).?.env_map, .run => step.cast(RunStep).?.env_map,
.emulatable_run => step.cast(build.EmulatableRunStep).?.env_map, .emulatable_run => step.cast(std.Build.EmulatableRunStep).?.env_map,
else => unreachable, else => unreachable,
}; };
return maybe_env_map orelse { return maybe_env_map orelse {
const env_map = allocator.create(EnvMap) catch unreachable; const env_map = allocator.create(EnvMap) catch @panic("OOM");
env_map.* = process.getEnvMap(allocator) catch unreachable; env_map.* = process.getEnvMap(allocator) catch @panic("unhandled error");
switch (step.id) { switch (step.id) {
.run => step.cast(RunStep).?.env_map = env_map, .run => step.cast(RunStep).?.env_map = env_map,
.emulatable_run => step.cast(RunStep).?.env_map = env_map, .emulatable_run => step.cast(RunStep).?.env_map = env_map,
@ -142,7 +140,7 @@ pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8
env_map.put( env_map.put(
self.builder.dupe(key), self.builder.dupe(key),
self.builder.dupe(value), self.builder.dupe(value),
) catch unreachable; ) catch @panic("unhandled error");
} }
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void { pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
@ -195,7 +193,7 @@ fn make(step: *Step) !void {
pub fn runCommand( pub fn runCommand(
argv: []const []const u8, argv: []const []const u8,
builder: *Builder, builder: *std.Build,
expected_exit_code: ?u8, expected_exit_code: ?u8,
stdout_action: StdIoAction, stdout_action: StdIoAction,
stderr_action: StdIoAction, stderr_action: StdIoAction,
@ -236,7 +234,7 @@ pub fn runCommand(
switch (stdout_action) { switch (stdout_action) {
.expect_exact, .expect_matches => { .expect_exact, .expect_matches => {
stdout = child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size) catch unreachable; stdout = try child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
}, },
.inherit, .ignore => {}, .inherit, .ignore => {},
} }
@ -246,7 +244,7 @@ pub fn runCommand(
switch (stderr_action) { switch (stderr_action) {
.expect_exact, .expect_matches => { .expect_exact, .expect_matches => {
stderr = child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size) catch unreachable; stderr = try child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
}, },
.inherit, .ignore => {}, .inherit, .ignore => {},
} }
@ -357,13 +355,13 @@ fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
std.debug.print("\n", .{}); std.debug.print("\n", .{});
} }
fn addPathForDynLibs(self: *RunStep, artifact: *LibExeObjStep) void { fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void {
addPathForDynLibsInternal(&self.step, self.builder, artifact); addPathForDynLibsInternal(&self.step, self.builder, artifact);
} }
/// This should only be used for internal usage; it is called automatically /// This should only be used for internal usage; it is called automatically
/// for the user. /// for the user.
pub fn addPathForDynLibsInternal(step: *Step, builder: *Builder, artifact: *LibExeObjStep) void { pub fn addPathForDynLibsInternal(step: *Step, builder: *std.Build, artifact: *CompileStep) void {
for (artifact.link_objects.items) |link_object| { for (artifact.link_objects.items) |link_object| {
switch (link_object) { switch (link_object) {
.other_step => |other| { .other_step => |other| {
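
A rough build.zig sketch of the renamed API above; the executable name, source path, and extra argument are illustrative, and `b` is assumed to be the `*std.Build` handed to `build()`:

const std = @import("std");

pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = .{}, // native
        .optimize = .Debug,
    });

    // RunStep now takes *std.Build and *CompileStep instead of *Builder and *LibExeObjStep.
    const run = std.Build.RunStep.create(b, "run demo");
    run.addArtifactArg(exe); // also wires up the dependency on the compile step
    run.addArg("--verbose");

    b.step("run", "Build and run the demo").dependOn(&run.step);
}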

97
lib/std/Build/Step.zig Normal file
View File

@ -0,0 +1,97 @@
id: Id,
name: []const u8,
makeFn: *const fn (self: *Step) anyerror!void,
dependencies: std.ArrayList(*Step),
loop_flag: bool,
done_flag: bool,
pub const Id = enum {
top_level,
compile,
install_artifact,
install_file,
install_dir,
log,
remove_dir,
fmt,
translate_c,
write_file,
run,
emulatable_run,
check_file,
check_object,
config_header,
install_raw,
options,
custom,
pub fn Type(comptime id: Id) type {
return switch (id) {
.top_level => Build.TopLevelStep,
.compile => Build.CompileStep,
.install_artifact => Build.InstallArtifactStep,
.install_file => Build.InstallFileStep,
.install_dir => Build.InstallDirStep,
.log => Build.LogStep,
.remove_dir => Build.RemoveDirStep,
.fmt => Build.FmtStep,
.translate_c => Build.TranslateCStep,
.write_file => Build.WriteFileStep,
.run => Build.RunStep,
.emulatable_run => Build.EmulatableRunStep,
.check_file => Build.CheckFileStep,
.check_object => Build.CheckObjectStep,
.config_header => Build.ConfigHeaderStep,
.install_raw => Build.InstallRawStep,
.options => Build.OptionsStep,
.custom => @compileError("no type available for custom step"),
};
}
};
pub fn init(
id: Id,
name: []const u8,
allocator: Allocator,
makeFn: *const fn (self: *Step) anyerror!void,
) Step {
return Step{
.id = id,
.name = allocator.dupe(u8, name) catch @panic("OOM"),
.makeFn = makeFn,
.dependencies = std.ArrayList(*Step).init(allocator),
.loop_flag = false,
.done_flag = false,
};
}
pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
return init(id, name, allocator, makeNoOp);
}
pub fn make(self: *Step) !void {
if (self.done_flag) return;
try self.makeFn(self);
self.done_flag = true;
}
pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch @panic("OOM");
}
fn makeNoOp(self: *Step) anyerror!void {
_ = self;
}
pub fn cast(step: *Step, comptime T: type) ?*T {
if (step.id == T.base_id) {
return @fieldParentPtr(T, "step", step);
}
return null;
}
const Step = @This();
const std = @import("../std.zig");
const Build = std.Build;
const Allocator = std.mem.Allocator;
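
A minimal custom-step sketch against the Step API above; the struct name and message are invented, and `b` stands for a `*std.Build`:

const std = @import("std");
const Step = std.Build.Step;

const HelloStep = struct {
    // `base_id` lets Step.cast recover the concrete type.
    pub const base_id: Step.Id = .custom;

    step: Step,
    message: []const u8,

    pub fn create(b: *std.Build, message: []const u8) *HelloStep {
        const self = b.allocator.create(HelloStep) catch @panic("OOM");
        self.* = .{
            .step = Step.init(.custom, "hello", b.allocator, make),
            .message = message,
        };
        return self;
    }

    fn make(step: *Step) anyerror!void {
        // Same @fieldParentPtr pattern that Step.cast uses above.
        const self = @fieldParentPtr(HelloStep, "step", step);
        std.debug.print("{s}\n", .{self.message});
    }
};

A build script could then hook it in with, for example, b.getInstallStep().dependOn(&HelloStep.create(b, "hello").step).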

View File

@ -1,9 +1,7 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const build = std.build; const Step = std.Build.Step;
const Step = build.Step; const CompileStep = std.Build.CompileStep;
const Builder = build.Builder; const CheckFileStep = std.Build.CheckFileStep;
const LibExeObjStep = build.LibExeObjStep;
const CheckFileStep = build.CheckFileStep;
const fs = std.fs; const fs = std.fs;
const mem = std.mem; const mem = std.mem;
const CrossTarget = std.zig.CrossTarget; const CrossTarget = std.zig.CrossTarget;
@ -13,17 +11,25 @@ const TranslateCStep = @This();
pub const base_id = .translate_c; pub const base_id = .translate_c;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
source: build.FileSource, source: std.Build.FileSource,
include_dirs: std.ArrayList([]const u8), include_dirs: std.ArrayList([]const u8),
c_macros: std.ArrayList([]const u8), c_macros: std.ArrayList([]const u8),
output_dir: ?[]const u8, output_dir: ?[]const u8,
out_basename: []const u8, out_basename: []const u8,
target: CrossTarget = CrossTarget{}, target: CrossTarget,
output_file: build.GeneratedFile, optimize: std.builtin.OptimizeMode,
output_file: std.Build.GeneratedFile,
pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep { pub const Options = struct {
const self = builder.allocator.create(TranslateCStep) catch unreachable; source_file: std.Build.FileSource,
target: CrossTarget,
optimize: std.builtin.OptimizeMode,
};
pub fn create(builder: *std.Build, options: Options) *TranslateCStep {
const self = builder.allocator.create(TranslateCStep) catch @panic("OOM");
const source = options.source_file.dupe(builder);
self.* = TranslateCStep{ self.* = TranslateCStep{
.step = Step.init(.translate_c, "translate-c", builder.allocator, make), .step = Step.init(.translate_c, "translate-c", builder.allocator, make),
.builder = builder, .builder = builder,
@ -32,23 +38,36 @@ pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep {
.c_macros = std.ArrayList([]const u8).init(builder.allocator), .c_macros = std.ArrayList([]const u8).init(builder.allocator),
.output_dir = null, .output_dir = null,
.out_basename = undefined, .out_basename = undefined,
.output_file = build.GeneratedFile{ .step = &self.step }, .target = options.target,
.optimize = options.optimize,
.output_file = std.Build.GeneratedFile{ .step = &self.step },
}; };
source.addStepDependencies(&self.step); source.addStepDependencies(&self.step);
return self; return self;
} }
pub fn setTarget(self: *TranslateCStep, target: CrossTarget) void { pub const AddExecutableOptions = struct {
self.target = target; name: ?[]const u8 = null,
} version: ?std.builtin.Version = null,
target: ?CrossTarget = null,
optimize: ?std.builtin.Mode = null,
linkage: ?CompileStep.Linkage = null,
};
/// Creates a step to build an executable from the translated source. /// Creates a step to build an executable from the translated source.
pub fn addExecutable(self: *TranslateCStep) *LibExeObjStep { pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *CompileStep {
return self.builder.addExecutableSource("translated_c", build.FileSource{ .generated = &self.output_file }); return self.builder.addExecutable(.{
.root_source_file = .{ .generated = &self.output_file },
.name = options.name orelse "translated_c",
.version = options.version,
.target = options.target orelse self.target,
.optimize = options.optimize orelse self.optimize,
.linkage = options.linkage,
});
} }
pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void { pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void {
self.include_dirs.append(self.builder.dupePath(include_dir)) catch unreachable; self.include_dirs.append(self.builder.dupePath(include_dir)) catch @panic("OOM");
} }
pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep { pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep {
@ -58,13 +77,13 @@ pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8)
/// If the value is omitted, it is set to 1. /// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call. /// `name` and `value` need not live longer than the function call.
pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void { pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void {
const macro = build.constructCMacro(self.builder.allocator, name, value); const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
self.c_macros.append(macro) catch unreachable; self.c_macros.append(macro) catch @panic("OOM");
} }
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1. /// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void { pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void {
self.c_macros.append(self.builder.dupe(name_and_value)) catch unreachable; self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
} }
fn make(step: *Step) !void { fn make(step: *Step) !void {
@ -82,6 +101,11 @@ fn make(step: *Step) !void {
try argv_list.append(try self.target.zigTriple(self.builder.allocator)); try argv_list.append(try self.target.zigTriple(self.builder.allocator));
} }
switch (self.optimize) {
.Debug => {}, // Skip since it's the default.
else => try argv_list.append(self.builder.fmt("-O{s}", .{@tagName(self.optimize)})),
}
for (self.include_dirs.items) |include_dir| { for (self.include_dirs.items) |include_dir| {
try argv_list.append("-I"); try argv_list.append("-I");
try argv_list.append(include_dir); try argv_list.append(include_dir);
@ -105,8 +129,8 @@ fn make(step: *Step) !void {
self.output_dir = fs.path.dirname(output_path).?; self.output_dir = fs.path.dirname(output_path).?;
} }
self.output_file.path = fs.path.join( self.output_file.path = try fs.path.join(
self.builder.allocator, self.builder.allocator,
&[_][]const u8{ self.output_dir.?, self.out_basename }, &[_][]const u8{ self.output_dir.?, self.out_basename },
) catch unreachable; );
} }

View File

@ -1,7 +1,5 @@
const std = @import("../std.zig"); const std = @import("../std.zig");
const build = @import("../build.zig"); const Step = std.Build.Step;
const Step = build.Step;
const Builder = build.Builder;
const fs = std.fs; const fs = std.fs;
const ArrayList = std.ArrayList; const ArrayList = std.ArrayList;
@ -10,17 +8,17 @@ const WriteFileStep = @This();
pub const base_id = .write_file; pub const base_id = .write_file;
step: Step, step: Step,
builder: *Builder, builder: *std.Build,
output_dir: []const u8, output_dir: []const u8,
files: std.TailQueue(File), files: std.TailQueue(File),
pub const File = struct { pub const File = struct {
source: build.GeneratedFile, source: std.Build.GeneratedFile,
basename: []const u8, basename: []const u8,
bytes: []const u8, bytes: []const u8,
}; };
pub fn init(builder: *Builder) WriteFileStep { pub fn init(builder: *std.Build) WriteFileStep {
return WriteFileStep{ return WriteFileStep{
.builder = builder, .builder = builder,
.step = Step.init(.write_file, "writefile", builder.allocator, make), .step = Step.init(.write_file, "writefile", builder.allocator, make),
@ -30,10 +28,10 @@ pub fn init(builder: *Builder) WriteFileStep {
} }
pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void { pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void {
const node = self.builder.allocator.create(std.TailQueue(File).Node) catch unreachable; const node = self.builder.allocator.create(std.TailQueue(File).Node) catch @panic("unhandled error");
node.* = .{ node.* = .{
.data = .{ .data = .{
.source = build.GeneratedFile{ .step = &self.step }, .source = std.Build.GeneratedFile{ .step = &self.step },
.basename = self.builder.dupePath(basename), .basename = self.builder.dupePath(basename),
.bytes = self.builder.dupe(bytes), .bytes = self.builder.dupe(bytes),
}, },
@ -43,11 +41,11 @@ pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void {
} }
/// Gets a file source for the given basename. If the file does not exist, returns `null`. /// Gets a file source for the given basename. If the file does not exist, returns `null`.
pub fn getFileSource(step: *WriteFileStep, basename: []const u8) ?build.FileSource { pub fn getFileSource(step: *WriteFileStep, basename: []const u8) ?std.Build.FileSource {
var it = step.files.first; var it = step.files.first;
while (it) |node| : (it = node.next) { while (it) |node| : (it = node.next) {
if (std.mem.eql(u8, node.data.basename, basename)) if (std.mem.eql(u8, node.data.basename, basename))
return build.FileSource{ .generated = &node.data.source }; return std.Build.FileSource{ .generated = &node.data.source };
} }
return null; return null;
} }
@ -108,10 +106,10 @@ fn make(step: *Step) !void {
}); });
return err; return err;
}; };
node.data.source.path = fs.path.join( node.data.source.path = try fs.path.join(
self.builder.allocator, self.builder.allocator,
&[_][]const u8{ self.output_dir, node.data.basename }, &[_][]const u8{ self.output_dir, node.data.basename },
) catch unreachable; );
} }
} }
} }
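
A short sketch of getFileSource feeding a generated file into a RunStep; `b.addWriteFile` and the `tool` compile step are assumed to exist in the surrounding build script:

const wf = b.addWriteFile("input.txt", "hello\n");
const run = std.Build.RunStep.create(b, "consume generated file");
run.addArtifactArg(tool); // `tool` is some *CompileStep defined elsewhere
// getFileSource returns a std.Build.FileSource; addFileSourceArg wires up the step dependency.
run.addFileSourceArg(wf.getFileSource("input.txt").?);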

View File

@ -166,7 +166,7 @@ pub const GetNameError = error{
pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]const u8 { pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]const u8 {
buffer_ptr[max_name_len] = 0; buffer_ptr[max_name_len] = 0;
var buffer = std.mem.span(buffer_ptr); var buffer: [:0]u8 = buffer_ptr;
switch (target.os.tag) { switch (target.os.tag) {
.linux => if (use_pthreads and is_gnu) { .linux => if (use_pthreads and is_gnu) {

View File

@ -1145,7 +1145,8 @@ pub fn ArrayHashMapUnmanaged(
} }
/// Create a copy of the hash map which can be modified separately. /// Create a copy of the hash map which can be modified separately.
/// The copy uses the same context and allocator as this instance. /// The copy uses the same context as this instance, but is allocated
/// with the provided allocator.
pub fn clone(self: Self, allocator: Allocator) !Self { pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(ByIndexContext) != 0) if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead."); @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
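
In practice this means the copy can live on a different allocator than the original; a two-line fragment, assuming `map` is an unmanaged array hash map and `arena` is an ArenaAllocator:

var copy = try map.clone(arena.allocator());
defer copy.deinit(arena.allocator()); // released with the clone's allocator, not the original's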

View File

@ -29,7 +29,11 @@ pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
} }
/// View the internal array as a slice whose size was previously set. /// View the internal array as a slice whose size was previously set.
pub fn slice(self: anytype) mem.Span(@TypeOf(&self.buffer)) { pub fn slice(self: anytype) switch (@TypeOf(&self.buffer)) {
*[buffer_capacity]T => []T,
*const [buffer_capacity]T => []const T,
else => unreachable,
} {
return self.buffer[0..self.len]; return self.buffer[0..self.len];
} }
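
The effect is that slice() now follows the constness of the pointer it is called through; a test-style sketch using the existing init/appendSlice API:

const std = @import("std");

test "BoundedArray.slice constness follows the instance" {
    var a = try std.BoundedArray(u8, 8).init(0);
    try a.appendSlice("abc");

    const p: *std.BoundedArray(u8, 8) = &a;
    const m: []u8 = p.slice(); // buffer reached through *[8]u8, so the slice is mutable
    m[0] = 'A';

    const cp: *const std.BoundedArray(u8, 8) = &a;
    const c: []const u8 = cp.slice(); // reached through *const [8]u8, so the slice is const
    try std.testing.expectEqualStrings("Abc", c);
}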

File diff suppressed because it is too large.

View File

@ -131,13 +131,16 @@ pub const CodeModel = enum {
/// This data structure is used by the Zig language code generation and /// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation. /// therefore must be kept in sync with the compiler implementation.
pub const Mode = enum { pub const OptimizeMode = enum {
Debug, Debug,
ReleaseSafe, ReleaseSafe,
ReleaseFast, ReleaseFast,
ReleaseSmall, ReleaseSmall,
}; };
/// Deprecated; use OptimizeMode.
pub const Mode = OptimizeMode;
/// This data structure is used by the Zig language code generation and /// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation. /// therefore must be kept in sync with the compiler implementation.
pub const CallingConvention = enum { pub const CallingConvention = enum {

View File

@ -90,6 +90,8 @@ pub usingnamespace switch (builtin.os.tag) {
pub extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *c.Stat) c_int; pub extern "c" fn stat(noalias path: [*:0]const u8, noalias buf: *c.Stat) c_int;
pub extern "c" fn alarm(seconds: c_uint) c_uint; pub extern "c" fn alarm(seconds: c_uint) c_uint;
pub extern "c" fn msync(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
}, },
}; };
@ -145,7 +147,6 @@ pub extern "c" fn write(fd: c.fd_t, buf: [*]const u8, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: c.fd_t, buf: [*]const u8, nbyte: usize, offset: c.off_t) isize; pub extern "c" fn pwrite(fd: c.fd_t, buf: [*]const u8, nbyte: usize, offset: c.off_t) isize;
pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: c.fd_t, offset: c.off_t) *anyopaque; pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: c.fd_t, offset: c.off_t) *anyopaque;
pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int; pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
pub extern "c" fn msync(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int; pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: c_int) c_int; pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8, flags: c_int) c_int;
pub extern "c" fn linkat(oldfd: c.fd_t, oldpath: [*:0]const u8, newfd: c.fd_t, newpath: [*:0]const u8, flags: c_int) c_int; pub extern "c" fn linkat(oldfd: c.fd_t, oldpath: [*:0]const u8, newfd: c.fd_t, newpath: [*:0]const u8, flags: c_int) c_int;

View File

@ -59,6 +59,9 @@ pub const sched_yield = __libc_thr_yield;
pub extern "c" fn posix_memalign(memptr: *?*anyopaque, alignment: usize, size: usize) c_int; pub extern "c" fn posix_memalign(memptr: *?*anyopaque, alignment: usize, size: usize) c_int;
pub extern "c" fn __msync13(addr: *align(std.mem.page_size) const anyopaque, len: usize, flags: c_int) c_int;
pub const msync = __msync13;
pub const pthread_mutex_t = extern struct { pub const pthread_mutex_t = extern struct {
magic: u32 = 0x33330003, magic: u32 = 0x33330003,
errorcheck: padded_pthread_spin_t = 0, errorcheck: padded_pthread_spin_t = 0,

View File

@ -1164,7 +1164,7 @@ fn windowsCreateProcessPathExt(
var app_name_unicode_string = windows.UNICODE_STRING{ var app_name_unicode_string = windows.UNICODE_STRING{
.Length = app_name_len_bytes, .Length = app_name_len_bytes,
.MaximumLength = app_name_len_bytes, .MaximumLength = app_name_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(app_name_wildcard.ptr)), .Buffer = @qualCast([*:0]u16, app_name_wildcard.ptr),
}; };
const rc = windows.ntdll.NtQueryDirectoryFile( const rc = windows.ntdll.NtQueryDirectoryFile(
dir.fd, dir.fd,
@ -1261,7 +1261,7 @@ fn windowsCreateProcessPathExt(
var app_name_unicode_string = windows.UNICODE_STRING{ var app_name_unicode_string = windows.UNICODE_STRING{
.Length = app_name_len_bytes, .Length = app_name_len_bytes,
.MaximumLength = app_name_len_bytes, .MaximumLength = app_name_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(app_name_appended.ptr)), .Buffer = @qualCast([*:0]u16, app_name_appended.ptr),
}; };
// Re-use the directory handle but this time we call with the appended app name // Re-use the directory handle but this time we call with the appended app name

View File

@ -28,7 +28,6 @@ test "cstr fns" {
fn testCStrFnsImpl() !void { fn testCStrFnsImpl() !void {
try testing.expect(cmp("aoeu", "aoez") == -1); try testing.expect(cmp("aoeu", "aoez") == -1);
try testing.expect(mem.len("123456789") == 9);
} }
/// Returns a mutable, null-terminated slice with the same length as `slice`. /// Returns a mutable, null-terminated slice with the same length as `slice`.

View File

@ -2060,6 +2060,11 @@ pub fn dumpStackPointerAddr(prefix: []const u8) void {
test "manage resources correctly" { test "manage resources correctly" {
if (builtin.os.tag == .wasi) return error.SkipZigTest; if (builtin.os.tag == .wasi) return error.SkipZigTest;
if (builtin.os.tag == .windows and builtin.cpu.arch == .x86_64) {
// https://github.com/ziglang/zig/issues/13963
return error.SkipZigTest;
}
const writer = std.io.null_writer; const writer = std.io.null_writer;
var di = try openSelfDebugInfo(testing.allocator); var di = try openSelfDebugInfo(testing.allocator);
defer di.deinit(); defer di.deinit();

View File

@ -1,11 +1,12 @@
const std = @import("std.zig"); const std = @import("std.zig");
const builtin = @import("builtin");
const io = std.io; const io = std.io;
const math = std.math; const math = std.math;
const assert = std.debug.assert; const assert = std.debug.assert;
const mem = std.mem; const mem = std.mem;
const unicode = std.unicode; const unicode = std.unicode;
const meta = std.meta; const meta = std.meta;
const builtin = @import("builtin");
const errol = @import("fmt/errol.zig"); const errol = @import("fmt/errol.zig");
const lossyCast = std.math.lossyCast; const lossyCast = std.math.lossyCast;
const expectFmt = std.testing.expectFmt; const expectFmt = std.testing.expectFmt;
@ -190,7 +191,7 @@ pub fn format(
.precision = precision, .precision = precision,
}, },
writer, writer,
default_max_depth, std.options.fmt_max_depth,
); );
} }
@ -2140,15 +2141,15 @@ test "buffer" {
{ {
var buf1: [32]u8 = undefined; var buf1: [32]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf1); var fbs = std.io.fixedBufferStream(&buf1);
try formatType(1234, "", FormatOptions{}, fbs.writer(), default_max_depth); try formatType(1234, "", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1234")); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1234"));
fbs.reset(); fbs.reset();
try formatType('a', "c", FormatOptions{}, fbs.writer(), default_max_depth); try formatType('a', "c", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "a")); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "a"));
fbs.reset(); fbs.reset();
try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), default_max_depth); try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1100")); try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1100"));
} }
} }

View File

@ -834,7 +834,7 @@ pub const IterableDir = struct {
self.end_index = self.index; // Force fd_readdir in the next loop. self.end_index = self.index; // Force fd_readdir in the next loop.
continue :start_over; continue :start_over;
} }
const name = mem.span(self.buf[name_index .. name_index + entry.d_namlen]); const name = self.buf[name_index .. name_index + entry.d_namlen];
const next_index = name_index + entry.d_namlen; const next_index = name_index + entry.d_namlen;
self.index = next_index; self.index = next_index;
@ -1763,7 +1763,7 @@ pub const Dir = struct {
var nt_name = w.UNICODE_STRING{ var nt_name = w.UNICODE_STRING{
.Length = path_len_bytes, .Length = path_len_bytes,
.MaximumLength = path_len_bytes, .MaximumLength = path_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)), .Buffer = @qualCast([*:0]u16, sub_path_w),
}; };
var attr = w.OBJECT_ATTRIBUTES{ var attr = w.OBJECT_ATTRIBUTES{
.Length = @sizeOf(w.OBJECT_ATTRIBUTES), .Length = @sizeOf(w.OBJECT_ATTRIBUTES),

View File

@ -179,7 +179,7 @@ pub const File = struct {
lock_nonblocking: bool = false, lock_nonblocking: bool = false,
/// For POSIX systems this is the file system mode the file will /// For POSIX systems this is the file system mode the file will
/// be created with. /// be created with. On other systems this is always 0.
mode: Mode = default_mode, mode: Mode = default_mode,
/// Setting this to `.blocking` prevents `O.NONBLOCK` from being passed even /// Setting this to `.blocking` prevents `O.NONBLOCK` from being passed even
@ -307,6 +307,7 @@ pub const File = struct {
/// is unique to each filesystem. /// is unique to each filesystem.
inode: INode, inode: INode,
size: u64, size: u64,
/// This is available on POSIX systems and is always 0 otherwise.
mode: Mode, mode: Mode,
kind: Kind, kind: Kind,

View File

@ -113,14 +113,27 @@ pub fn FixedBufferStream(comptime Buffer: type) type {
}; };
} }
pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(NonSentinelSpan(@TypeOf(buffer))) { pub fn fixedBufferStream(buffer: anytype) FixedBufferStream(Slice(@TypeOf(buffer))) {
return .{ .buffer = mem.span(buffer), .pos = 0 }; return .{ .buffer = buffer, .pos = 0 };
} }
fn NonSentinelSpan(comptime T: type) type { fn Slice(comptime T: type) type {
var ptr_info = @typeInfo(mem.Span(T)).Pointer; switch (@typeInfo(T)) {
ptr_info.sentinel = null; .Pointer => |ptr_info| {
return @Type(.{ .Pointer = ptr_info }); var new_ptr_info = ptr_info;
switch (ptr_info.size) {
.Slice => {},
.One => switch (@typeInfo(ptr_info.child)) {
.Array => |info| new_ptr_info.child = info.child,
else => @compileError("invalid type given to fixedBufferStream"),
},
else => @compileError("invalid type given to fixedBufferStream"),
}
new_ptr_info.size = .Slice;
return @Type(.{ .Pointer = new_ptr_info });
},
else => @compileError("invalid type given to fixedBufferStream"),
}
} }
test "FixedBufferStream output" { test "FixedBufferStream output" {

View File

@ -1384,7 +1384,7 @@ fn ParseInternalErrorImpl(comptime T: type, comptime inferred_types: []const typ
return errors; return errors;
}, },
.Array => |arrayInfo| { .Array => |arrayInfo| {
return error{ UnexpectedEndOfJson, UnexpectedToken } || TokenStream.Error || return error{ UnexpectedEndOfJson, UnexpectedToken, LengthMismatch } || TokenStream.Error ||
UnescapeValidStringError || UnescapeValidStringError ||
ParseInternalErrorImpl(arrayInfo.child, inferred_types ++ [_]type{T}); ParseInternalErrorImpl(arrayInfo.child, inferred_types ++ [_]type{T});
}, },
@ -1625,6 +1625,7 @@ fn parseInternal(
if (arrayInfo.child != u8) return error.UnexpectedToken; if (arrayInfo.child != u8) return error.UnexpectedToken;
var r: T = undefined; var r: T = undefined;
const source_slice = stringToken.slice(tokens.slice, tokens.i - 1); const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
if (r.len != stringToken.decodedLength()) return error.LengthMismatch;
switch (stringToken.escapes) { switch (stringToken.escapes) {
.None => mem.copy(u8, &r, source_slice), .None => mem.copy(u8, &r, source_slice),
.Some => try unescapeValidString(&r, source_slice), .Some => try unescapeValidString(&r, source_slice),

View File

@ -2238,6 +2238,12 @@ test "parse into struct with no fields" {
try testing.expectEqual(T{}, try parse(T, &ts, ParseOptions{})); try testing.expectEqual(T{}, try parse(T, &ts, ParseOptions{}));
} }
test "parse into struct where destination and source lengths mismatch" {
const T = struct { a: [2]u8 };
var ts = TokenStream.init("{\"a\": \"bbb\"}");
try testing.expectError(error.LengthMismatch, parse(T, &ts, ParseOptions{}));
}
test "parse into struct with misc fields" { test "parse into struct with misc fields" {
@setEvalBranchQuota(10000); @setEvalBranchQuota(10000);
const options = ParseOptions{ .allocator = testing.allocator }; const options = ParseOptions{ .allocator = testing.allocator };

View File

@ -636,12 +636,9 @@ test "indexOfDiff" {
try testing.expectEqual(indexOfDiff(u8, "xne", "one"), 0); try testing.expectEqual(indexOfDiff(u8, "xne", "one"), 0);
} }
/// Takes a pointer to an array, a sentinel-terminated pointer, or a slice, and /// Takes a sentinel-terminated pointer and returns a slice preserving pointer attributes.
/// returns a slice. If there is a sentinel on the input type, there will be a /// `[*c]` pointers are assumed to be 0-terminated and assumed to not be allowzero.
/// sentinel on the output type. The constness of the output type matches fn Span(comptime T: type) type {
/// the constness of the input type. `[*c]` pointers are assumed to be 0-terminated,
/// and assumed to not allow null.
pub fn Span(comptime T: type) type {
switch (@typeInfo(T)) { switch (@typeInfo(T)) {
.Optional => |optional_info| { .Optional => |optional_info| {
return ?Span(optional_info.child); return ?Span(optional_info.child);
@ -649,39 +646,22 @@ pub fn Span(comptime T: type) type {
.Pointer => |ptr_info| { .Pointer => |ptr_info| {
var new_ptr_info = ptr_info; var new_ptr_info = ptr_info;
switch (ptr_info.size) { switch (ptr_info.size) {
.One => switch (@typeInfo(ptr_info.child)) {
.Array => |info| {
new_ptr_info.child = info.child;
new_ptr_info.sentinel = info.sentinel;
},
else => @compileError("invalid type given to std.mem.Span"),
},
.C => { .C => {
new_ptr_info.sentinel = &@as(ptr_info.child, 0); new_ptr_info.sentinel = &@as(ptr_info.child, 0);
new_ptr_info.is_allowzero = false; new_ptr_info.is_allowzero = false;
}, },
.Many, .Slice => {}, .Many => if (ptr_info.sentinel == null) @compileError("invalid type given to std.mem.span: " ++ @typeName(T)),
.One, .Slice => @compileError("invalid type given to std.mem.span: " ++ @typeName(T)),
} }
new_ptr_info.size = .Slice; new_ptr_info.size = .Slice;
return @Type(.{ .Pointer = new_ptr_info }); return @Type(.{ .Pointer = new_ptr_info });
}, },
else => @compileError("invalid type given to std.mem.Span"), else => {},
} }
@compileError("invalid type given to std.mem.span: " ++ @typeName(T));
} }
test "Span" { test "Span" {
try testing.expect(Span(*[5]u16) == []u16);
try testing.expect(Span(?*[5]u16) == ?[]u16);
try testing.expect(Span(*const [5]u16) == []const u16);
try testing.expect(Span(?*const [5]u16) == ?[]const u16);
try testing.expect(Span([]u16) == []u16);
try testing.expect(Span(?[]u16) == ?[]u16);
try testing.expect(Span([]const u8) == []const u8);
try testing.expect(Span(?[]const u8) == ?[]const u8);
try testing.expect(Span([:1]u16) == [:1]u16);
try testing.expect(Span(?[:1]u16) == ?[:1]u16);
try testing.expect(Span([:1]const u8) == [:1]const u8);
try testing.expect(Span(?[:1]const u8) == ?[:1]const u8);
try testing.expect(Span([*:1]u16) == [:1]u16); try testing.expect(Span([*:1]u16) == [:1]u16);
try testing.expect(Span(?[*:1]u16) == ?[:1]u16); try testing.expect(Span(?[*:1]u16) == ?[:1]u16);
try testing.expect(Span([*:1]const u8) == [:1]const u8); try testing.expect(Span([*:1]const u8) == [:1]const u8);
@ -692,13 +672,10 @@ test "Span" {
try testing.expect(Span(?[*c]const u8) == ?[:0]const u8); try testing.expect(Span(?[*c]const u8) == ?[:0]const u8);
} }
/// Takes a pointer to an array, a sentinel-terminated pointer, or a slice, and /// Takes a sentinel-terminated pointer and returns a slice, iterating over the
/// returns a slice. If there is a sentinel on the input type, there will be a /// memory to find the sentinel and determine the length.
/// sentinel on the output type. The constness of the output type matches /// Pointer attributes such as const are preserved.
/// the constness of the input type. /// `[*c]` pointers are assumed to be non-null and 0-terminated.
///
/// When there is both a sentinel and an array length or slice length, the
/// length value is used instead of the sentinel.
pub fn span(ptr: anytype) Span(@TypeOf(ptr)) { pub fn span(ptr: anytype) Span(@TypeOf(ptr)) {
if (@typeInfo(@TypeOf(ptr)) == .Optional) { if (@typeInfo(@TypeOf(ptr)) == .Optional) {
if (ptr) |non_null| { if (ptr) |non_null| {
@ -722,7 +699,6 @@ test "span" {
var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 };
const ptr = @as([*:3]u16, array[0..2 :3]); const ptr = @as([*:3]u16, array[0..2 :3]);
try testing.expect(eql(u16, span(ptr), &[_]u16{ 1, 2 })); try testing.expect(eql(u16, span(ptr), &[_]u16{ 1, 2 }));
try testing.expect(eql(u16, span(&array), &[_]u16{ 1, 2, 3, 4, 5 }));
try testing.expectEqual(@as(?[:0]u16, null), span(@as(?[*:0]u16, null))); try testing.expectEqual(@as(?[:0]u16, null), span(@as(?[*:0]u16, null)));
} }
@ -919,22 +895,15 @@ test "lenSliceTo" {
} }
} }
/// Takes a pointer to an array, an array, a vector, a sentinel-terminated pointer, /// Takes a sentinel-terminated pointer and iterates over the memory to find the
/// a slice or a tuple, and returns the length. /// sentinel and determine the length.
/// In the case of a sentinel-terminated array, it uses the array length. /// `[*c]` pointers are assumed to be non-null and 0-terminated.
/// For C pointers it assumes it is a pointer-to-many with a 0 sentinel.
pub fn len(value: anytype) usize { pub fn len(value: anytype) usize {
return switch (@typeInfo(@TypeOf(value))) { switch (@typeInfo(@TypeOf(value))) {
.Array => |info| info.len,
.Vector => |info| info.len,
.Pointer => |info| switch (info.size) { .Pointer => |info| switch (info.size) {
.One => switch (@typeInfo(info.child)) {
.Array => value.len,
else => @compileError("invalid type given to std.mem.len"),
},
.Many => { .Many => {
const sentinel_ptr = info.sentinel orelse const sentinel_ptr = info.sentinel orelse
@compileError("length of pointer with no sentinel"); @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value)));
const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*; const sentinel = @ptrCast(*align(1) const info.child, sentinel_ptr).*;
return indexOfSentinel(info.child, sentinel, value); return indexOfSentinel(info.child, sentinel, value);
}, },
@ -942,41 +911,18 @@ pub fn len(value: anytype) usize {
assert(value != null); assert(value != null);
return indexOfSentinel(info.child, 0, value); return indexOfSentinel(info.child, 0, value);
}, },
.Slice => value.len, else => @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value))),
}, },
.Struct => |info| if (info.is_tuple) { else => @compileError("invalid type given to std.mem.len: " ++ @typeName(@TypeOf(value))),
return info.fields.len; }
} else @compileError("invalid type given to std.mem.len"),
else => @compileError("invalid type given to std.mem.len"),
};
} }
test "len" { test "len" {
try testing.expect(len("aoeu") == 4); var array: [5]u16 = [_]u16{ 1, 2, 0, 4, 5 };
const ptr = @as([*:4]u16, array[0..3 :4]);
{ try testing.expect(len(ptr) == 3);
var array: [5]u16 = [_]u16{ 1, 2, 3, 4, 5 }; const c_ptr = @as([*c]u16, ptr);
try testing.expect(len(&array) == 5); try testing.expect(len(c_ptr) == 2);
try testing.expect(len(array[0..3]) == 3);
array[2] = 0;
const ptr = @as([*:0]u16, array[0..2 :0]);
try testing.expect(len(ptr) == 2);
}
{
var array: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 };
try testing.expect(len(&array) == 5);
array[2] = 0;
try testing.expect(len(&array) == 5);
}
{
const vector: meta.Vector(2, u32) = [2]u32{ 1, 2 };
try testing.expect(len(vector) == 2);
}
{
const tuple = .{ 1, 2 };
try testing.expect(len(tuple) == 2);
try testing.expect(tuple[0] == 1);
}
} }
pub fn indexOfSentinel(comptime Elem: type, comptime sentinel: Elem, ptr: [*:sentinel]const Elem) usize { pub fn indexOfSentinel(comptime Elem: type, comptime sentinel: Elem, ptr: [*:sentinel]const Elem) usize {
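
To summarize the narrowed contract, a test-style sketch; arrays, slices, vectors, and tuples must now use coercion or .len directly instead of span()/len():

const std = @import("std");

test "span and len only accept sentinel-terminated pointers" {
    var array = [_:0]u8{ 'h', 'i' };
    const many: [*:0]const u8 = &array;

    // Still supported: sentinel-terminated (and [*c]) pointers.
    const s: [:0]const u8 = std.mem.span(many);
    try std.testing.expectEqual(@as(usize, 2), s.len);
    try std.testing.expectEqual(@as(usize, 2), std.mem.len(many));

    // No longer supported as span()/len() arguments; coerce and read .len instead.
    const as_slice: []const u8 = &array;
    try std.testing.expectEqual(@as(usize, 2), as_slice.len);
}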

View File

@ -550,7 +550,6 @@ pub fn abort() noreturn {
exit(0); // TODO choose appropriate exit code exit(0); // TODO choose appropriate exit code
} }
if (builtin.os.tag == .wasi) { if (builtin.os.tag == .wasi) {
@breakpoint();
exit(1); exit(1);
} }
if (builtin.os.tag == .cuda) { if (builtin.os.tag == .cuda) {
@ -4514,7 +4513,7 @@ pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32
var nt_name = windows.UNICODE_STRING{ var nt_name = windows.UNICODE_STRING{
.Length = path_len_bytes, .Length = path_len_bytes,
.MaximumLength = path_len_bytes, .MaximumLength = path_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)), .Buffer = @qualCast([*:0]u16, sub_path_w),
}; };
var attr = windows.OBJECT_ATTRIBUTES{ var attr = windows.OBJECT_ATTRIBUTES{
.Length = @sizeOf(windows.OBJECT_ATTRIBUTES), .Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
@ -6029,7 +6028,7 @@ pub fn sendfile(
.BADF => unreachable, // Always a race condition. .BADF => unreachable, // Always a race condition.
.FAULT => unreachable, // Segmentation fault. .FAULT => unreachable, // Segmentation fault.
.OVERFLOW => unreachable, // We avoid passing too large of a `count`. .OVERFLOW => unreachable, // We avoid passing too large of a `count`.
.NOTCONN => unreachable, // `out_fd` is an unconnected socket. .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
.INVAL, .NOSYS => { .INVAL, .NOSYS => {
// EINVAL could be any of the following situations: // EINVAL could be any of the following situations:
@ -6097,7 +6096,7 @@ pub fn sendfile(
.BADF => unreachable, // Always a race condition. .BADF => unreachable, // Always a race condition.
.FAULT => unreachable, // Segmentation fault. .FAULT => unreachable, // Segmentation fault.
.NOTCONN => unreachable, // `out_fd` is an unconnected socket. .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
.INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => { .INVAL, .OPNOTSUPP, .NOTSOCK, .NOSYS => {
// EINVAL could be any of the following situations: // EINVAL could be any of the following situations:
@ -6179,7 +6178,7 @@ pub fn sendfile(
.BADF => unreachable, // Always a race condition. .BADF => unreachable, // Always a race condition.
.FAULT => unreachable, // Segmentation fault. .FAULT => unreachable, // Segmentation fault.
.INVAL => unreachable, .INVAL => unreachable,
.NOTCONN => unreachable, // `out_fd` is an unconnected socket. .NOTCONN => return error.BrokenPipe, // `out_fd` is an unconnected socket
.OPNOTSUPP, .NOTSOCK, .NOSYS => break :sf, .OPNOTSUPP, .NOTSOCK, .NOSYS => break :sf,
@ -6473,7 +6472,7 @@ pub fn recvfrom(
.BADF => unreachable, // always a race condition .BADF => unreachable, // always a race condition
.FAULT => unreachable, .FAULT => unreachable,
.INVAL => unreachable, .INVAL => unreachable,
.NOTCONN => unreachable, .NOTCONN => return error.SocketNotConnected,
.NOTSOCK => unreachable, .NOTSOCK => unreachable,
.INTR => continue, .INTR => continue,
.AGAIN => return error.WouldBlock, .AGAIN => return error.WouldBlock,

View File

@ -22,7 +22,7 @@ const UefiPoolAllocator = struct {
assert(len > 0); assert(len > 0);
const ptr_align = 1 << log2_ptr_align; const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
const metadata_len = mem.alignForward(@sizeOf(usize), ptr_align); const metadata_len = mem.alignForward(@sizeOf(usize), ptr_align);

View File

@ -85,7 +85,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
var nt_name = UNICODE_STRING{ var nt_name = UNICODE_STRING{
.Length = path_len_bytes, .Length = path_len_bytes,
.MaximumLength = path_len_bytes, .MaximumLength = path_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)), .Buffer = @qualCast([*]u16, sub_path_w.ptr),
}; };
var attr = OBJECT_ATTRIBUTES{ var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES), .Length = @sizeOf(OBJECT_ATTRIBUTES),
@ -634,7 +634,7 @@ pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void
var nt_name = UNICODE_STRING{ var nt_name = UNICODE_STRING{
.Length = path_len_bytes, .Length = path_len_bytes,
.MaximumLength = path_len_bytes, .MaximumLength = path_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(path_name.ptr)), .Buffer = @qualCast([*]u16, path_name.ptr),
}; };
const rc = ntdll.RtlSetCurrentDirectory_U(&nt_name); const rc = ntdll.RtlSetCurrentDirectory_U(&nt_name);
@ -766,7 +766,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
var nt_name = UNICODE_STRING{ var nt_name = UNICODE_STRING{
.Length = path_len_bytes, .Length = path_len_bytes,
.MaximumLength = path_len_bytes, .MaximumLength = path_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)), .Buffer = @qualCast([*]u16, sub_path_w.ptr),
}; };
var attr = OBJECT_ATTRIBUTES{ var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES), .Length = @sizeOf(OBJECT_ATTRIBUTES),
@ -876,7 +876,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
.Length = path_len_bytes, .Length = path_len_bytes,
.MaximumLength = path_len_bytes, .MaximumLength = path_len_bytes,
// The Windows API makes this mutable, but it will not mutate here. // The Windows API makes this mutable, but it will not mutate here.
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)), .Buffer = @qualCast([*]u16, sub_path_w.ptr),
}; };
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) { if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
@ -1414,7 +1414,7 @@ pub fn sendmsg(
} }
pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 { pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 {
var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @intToPtr([*]u8, @ptrToInt(buf)) }; var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @qualCast([*]u8, buf) };
var bytes_send: DWORD = undefined; var bytes_send: DWORD = undefined;
if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) { if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) {
return ws2_32.SOCKET_ERROR; return ws2_32.SOCKET_ERROR;
@ -1876,13 +1876,13 @@ pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
const a_string = UNICODE_STRING{ const a_string = UNICODE_STRING{
.Length = a_bytes, .Length = a_bytes,
.MaximumLength = a_bytes, .MaximumLength = a_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(a.ptr)), .Buffer = @qualCast([*]u16, a.ptr),
}; };
const b_bytes = @intCast(u16, b.len * 2); const b_bytes = @intCast(u16, b.len * 2);
const b_string = UNICODE_STRING{ const b_string = UNICODE_STRING{
.Length = b_bytes, .Length = b_bytes,
.MaximumLength = b_bytes, .MaximumLength = b_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(b.ptr)), .Buffer = @qualCast([*]u16, b.ptr),
}; };
return ntdll.RtlEqualUnicodeString(&a_string, &b_string, TRUE) == TRUE; return ntdll.RtlEqualUnicodeString(&a_string, &b_string, TRUE) == TRUE;
} }

View File

@ -7,12 +7,14 @@ export var _tls_end: u8 linksection(".tls$ZZZ") = 0;
export var __xl_a: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLA") = null; export var __xl_a: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLA") = null;
export var __xl_z: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLZ") = null; export var __xl_z: std.os.windows.PIMAGE_TLS_CALLBACK linksection(".CRT$XLZ") = null;
const tls_array: u32 = 0x2c;
comptime { comptime {
if (builtin.target.cpu.arch == .x86) { if (builtin.target.cpu.arch == .x86 and builtin.zig_backend != .stage2_c) {
// The __tls_array is the offset of the ThreadLocalStoragePointer field // The __tls_array is the offset of the ThreadLocalStoragePointer field
// in the TEB block whose base address held in the %fs segment. // in the TEB block whose base address held in the %fs segment.
@export(tls_array, .{ .name = "_tls_array" }); asm (
\\ .global __tls_array
\\ __tls_array = 0x2C
);
} }
} }

View File

@ -9,6 +9,7 @@ pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap; pub const AutoHashMap = hash_map.AutoHashMap;
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged; pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BoundedArray = @import("bounded_array.zig").BoundedArray; pub const BoundedArray = @import("bounded_array.zig").BoundedArray;
pub const Build = @import("Build.zig");
pub const BufMap = @import("buf_map.zig").BufMap; pub const BufMap = @import("buf_map.zig").BufMap;
pub const BufSet = @import("buf_set.zig").BufSet; pub const BufSet = @import("buf_set.zig").BufSet;
pub const ChildProcess = @import("child_process.zig").ChildProcess; pub const ChildProcess = @import("child_process.zig").ChildProcess;
@ -49,7 +50,6 @@ pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig"); pub const atomic = @import("atomic.zig");
pub const base64 = @import("base64.zig"); pub const base64 = @import("base64.zig");
pub const bit_set = @import("bit_set.zig"); pub const bit_set = @import("bit_set.zig");
pub const build = @import("build.zig");
pub const builtin = @import("builtin.zig"); pub const builtin = @import("builtin.zig");
pub const c = @import("c.zig"); pub const c = @import("c.zig");
pub const coff = @import("coff.zig"); pub const coff = @import("coff.zig");
@ -96,6 +96,9 @@ pub const wasm = @import("wasm.zig");
pub const zig = @import("zig.zig"); pub const zig = @import("zig.zig");
pub const start = @import("start.zig"); pub const start = @import("start.zig");
/// deprecated: use `Build`.
pub const build = Build;
const root = @import("root"); const root = @import("root");
const options_override = if (@hasDecl(root, "std_options")) root.std_options else struct {}; const options_override = if (@hasDecl(root, "std_options")) root.std_options else struct {};
@ -150,6 +153,11 @@ pub const options = struct {
else else
log.defaultLog; log.defaultLog;
pub const fmt_max_depth = if (@hasDecl(options_override, "fmt_max_depth"))
options_override.fmt_max_depth
else
fmt.default_max_depth;
pub const cryptoRandomSeed: fn (buffer: []u8) void = if (@hasDecl(options_override, "cryptoRandomSeed")) pub const cryptoRandomSeed: fn (buffer: []u8) void = if (@hasDecl(options_override, "cryptoRandomSeed"))
options_override.cryptoRandomSeed options_override.cryptoRandomSeed
else else
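
A root-file sketch of the new options hook; only fmt_max_depth is overridden here and the depth value is arbitrary:

const std = @import("std");

// Declared in the root source file; std picks it up via @hasDecl(root, "std_options").
pub const std_options = struct {
    pub const fmt_max_depth = 4;
};

pub fn main() void {
    // Deeply nested values are truncated by the formatter once the configured depth is reached.
    const nested = .{ .a = .{ .b = .{ .c = .{ .d = 1 } } } };
    std.debug.print("{any}\n", .{nested});
}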

View File

@ -1,6 +1,18 @@
pub const Options = struct { pub const Options = struct {
/// Number of directory levels to skip when extracting files. /// Number of directory levels to skip when extracting files.
strip_components: u32 = 0, strip_components: u32 = 0,
/// How to handle the "mode" property of files from within the tar file.
mode_mode: ModeMode = .executable_bit_only,
const ModeMode = enum {
/// The mode from the tar file is completely ignored. Files are created
/// with the default mode.
ignore,
/// The mode from the tar file is inspected for the owner executable bit
/// only. This bit is copied to the group and other executable bits.
/// Other bits of the mode are left as the default when creating files.
executable_bit_only,
};
}; };
pub const Header = struct { pub const Header = struct {
@ -72,6 +84,17 @@ pub const Header = struct {
}; };
pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !void { pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !void {
switch (options.mode_mode) {
.ignore => {},
.executable_bit_only => {
// This code does not look at the mode bits yet. To implement this feature,
// the implementation must be adjusted to look at the mode, and check the
// user executable bit, then call fchmod on newly created files when
// the executable bit is supposed to be set.
// It also needs to properly deal with ACLs on Windows.
@panic("TODO: unimplemented: tar ModeMode.executable_bit_only");
},
}
var file_name_buffer: [255]u8 = undefined; var file_name_buffer: [255]u8 = undefined;
var buffer: [512 * 8]u8 = undefined; var buffer: [512 * 8]u8 = undefined;
var start: usize = 0; var start: usize = 0;
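
A minimal extraction sketch against the new Options; the archive and directory names are placeholders, and since the default executable_bit_only currently panics as unimplemented, .ignore is the mode that works today:

const std = @import("std");

pub fn extract() !void {
    var file = try std.fs.cwd().openFile("release.tar", .{});
    defer file.close();

    var out_dir = try std.fs.cwd().makeOpenPath("unpacked", .{});
    defer out_dir.close();

    try std.tar.pipeToFileSystem(out_dir, file.reader(), .{
        .strip_components = 1,
        .mode_mode = .ignore,
    });
}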

View File

@ -702,9 +702,6 @@ pub const Target = struct {
pub const ShiftInt = std.math.Log2Int(usize); pub const ShiftInt = std.math.Log2Int(usize);
pub const empty = Set{ .ints = [1]usize{0} ** usize_count }; pub const empty = Set{ .ints = [1]usize{0} ** usize_count };
pub fn empty_workaround() Set {
return Set{ .ints = [1]usize{0} ** usize_count };
}
pub fn isEmpty(set: Set) bool { pub fn isEmpty(set: Set) bool {
return for (set.ints) |x| { return for (set.ints) |x| {
@ -787,7 +784,7 @@ pub const Target = struct {
return struct { return struct {
/// Populates only the feature bits specified. /// Populates only the feature bits specified.
pub fn featureSet(features: []const F) Set { pub fn featureSet(features: []const F) Set {
var x = Set.empty_workaround(); // TODO remove empty_workaround var x = Set.empty;
for (features) |feature| { for (features) |feature| {
x.addFeature(@enumToInt(feature)); x.addFeature(@enumToInt(feature));
} }
@ -1907,6 +1904,561 @@ pub const Target = struct {
=> 16, => 16,
}; };
} }
pub const CType = enum {
short,
ushort,
int,
uint,
long,
ulong,
longlong,
ulonglong,
float,
double,
longdouble,
};
pub fn c_type_byte_size(t: Target, c_type: CType) u16 {
return switch (c_type) {
.short,
.ushort,
.int,
.uint,
.long,
.ulong,
.longlong,
.ulonglong,
=> @divExact(c_type_bit_size(t, c_type), 8),
.float => 4,
.double => 8,
.longdouble => switch (c_type_bit_size(t, c_type)) {
16 => 2,
32 => 4,
64 => 8,
80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))),
128 => 16,
else => unreachable,
},
};
}
pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
switch (target.os.tag) {
.freestanding, .other => switch (target.cpu.arch) {
.msp430 => switch (c_type) {
.short, .ushort, .int, .uint => return 16,
.float, .long, .ulong => return 32,
.longlong, .ulonglong, .double, .longdouble => return 64,
},
.avr => switch (c_type) {
.short, .ushort, .int, .uint => return 16,
.long, .ulong, .float, .double, .longdouble => return 32,
.longlong, .ulonglong => return 64,
},
.tce, .tcele => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
.float, .double, .longdouble => return 32,
},
.mips64, .mips64el => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
.longlong, .ulonglong, .double => return 64,
.longdouble => return 128,
},
.x86_64 => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => switch (target.abi) {
.gnux32, .muslx32 => return 32,
else => return 64,
},
.longlong, .ulonglong, .double => return 64,
.longdouble => return 80,
},
else => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.x86 => switch (target.abi) {
.android => return 64,
else => return 80,
},
.powerpc,
.powerpcle,
.powerpc64,
.powerpc64le,
=> switch (target.abi) {
.musl,
.musleabi,
.musleabihf,
.muslx32,
=> return 64,
else => return 128,
},
.riscv32,
.riscv64,
.aarch64,
.aarch64_be,
.aarch64_32,
.s390x,
.sparc,
.sparc64,
.sparcel,
.wasm32,
.wasm64,
=> return 128,
else => return 64,
},
},
},
.linux,
.freebsd,
.netbsd,
.dragonfly,
.openbsd,
.wasi,
.emscripten,
.plan9,
.solaris,
.haiku,
.ananas,
.fuchsia,
.minix,
=> switch (target.cpu.arch) {
.msp430 => switch (c_type) {
.short, .ushort, .int, .uint => return 16,
.long, .ulong, .float => return 32,
.longlong, .ulonglong, .double, .longdouble => return 64,
},
.avr => switch (c_type) {
.short, .ushort, .int, .uint => return 16,
.long, .ulong, .float, .double, .longdouble => return 32,
.longlong, .ulonglong => return 64,
},
.tce, .tcele => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
.float, .double, .longdouble => return 32,
},
.mips64, .mips64el => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
.longlong, .ulonglong, .double => return 64,
.longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
},
.x86_64 => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => switch (target.abi) {
.gnux32, .muslx32 => return 32,
else => return 64,
},
.longlong, .ulonglong, .double => return 64,
.longdouble => return 80,
},
else => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.x86 => switch (target.abi) {
.android => return 64,
else => return 80,
},
.powerpc,
.powerpcle,
=> switch (target.abi) {
.musl,
.musleabi,
.musleabihf,
.muslx32,
=> return 64,
else => switch (target.os.tag) {
.freebsd, .netbsd, .openbsd => return 64,
else => return 128,
},
},
.powerpc64,
.powerpc64le,
=> switch (target.abi) {
.musl,
.musleabi,
.musleabihf,
.muslx32,
=> return 64,
else => switch (target.os.tag) {
.freebsd, .openbsd => return 64,
else => return 128,
},
},
.riscv32,
.riscv64,
.aarch64,
.aarch64_be,
.aarch64_32,
.s390x,
.mips64,
.mips64el,
.sparc,
.sparc64,
.sparcel,
.wasm32,
.wasm64,
=> return 128,
else => return 64,
},
},
},
.windows, .uefi => switch (target.cpu.arch) {
.x86 => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => return 32,
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.abi) {
.gnu, .gnuilp32, .cygnus => return 80,
else => return 64,
},
},
.x86_64 => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => switch (target.abi) {
.cygnus => return 64,
else => return 32,
},
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.abi) {
.gnu, .gnuilp32, .cygnus => return 80,
else => return 64,
},
},
else => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => return 32,
.longlong, .ulonglong, .double => return 64,
.longdouble => return 64,
},
},
.macos, .ios, .tvos, .watchos => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => switch (target.cpu.arch) {
.x86, .arm, .aarch64_32 => return 32,
.x86_64 => switch (target.abi) {
.gnux32, .muslx32 => return 32,
else => return 64,
},
else => return 64,
},
.longlong, .ulonglong, .double => return 64,
.longdouble => switch (target.cpu.arch) {
.x86 => switch (target.abi) {
.android => return 64,
else => return 80,
},
.x86_64 => return 80,
else => return 64,
},
},
.nvcl, .cuda => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong => switch (target.cpu.arch) {
.nvptx => return 32,
.nvptx64 => return 64,
else => return 64,
},
.longlong, .ulonglong, .double => return 64,
.longdouble => return 64,
},
.amdhsa, .amdpal => switch (c_type) {
.short, .ushort => return 16,
.int, .uint, .float => return 32,
.long, .ulong, .longlong, .ulonglong, .double => return 64,
.longdouble => return 128,
},
.cloudabi,
.kfreebsd,
.lv2,
.zos,
.rtems,
.nacl,
.aix,
.ps4,
.ps5,
.elfiamcu,
.mesa3d,
.contiki,
.hermit,
.hurd,
.opencl,
.glsl450,
.vulkan,
.driverkit,
.shadermodel,
=> @panic("TODO specify the C integer and float type sizes for this OS"),
}
}
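As a concrete reading of the tables above, C `long` is 64 bits on x86_64-linux-gnu but 32 bits on x86_64-windows-gnu and on the x32 ABIs. A minimal sketch of querying this, assuming `std.zig.CrossTarget.toTarget()` is available to resolve a baseline `Target` (illustrative only, not part of this file):

const std = @import("std");

test "C long width differs between x86_64 Linux and Windows (sketch)" {
    // Resolve two baseline targets that differ only in OS.
    const linux = (std.zig.CrossTarget{
        .cpu_arch = .x86_64,
        .os_tag = .linux,
        .abi = .gnu,
    }).toTarget();
    const windows = (std.zig.CrossTarget{
        .cpu_arch = .x86_64,
        .os_tag = .windows,
        .abi = .gnu,
    }).toTarget();
    // Per the tables above: LP64 on Linux, LLP64 on Windows.
    try std.testing.expectEqual(@as(u16, 64), linux.c_type_bit_size(.long));
    try std.testing.expectEqual(@as(u16, 32), windows.c_type_bit_size(.long));
}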
pub fn c_type_alignment(target: Target, c_type: CType) u16 {
// Overrides for unusual alignments
switch (target.cpu.arch) {
.avr => switch (c_type) {
.short, .ushort => return 2,
else => return 1,
},
.x86 => switch (target.os.tag) {
.windows, .uefi => switch (c_type) {
.longlong, .ulonglong, .double => return 8,
.longdouble => switch (target.abi) {
.gnu, .gnuilp32, .cygnus => return 4,
else => return 8,
},
else => {},
},
else => {},
},
else => {},
}
// Next-power-of-two-aligned, up to a maximum.
return @min(
std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
switch (target.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
.netbsd => switch (target.abi) {
.gnueabi,
.gnueabihf,
.eabi,
.eabihf,
.android,
.musleabi,
.musleabihf,
=> 8,
else => @as(u16, 4),
},
.ios, .tvos, .watchos => 4,
else => 8,
},
.msp430,
.avr,
=> 2,
.arc,
.csky,
.x86,
.xcore,
.dxil,
.loongarch32,
.tce,
.tcele,
.le32,
.amdil,
.hsail,
.spir,
.spirv32,
.kalimba,
.shave,
.renderscript32,
.ve,
.spu_2,
.xtensa,
=> 4,
.aarch64_32,
.amdgcn,
.amdil64,
.bpfel,
.bpfeb,
.hexagon,
.hsail64,
.loongarch64,
.m68k,
.mips,
.mipsel,
.sparc,
.sparcel,
.sparc64,
.lanai,
.le64,
.nvptx,
.nvptx64,
.r600,
.s390x,
.spir64,
.spirv64,
.renderscript64,
=> 8,
.aarch64,
.aarch64_be,
.mips64,
.mips64el,
.powerpc,
.powerpcle,
.powerpc64,
.powerpc64le,
.riscv32,
.riscv64,
.x86_64,
.wasm32,
.wasm64,
=> 16,
},
);
}
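The alignment rule above rounds the bit size up to whole bytes, then up to the next power of two, capped by a per-architecture maximum. A standalone sketch of that arithmetic (hypothetical `abiAlign` helper, not part of this file):

const std = @import("std");

// Sketch: alignment = min(ceilPowerOfTwo(byte size), per-arch cap).
fn abiAlign(bit_size: u16, arch_max: u16) u16 {
    const byte_size = (bit_size + 7) / 8;
    return @min(std.math.ceilPowerOfTwoAssert(u16, byte_size), arch_max);
}

test "worked example: 80-bit long double on x86_64" {
    // 80 bits -> 10 bytes -> next power of two is 16, which is also the x86_64 cap.
    try std.testing.expectEqual(@as(u16, 16), abiAlign(80, 16));
    // 32-bit int -> 4 bytes -> 4, well under the cap.
    try std.testing.expectEqual(@as(u16, 4), abiAlign(32, 16));
}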
pub fn c_type_preferred_alignment(target: Target, c_type: CType) u16 {
// Overrides for unusual alignments
switch (target.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
.netbsd => switch (target.abi) {
.gnueabi,
.gnueabihf,
.eabi,
.eabihf,
.android,
.musleabi,
.musleabihf,
=> {},
else => switch (c_type) {
.longdouble => return 4,
else => {},
},
},
.ios, .tvos, .watchos => switch (c_type) {
.longdouble => return 4,
else => {},
},
else => {},
},
.arc => switch (c_type) {
.longdouble => return 4,
else => {},
},
.avr => switch (c_type) {
.int, .uint, .long, .ulong, .float, .longdouble => return 1,
.short, .ushort => return 2,
.double => return 4,
.longlong, .ulonglong => return 8,
},
.x86 => switch (target.os.tag) {
.windows, .uefi => switch (c_type) {
.longdouble => switch (target.abi) {
.gnu, .gnuilp32, .cygnus => return 4,
else => return 8,
},
else => {},
},
else => switch (c_type) {
.longdouble => return 4,
else => {},
},
},
else => {},
}
// Next-power-of-two-aligned, up to a maximum.
return @min(
std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
switch (target.cpu.arch) {
.msp430 => @as(u16, 2),
.csky,
.xcore,
.dxil,
.loongarch32,
.tce,
.tcele,
.le32,
.amdil,
.hsail,
.spir,
.spirv32,
.kalimba,
.shave,
.renderscript32,
.ve,
.spu_2,
.xtensa,
=> 4,
.arc,
.arm,
.armeb,
.avr,
.thumb,
.thumbeb,
.aarch64_32,
.amdgcn,
.amdil64,
.bpfel,
.bpfeb,
.hexagon,
.hsail64,
.x86,
.loongarch64,
.m68k,
.mips,
.mipsel,
.sparc,
.sparcel,
.sparc64,
.lanai,
.le64,
.nvptx,
.nvptx64,
.r600,
.s390x,
.spir64,
.spirv64,
.renderscript64,
=> 8,
.aarch64,
.aarch64_be,
.mips64,
.mips64el,
.powerpc,
.powerpcle,
.powerpc64,
.powerpc64le,
.riscv32,
.riscv64,
.x86_64,
.wasm32,
.wasm64,
=> 16,
},
);
}
}; };
test { test {

View File

@ -670,6 +670,252 @@ pub fn expectStringEndsWith(actual: []const u8, expected_ends_with: []const u8)
return error.TestExpectedEndsWith; return error.TestExpectedEndsWith;
} }
/// This function is intended to be used only in tests. When the two values are not
/// deeply equal, prints diagnostics to stderr to show exactly how they are not equal,
/// then returns a test failure error.
/// `actual` is cast to the type of `expected`.
///
/// Deeply equal is defined as follows:
/// Primitive types are deeply equal if they are equal using `==` operator.
/// Struct values are deeply equal if their corresponding fields are deeply equal.
/// Container types (like Array/Slice/Vector) are deeply equal when their corresponding elements are deeply equal.
/// Pointer values are deeply equal if values they point to are deeply equal.
///
/// Note: Self-referential structs are not supported (e.g. things like std.SinglyLinkedList)
pub fn expectEqualDeep(expected: anytype, actual: @TypeOf(expected)) !void {
switch (@typeInfo(@TypeOf(actual))) {
.NoReturn,
.Opaque,
.Frame,
.AnyFrame,
=> @compileError("value of type " ++ @typeName(@TypeOf(actual)) ++ " encountered"),
.Undefined,
.Null,
.Void,
=> return,
.Type => {
if (actual != expected) {
std.debug.print("expected type {s}, found type {s}\n", .{ @typeName(expected), @typeName(actual) });
return error.TestExpectedEqual;
}
},
.Bool,
.Int,
.Float,
.ComptimeFloat,
.ComptimeInt,
.EnumLiteral,
.Enum,
.Fn,
.ErrorSet,
=> {
if (actual != expected) {
std.debug.print("expected {}, found {}\n", .{ expected, actual });
return error.TestExpectedEqual;
}
},
.Pointer => |pointer| {
switch (pointer.size) {
// We have no idea what is behind those pointers, so the best we can do is an `==` check.
.C, .Many => {
if (actual != expected) {
std.debug.print("expected {*}, found {*}\n", .{ expected, actual });
return error.TestExpectedEqual;
}
},
.One => {
// For function and opaque pointees we cannot compare the pointee, so fall back to an `==` check; otherwise compare what they point to.
switch (@typeInfo(pointer.child)) {
.Fn, .Opaque => {
if (actual != expected) {
std.debug.print("expected {*}, found {*}\n", .{ expected, actual });
return error.TestExpectedEqual;
}
},
else => try expectEqualDeep(expected.*, actual.*),
}
},
.Slice => {
if (expected.len != actual.len) {
std.debug.print("Slice len not the same, expected {d}, found {d}\n", .{ expected.len, actual.len });
return error.TestExpectedEqual;
}
var i: usize = 0;
while (i < expected.len) : (i += 1) {
expectEqualDeep(expected[i], actual[i]) catch |e| {
std.debug.print("index {d} incorrect. expected {any}, found {any}\n", .{
i, expected[i], actual[i],
});
return e;
};
}
},
}
},
.Array => |_| {
if (expected.len != actual.len) {
std.debug.print("Array len not the same, expected {d}, found {d}\n", .{ expected.len, actual.len });
return error.TestExpectedEqual;
}
var i: usize = 0;
while (i < expected.len) : (i += 1) {
expectEqualDeep(expected[i], actual[i]) catch |e| {
std.debug.print("index {d} incorrect. expected {any}, found {any}\n", .{
i, expected[i], actual[i],
});
return e;
};
}
},
.Vector => |info| {
if (info.len != @typeInfo(@TypeOf(actual)).Vector.len) {
std.debug.print("Vector len not the same, expected {d}, found {d}\n", .{ info.len, @typeInfo(@TypeOf(actual)).Vector.len });
return error.TestExpectedEqual;
}
var i: usize = 0;
while (i < info.len) : (i += 1) {
expectEqualDeep(expected[i], actual[i]) catch |e| {
std.debug.print("index {d} incorrect. expected {any}, found {any}\n", .{
i, expected[i], actual[i],
});
return e;
};
}
},
.Struct => |structType| {
inline for (structType.fields) |field| {
expectEqualDeep(@field(expected, field.name), @field(actual, field.name)) catch |e| {
std.debug.print("Field {s} incorrect. expected {any}, found {any}\n", .{ field.name, @field(expected, field.name), @field(actual, field.name) });
return e;
};
}
},
.Union => |union_info| {
if (union_info.tag_type == null) {
@compileError("Unable to compare untagged union values");
}
const Tag = std.meta.Tag(@TypeOf(expected));
const expectedTag = @as(Tag, expected);
const actualTag = @as(Tag, actual);
try expectEqual(expectedTag, actualTag);
// we only reach this switch if the tags are equal
switch (expected) {
inline else => |val, tag| {
try expectEqualDeep(val, @field(actual, @tagName(tag)));
},
}
},
.Optional => {
if (expected) |expected_payload| {
if (actual) |actual_payload| {
try expectEqualDeep(expected_payload, actual_payload);
} else {
std.debug.print("expected {any}, found null\n", .{expected_payload});
return error.TestExpectedEqual;
}
} else {
if (actual) |actual_payload| {
std.debug.print("expected null, found {any}\n", .{actual_payload});
return error.TestExpectedEqual;
}
}
},
.ErrorUnion => {
if (expected) |expected_payload| {
if (actual) |actual_payload| {
try expectEqualDeep(expected_payload, actual_payload);
} else |actual_err| {
std.debug.print("expected {any}, found {any}\n", .{ expected_payload, actual_err });
return error.TestExpectedEqual;
}
} else |expected_err| {
if (actual) |actual_payload| {
std.debug.print("expected {any}, found {any}\n", .{ expected_err, actual_payload });
return error.TestExpectedEqual;
} else |actual_err| {
try expectEqualDeep(expected_err, actual_err);
}
}
},
}
}
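The union branch uses `inline else` with a two-value capture so each generated prong sees the concrete payload type and a comptime-known tag. A standalone sketch of that pattern (hypothetical `Value` union, unrelated to the code above):

const std = @import("std");

test "inline else captures payload and tag (sketch)" {
    const Value = union(enum) { int: i64, flag: bool };
    const v: Value = .{ .flag = true };
    switch (v) {
        // One prong is generated per tag; `payload` has the field's concrete
        // type and `tag` is comptime-known inside each generated prong.
        inline else => |payload, tag| {
            try std.testing.expectEqualStrings("flag", @tagName(tag));
            try std.testing.expect(@TypeOf(payload) == bool);
        },
    }
}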
test "expectEqualDeep primitive type" {
try expectEqualDeep(1, 1);
try expectEqualDeep(true, true);
try expectEqualDeep(1.5, 1.5);
try expectEqualDeep(u8, u8);
try expectEqualDeep(error.Bad, error.Bad);
// optional
{
const foo: ?u32 = 1;
const bar: ?u32 = 1;
try expectEqualDeep(foo, bar);
try expectEqualDeep(?u32, ?u32);
}
// function type
{
const fnType = struct {
fn foo() void {
unreachable;
}
}.foo;
try expectEqualDeep(fnType, fnType);
}
}
test "expectEqualDeep pointer" {
const a = 1;
const b = 1;
try expectEqualDeep(&a, &b);
}
test "expectEqualDeep composite type" {
try expectEqualDeep("abc", "abc");
const s1: []const u8 = "abc";
const s2 = "abcd";
const s3: []const u8 = s2[0..3];
try expectEqualDeep(s1, s3);
const TestStruct = struct { s: []const u8 };
try expectEqualDeep(TestStruct{ .s = "abc" }, TestStruct{ .s = "abc" });
try expectEqualDeep([_][]const u8{ "a", "b", "c" }, [_][]const u8{ "a", "b", "c" });
// vector
try expectEqualDeep(@splat(4, @as(u32, 4)), @splat(4, @as(u32, 4)));
// nested array
{
const a = [2][2]f32{
[_]f32{ 1.0, 0.0 },
[_]f32{ 0.0, 1.0 },
};
const b = [2][2]f32{
[_]f32{ 1.0, 0.0 },
[_]f32{ 0.0, 1.0 },
};
try expectEqualDeep(a, b);
try expectEqualDeep(&a, &b);
}
}
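Unlike `expectEqual`, which would compare a slice field by pointer and length, `expectEqualDeep` follows the slice and compares its contents. A minimal usage sketch with a hypothetical `Config` struct and a separately allocated copy of the string:

const std = @import("std");

test "expectEqualDeep follows slices inside structs (sketch)" {
    const Config = struct { name: []const u8, retries: u8 };
    const a: Config = .{ .name = "alpha", .retries = 3 };
    // A separately allocated copy: the pointers differ, the contents match.
    const copy = try std.testing.allocator.dupe(u8, "alpha");
    defer std.testing.allocator.free(copy);
    const b: Config = .{ .name = copy, .retries = 3 };
    try std.testing.expectEqualDeep(a, b);
}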
fn printIndicatorLine(source: []const u8, indicator_index: usize) void { fn printIndicatorLine(source: []const u8, indicator_index: usize) void {
const line_begin_index = if (std.mem.lastIndexOfScalar(u8, source[0..indicator_index], '\n')) |line_begin| const line_begin_index = if (std.mem.lastIndexOfScalar(u8, source[0..indicator_index], '\n')) |line_begin|
line_begin + 1 line_begin + 1

View File

@ -8,7 +8,6 @@ pub const Tokenizer = tokenizer.Tokenizer;
pub const fmtId = fmt.fmtId; pub const fmtId = fmt.fmtId;
pub const fmtEscapes = fmt.fmtEscapes; pub const fmtEscapes = fmt.fmtEscapes;
pub const isValidId = fmt.isValidId; pub const isValidId = fmt.isValidId;
pub const parse = @import("zig/parse.zig").parse;
pub const string_literal = @import("zig/string_literal.zig"); pub const string_literal = @import("zig/string_literal.zig");
pub const number_literal = @import("zig/number_literal.zig"); pub const number_literal = @import("zig/number_literal.zig");
pub const primitives = @import("zig/primitives.zig"); pub const primitives = @import("zig/primitives.zig");

View File

@ -1,4 +1,8 @@
//! Abstract Syntax Tree for Zig source code. //! Abstract Syntax Tree for Zig source code.
//! For Zig syntax, the root node is at nodes[0] and contains the list of
//! sub-nodes.
//! For Zon syntax, the root node is at nodes[0] and contains lhs as the node
//! index of the main expression.
/// Reference to externally-owned data. /// Reference to externally-owned data.
source: [:0]const u8, source: [:0]const u8,
@ -11,13 +15,6 @@ extra_data: []Node.Index,
errors: []const Error, errors: []const Error,
const std = @import("../std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const Token = std.zig.Token;
const Ast = @This();
pub const TokenIndex = u32; pub const TokenIndex = u32;
pub const ByteOffset = u32; pub const ByteOffset = u32;
@ -34,7 +31,7 @@ pub const Location = struct {
line_end: usize, line_end: usize,
}; };
pub fn deinit(tree: *Ast, gpa: mem.Allocator) void { pub fn deinit(tree: *Ast, gpa: Allocator) void {
tree.tokens.deinit(gpa); tree.tokens.deinit(gpa);
tree.nodes.deinit(gpa); tree.nodes.deinit(gpa);
gpa.free(tree.extra_data); gpa.free(tree.extra_data);
@ -48,11 +45,69 @@ pub const RenderError = error{
OutOfMemory, OutOfMemory,
}; };
pub const Mode = enum { zig, zon };
/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!Ast {
var tokens = Ast.TokenList{};
defer tokens.deinit(gpa);
// Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
const estimated_token_count = source.len / 8;
try tokens.ensureTotalCapacity(gpa, estimated_token_count);
var tokenizer = std.zig.Tokenizer.init(source);
while (true) {
const token = tokenizer.next();
try tokens.append(gpa, .{
.tag = token.tag,
.start = @intCast(u32, token.loc.start),
});
if (token.tag == .eof) break;
}
var parser: Parse = .{
.source = source,
.gpa = gpa,
.token_tags = tokens.items(.tag),
.token_starts = tokens.items(.start),
.errors = .{},
.nodes = .{},
.extra_data = .{},
.scratch = .{},
.tok_i = 0,
};
defer parser.errors.deinit(gpa);
defer parser.nodes.deinit(gpa);
defer parser.extra_data.deinit(gpa);
defer parser.scratch.deinit(gpa);
// Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
// Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
const estimated_node_count = (tokens.len + 2) / 2;
try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
switch (mode) {
.zig => try parser.parseRoot(),
.zon => try parser.parseZon(),
}
// TODO experiment with compacting the MultiArrayList slices here
return Ast{
.source = source,
.tokens = tokens.toOwnedSlice(),
.nodes = parser.nodes.toOwnedSlice(),
.extra_data = try parser.extra_data.toOwnedSlice(gpa),
.errors = try parser.errors.toOwnedSlice(gpa),
};
}
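A minimal sketch of the new entry point from a caller's perspective (the parser_test and Module call sites further down in this diff use the same shape):

const std = @import("std");

test "Ast.parse entry point (sketch)" {
    const gpa = std.testing.allocator;
    var tree = try std.zig.Ast.parse(gpa, "const x: u32 = 42;", .zig);
    defer tree.deinit(gpa);
    try std.testing.expect(tree.errors.len == 0);
}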
/// `gpa` is used for allocating the resulting formatted source code, as well as /// `gpa` is used for allocating the resulting formatted source code, as well as
/// for allocating extra stack memory if needed, because this function utilizes recursion. /// for allocating extra stack memory if needed, because this function utilizes recursion.
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006. /// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
/// Caller owns the returned slice of bytes, allocated with `gpa`. /// Caller owns the returned slice of bytes, allocated with `gpa`.
pub fn render(tree: Ast, gpa: mem.Allocator) RenderError![]u8 { pub fn render(tree: Ast, gpa: Allocator) RenderError![]u8 {
var buffer = std.ArrayList(u8).init(gpa); var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit(); defer buffer.deinit();
@ -3347,3 +3402,12 @@ pub const Node = struct {
rparen: TokenIndex, rparen: TokenIndex,
}; };
}; };
const std = @import("../std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const Token = std.zig.Token;
const Ast = @This();
const Allocator = std.mem.Allocator;
const Parse = @import("Parse.zig");

3825
lib/std/zig/Parse.zig Normal file

File diff suppressed because it is too large Load Diff

View File

@ -75,7 +75,7 @@ fn castPtr(comptime DestType: type, target: anytype) DestType {
const source = ptrInfo(@TypeOf(target)); const source = ptrInfo(@TypeOf(target));
if (source.is_const and !dest.is_const or source.is_volatile and !dest.is_volatile) if (source.is_const and !dest.is_const or source.is_volatile and !dest.is_volatile)
return @intToPtr(DestType, @ptrToInt(target)) return @qualCast(DestType, target)
else if (@typeInfo(dest.child) == .Opaque) else if (@typeInfo(dest.child) == .Opaque)
// dest.alignment would error out // dest.alignment would error out
return @ptrCast(DestType, target) return @ptrCast(DestType, target)
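The new `@qualCast` builtin (registered in BuiltinFn.zig further down in this diff) replaces the intToPtr/ptrToInt round trip when only const/volatile qualifiers change. A sketch of the intended use, assuming the semantics implied by this call site (qualifiers change, pointee type does not):

const std = @import("std");

test "@qualCast drops const (sketch)" {
    var x: u32 = 1;
    const const_ptr: *const u32 = &x;
    // Same pointee type, different qualifiers; the write is legal because x is a var.
    const mut_ptr = @qualCast(*u32, const_ptr);
    mut_ptr.* = 2;
    try std.testing.expectEqual(@as(u32, 2), x);
}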

File diff suppressed because it is too large Load Diff

View File

@ -186,6 +186,15 @@ test "zig fmt: file ends in comment" {
); );
} }
test "zig fmt: file ends in multi line comment" {
try testTransform(
\\ \\foobar
,
\\\\foobar
\\
);
}
test "zig fmt: file ends in comment after var decl" { test "zig fmt: file ends in comment after var decl" {
try testTransform( try testTransform(
\\const x = 42; \\const x = 42;
@ -6064,7 +6073,7 @@ var fixed_buffer_mem: [100 * 1024]u8 = undefined;
fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 { fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
const stderr = io.getStdErr().writer(); const stderr = io.getStdErr().writer();
var tree = try std.zig.parse(allocator, source); var tree = try std.zig.Ast.parse(allocator, source, .zig);
defer tree.deinit(allocator); defer tree.deinit(allocator);
for (tree.errors) |parse_error| { for (tree.errors) |parse_error| {
@ -6115,7 +6124,7 @@ fn testCanonical(source: [:0]const u8) !void {
const Error = std.zig.Ast.Error.Tag; const Error = std.zig.Ast.Error.Tag;
fn testError(source: [:0]const u8, expected_errors: []const Error) !void { fn testError(source: [:0]const u8, expected_errors: []const Error) !void {
var tree = try std.zig.parse(std.testing.allocator, source); var tree = try std.zig.Ast.parse(std.testing.allocator, source, .zig);
defer tree.deinit(std.testing.allocator); defer tree.deinit(std.testing.allocator);
std.testing.expectEqual(expected_errors.len, tree.errors.len) catch |err| { std.testing.expectEqual(expected_errors.len, tree.errors.len) catch |err| {

View File

@ -1,7 +1,6 @@
const std = @import("std"); const std = @import("std");
const mem = std.mem; const mem = std.mem;
const Tokenizer = std.zig.Tokenizer; const Tokenizer = std.zig.Tokenizer;
const Parser = std.zig.Parser;
const io = std.io; const io = std.io;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin; const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
@ -34,6 +33,6 @@ pub fn main() !void {
fn testOnce() usize { fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var allocator = fixed_buf_alloc.allocator(); var allocator = fixed_buf_alloc.allocator();
_ = std.zig.parse(allocator, source) catch @panic("parse failure"); _ = std.zig.Ast.parse(allocator, source, .zig) catch @panic("parse failure");
return fixed_buf_alloc.end_index; return fixed_buf_alloc.end_index;
} }

View File

@ -2759,8 +2759,7 @@ fn tokenSliceForRender(tree: Ast, token_index: Ast.TokenIndex) []const u8 {
var ret = tree.tokenSlice(token_index); var ret = tree.tokenSlice(token_index);
switch (tree.tokens.items(.tag)[token_index]) { switch (tree.tokens.items(.tag)[token_index]) {
.multiline_string_literal_line => { .multiline_string_literal_line => {
assert(ret[ret.len - 1] == '\n'); if (ret[ret.len - 1] == '\n') ret.len -= 1;
ret.len -= 1;
}, },
.container_doc_comment, .doc_comment => { .container_doc_comment, .doc_comment => {
ret = mem.trimRight(u8, ret, &std.ascii.whitespace); ret = mem.trimRight(u8, ret, &std.ascii.whitespace);

View File

@ -11,7 +11,7 @@ var log_err_count: usize = 0;
pub fn main() void { pub fn main() void {
if (builtin.zig_backend != .stage1 and if (builtin.zig_backend != .stage1 and
(builtin.zig_backend != .stage2_llvm or builtin.cpu.arch == .wasm32) and builtin.zig_backend != .stage2_llvm and
builtin.zig_backend != .stage2_c) builtin.zig_backend != .stage2_c)
{ {
return main2() catch @panic("test failure"); return main2() catch @panic("test failure");

View File

@ -93,6 +93,14 @@ typedef char bool;
#define zig_align zig_align_unavailable #define zig_align zig_align_unavailable
#endif #endif
#if zig_has_attribute(aligned)
#define zig_under_align(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER
#define zig_under_align(alignment) zig_align(alignment)
#else
#define zig_under_align zig_under_align_unavailable
#endif
#if zig_has_attribute(aligned) #if zig_has_attribute(aligned)
#define zig_align_fn(alignment) __attribute__((aligned(alignment))) #define zig_align_fn(alignment) __attribute__((aligned(alignment)))
#elif _MSC_VER #elif _MSC_VER
@ -101,6 +109,22 @@ typedef char bool;
#define zig_align_fn zig_align_fn_unavailable #define zig_align_fn zig_align_fn_unavailable
#endif #endif
#if zig_has_attribute(packed)
#define zig_packed(definition) __attribute__((packed)) definition
#elif _MSC_VER
#define zig_packed(definition) __pragma(pack(1)) definition __pragma(pack())
#else
#define zig_packed(definition) zig_packed_unavailable
#endif
#if zig_has_attribute(section)
#define zig_linksection(name, def, ...) def __attribute__((section(name)))
#elif _MSC_VER
#define zig_linksection(name, def, ...) __pragma(section(name, __VA_ARGS__)) __declspec(allocate(name)) def
#else
#define zig_linksection(name, def, ...) zig_linksection_unavailable
#endif
#if zig_has_builtin(unreachable) || defined(zig_gnuc) #if zig_has_builtin(unreachable) || defined(zig_gnuc)
#define zig_unreachable() __builtin_unreachable() #define zig_unreachable() __builtin_unreachable()
#else #else

View File

@ -2530,6 +2530,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.bit_size_of, .bit_size_of,
.typeof_log2_int_type, .typeof_log2_int_type,
.ptr_to_int, .ptr_to_int,
.qual_cast,
.align_of, .align_of,
.bool_to_int, .bool_to_int,
.embed_file, .embed_file,
@ -4278,7 +4279,34 @@ fn testDecl(
var num_namespaces_out: u32 = 0; var num_namespaces_out: u32 = 0;
var capturing_namespace: ?*Scope.Namespace = null; var capturing_namespace: ?*Scope.Namespace = null;
while (true) switch (s.tag) { while (true) switch (s.tag) {
.local_val, .local_ptr => unreachable, // a test cannot be in a local scope .local_val => {
const local_val = s.cast(Scope.LocalVal).?;
if (local_val.name == name_str_index) {
local_val.used = test_name_token;
return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
@tagName(local_val.id_cat),
}, &[_]u32{
try astgen.errNoteTok(local_val.token_src, "{s} declared here", .{
@tagName(local_val.id_cat),
}),
});
}
s = local_val.parent;
},
.local_ptr => {
const local_ptr = s.cast(Scope.LocalPtr).?;
if (local_ptr.name == name_str_index) {
local_ptr.used = test_name_token;
return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
@tagName(local_ptr.id_cat),
}, &[_]u32{
try astgen.errNoteTok(local_ptr.token_src, "{s} declared here", .{
@tagName(local_ptr.id_cat),
}),
});
}
s = local_ptr.parent;
},
.gen_zir => s = s.cast(GenZir).?.parent, .gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => { .namespace, .enum_namespace => {
@ -8010,6 +8038,7 @@ fn builtinCall(
.float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast), .float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast),
.int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast), .int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast),
.ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast), .ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast),
.qual_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .qual_cast),
.truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate), .truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate),
// zig fmt: on // zig fmt: on
@ -8692,6 +8721,7 @@ fn callExpr(
defer arg_block.unstack(); defer arg_block.unstack();
// `call_inst` is reused to provide the param type. // `call_inst` is reused to provide the param type.
arg_block.rl_ty_inst = call_inst;
const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node); const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node);
_ = try arg_block.addBreak(.break_inline, call_index, arg_ref); _ = try arg_block.addBreak(.break_inline, call_index, arg_ref);
@ -10840,7 +10870,12 @@ const GenZir = struct {
// we emit ZIR for the block break instructions to have the result values, // we emit ZIR for the block break instructions to have the result values,
// and then rvalue() on that to pass the value to the result location. // and then rvalue() on that to pass the value to the result location.
switch (parent_ri.rl) { switch (parent_ri.rl) {
.ty, .coerced_ty => |ty_inst| { .coerced_ty => |ty_inst| {
// Type coercion needs to happen before breaks.
gz.rl_ty_inst = ty_inst;
gz.break_result_info = .{ .rl = .{ .ty = ty_inst } };
},
.ty => |ty_inst| {
gz.rl_ty_inst = ty_inst; gz.rl_ty_inst = ty_inst;
gz.break_result_info = parent_ri; gz.break_result_info = parent_ri;
}, },

View File

@ -1400,6 +1400,7 @@ fn walkInstruction(
.float_cast, .float_cast,
.int_cast, .int_cast,
.ptr_cast, .ptr_cast,
.qual_cast,
.truncate, .truncate,
.align_cast, .align_cast,
.has_decl, .has_decl,
@ -2200,17 +2201,10 @@ fn walkInstruction(
false, false,
); );
_ = operand; return DocData.WalkResult{
.typeRef = operand.expr,
// WIP .expr = .{ .@"struct" = &.{} },
};
printWithContext(
file,
inst_index,
"TODO: implement `{s}` for walkInstruction\n\n",
.{@tagName(tags[inst_index])},
);
return self.cteTodo(@tagName(tags[inst_index]));
}, },
.struct_init_anon => { .struct_init_anon => {
const pl_node = data[inst_index].pl_node; const pl_node = data[inst_index].pl_node;
@ -2537,6 +2531,7 @@ fn walkInstruction(
const var_init_ref = @intToEnum(Ref, file.zir.extra[extra_index]); const var_init_ref = @intToEnum(Ref, file.zir.extra[extra_index]);
const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type); const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type);
value.expr = var_init.expr; value.expr = var_init.expr;
value.typeRef = var_init.typeRef;
} }
return value; return value;

View File

@ -75,6 +75,7 @@ pub const Tag = enum {
prefetch, prefetch,
ptr_cast, ptr_cast,
ptr_to_int, ptr_to_int,
qual_cast,
rem, rem,
return_address, return_address,
select, select,
@ -674,6 +675,13 @@ pub const list = list: {
.param_count = 1, .param_count = 1,
}, },
}, },
.{
"@qualCast",
.{
.tag = .qual_cast,
.param_count = 2,
},
},
.{ .{
"@rem", "@rem",
.{ .{

View File

@ -385,7 +385,7 @@ pub const AllErrors = struct {
count: u32 = 1, count: u32 = 1,
/// Does not include the trailing newline. /// Does not include the trailing newline.
source_line: ?[]const u8, source_line: ?[]const u8,
notes: []Message = &.{}, notes: []const Message = &.{},
reference_trace: []Message = &.{}, reference_trace: []Message = &.{},
/// Splits the error message up into lines to properly indent them /// Splits the error message up into lines to properly indent them
@ -3299,7 +3299,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa; const gpa = comp.gpa;
const module = comp.bin_file.options.module.?; const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index); const decl = module.declPtr(decl_index);
comp.bin_file.updateDeclLineNumber(module, decl) catch |err| { comp.bin_file.updateDeclLineNumber(module, decl_index) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1); try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create( module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa, gpa,

499
src/Manifest.zig Normal file
View File

@ -0,0 +1,499 @@
pub const basename = "build.zig.zon";
pub const Hash = std.crypto.hash.sha2.Sha256;
pub const Dependency = struct {
url: []const u8,
url_tok: Ast.TokenIndex,
hash: ?[]const u8,
hash_tok: Ast.TokenIndex,
};
pub const ErrorMessage = struct {
msg: []const u8,
tok: Ast.TokenIndex,
off: u32,
};
pub const MultihashFunction = enum(u16) {
identity = 0x00,
sha1 = 0x11,
@"sha2-256" = 0x12,
@"sha2-512" = 0x13,
@"sha3-512" = 0x14,
@"sha3-384" = 0x15,
@"sha3-256" = 0x16,
@"sha3-224" = 0x17,
@"sha2-384" = 0x20,
@"sha2-256-trunc254-padded" = 0x1012,
@"sha2-224" = 0x1013,
@"sha2-512-224" = 0x1014,
@"sha2-512-256" = 0x1015,
@"blake2b-256" = 0xb220,
_,
};
pub const multihash_function: MultihashFunction = switch (Hash) {
std.crypto.hash.sha2.Sha256 => .@"sha2-256",
else => @compileError("unreachable"),
};
comptime {
// We avoid unnecessary uleb128 code in hexDigest by asserting here the
// values are small enough to be contained in the one-byte encoding.
assert(@enumToInt(multihash_function) < 127);
assert(Hash.digest_length < 127);
}
pub const multihash_len = 1 + 1 + Hash.digest_length;
name: []const u8,
version: std.SemanticVersion,
dependencies: std.StringArrayHashMapUnmanaged(Dependency),
errors: []ErrorMessage,
arena_state: std.heap.ArenaAllocator.State,
pub const Error = Allocator.Error;
pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
const node_tags = ast.nodes.items(.tag);
const node_datas = ast.nodes.items(.data);
assert(node_tags[0] == .root);
const main_node_index = node_datas[0].lhs;
var arena_instance = std.heap.ArenaAllocator.init(gpa);
errdefer arena_instance.deinit();
var p: Parse = .{
.gpa = gpa,
.ast = ast,
.arena = arena_instance.allocator(),
.errors = .{},
.name = undefined,
.version = undefined,
.dependencies = .{},
.buf = .{},
};
defer p.buf.deinit(gpa);
defer p.errors.deinit(gpa);
defer p.dependencies.deinit(gpa);
p.parseRoot(main_node_index) catch |err| switch (err) {
error.ParseFailure => assert(p.errors.items.len > 0),
else => |e| return e,
};
return .{
.name = p.name,
.version = p.version,
.dependencies = try p.dependencies.clone(p.arena),
.errors = try p.arena.dupe(ErrorMessage, p.errors.items),
.arena_state = arena_instance.state,
};
}
pub fn deinit(man: *Manifest, gpa: Allocator) void {
man.arena_state.promote(gpa).deinit();
man.* = undefined;
}
const hex_charset = "0123456789abcdef";
pub fn hex64(x: u64) [16]u8 {
var result: [16]u8 = undefined;
var i: usize = 0;
while (i < 8) : (i += 1) {
const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
result[i * 2 + 0] = hex_charset[byte >> 4];
result[i * 2 + 1] = hex_charset[byte & 15];
}
return result;
}
test hex64 {
const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
try std.testing.expectEqualStrings("[00efcdab78563412]", s);
}
pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
var result: [multihash_len * 2]u8 = undefined;
result[0] = hex_charset[@enumToInt(multihash_function) >> 4];
result[1] = hex_charset[@enumToInt(multihash_function) & 15];
result[2] = hex_charset[Hash.digest_length >> 4];
result[3] = hex_charset[Hash.digest_length & 15];
for (digest) |byte, i| {
result[4 + i * 2] = hex_charset[byte >> 4];
result[5 + i * 2] = hex_charset[byte & 15];
}
return result;
}
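A worked example of the multihash framing used here: the first byte is the hash function code (0x12 for sha2-256), the second is the digest length in bytes (0x20), so every package hash string begins with "1220" followed by 64 hex digits of digest, as in the test manifest at the end of this file. A standalone sketch:

const std = @import("std");

test "multihash prefix for sha2-256 (sketch)" {
    const function: u8 = 0x12; // sha2-256 in the multihash table
    const digest_len: u8 = std.crypto.hash.sha2.Sha256.digest_length; // 0x20 == 32
    var prefix: [4]u8 = undefined;
    _ = try std.fmt.bufPrint(&prefix, "{x:0>2}{x:0>2}", .{ function, digest_len });
    try std.testing.expectEqualStrings("1220", &prefix);
}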
const Parse = struct {
gpa: Allocator,
ast: std.zig.Ast,
arena: Allocator,
buf: std.ArrayListUnmanaged(u8),
errors: std.ArrayListUnmanaged(ErrorMessage),
name: []const u8,
version: std.SemanticVersion,
dependencies: std.StringArrayHashMapUnmanaged(Dependency),
const InnerError = error{ ParseFailure, OutOfMemory };
fn parseRoot(p: *Parse, node: Ast.Node.Index) !void {
const ast = p.ast;
const main_tokens = ast.nodes.items(.main_token);
const main_token = main_tokens[node];
var buf: [2]Ast.Node.Index = undefined;
const struct_init = ast.fullStructInit(&buf, node) orelse {
return fail(p, main_token, "expected top level expression to be a struct", .{});
};
var have_name = false;
var have_version = false;
for (struct_init.ast.fields) |field_init| {
const name_token = ast.firstToken(field_init) - 2;
const field_name = try identifierTokenString(p, name_token);
// We could get fancy with reflection and comptime logic here but doing
// things manually provides an opportunity to do any additional verification
// that is desirable on a per-field basis.
if (mem.eql(u8, field_name, "dependencies")) {
try parseDependencies(p, field_init);
} else if (mem.eql(u8, field_name, "name")) {
p.name = try parseString(p, field_init);
have_name = true;
} else if (mem.eql(u8, field_name, "version")) {
const version_text = try parseString(p, field_init);
p.version = std.SemanticVersion.parse(version_text) catch |err| v: {
try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)});
break :v undefined;
};
have_version = true;
} else {
// Ignore unknown fields so that we can add fields in future zig
// versions without breaking older zig versions.
}
}
if (!have_name) {
try appendError(p, main_token, "missing top-level 'name' field", .{});
}
if (!have_version) {
try appendError(p, main_token, "missing top-level 'version' field", .{});
}
}
fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void {
const ast = p.ast;
const main_tokens = ast.nodes.items(.main_token);
var buf: [2]Ast.Node.Index = undefined;
const struct_init = ast.fullStructInit(&buf, node) orelse {
const tok = main_tokens[node];
return fail(p, tok, "expected dependencies expression to be a struct", .{});
};
for (struct_init.ast.fields) |field_init| {
const name_token = ast.firstToken(field_init) - 2;
const dep_name = try identifierTokenString(p, name_token);
const dep = try parseDependency(p, field_init);
try p.dependencies.put(p.gpa, dep_name, dep);
}
}
fn parseDependency(p: *Parse, node: Ast.Node.Index) !Dependency {
const ast = p.ast;
const main_tokens = ast.nodes.items(.main_token);
var buf: [2]Ast.Node.Index = undefined;
const struct_init = ast.fullStructInit(&buf, node) orelse {
const tok = main_tokens[node];
return fail(p, tok, "expected dependency expression to be a struct", .{});
};
var dep: Dependency = .{
.url = undefined,
.url_tok = undefined,
.hash = null,
.hash_tok = undefined,
};
var have_url = false;
for (struct_init.ast.fields) |field_init| {
const name_token = ast.firstToken(field_init) - 2;
const field_name = try identifierTokenString(p, name_token);
// We could get fancy with reflection and comptime logic here but doing
// things manually provides an opportunity to do any additional verification
// that is desirable on a per-field basis.
if (mem.eql(u8, field_name, "url")) {
dep.url = parseString(p, field_init) catch |err| switch (err) {
error.ParseFailure => continue,
else => |e| return e,
};
dep.url_tok = main_tokens[field_init];
have_url = true;
} else if (mem.eql(u8, field_name, "hash")) {
dep.hash = parseHash(p, field_init) catch |err| switch (err) {
error.ParseFailure => continue,
else => |e| return e,
};
dep.hash_tok = main_tokens[field_init];
} else {
// Ignore unknown fields so that we can add fields in future zig
// versions without breaking older zig versions.
}
}
if (!have_url) {
try appendError(p, main_tokens[node], "dependency is missing 'url' field", .{});
}
return dep;
}
fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 {
const ast = p.ast;
const node_tags = ast.nodes.items(.tag);
const main_tokens = ast.nodes.items(.main_token);
if (node_tags[node] != .string_literal) {
return fail(p, main_tokens[node], "expected string literal", .{});
}
const str_lit_token = main_tokens[node];
const token_bytes = ast.tokenSlice(str_lit_token);
p.buf.clearRetainingCapacity();
try parseStrLit(p, str_lit_token, &p.buf, token_bytes, 0);
const duped = try p.arena.dupe(u8, p.buf.items);
return duped;
}
fn parseHash(p: *Parse, node: Ast.Node.Index) ![]const u8 {
const ast = p.ast;
const main_tokens = ast.nodes.items(.main_token);
const tok = main_tokens[node];
const h = try parseString(p, node);
if (h.len >= 2) {
const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
return fail(p, tok, "invalid multihash value: unable to parse hash function: {s}", .{
@errorName(err),
});
};
if (@intToEnum(MultihashFunction, their_multihash_func) != multihash_function) {
return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{});
}
}
const hex_multihash_len = 2 * Manifest.multihash_len;
if (h.len != hex_multihash_len) {
return fail(p, tok, "wrong hash size. expected: {d}, found: {d}", .{
hex_multihash_len, h.len,
});
}
return h;
}
/// TODO: try to DRY this with AstGen.identifierTokenString
fn identifierTokenString(p: *Parse, token: Ast.TokenIndex) InnerError![]const u8 {
const ast = p.ast;
const token_tags = ast.tokens.items(.tag);
assert(token_tags[token] == .identifier);
const ident_name = ast.tokenSlice(token);
if (!mem.startsWith(u8, ident_name, "@")) {
return ident_name;
}
p.buf.clearRetainingCapacity();
try parseStrLit(p, token, &p.buf, ident_name, 1);
const duped = try p.arena.dupe(u8, p.buf.items);
return duped;
}
/// TODO: try to DRY this with AstGen.parseStrLit
fn parseStrLit(
p: *Parse,
token: Ast.TokenIndex,
buf: *std.ArrayListUnmanaged(u8),
bytes: []const u8,
offset: u32,
) InnerError!void {
const raw_string = bytes[offset..];
var buf_managed = buf.toManaged(p.gpa);
const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string);
buf.* = buf_managed.moveToUnmanaged();
switch (try result) {
.success => {},
.failure => |err| try p.appendStrLitError(err, token, bytes, offset),
}
}
/// TODO: try to DRY this with AstGen.failWithStrLitError
fn appendStrLitError(
p: *Parse,
err: std.zig.string_literal.Error,
token: Ast.TokenIndex,
bytes: []const u8,
offset: u32,
) Allocator.Error!void {
const raw_string = bytes[offset..];
switch (err) {
.invalid_escape_character => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"invalid escape character: '{c}'",
.{raw_string[bad_index]},
);
},
.expected_hex_digit => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"expected hex digit, found '{c}'",
.{raw_string[bad_index]},
);
},
.empty_unicode_escape_sequence => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"empty unicode escape sequence",
.{},
);
},
.expected_hex_digit_or_rbrace => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"expected hex digit or '}}', found '{c}'",
.{raw_string[bad_index]},
);
},
.invalid_unicode_codepoint => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"unicode escape does not correspond to a valid codepoint",
.{},
);
},
.expected_lbrace => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"expected '{{', found '{c}",
.{raw_string[bad_index]},
);
},
.expected_rbrace => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"expected '}}', found '{c}",
.{raw_string[bad_index]},
);
},
.expected_single_quote => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"expected single quote ('), found '{c}",
.{raw_string[bad_index]},
);
},
.invalid_character => |bad_index| {
try p.appendErrorOff(
token,
offset + @intCast(u32, bad_index),
"invalid byte in string or character literal: '{c}'",
.{raw_string[bad_index]},
);
},
}
}
fn fail(
p: *Parse,
tok: Ast.TokenIndex,
comptime fmt: []const u8,
args: anytype,
) InnerError {
try appendError(p, tok, fmt, args);
return error.ParseFailure;
}
fn appendError(p: *Parse, tok: Ast.TokenIndex, comptime fmt: []const u8, args: anytype) !void {
return appendErrorOff(p, tok, 0, fmt, args);
}
fn appendErrorOff(
p: *Parse,
tok: Ast.TokenIndex,
byte_offset: u32,
comptime fmt: []const u8,
args: anytype,
) Allocator.Error!void {
try p.errors.append(p.gpa, .{
.msg = try std.fmt.allocPrint(p.arena, fmt, args),
.tok = tok,
.off = byte_offset,
});
}
};
const Manifest = @This();
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Ast = std.zig.Ast;
const testing = std.testing;
test "basic" {
const gpa = testing.allocator;
const example =
\\.{
\\ .name = "foo",
\\ .version = "3.2.1",
\\ .dependencies = .{
\\ .bar = .{
\\ .url = "https://example.com/baz.tar.gz",
\\ .hash = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
\\ },
\\ },
\\}
;
var ast = try std.zig.Ast.parse(gpa, example, .zon);
defer ast.deinit(gpa);
try testing.expect(ast.errors.len == 0);
var manifest = try Manifest.parse(gpa, ast);
defer manifest.deinit(gpa);
try testing.expectEqualStrings("foo", manifest.name);
try testing.expectEqual(@as(std.SemanticVersion, .{
.major = 3,
.minor = 2,
.patch = 1,
}), manifest.version);
try testing.expect(manifest.dependencies.count() == 1);
try testing.expectEqualStrings("bar", manifest.dependencies.keys()[0]);
try testing.expectEqualStrings(
"https://example.com/baz.tar.gz",
manifest.dependencies.values()[0].url,
);
try testing.expectEqualStrings(
"1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
manifest.dependencies.values()[0].hash orelse return error.TestFailed,
);
}
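Two behaviors of the parser above are worth calling out: unknown manifest fields are skipped (for forward compatibility), and missing required fields are collected in `errors` rather than aborting the parse. A hypothetical companion test sketching both, in the same style as "basic":

test "missing version is reported, unknown fields are ignored (sketch)" {
    const gpa = testing.allocator;
    const example =
        \\.{
        \\    .name = "foo",
        \\    .future_field = "ignored",
        \\}
    ;
    var ast = try std.zig.Ast.parse(gpa, example, .zon);
    defer ast.deinit(gpa);
    try testing.expect(ast.errors.len == 0);

    var manifest = try Manifest.parse(gpa, ast);
    defer manifest.deinit(gpa);
    // One error for the missing 'version' field; 'name' parsed fine.
    try testing.expect(manifest.errors.len == 1);
    try testing.expectEqualStrings("foo", manifest.name);
}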

View File

@ -328,8 +328,6 @@ pub const ErrorInt = u32;
pub const Export = struct { pub const Export = struct {
options: std.builtin.ExportOptions, options: std.builtin.ExportOptions,
src: LazySrcLoc, src: LazySrcLoc,
/// Represents the position of the export, if any, in the output file.
link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported. /// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: Decl.Index, owner_decl: Decl.Index,
/// The Decl containing the export statement. Inline function calls /// The Decl containing the export statement. Inline function calls
@ -533,16 +531,8 @@ pub const Decl = struct {
/// What kind of a declaration is this. /// What kind of a declaration is this.
kind: Kind, kind: Kind,
/// Represents the position of the code in the output file. /// TODO remove this once Wasm backend catches up
/// This is populated regardless of semantic analysis and code generation. fn_link: ?link.File.Wasm.FnData = null,
link: link.File.LinkBlock,
/// Represents the function in the linked output file, if the `Decl` is a function.
/// This is stored here and not in `Fn` because `Decl` survives across updates but
/// `Fn` does not.
/// TODO Look into making `Fn` a longer lived structure and moving this field there
/// to save on memory usage.
fn_link: link.File.LinkFn,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's /// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified. /// typed_value is modified.
@ -2067,7 +2057,7 @@ pub const File = struct {
if (file.tree_loaded) return &file.tree; if (file.tree_loaded) return &file.tree;
const source = try file.getSource(gpa); const source = try file.getSource(gpa);
file.tree = try std.zig.parse(gpa, source.bytes); file.tree = try Ast.parse(gpa, source.bytes, .zig);
file.tree_loaded = true; file.tree_loaded = true;
return &file.tree; return &file.tree;
} }
@ -3672,7 +3662,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
file.source = source; file.source = source;
file.source_loaded = true; file.source_loaded = true;
file.tree = try std.zig.parse(gpa, source); file.tree = try Ast.parse(gpa, source, .zig);
defer if (!file.tree_loaded) file.tree.deinit(gpa); defer if (!file.tree_loaded) file.tree.deinit(gpa);
if (file.tree.errors.len != 0) { if (file.tree.errors.len != 0) {
@ -3987,7 +3977,7 @@ pub fn populateBuiltinFile(mod: *Module) !void {
else => |e| return e, else => |e| return e,
} }
file.tree = try std.zig.parse(gpa, file.source); file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true; file.tree_loaded = true;
assert(file.tree.errors.len == 0); // builtin.zig must parse assert(file.tree.errors.len == 0); // builtin.zig must parse
@ -4098,7 +4088,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here // The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis. // prior to re-analysis.
mod.deleteDeclExports(decl_index); try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered. // Similarly, `@setAlignStack` invocations will be re-discovered.
if (decl.getFunction()) |func| { if (decl.getFunction()) |func| {
@ -4585,7 +4575,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// We don't fully codegen the decl until later, but we do need to reserve a global // We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency // offset table index for it. This allows us to codegen decls out of dependency
// order, increasing how many computations can be done in parallel. // order, increasing how many computations can be done in parallel.
try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
if (type_changed and mod.emit_h != null) { if (type_changed and mod.emit_h != null) {
try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index }); try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
@ -4697,7 +4686,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// codegen backend wants full access to the Decl Type. // codegen backend wants full access to the Decl Type.
try sema.resolveTypeFully(decl.ty); try sema.resolveTypeFully(decl.ty);
try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index }); try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
if (type_changed and mod.emit_h != null) { if (type_changed and mod.emit_h != null) {
@ -5185,20 +5173,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.zir_decl_index = @intCast(u32, decl_sub_index); decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |_| { if (decl.getFunction()) |_| {
switch (comp.bin_file.tag) { switch (comp.bin_file.tag) {
.coff => { .coff, .elf, .macho, .plan9 => {
// TODO Implement for COFF
},
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
.macho => if (decl.fn_link.macho.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
},
.plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state // TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change. // in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index }); comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
@ -5267,33 +5242,15 @@ pub fn clearDecl(
assert(emit_h.decl_table.swapRemove(decl_index)); assert(emit_h.decl_table.swapRemove(decl_index));
} }
_ = mod.compile_log_decls.swapRemove(decl_index); _ = mod.compile_log_decls.swapRemove(decl_index);
mod.deleteDeclExports(decl_index); try mod.deleteDeclExports(decl_index);
if (decl.has_tv) { if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) { if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index); mod.comp.bin_file.freeDecl(decl_index);
// TODO instead of a union, put this memory trailing Decl objects,
// and allow it to be variably sized.
decl.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
};
decl.fn_link = switch (mod.comp.bin_file.tag) { decl.fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} }, .wasm => link.File.Wasm.FnData.empty,
.elf => .{ .elf = link.File.Dwarf.SrcFn.empty }, else => null,
.macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
.nvptx => .{ .nvptx = {} },
}; };
} }
if (decl.getInnerNamespace()) |namespace| { if (decl.getInnerNamespace()) |namespace| {
@ -5315,23 +5272,6 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
const decl = mod.declPtr(decl_index); const decl = mod.declPtr(decl_index);
log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name }); log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name });
// TODO: remove `allocateDeclIndexes` and make the API that the linker backends
// are required to notice the first time `updateDecl` happens and keep track
// of it themselves. However they can rely on getting a `freeDecl` call if any
// `updateDecl` or `updateFunc` calls happen. This will allow us to avoid any call
// into the linker backend here, since the linker backend will never have been told
// about the Decl in the first place.
// Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we
// must call `freeDecl` in the linker backend now.
switch (mod.comp.bin_file.tag) {
.c => {}, // this linker backend has already migrated to the new API
else => if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index);
}
},
}
assert(!mod.declIsRoot(decl_index)); assert(!mod.declIsRoot(decl_index));
assert(decl.src_namespace.anon_decls.swapRemove(decl_index)); assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
@ -5377,7 +5317,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
/// Delete all the Export objects that are caused by this Decl. Re-analysis of /// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not). /// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void { fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value; var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
for (export_owners.items) |exp| { for (export_owners.items) |exp| {
@ -5400,16 +5340,16 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
} }
} }
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| { if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
elf.deleteExport(exp.link.elf); elf.deleteDeclExport(decl_index, exp.options.name);
} }
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| { if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
macho.deleteExport(exp.link.macho); try macho.deleteDeclExport(decl_index, exp.options.name);
} }
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| { if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
wasm.deleteExport(exp.link.wasm); wasm.deleteDeclExport(decl_index);
} }
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| { if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
coff.deleteExport(exp.link.coff); coff.deleteDeclExport(decl_index, exp.options.name);
} }
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| { if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa); failed_kv.value.destroy(mod.gpa);
@ -5712,25 +5652,9 @@ pub fn allocateNewDecl(
.deletion_flag = false, .deletion_flag = false,
.zir_decl_index = 0, .zir_decl_index = 0,
.src_scope = src_scope, .src_scope = src_scope,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
},
.fn_link = switch (mod.comp.bin_file.tag) { .fn_link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} }, .wasm => link.File.Wasm.FnData.empty,
.elf => .{ .elf = link.File.Dwarf.SrcFn.empty }, else => null,
.macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
.plan9 => .{ .plan9 = {} },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
.spirv => .{ .spirv = .{} },
.nvptx => .{ .nvptx = {} },
}, },
.generation = 0, .generation = 0,
.is_pub = false, .is_pub = false,
@ -5816,7 +5740,6 @@ pub fn initNewAnonDecl(
// the Decl will be garbage collected by the `codegen_decl` task instead of sent // the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker. // to the linker.
if (typed_value.ty.isFnOrHasRuntimeBits()) { if (typed_value.ty.isFnOrHasRuntimeBits()) {
try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index }); try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index });
} }
} }

View File

@ -1,12 +1,13 @@
const Package = @This(); const Package = @This();
const builtin = @import("builtin");
const std = @import("std"); const std = @import("std");
const fs = std.fs; const fs = std.fs;
const mem = std.mem; const mem = std.mem;
const Allocator = mem.Allocator; const Allocator = mem.Allocator;
const assert = std.debug.assert; const assert = std.debug.assert;
const Hash = std.crypto.hash.sha2.Sha256;
const log = std.log.scoped(.package); const log = std.log.scoped(.package);
const main = @import("main.zig");
const Compilation = @import("Compilation.zig"); const Compilation = @import("Compilation.zig");
const Module = @import("Module.zig"); const Module = @import("Module.zig");
@ -14,6 +15,7 @@ const ThreadPool = @import("ThreadPool.zig");
const WaitGroup = @import("WaitGroup.zig"); const WaitGroup = @import("WaitGroup.zig");
const Cache = @import("Cache.zig"); const Cache = @import("Cache.zig");
const build_options = @import("build_options"); const build_options = @import("build_options");
const Manifest = @import("Manifest.zig");
pub const Table = std.StringHashMapUnmanaged(*Package); pub const Table = std.StringHashMapUnmanaged(*Package);
@ -140,10 +142,10 @@ pub fn addAndAdopt(parent: *Package, gpa: Allocator, child: *Package) !void {
} }
pub const build_zig_basename = "build.zig"; pub const build_zig_basename = "build.zig";
pub const ini_basename = build_zig_basename ++ ".ini";
pub fn fetchAndAddDependencies( pub fn fetchAndAddDependencies(
pkg: *Package, pkg: *Package,
arena: Allocator,
thread_pool: *ThreadPool, thread_pool: *ThreadPool,
http_client: *std.http.Client, http_client: *std.http.Client,
directory: Compilation.Directory, directory: Compilation.Directory,
@ -152,89 +154,77 @@ pub fn fetchAndAddDependencies(
dependencies_source: *std.ArrayList(u8), dependencies_source: *std.ArrayList(u8),
build_roots_source: *std.ArrayList(u8), build_roots_source: *std.ArrayList(u8),
name_prefix: []const u8, name_prefix: []const u8,
color: main.Color,
) !void { ) !void {
const max_bytes = 10 * 1024 * 1024; const max_bytes = 10 * 1024 * 1024;
const gpa = thread_pool.allocator; const gpa = thread_pool.allocator;
const build_zig_ini = directory.handle.readFileAlloc(gpa, ini_basename, max_bytes) catch |err| switch (err) { const build_zig_zon_bytes = directory.handle.readFileAllocOptions(
arena,
Manifest.basename,
max_bytes,
null,
1,
0,
) catch |err| switch (err) {
error.FileNotFound => { error.FileNotFound => {
// Handle the same as no dependencies. // Handle the same as no dependencies.
return; return;
}, },
else => |e| return e, else => |e| return e,
}; };
defer gpa.free(build_zig_ini);
const ini: std.Ini = .{ .bytes = build_zig_ini }; var ast = try std.zig.Ast.parse(gpa, build_zig_zon_bytes, .zon);
defer ast.deinit(gpa);
if (ast.errors.len > 0) {
const file_path = try directory.join(arena, &.{Manifest.basename});
try main.printErrsMsgToStdErr(gpa, arena, ast, file_path, color);
return error.PackageFetchFailed;
}
var manifest = try Manifest.parse(gpa, ast);
defer manifest.deinit(gpa);
if (manifest.errors.len > 0) {
const ttyconf: std.debug.TTY.Config = switch (color) {
.auto => std.debug.detectTTYConfig(std.io.getStdErr()),
.on => .escape_codes,
.off => .no_color,
};
const file_path = try directory.join(arena, &.{Manifest.basename});
for (manifest.errors) |msg| {
Report.renderErrorMessage(ast, file_path, ttyconf, msg, &.{});
}
return error.PackageFetchFailed;
}
const report: Report = .{
.ast = &ast,
.directory = directory,
.color = color,
.arena = arena,
};
var any_error = false; var any_error = false;
var it = ini.iterateSection("\n[dependency]\n"); const deps_list = manifest.dependencies.values();
while (it.next()) |dep| { for (manifest.dependencies.keys()) |name, i| {
var line_it = mem.split(u8, dep, "\n"); const dep = deps_list[i];
var opt_name: ?[]const u8 = null;
var opt_url: ?[]const u8 = null;
var expected_hash: ?[]const u8 = null;
while (line_it.next()) |kv| {
const eq_pos = mem.indexOfScalar(u8, kv, '=') orelse continue;
const key = kv[0..eq_pos];
const value = kv[eq_pos + 1 ..];
if (mem.eql(u8, key, "name")) {
opt_name = value;
} else if (mem.eql(u8, key, "url")) {
opt_url = value;
} else if (mem.eql(u8, key, "hash")) {
expected_hash = value;
} else {
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(key.ptr) - @ptrToInt(ini.bytes.ptr));
std.log.warn("{s}/{s}:{d}:{d} unrecognized key: '{s}'", .{
directory.path orelse ".",
"build.zig.ini",
loc.line,
loc.column,
key,
});
}
}
const name = opt_name orelse { const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
directory.path orelse ".",
"build.zig.ini",
loc.line,
loc.column,
});
any_error = true;
continue;
};
const url = opt_url orelse {
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
directory.path orelse ".",
"build.zig.ini",
loc.line,
loc.column,
});
any_error = true;
continue;
};
const sub_prefix = try std.fmt.allocPrint(gpa, "{s}{s}.", .{ name_prefix, name });
defer gpa.free(sub_prefix);
const fqn = sub_prefix[0 .. sub_prefix.len - 1]; const fqn = sub_prefix[0 .. sub_prefix.len - 1];
const sub_pkg = try fetchAndUnpack( const sub_pkg = try fetchAndUnpack(
thread_pool, thread_pool,
http_client, http_client,
global_cache_directory, global_cache_directory,
url, dep,
expected_hash, report,
ini,
directory,
build_roots_source, build_roots_source,
fqn, fqn,
); );
try pkg.fetchAndAddDependencies( try pkg.fetchAndAddDependencies(
arena,
thread_pool, thread_pool,
http_client, http_client,
sub_pkg.root_src_directory, sub_pkg.root_src_directory,
@ -243,6 +233,7 @@ pub fn fetchAndAddDependencies(
dependencies_source, dependencies_source,
build_roots_source, build_roots_source,
sub_prefix, sub_prefix,
color,
); );
try addAndAdopt(pkg, gpa, sub_pkg); try addAndAdopt(pkg, gpa, sub_pkg);
@ -252,7 +243,7 @@ pub fn fetchAndAddDependencies(
}); });
} }
if (any_error) return error.InvalidBuildZigIniFile; if (any_error) return error.InvalidBuildManifestFile;
} }
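// Illustrative sketch only: the hunk above replaces the ad-hoc `build.zig.ini`
// parser with a ZON manifest read from `Manifest.basename`, parsed via
// `std.zig.Ast.parse(gpa, bytes, .zon)` and `Manifest.parse`. Going only by
// what this diff shows (dependency names are the map keys, each entry carries
// `.url` and `.hash`, and the missing-hash note prints `expected .hash = "..."`),
// a dependency section of such a manifest might look like the literal below;
// any other top-level manifest fields are omitted because they are not visible
// in this diff.
//
// .{
//     .dependencies = .{
//         .example_dep = .{
//             .url = "https://example.com/example_dep.tar.gz",
//             .hash = "...", // hex-encoded digest, 2 * Manifest.multihash_len characters
//         },
//     },
// }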
pub fn createFilePkg( pub fn createFilePkg(
@ -263,7 +254,7 @@ pub fn createFilePkg(
contents: []const u8, contents: []const u8,
) !*Package { ) !*Package {
const rand_int = std.crypto.random.int(u64); const rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ hex64(rand_int); const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ Manifest.hex64(rand_int);
{ {
var tmp_dir = try cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{}); var tmp_dir = try cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
defer tmp_dir.close(); defer tmp_dir.close();
@ -281,14 +272,73 @@ pub fn createFilePkg(
return createWithDir(gpa, name, cache_directory, o_dir_sub_path, basename); return createWithDir(gpa, name, cache_directory, o_dir_sub_path, basename);
} }
const Report = struct {
ast: *const std.zig.Ast,
directory: Compilation.Directory,
color: main.Color,
arena: Allocator,
fn fail(
report: Report,
tok: std.zig.Ast.TokenIndex,
comptime fmt_string: []const u8,
fmt_args: anytype,
) error{ PackageFetchFailed, OutOfMemory } {
return failWithNotes(report, &.{}, tok, fmt_string, fmt_args);
}
fn failWithNotes(
report: Report,
notes: []const Compilation.AllErrors.Message,
tok: std.zig.Ast.TokenIndex,
comptime fmt_string: []const u8,
fmt_args: anytype,
) error{ PackageFetchFailed, OutOfMemory } {
const ttyconf: std.debug.TTY.Config = switch (report.color) {
.auto => std.debug.detectTTYConfig(std.io.getStdErr()),
.on => .escape_codes,
.off => .no_color,
};
const file_path = try report.directory.join(report.arena, &.{Manifest.basename});
renderErrorMessage(report.ast.*, file_path, ttyconf, .{
.tok = tok,
.off = 0,
.msg = try std.fmt.allocPrint(report.arena, fmt_string, fmt_args),
}, notes);
return error.PackageFetchFailed;
}
fn renderErrorMessage(
ast: std.zig.Ast,
file_path: []const u8,
ttyconf: std.debug.TTY.Config,
msg: Manifest.ErrorMessage,
notes: []const Compilation.AllErrors.Message,
) void {
const token_starts = ast.tokens.items(.start);
const start_loc = ast.tokenLocation(0, msg.tok);
Compilation.AllErrors.Message.renderToStdErr(.{ .src = .{
.msg = msg.msg,
.src_path = file_path,
.line = @intCast(u32, start_loc.line),
.column = @intCast(u32, start_loc.column),
.span = .{
.start = token_starts[msg.tok],
.end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
.main = token_starts[msg.tok] + msg.off,
},
.source_line = ast.source[start_loc.line_start..start_loc.line_end],
.notes = notes,
} }, ttyconf);
}
};
fn fetchAndUnpack( fn fetchAndUnpack(
thread_pool: *ThreadPool, thread_pool: *ThreadPool,
http_client: *std.http.Client, http_client: *std.http.Client,
global_cache_directory: Compilation.Directory, global_cache_directory: Compilation.Directory,
url: []const u8, dep: Manifest.Dependency,
expected_hash: ?[]const u8, report: Report,
ini: std.Ini,
comp_directory: Compilation.Directory,
build_roots_source: *std.ArrayList(u8), build_roots_source: *std.ArrayList(u8),
fqn: []const u8, fqn: []const u8,
) !*Package { ) !*Package {
@ -297,17 +347,9 @@ fn fetchAndUnpack(
// Check if the expected_hash is already present in the global package // Check if the expected_hash is already present in the global package
// cache, and thereby avoid both fetching and unpacking. // cache, and thereby avoid both fetching and unpacking.
if (expected_hash) |h| cached: { if (dep.hash) |h| cached: {
if (h.len != 2 * Hash.digest_length) { const hex_multihash_len = 2 * Manifest.multihash_len;
return reportError( const hex_digest = h[0..hex_multihash_len];
ini,
comp_directory,
h.ptr,
"wrong hash size. expected: {d}, found: {d}",
.{ Hash.digest_length, h.len },
);
}
const hex_digest = h[0 .. 2 * Hash.digest_length];
const pkg_dir_sub_path = "p" ++ s ++ hex_digest; const pkg_dir_sub_path = "p" ++ s ++ hex_digest;
var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) { var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :cached, error.FileNotFound => break :cached,
@ -344,10 +386,10 @@ fn fetchAndUnpack(
return ptr; return ptr;
} }
const uri = try std.Uri.parse(url); const uri = try std.Uri.parse(dep.url);
const rand_int = std.crypto.random.int(u64); const rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ s ++ hex64(rand_int); const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
const actual_hash = a: { const actual_hash = a: {
var tmp_directory: Compilation.Directory = d: { var tmp_directory: Compilation.Directory = d: {
@ -376,13 +418,9 @@ fn fetchAndUnpack(
// by default, so the same logic applies for buffering the reader as for gzip. // by default, so the same logic applies for buffering the reader as for gzip.
try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz); try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz);
} else { } else {
return reportError( return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{
ini, uri.path,
comp_directory, });
uri.path.ptr,
"unknown file extension for path '{s}'",
.{uri.path},
);
} }
// TODO: delete files not included in the package prior to computing the package hash. // TODO: delete files not included in the package prior to computing the package hash.
@ -393,28 +431,21 @@ fn fetchAndUnpack(
break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle }); break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle });
}; };
const pkg_dir_sub_path = "p" ++ s ++ hexDigest(actual_hash); const pkg_dir_sub_path = "p" ++ s ++ Manifest.hexDigest(actual_hash);
try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path); try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path);
if (expected_hash) |h| { const actual_hex = Manifest.hexDigest(actual_hash);
const actual_hex = hexDigest(actual_hash); if (dep.hash) |h| {
if (!mem.eql(u8, h, &actual_hex)) { if (!mem.eql(u8, h, &actual_hex)) {
return reportError( return report.fail(dep.hash_tok, "hash mismatch: expected: {s}, found: {s}", .{
ini, h, actual_hex,
comp_directory, });
h.ptr,
"hash mismatch: expected: {s}, found: {s}",
.{ h, actual_hex },
);
} }
} else { } else {
return reportError( const notes: [1]Compilation.AllErrors.Message = .{.{ .plain = .{
ini, .msg = try std.fmt.allocPrint(report.arena, "expected .hash = \"{s}\",", .{&actual_hex}),
comp_directory, } }};
url.ptr, return report.failWithNotes(&notes, dep.url_tok, "url field is missing corresponding hash field", .{});
"url field is missing corresponding hash field: hash={s}",
.{std.fmt.fmtSliceHexLower(&actual_hash)},
);
} }
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path}); const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
@ -440,35 +471,21 @@ fn unpackTarball(
try std.tar.pipeToFileSystem(out_dir, decompress.reader(), .{ try std.tar.pipeToFileSystem(out_dir, decompress.reader(), .{
.strip_components = 1, .strip_components = 1,
// TODO: we would like to set this to executable_bit_only, but two
// things need to happen before that:
// 1. the tar implementation needs to support it
// 2. the hashing algorithm here needs to support detecting the is_executable
// bit on Windows from the ACLs (see the isExecutable function).
.mode_mode = .ignore,
}); });
} }
fn reportError(
ini: std.Ini,
comp_directory: Compilation.Directory,
src_ptr: [*]const u8,
comptime fmt_string: []const u8,
fmt_args: anytype,
) error{PackageFetchFailed} {
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(src_ptr) - @ptrToInt(ini.bytes.ptr));
if (comp_directory.path) |p| {
std.debug.print("{s}{c}{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
p, fs.path.sep, ini_basename, loc.line + 1, loc.column + 1,
} ++ fmt_args);
} else {
std.debug.print("{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
ini_basename, loc.line + 1, loc.column + 1,
} ++ fmt_args);
}
return error.PackageFetchFailed;
}
const HashedFile = struct { const HashedFile = struct {
path: []const u8, path: []const u8,
hash: [Hash.digest_length]u8, hash: [Manifest.Hash.digest_length]u8,
failure: Error!void, failure: Error!void,
const Error = fs.File.OpenError || fs.File.ReadError; const Error = fs.File.OpenError || fs.File.ReadError || fs.File.StatError;
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool { fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
_ = context; _ = context;
@ -479,7 +496,7 @@ const HashedFile = struct {
fn computePackageHash( fn computePackageHash(
thread_pool: *ThreadPool, thread_pool: *ThreadPool,
pkg_dir: fs.IterableDir, pkg_dir: fs.IterableDir,
) ![Hash.digest_length]u8 { ) ![Manifest.Hash.digest_length]u8 {
const gpa = thread_pool.allocator; const gpa = thread_pool.allocator;
// We'll use an arena allocator for the path name strings since they all // We'll use an arena allocator for the path name strings since they all
@ -522,7 +539,7 @@ fn computePackageHash(
std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan); std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
var hasher = Hash.init(.{}); var hasher = Manifest.Hash.init(.{});
var any_failures = false; var any_failures = false;
for (all_files.items) |hashed_file| { for (all_files.items) |hashed_file| {
hashed_file.failure catch |err| { hashed_file.failure catch |err| {
@ -543,7 +560,9 @@ fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void { fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined; var buf: [8000]u8 = undefined;
var file = try dir.openFile(hashed_file.path, .{}); var file = try dir.openFile(hashed_file.path, .{});
var hasher = Hash.init(.{}); var hasher = Manifest.Hash.init(.{});
hasher.update(hashed_file.path);
hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
while (true) { while (true) {
const bytes_read = try file.read(&buf); const bytes_read = try file.read(&buf);
if (bytes_read == 0) break; if (bytes_read == 0) break;
@ -552,31 +571,17 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
hasher.final(&hashed_file.hash); hasher.final(&hashed_file.hash);
} }
const hex_charset = "0123456789abcdef"; fn isExecutable(file: fs.File) !bool {
if (builtin.os.tag == .windows) {
fn hex64(x: u64) [16]u8 { // TODO check the ACL on Windows.
var result: [16]u8 = undefined; // Until this is implemented, this could be a false negative on
var i: usize = 0; // Windows, which is why we do not yet set executable_bit_only above
while (i < 8) : (i += 1) { // when unpacking the tarball.
const byte = @truncate(u8, x >> @intCast(u6, 8 * i)); return false;
result[i * 2 + 0] = hex_charset[byte >> 4]; } else {
result[i * 2 + 1] = hex_charset[byte & 15]; const stat = try file.stat();
return (stat.mode & std.os.S.IXUSR) != 0;
} }
return result;
}
test hex64 {
const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
try std.testing.expectEqualStrings("[00efcdab78563412]", s);
}
fn hexDigest(digest: [Hash.digest_length]u8) [Hash.digest_length * 2]u8 {
var result: [Hash.digest_length * 2]u8 = undefined;
for (digest) |byte, i| {
result[i * 2 + 0] = hex_charset[byte >> 4];
result[i * 2 + 1] = hex_charset[byte & 15];
}
return result;
} }
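// A minimal sketch of the per-file hashing scheme introduced above; this is not
// the compiler's helper, and `hashOneFile` is a name made up for illustration.
// The file path and an executable flag are folded into the digest ahead of the
// file contents, so a rename or a mode change alters the resulting package hash.
fn hashOneFile(dir: fs.Dir, path: []const u8, out: *[Manifest.Hash.digest_length]u8) !void {
    var file = try dir.openFile(path, .{});
    defer file.close();
    var hasher = Manifest.Hash.init(.{});
    // Mix in the path, then a separator byte and the executable bit.
    hasher.update(path);
    hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
    // Finally, stream the file contents through the hasher.
    var buf: [8000]u8 = undefined;
    while (true) {
        const n = try file.read(&buf);
        if (n == 0) break;
        hasher.update(buf[0..n]);
    }
    hasher.final(out);
}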
fn renameTmpIntoCache( fn renameTmpIntoCache(

View File

@ -1015,6 +1015,7 @@ fn analyzeBodyInner(
.float_cast => try sema.zirFloatCast(block, inst), .float_cast => try sema.zirFloatCast(block, inst),
.int_cast => try sema.zirIntCast(block, inst), .int_cast => try sema.zirIntCast(block, inst),
.ptr_cast => try sema.zirPtrCast(block, inst), .ptr_cast => try sema.zirPtrCast(block, inst),
.qual_cast => try sema.zirQualCast(block, inst),
.truncate => try sema.zirTruncate(block, inst), .truncate => try sema.zirTruncate(block, inst),
.align_cast => try sema.zirAlignCast(block, inst), .align_cast => try sema.zirAlignCast(block, inst),
.has_decl => try sema.zirHasDecl(block, inst), .has_decl => try sema.zirHasDecl(block, inst),
@ -3294,7 +3295,7 @@ fn ensureResultUsed(
const msg = msg: { const msg = msg: {
const msg = try sema.errMsg(block, src, "error is ignored", .{}); const msg = try sema.errMsg(block, src, "error is ignored", .{});
errdefer msg.destroy(sema.gpa); errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{}); try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg; break :msg msg;
}; };
return sema.failWithOwnedErrorMsg(msg); return sema.failWithOwnedErrorMsg(msg);
@ -3325,7 +3326,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const msg = msg: { const msg = msg: {
const msg = try sema.errMsg(block, src, "error is discarded", .{}); const msg = try sema.errMsg(block, src, "error is discarded", .{});
errdefer msg.destroy(sema.gpa); errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{}); try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg; break :msg msg;
}; };
return sema.failWithOwnedErrorMsg(msg); return sema.failWithOwnedErrorMsg(msg);
@ -5564,16 +5565,6 @@ pub fn analyzeExport(
.visibility = borrowed_options.visibility, .visibility = borrowed_options.visibility,
}, },
.src = src, .src = src,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = .{} },
.elf => .{ .elf = .{} },
.macho => .{ .macho = .{} },
.plan9 => .{ .plan9 = null },
.c => .{ .c = {} },
.wasm => .{ .wasm = .{} },
.spirv => .{ .spirv = {} },
.nvptx => .{ .nvptx = {} },
},
.owner_decl = sema.owner_decl_index, .owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl, .src_decl = block.src_decl,
.exported_decl = exported_decl_index, .exported_decl = exported_decl_index,
@ -6446,7 +6437,12 @@ fn analyzeCall(
.extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{ .extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"), @as([]const u8, if (is_comptime_call) "comptime" else "inline"),
}), }),
else => unreachable, else => {
assert(callee_ty.isPtrAtRuntime());
return sema.fail(block, call_src, "{s} call of function pointer", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
});
},
}; };
if (func_ty_info.is_var_args) { if (func_ty_info.is_var_args) {
return sema.fail(block, call_src, "{s} call of variadic function", .{ return sema.fail(block, call_src, "{s} call of variadic function", .{
@ -6879,6 +6875,8 @@ fn analyzeInlineCallArg(
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err); if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err; return err;
}; };
} else if (!is_comptime_call and zir_tags[inst] == .param_comptime) {
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
} }
const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{ const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{
.func_inst = func_inst, .func_inst = func_inst,
@ -6952,6 +6950,9 @@ fn analyzeInlineCallArg(
.val = arg_val, .val = arg_val,
}; };
} else { } else {
if (zir_tags[inst] == .param_anytype_comptime) {
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
}
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg); sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
} }
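// Illustrative only; `pick` and the calls described are invented for the example.
// With the two checks added above, an inline (or `.always_inline`) call must
// still supply a comptime-known argument for a `comptime` parameter (and for an
// `anytype` parameter marked comptime); a runtime-only value now fails to
// resolve, with "parameter is comptime" given as the reason.
fn pick(comptime use_first: bool, a: u32, b: u32) u32 {
    return if (use_first) a else b;
}
// An inline call with a comptime-known bool for `use_first` is fine; passing a
// runtime bool is rejected even though the call is inline.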
@ -7510,7 +7511,6 @@ fn resolveGenericInstantiationType(
// Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field // Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
// will be populated, ensuring it will have `analyzeBody` called with the ZIR // will be populated, ensuring it will have `analyzeBody` called with the ZIR
// parameters mapped appropriately. // parameters mapped appropriately.
try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func }); try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
return new_func; return new_func;
} }
@ -8473,7 +8473,7 @@ fn handleExternLibName(
return sema.fail( return sema.fail(
block, block,
src_loc, src_loc,
"dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.", "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by '-l{s}' or '-fPIC'.",
.{ lib_name, lib_name }, .{ lib_name, lib_name },
); );
} }
@ -9010,7 +9010,18 @@ fn zirParam(
if (is_comptime and sema.preallocated_new_func != null) { if (is_comptime and sema.preallocated_new_func != null) {
// We have a comptime value for this parameter so it should be elided from the // We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block. // function type of the function instruction in this block.
const coerced_arg = try sema.coerce(block, param_ty, arg, src); const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
error.NeededSourceLocation => {
// We are instantiating a generic function and a comptime arg
// cannot be coerced to the param type, but since we don't
// have the callee source location, return `GenericPoison`
// so that the instantiation fails and the coercion
// is handled by comptime call logic instead.
assert(sema.is_generic_instantiation);
return error.GenericPoison;
},
else => return err,
};
sema.inst_map.putAssumeCapacity(inst, coerced_arg); sema.inst_map.putAssumeCapacity(inst, coerced_arg);
return; return;
} }
@ -19525,13 +19536,34 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_info = operand_ty.ptrInfo().data; const operand_info = operand_ty.ptrInfo().data;
const dest_info = dest_ty.ptrInfo().data; const dest_info = dest_ty.ptrInfo().data;
if (!operand_info.mutable and dest_info.mutable) { if (!operand_info.mutable and dest_info.mutable) {
return sema.fail(block, src, "cast discards const qualifier", .{}); const msg = msg: {
const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} }
if (operand_info.@"volatile" and !dest_info.@"volatile") { if (operand_info.@"volatile" and !dest_info.@"volatile") {
return sema.fail(block, src, "cast discards volatile qualifier", .{}); const msg = msg: {
const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} }
if (operand_info.@"addrspace" != dest_info.@"addrspace") { if (operand_info.@"addrspace" != dest_info.@"addrspace") {
return sema.fail(block, src, "cast changes pointer address space", .{}); const msg = msg: {
const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using '@addrSpaceCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
} }
const dest_is_slice = dest_ty.isSlice(); const dest_is_slice = dest_ty.isSlice();
@ -19586,6 +19618,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{ try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{
dest_ty.fmt(sema.mod), dest_align, dest_ty.fmt(sema.mod), dest_align,
}); });
try sema.errNote(block, src, msg, "consider using '@alignCast'", .{});
break :msg msg; break :msg msg;
}; };
return sema.failWithOwnedErrorMsg(msg); return sema.failWithOwnedErrorMsg(msg);
@ -19621,6 +19655,49 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return block.addBitCast(aligned_dest_ty, ptr); return block.addBitCast(aligned_dest_ty, ptr);
} }
fn zirQualCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
try sema.checkPtrType(block, dest_ty_src, dest_ty);
try sema.checkPtrOperand(block, operand_src, operand_ty);
var operand_payload = operand_ty.ptrInfo();
var dest_info = dest_ty.ptrInfo();
operand_payload.data.mutable = dest_info.data.mutable;
operand_payload.data.@"volatile" = dest_info.data.@"volatile";
const altered_operand_ty = Type.initPayload(&operand_payload.base);
if (!altered_operand_ty.eql(dest_ty, sema.mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "'@qualCast' can only modify 'const' and 'volatile' qualifiers", .{});
errdefer msg.destroy(sema.gpa);
dest_info.data.mutable = !operand_ty.isConstPtr();
dest_info.data.@"volatile" = operand_ty.isVolatilePtr();
const altered_dest_ty = Type.initPayload(&dest_info.base);
try sema.errNote(block, src, msg, "expected type '{}'", .{altered_dest_ty.fmt(sema.mod)});
try sema.errNote(block, src, msg, "got type '{}'", .{operand_ty.fmt(sema.mod)});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
return sema.addConstant(dest_ty, operand_val);
}
try sema.requireRuntimeBlock(block, src, null);
return block.addBitCast(dest_ty, operand);
}
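// Usage sketch for the builtin handled above. Per the Zir docs later in this
// diff, `@qualCast` takes the destination type as `lhs` and the operand as
// `rhs`, and may change only the 'const' and 'volatile' qualifiers of a pointer
// (this assumes the AstGen side of the commit wires the builtin up the same way):
fn stripConst(p: *const u8) *u8 {
    return @qualCast(*u8, p);
}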
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src(); const src = inst_data.src();
@ -25137,7 +25214,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) (try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{ {
try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{}); try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using `try`, `catch`, or `if`", .{}); try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
} }
// ?T to T // ?T to T
@ -25146,7 +25223,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok) (try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{ {
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{}); try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using `.?`, `orelse`, or `if`", .{}); try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
} }
try in_memory_result.report(sema, block, inst_src, msg); try in_memory_result.report(sema, block, inst_src, msg);
@ -26072,7 +26149,7 @@ fn coerceVarArgParam(
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}), .Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
.Float => float: { .Float => float: {
const target = sema.mod.getTarget(); const target = sema.mod.getTarget();
const double_bits = @import("type.zig").CType.sizeInBits(.double, target); const double_bits = target.c_type_bit_size(.double);
const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget()); const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
if (inst_bits >= double_bits) break :float inst; if (inst_bits >= double_bits) break :float inst;
switch (double_bits) { switch (double_bits) {

View File

@ -176,7 +176,9 @@ pub fn print(
var i: u32 = 0; var i: u32 = 0;
while (i < max_len) : (i += 1) { while (i < max_len) : (i += 1) {
buf[i] = std.math.cast(u8, val.fieldValue(ty, i).toUnsignedInt(target)) orelse break :str; const elem = val.fieldValue(ty, i);
if (elem.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str;
} }
const truncated = if (len > max_string_len) " (truncated)" else ""; const truncated = if (len > max_string_len) " (truncated)" else "";
@ -390,6 +392,7 @@ pub fn print(
while (i < max_len) : (i += 1) { while (i < max_len) : (i += 1) {
var elem_buf: Value.ElemValueBuffer = undefined; var elem_buf: Value.ElemValueBuffer = undefined;
const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf); const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
if (elem_val.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str; buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
} }

View File

@ -857,6 +857,9 @@ pub const Inst = struct {
/// Implements the `@ptrCast` builtin. /// Implements the `@ptrCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
ptr_cast, ptr_cast,
/// Implements the `@qualCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
qual_cast,
/// Implements the `@truncate` builtin. /// Implements the `@truncate` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand. /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
truncate, truncate,
@ -1195,6 +1198,7 @@ pub const Inst = struct {
.float_cast, .float_cast,
.int_cast, .int_cast,
.ptr_cast, .ptr_cast,
.qual_cast,
.truncate, .truncate,
.align_cast, .align_cast,
.has_field, .has_field,
@ -1484,6 +1488,7 @@ pub const Inst = struct {
.float_cast, .float_cast,
.int_cast, .int_cast,
.ptr_cast, .ptr_cast,
.qual_cast,
.truncate, .truncate,
.align_cast, .align_cast,
.has_field, .has_field,
@ -1755,6 +1760,7 @@ pub const Inst = struct {
.float_cast = .pl_node, .float_cast = .pl_node,
.int_cast = .pl_node, .int_cast = .pl_node,
.ptr_cast = .pl_node, .ptr_cast = .pl_node,
.qual_cast = .pl_node,
.truncate = .pl_node, .truncate = .pl_node,
.align_cast = .pl_node, .align_cast = .pl_node,
.typeof_builtin = .pl_node, .typeof_builtin = .pl_node,

View File

@ -24,7 +24,7 @@ const log = std.log.scoped(.codegen);
const build_options = @import("build_options"); const build_options = @import("build_options");
const GenerateSymbolError = codegen.GenerateSymbolError; const GenerateSymbolError = codegen.GenerateSymbolError;
const FnResult = codegen.FnResult; const Result = codegen.Result;
const DebugInfoOutput = codegen.DebugInfoOutput; const DebugInfoOutput = codegen.DebugInfoOutput;
const bits = @import("bits.zig"); const bits = @import("bits.zig");
@ -181,6 +181,7 @@ const DbgInfoReloc = struct {
else => unreachable, else => unreachable,
} }
} }
fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void { fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
switch (function.debug_output) { switch (function.debug_output) {
.dwarf => |dw| { .dwarf => |dw| {
@ -202,13 +203,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument else => unreachable, // not a possible argument
}; };
try dw.genArgDbgInfo( try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
loc,
);
}, },
.plan9 => {}, .plan9 => {},
.none => {}, .none => {},
@ -254,14 +249,7 @@ const DbgInfoReloc = struct {
break :blk .nop; break :blk .nop;
}, },
}; };
try dw.genVarDbgInfo( try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
is_ptr,
loc,
);
}, },
.plan9 => {}, .plan9 => {},
.none => {}, .none => {},
@ -349,7 +337,7 @@ pub fn generate(
liveness: Liveness, liveness: Liveness,
code: *std.ArrayList(u8), code: *std.ArrayList(u8),
debug_output: DebugInfoOutput, debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult { ) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) { if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration"); @panic("Attempted to compile for architecture that was disabled by build configuration");
} }
@ -392,8 +380,8 @@ pub fn generate(
defer function.dbg_info_relocs.deinit(bin_file.allocator); defer function.dbg_info_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -406,8 +394,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count; function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) { function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -439,14 +427,14 @@ pub fn generate(
defer emit.deinit(); defer emit.deinit();
emit.emitMir() catch |err| switch (err) { emit.emitMir() catch |err| switch (err) {
error.EmitFail => return FnResult{ .fail = emit.err_msg.? }, error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e, else => |e| return e,
}; };
if (function.err_msg) |em| { if (function.err_msg) |em| {
return FnResult{ .fail = em }; return Result{ .fail = em };
} else { } else {
return FnResult{ .appended = {} }; return Result.ok;
} }
} }
@ -527,6 +515,28 @@ fn gen(self: *Self) !void {
self.ret_mcv = MCValue{ .stack_offset = stack_offset }; self.ret_mcv = MCValue{ .stack_offset = stack_offset };
} }
for (self.args) |*arg, arg_index| {
// Copy register arguments to the stack
switch (arg.*) {
.register => |reg| {
// The first AIR instructions of the main body are guaranteed
// to be the function's arguments
const inst = self.air.getMainBody()[arg_index];
assert(self.air.instructions.items(.tag)[inst] == .arg);
const ty = self.air.typeOfIndex(inst);
const abi_size = @intCast(u32, ty.abiSize(self.target.*));
const abi_align = ty.abiAlignment(self.target.*);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
arg.* = MCValue{ .stack_offset = stack_offset };
},
else => {},
}
}
_ = try self.addInst(.{ _ = try self.addInst(.{
.tag = .dbg_prologue_end, .tag = .dbg_prologue_end,
.data = .{ .nop = {} }, .data = .{ .nop = {} },
@ -3996,11 +4006,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.direct => .load_memory_ptr_direct, .direct => .load_memory_ptr_direct,
.import => unreachable, .import => unreachable,
}; };
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) { const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index, .macho => blk: {
.coff => owner_decl.link.coff.sym_index, const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format else => unreachable, // unsupported target format
}; };
_ = try self.addInst(.{ _ = try self.addInst(.{
@ -4163,45 +4179,19 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
self.arg_index += 1; self.arg_index += 1;
const ty = self.air.typeOfIndex(inst); const ty = self.air.typeOfIndex(inst);
const result = self.args[arg_index]; const tag = self.air.instructions.items(.tag)[inst];
const src_index = self.air.instructions.items(.data)[inst].arg.src_index; const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index); const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);
const mcv = switch (result) {
// Copy registers to the stack
.register => |reg| blk: {
const mod = self.bin_file.options.module.?;
const abi_size = math.cast(u32, ty.abiSize(self.target.*)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{ty.fmt(mod)});
};
const abi_align = ty.abiAlignment(self.target.*);
const stack_offset = try self.allocMem(abi_size, abi_align, inst);
try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
break :blk MCValue{ .stack_offset = stack_offset };
},
else => result,
};
const tag = self.air.instructions.items(.tag)[inst];
try self.dbg_info_relocs.append(self.gpa, .{ try self.dbg_info_relocs.append(self.gpa, .{
.tag = tag, .tag = tag,
.ty = ty, .ty = ty,
.name = name, .name = name,
.mcv = result, .mcv = self.args[arg_index],
}); });
if (self.liveness.isUnused(inst)) const result: MCValue = if (self.liveness.isUnused(inst)) .dead else self.args[arg_index];
return self.finishAirBookkeeping(); return self.finishAir(inst, result, .{ .none, .none, .none });
switch (mcv) {
.register => |reg| {
self.register_manager.getRegAssumeFree(reg, inst);
},
else => {},
}
return self.finishAir(inst, mcv, .{ .none, .none, .none });
} }
fn airBreakpoint(self: *Self) !void { fn airBreakpoint(self: *Self) !void {
@ -4302,39 +4292,43 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// on linking. // on linking.
const mod = self.bin_file.options.module.?; const mod = self.bin_file.options.module.?;
if (self.air.value(callee)) |func_value| { if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (func_value.castTag(.function)) |func_payload| { const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const func = func_payload.data; const atom = elf_file.getAtom(atom_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
};
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr }); try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| { } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
if (func_value.castTag(.function)) |func_payload| { const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
const func = func_payload.data; const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.u64), .x30, .{ try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{ .linker_load = .{
.type = .got, .type = .got,
.sym_index = fn_owner_decl.link.macho.sym_index, .sym_index = sym_index,
}, },
}); });
// blr x30 } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
.sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
const decl_block_index = try p9.seeDecl(func.owner_decl);
const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
} else unreachable;
_ = try self.addInst(.{ _ = try self.addInst(.{
.tag = .blr, .tag = .blr,
.data = .{ .reg = .x30 }, .data = .{ .reg = .x30 },
@ -4348,44 +4342,21 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
lib_name, lib_name,
}); });
} }
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{ _ = try self.addInst(.{
.tag = .call_extern, .tag = .call_extern,
.data = .{ .data = .{
.relocation = .{ .relocation = .{
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index, .atom_index = atom_index,
.sym_index = sym_index, .sym_index = sym_index,
}, },
}, },
}); });
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| { } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
.sym_index = fn_owner_decl.link.coff.sym_index,
},
});
// blr x30
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0)); const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
try self.genSetReg(Type.initTag(.u64), .x30, .{ try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{ .linker_load = .{
@ -4393,35 +4364,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index, .sym_index = sym_index,
}, },
}); });
// blr x30
_ = try self.addInst(.{ _ = try self.addInst(.{
.tag = .blr, .tag = .blr,
.data = .{ .reg = .x30 }, .data = .{ .reg = .x30 },
}); });
} else { } else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
if (func_value.castTag(.function)) |func_payload| {
try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{}); return self.fail("TODO implement calling extern functions", .{});
}
} else { } else {
return self.fail("TODO implement calling bitcasted functions", .{}); return self.fail("TODO implement calling bitcasted functions", .{});
} }
} else unreachable;
} else { } else {
assert(ty.zigTypeTag() == .Pointer); assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee); const mcv = try self.resolveInst(callee);
@ -5534,11 +5486,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.direct => .load_memory_ptr_direct, .direct => .load_memory_ptr_direct,
.import => unreachable, .import => unreachable,
}; };
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) { const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index, .macho => blk: {
.coff => owner_decl.link.coff.sym_index, const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format else => unreachable, // unsupported target format
}; };
_ = try self.addInst(.{ _ = try self.addInst(.{
@ -5648,11 +5606,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.direct => .load_memory_direct, .direct => .load_memory_direct,
.import => .load_memory_import, .import => .load_memory_import,
}; };
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) { const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index, .macho => blk: {
.coff => owner_decl.link.coff.sym_index, const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format else => unreachable, // unsupported target format
}; };
_ = try self.addInst(.{ _ = try self.addInst(.{
@ -5842,11 +5806,17 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.direct => .load_memory_ptr_direct, .direct => .load_memory_ptr_direct,
.import => unreachable, .import => unreachable,
}; };
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) { const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index, .macho => blk: {
.coff => owner_decl.link.coff.sym_index, const macho_file = self.bin_file.cast(link.File.MachO).?;
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
},
.coff => blk: {
const coff_file = self.bin_file.cast(link.File.Coff).?;
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
},
else => unreachable, // unsupported target format else => unreachable, // unsupported target format
}; };
_ = try self.addInst(.{ _ = try self.addInst(.{
@ -6165,28 +6135,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl); mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = got_addr }; return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| { } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
// Because MachO is PIE-always-on, we defer memory address resolution until const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
// the linker has enough info to perform relocations. const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
assert(decl.link.macho.sym_index != 0);
return MCValue{ .linker_load = .{ return MCValue{ .linker_load = .{
.type = .got, .type = .got,
.sym_index = decl.link.macho.sym_index, .sym_index = sym_index,
} }; } };
} else if (self.bin_file.cast(link.File.Coff)) |_| { } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
// Because COFF is PIE-always-on, we defer memory address resolution until const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
// the linker has enough info to perform relocations. const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
assert(decl.link.coff.sym_index != 0);
return MCValue{ .linker_load = .{ return MCValue{ .linker_load = .{
.type = .got, .type = .got,
.sym_index = decl.link.coff.sym_index, .sym_index = sym_index,
} }; } };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| { } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index); const decl_block_index = try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr }; return MCValue{ .memory = got_addr };
} else { } else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{}); return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@ -6199,8 +6168,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)}); return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
}; };
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value; return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
return MCValue{ .memory = vaddr };
} else if (self.bin_file.cast(link.File.MachO)) |_| { } else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{ return MCValue{ .linker_load = .{
.type = .direct, .type = .direct,

View File

@ -670,9 +670,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| { if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl. // Add relocation to the decl.
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?; const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index); const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(macho_file, .{ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26), .type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target, .target = target,
.offset = offset, .offset = offset,
@ -883,10 +883,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
} }
if (emit.bin_file.cast(link.File.MachO)) |macho_file| { if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?; const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1 // TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{ // try atom.addRelocations(macho_file, 2, .{
try atom.addRelocation(macho_file, .{ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null }, .target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset, .offset = offset,
.addend = 0, .addend = 0,
@ -902,7 +902,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable, else => unreachable,
}, },
}); });
try atom.addRelocation(macho_file, .{ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null }, .target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4, .offset = offset + 4,
.addend = 0, .addend = 0,
@ -919,7 +919,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}, },
}); });
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = coff_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?; const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const target = switch (tag) { const target = switch (tag) {
.load_memory_got, .load_memory_got,
.load_memory_ptr_got, .load_memory_ptr_got,
@ -929,7 +929,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
.load_memory_import => coff_file.getGlobalByIndex(data.sym_index), .load_memory_import => coff_file.getGlobalByIndex(data.sym_index),
else => unreachable, else => unreachable,
}; };
try atom.addRelocation(coff_file, .{ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target, .target = target,
.offset = offset, .offset = offset,
.addend = 0, .addend = 0,
@ -946,7 +946,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable, else => unreachable,
}, },
}); });
try atom.addRelocation(coff_file, .{ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target, .target = target,
.offset = offset + 4, .offset = offset + 4,
.addend = 0, .addend = 0,

View File

@ -23,7 +23,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen); const log = std.log.scoped(.codegen);
const build_options = @import("build_options"); const build_options = @import("build_options");
const FnResult = codegen.FnResult; const Result = codegen.Result;
const GenerateSymbolError = codegen.GenerateSymbolError; const GenerateSymbolError = codegen.GenerateSymbolError;
const DebugInfoOutput = codegen.DebugInfoOutput; const DebugInfoOutput = codegen.DebugInfoOutput;
@ -282,13 +282,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument else => unreachable, // not a possible argument
}; };
try dw.genArgDbgInfo( try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
loc,
);
}, },
.plan9 => {}, .plan9 => {},
.none => {}, .none => {},
@ -331,14 +325,7 @@ const DbgInfoReloc = struct {
break :blk .nop; break :blk .nop;
}, },
}; };
try dw.genVarDbgInfo( try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
reloc.name,
reloc.ty,
function.bin_file.tag,
function.mod_fn.owner_decl,
is_ptr,
loc,
);
}, },
.plan9 => {}, .plan9 => {},
.none => {}, .none => {},
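
Both Dwarf helpers drop the object-format tag argument in this change; callers now pass only the name, type, owning decl, and location, plus `is_ptr` for variables. The two new call shapes used throughout the backends below (fragments, with `dw`, `reloc`, `function`, `is_ptr`, and `loc` as in the hunks above):

try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);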
@ -356,7 +343,7 @@ pub fn generate(
liveness: Liveness, liveness: Liveness,
code: *std.ArrayList(u8), code: *std.ArrayList(u8),
debug_output: DebugInfoOutput, debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult { ) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) { if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration"); @panic("Attempted to compile for architecture that was disabled by build configuration");
} }
@ -399,8 +386,8 @@ pub fn generate(
defer function.dbg_info_relocs.deinit(bin_file.allocator); defer function.dbg_info_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -413,8 +400,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count; function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) { function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -446,14 +433,14 @@ pub fn generate(
defer emit.deinit(); defer emit.deinit();
emit.emitMir() catch |err| switch (err) { emit.emitMir() catch |err| switch (err) {
error.EmitFail => return FnResult{ .fail = emit.err_msg.? }, error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e, else => |e| return e,
}; };
if (function.err_msg) |em| { if (function.err_msg) |em| {
return FnResult{ .fail = em }; return Result{ .fail = em };
} else { } else {
return FnResult{ .appended = {} }; return Result.ok;
} }
} }
@ -4253,20 +4240,23 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends // Due to incremental compilation, how function calls are generated depends
// on linking. // on linking.
switch (self.bin_file.tag) {
.elf => {
if (self.air.value(callee)) |func_value| { if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| { if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data; const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const mod = self.bin_file.options.module.?; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const fn_owner_decl = mod.declPtr(func.owner_decl); const atom = elf_file.getAtom(atom_index);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else {
return self.fail("TODO implement call on {s} for {s}", .{
@tagName(self.bin_file.tag),
@tagName(self.target.cpu.arch),
});
}
} else if (func_value.castTag(.extern_fn)) |_| { } else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{}); return self.fail("TODO implement calling extern functions", .{});
} else { } else {
@ -4301,12 +4291,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// .data = .{ .reg = .lr }, // .data = .{ .reg = .lr },
// }); // });
} }
},
.macho => unreachable, // unsupported architecture for MachO
.coff => return self.fail("TODO implement call in COFF for {}", .{self.target.cpu.arch}),
.plan9 => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
else => unreachable,
}
const result: MCValue = result: { const result: MCValue = result: {
switch (info.return_value) { switch (info.return_value) {
@ -6086,16 +6070,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl); mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = got_addr }; return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| { } else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |_| { } else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{}); return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| { } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index); const decl_block_index = try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr }; return MCValue{ .memory = got_addr };
} else { } else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{}); return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@ -6109,8 +6094,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)}); return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
}; };
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value; return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
return MCValue{ .memory = vaddr };
} else if (self.bin_file.cast(link.File.MachO)) |_| { } else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| { } else if (self.bin_file.cast(link.File.Coff)) |_| {

View File

@ -22,7 +22,7 @@ const leb128 = std.leb;
const log = std.log.scoped(.codegen); const log = std.log.scoped(.codegen);
const build_options = @import("build_options"); const build_options = @import("build_options");
const FnResult = @import("../../codegen.zig").FnResult; const Result = @import("../../codegen.zig").Result;
const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
@ -225,7 +225,7 @@ pub fn generate(
liveness: Liveness, liveness: Liveness,
code: *std.ArrayList(u8), code: *std.ArrayList(u8),
debug_output: DebugInfoOutput, debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult { ) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) { if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration"); @panic("Attempted to compile for architecture that was disabled by build configuration");
} }
@ -268,8 +268,8 @@ pub fn generate(
defer function.exitlude_jump_relocs.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -282,8 +282,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count; function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) { function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -309,14 +309,14 @@ pub fn generate(
defer emit.deinit(); defer emit.deinit();
emit.emitMir() catch |err| switch (err) { emit.emitMir() catch |err| switch (err) {
error.EmitFail => return FnResult{ .fail = emit.err_msg.? }, error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e, else => |e| return e,
}; };
if (function.err_msg) |em| { if (function.err_msg) |em| {
return FnResult{ .fail = em }; return Result{ .fail = em };
} else { } else {
return FnResult{ .appended = {} }; return Result.ok;
} }
} }
@ -1615,13 +1615,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) { switch (self.debug_output) {
.dwarf => |dw| switch (mcv) { .dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo( .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
name, .register = reg.dwarfLocOp(),
ty, }),
self.bin_file.tag,
self.mod_fn.owner_decl,
.{ .register = reg.dwarfLocOp() },
),
.stack_offset => {}, .stack_offset => {},
else => {}, else => {},
}, },
@ -1721,16 +1717,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| { if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| { if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data; const func = func_payload.data;
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const atom = elf_file.getAtom(atom_index);
const ptr_bytes: u64 = @divExact(ptr_bits, 8); const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
};
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr }); try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{ _ = try self.addInst(.{
.tag = .jalr, .tag = .jalr,
@ -2557,18 +2546,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const decl = mod.declPtr(decl_index); const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl); mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = got_addr }; return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| { } else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing unreachable;
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |_| { } else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{}); return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| { } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index); const decl_block_index = try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr }; return MCValue{ .memory = got_addr };
} else { } else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{}); return self.fail("TODO codegen non-ELF const Decl pointer", .{});

View File

@ -20,7 +20,7 @@ const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig"); const Liveness = @import("../../Liveness.zig");
const Type = @import("../../type.zig").Type; const Type = @import("../../type.zig").Type;
const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
const FnResult = @import("../../codegen.zig").FnResult; const Result = @import("../../codegen.zig").Result;
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const build_options = @import("build_options"); const build_options = @import("build_options");
@ -265,7 +265,7 @@ pub fn generate(
liveness: Liveness, liveness: Liveness,
code: *std.ArrayList(u8), code: *std.ArrayList(u8),
debug_output: DebugInfoOutput, debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult { ) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) { if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration"); @panic("Attempted to compile for architecture that was disabled by build configuration");
} }
@ -310,8 +310,8 @@ pub fn generate(
defer function.exitlude_jump_relocs.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) { var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -324,8 +324,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count; function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) { function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -351,14 +351,14 @@ pub fn generate(
defer emit.deinit(); defer emit.deinit();
emit.emitMir() catch |err| switch (err) { emit.emitMir() catch |err| switch (err) {
error.EmitFail => return FnResult{ .fail = emit.err_msg.? }, error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e, else => |e| return e,
}; };
if (function.err_msg) |em| { if (function.err_msg) |em| {
return FnResult{ .fail = em }; return Result{ .fail = em };
} else { } else {
return FnResult{ .appended = {} }; return Result.ok;
} }
} }
@ -1216,12 +1216,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.tag == link.File.Elf.base_tag) { if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| { if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data; const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const mod = self.bin_file.options.module.?; const atom = elf_file.getAtom(atom_index);
break :blk @intCast(u32, got.p_vaddr + mod.declPtr(func.owner_decl).link.elf.offset_table_index * ptr_bytes); break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
} else unreachable; } else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@ -3414,13 +3412,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) { switch (self.debug_output) {
.dwarf => |dw| switch (mcv) { .dwarf => |dw| switch (mcv) {
.register => |reg| try dw.genArgDbgInfo( .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
name, .register = reg.dwarfLocOp(),
ty, }),
self.bin_file.tag,
self.mod_fn.owner_decl,
.{ .register = reg.dwarfLocOp() },
),
else => {}, else => {},
}, },
else => {}, else => {},
@ -4193,9 +4187,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
} }
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue { fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (tv.ty.zigTypeTag() == .Pointer) blk: { if (tv.ty.zigTypeTag() == .Pointer) blk: {
if (tv.ty.castPtrToFn()) |_| break :blk; if (tv.ty.castPtrToFn()) |_| break :blk;
@ -4209,9 +4200,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl); mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = got_addr }; return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else { } else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{}); return self.fail("TODO codegen non-ELF const Decl pointer", .{});
} }

View File

@ -627,13 +627,6 @@ test "Wasm - buildOpcode" {
try testing.expectEqual(@as(wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64); try testing.expectEqual(@as(wasm.Opcode, .f64_reinterpret_i64), f64_reinterpret_i64);
} }
pub const Result = union(enum) {
/// The codegen bytes have been appended to `Context.code`
appended: void,
/// The data is managed externally and are part of the `Result`
externally_managed: []const u8,
};
/// Hashmap to store generated `WValue` for each `Air.Inst.Ref` /// Hashmap to store generated `WValue` for each `Air.Inst.Ref`
pub const ValueTable = std.AutoArrayHashMapUnmanaged(Air.Inst.Ref, WValue); pub const ValueTable = std.AutoArrayHashMapUnmanaged(Air.Inst.Ref, WValue);
@ -1171,7 +1164,7 @@ pub fn generate(
liveness: Liveness, liveness: Liveness,
code: *std.ArrayList(u8), code: *std.ArrayList(u8),
debug_output: codegen.DebugInfoOutput, debug_output: codegen.DebugInfoOutput,
) codegen.GenerateSymbolError!codegen.FnResult { ) codegen.GenerateSymbolError!codegen.Result {
_ = src_loc; _ = src_loc;
var code_gen: CodeGen = .{ var code_gen: CodeGen = .{
.gpa = bin_file.allocator, .gpa = bin_file.allocator,
@ -1190,18 +1183,18 @@ pub fn generate(
defer code_gen.deinit(); defer code_gen.deinit();
genFunc(&code_gen) catch |err| switch (err) { genFunc(&code_gen) catch |err| switch (err) {
error.CodegenFail => return codegen.FnResult{ .fail = code_gen.err_msg }, error.CodegenFail => return codegen.Result{ .fail = code_gen.err_msg },
else => |e| return e, else => |e| return e,
}; };
return codegen.FnResult{ .appended = {} }; return codegen.Result.ok;
} }
fn genFunc(func: *CodeGen) InnerError!void { fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo(); const fn_info = func.decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target); var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
defer func_type.deinit(func.gpa); defer func_type.deinit(func.gpa);
func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type); func.decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
var cc_result = try func.resolveCallingConventionValues(func.decl.ty); var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
defer cc_result.deinit(func.gpa); defer cc_result.deinit(func.gpa);
@ -1276,10 +1269,10 @@ fn genFunc(func: *CodeGen) InnerError!void {
var emit: Emit = .{ var emit: Emit = .{
.mir = mir, .mir = mir,
.bin_file = &func.bin_file.base, .bin_file = func.bin_file,
.code = func.code, .code = func.code,
.locals = func.locals.items, .locals = func.locals.items,
.decl = func.decl, .decl_index = func.decl_index,
.dbg_output = func.debug_output, .dbg_output = func.debug_output,
.prev_di_line = 0, .prev_di_line = 0,
.prev_di_column = 0, .prev_di_column = 0,
@ -1713,9 +1706,11 @@ fn isByRef(ty: Type, target: std.Target) bool {
return true; return true;
}, },
.Optional => { .Optional => {
if (ty.optionalReprIsPayload()) return false; if (ty.isPtrLikeOptional()) return false;
var buf: Type.Payload.ElemType = undefined; var buf: Type.Payload.ElemType = undefined;
return ty.optionalChild(&buf).hasRuntimeBitsIgnoreComptime(); const pl_type = ty.optionalChild(&buf);
if (pl_type.zigTypeTag() == .ErrorSet) return false;
return pl_type.hasRuntimeBitsIgnoreComptime();
}, },
.Pointer => { .Pointer => {
// Slices act like struct and will be passed by reference // Slices act like struct and will be passed by reference
@ -2122,27 +2117,31 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const fn_info = fn_ty.fnInfo(); const fn_info = fn_ty.fnInfo();
const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target); const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
const callee: ?*Decl = blk: { const callee: ?Decl.Index = blk: {
const func_val = func.air.value(pl_op.operand) orelse break :blk null; const func_val = func.air.value(pl_op.operand) orelse break :blk null;
const module = func.bin_file.base.options.module.?; const module = func.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |function| { if (func_val.castTag(.function)) |function| {
break :blk module.declPtr(function.data.owner_decl); _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| { } else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = module.declPtr(extern_fn.data.owner_decl); const ext_decl = module.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo(); const ext_info = ext_decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target); var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
defer func_type.deinit(func.gpa); defer func_type.deinit(func.gpa);
ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type); const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
const atom = func.bin_file.getAtomPtr(atom_index);
ext_decl.fn_link.?.type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport( try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0), mem.sliceTo(ext_decl.name, 0),
ext_decl.link.wasm.sym_index, atom.getSymbolIndex().?,
ext_decl.getExternFn().?.lib_name, ext_decl.getExternFn().?.lib_name,
ext_decl.fn_link.wasm.type_index, ext_decl.fn_link.?.type_index,
); );
break :blk ext_decl; break :blk extern_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| { } else if (func_val.castTag(.decl_ref)) |decl_ref| {
break :blk module.declPtr(decl_ref.data); _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
break :blk decl_ref.data;
} }
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()}); return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
}; };
@ -2163,7 +2162,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
} }
if (callee) |direct| { if (callee) |direct| {
try func.addLabel(.call, direct.link.wasm.sym_index); const atom_index = func.bin_file.decls.get(direct).?;
try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);
} else { } else {
// in this case we call a function pointer // in this case we call a function pointer
// so load its value onto the stack // so load its value onto the stack
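
Because the callee is now tracked as a `Decl.Index`, the Wasm backend creates the callee's atom while resolving `callee` and then maps the decl back to that atom when the call is emitted. The two halves of the new shape (fragments, not standalone; `function` and `direct` as in the block above):

_ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
// ... later, at emission time:
const atom_index = func.bin_file.decls.get(direct).?;
try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);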
@ -2476,7 +2476,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.dwarf => |dwarf| { .dwarf => |dwarf| {
const src_index = func.air.instructions.items(.data)[inst].arg.src_index; const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index); const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{ try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
.wasm_local = arg.local.value, .wasm_local = arg.local.value,
}); });
}, },
@ -2759,8 +2759,10 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
} }
module.markDeclAlive(decl); module.markDeclAlive(decl);
const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
const atom = func.bin_file.getAtom(atom_index);
const target_sym_index = decl.link.wasm.sym_index; const target_sym_index = atom.sym_index;
if (decl.ty.zigTypeTag() == .Fn) { if (decl.ty.zigTypeTag() == .Fn) {
try func.bin_file.addTableFunction(target_sym_index); try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index }; return WValue{ .function_index = target_sym_index };
@ -3869,14 +3871,20 @@ fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind:
/// NOTE: Leaves the result on the stack /// NOTE: Leaves the result on the stack
fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
try func.emitWValue(operand); try func.emitWValue(operand);
if (!optional_ty.optionalReprIsPayload()) {
var buf: Type.Payload.ElemType = undefined; var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf); const payload_ty = optional_ty.optionalChild(&buf);
if (!optional_ty.optionalReprIsPayload()) {
// When payload is zero-bits, we can treat operand as a value, rather than // When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value // a pointer to the stack value
if (payload_ty.hasRuntimeBitsIgnoreComptime()) { if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 }); try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
} }
} else if (payload_ty.isSlice()) {
switch (func.arch()) {
.wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
.wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
else => unreachable,
}
} }
// Compare the null value with '0' // Compare the null value with '0'
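
The added branch handles optional slices: such optionals are pointer-like, so null is carried in the slice's pointer word rather than a separate flag byte, and only that word (4 bytes on wasm32, 8 on wasm64) needs to be loaded before the zero comparison below. A small self-contained illustration of the values involved (source-level only; the Wasm lowering is what the hunk above implements):

const std = @import("std");

test "optional slice null lives in the pointer word" {
    const a: ?[]const u8 = null; // lowered null: the slice's ptr word is zero
    const b: ?[]const u8 = "hi"; // non-null: ptr word is the string's address
    try std.testing.expect(a == null);
    try std.testing.expect(b != null);
}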
@ -5539,7 +5547,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
break :blk .nop; break :blk .nop;
}, },
}; };
try func.debug_output.dwarf.genVarDbgInfo(name, ty, .wasm, func.mod_fn.owner_decl, is_ptr, loc); try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
func.finishAir(inst, .none, &.{}); func.finishAir(inst, .none, &.{});
} }

View File

@ -11,8 +11,8 @@ const leb128 = std.leb;
/// Contains our list of instructions /// Contains our list of instructions
mir: Mir, mir: Mir,
/// Reference to the file handler /// Reference to the Wasm module linker
bin_file: *link.File, bin_file: *link.File.Wasm,
/// Possible error message. When set, the value is allocated and /// Possible error message. When set, the value is allocated and
/// must be freed manually. /// must be freed manually.
error_msg: ?*Module.ErrorMsg = null, error_msg: ?*Module.ErrorMsg = null,
@ -21,7 +21,7 @@ code: *std.ArrayList(u8),
/// List of allocated locals. /// List of allocated locals.
locals: []const u8, locals: []const u8,
/// The declaration that code is being generated for. /// The declaration that code is being generated for.
decl: *Module.Decl, decl_index: Module.Decl.Index,
// Debug information // Debug information
/// Holds the debug information for this emission /// Holds the debug information for this emission
@ -252,8 +252,8 @@ fn offset(self: Emit) u32 {
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true); @setCold(true);
std.debug.assert(emit.error_msg == null); std.debug.assert(emit.error_msg == null);
// TODO: Determine the source location. const mod = emit.bin_file.base.options.module.?;
emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.allocator, emit.decl.srcLoc(), format, args); emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
return error.EmitFail; return error.EmitFail;
} }
@ -304,8 +304,9 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const global_offset = emit.offset(); const global_offset = emit.offset();
try emit.code.appendSlice(&buf); try emit.code.appendSlice(&buf);
// globals can have index 0 as it represents the stack pointer const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{ const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.index = label, .index = label,
.offset = global_offset, .offset = global_offset,
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB, .relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
@ -361,7 +362,9 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf); try emit.code.appendSlice(&buf);
if (label != 0) { if (label != 0) {
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = call_offset, .offset = call_offset,
.index = label, .index = label,
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB, .relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
@ -387,7 +390,9 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf); try emit.code.appendSlice(&buf);
if (symbol_index != 0) { if (symbol_index != 0) {
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = index_offset, .offset = index_offset,
.index = symbol_index, .index = symbol_index,
.relocation_type = .R_WASM_TABLE_INDEX_SLEB, .relocation_type = .R_WASM_TABLE_INDEX_SLEB,
@ -399,7 +404,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload; const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data; const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1; const mem_offset = emit.offset() + 1;
const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32; const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
if (is_wasm32) { if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const)); try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined; var buf: [5]u8 = undefined;
@ -413,7 +418,9 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
} }
if (mem.pointer != 0) { if (mem.pointer != 0) {
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = mem_offset, .offset = mem_offset,
.index = mem.pointer, .index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64, .relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
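
All four emit helpers in this file (emitGlobal, emitCall, emitFunctionIndex, emitMemAddress) now follow one lookup pattern: map the decl index to its atom through the Wasm linker's `decls` table and append the relocation to that atom, instead of reaching through `emit.decl.link.wasm`. The shared shape (fragment, not standalone; `label` and `global_offset` as in the emitGlobal hunk):

const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
const atom = emit.bin_file.getAtomPtr(atom_index);
try atom.relocs.append(emit.bin_file.base.allocator, .{
    .index = label,
    .offset = global_offset,
    .relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
});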

View File

@ -16,7 +16,7 @@ const Compilation = @import("../../Compilation.zig");
const DebugInfoOutput = codegen.DebugInfoOutput; const DebugInfoOutput = codegen.DebugInfoOutput;
const DW = std.dwarf; const DW = std.dwarf;
const ErrorMsg = Module.ErrorMsg; const ErrorMsg = Module.ErrorMsg;
const FnResult = codegen.FnResult; const Result = codegen.Result;
const GenerateSymbolError = codegen.GenerateSymbolError; const GenerateSymbolError = codegen.GenerateSymbolError;
const Emit = @import("Emit.zig"); const Emit = @import("Emit.zig");
const Liveness = @import("../../Liveness.zig"); const Liveness = @import("../../Liveness.zig");
@ -257,7 +257,7 @@ pub fn generate(
liveness: Liveness, liveness: Liveness,
code: *std.ArrayList(u8), code: *std.ArrayList(u8),
debug_output: DebugInfoOutput, debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult { ) GenerateSymbolError!Result {
if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) { if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) {
@panic("Attempted to compile for architecture that was disabled by build configuration"); @panic("Attempted to compile for architecture that was disabled by build configuration");
} }
@ -305,8 +305,8 @@ pub fn generate(
defer if (builtin.mode == .Debug) function.mir_to_air_map.deinit(); defer if (builtin.mode == .Debug) function.mir_to_air_map.deinit();
var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -319,8 +319,8 @@ pub fn generate(
function.max_end_stack = call_info.stack_byte_count; function.max_end_stack = call_info.stack_byte_count;
function.gen() catch |err| switch (err) { function.gen() catch |err| switch (err) {
error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.CodegenFail => return Result{ .fail = function.err_msg.? },
error.OutOfRegisters => return FnResult{ error.OutOfRegisters => return Result{
.fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}),
}, },
else => |e| return e, else => |e| return e,
@ -345,14 +345,14 @@ pub fn generate(
}; };
defer emit.deinit(); defer emit.deinit();
emit.lowerMir() catch |err| switch (err) { emit.lowerMir() catch |err| switch (err) {
error.EmitFail => return FnResult{ .fail = emit.err_msg.? }, error.EmitFail => return Result{ .fail = emit.err_msg.? },
else => |e| return e, else => |e| return e,
}; };
if (function.err_msg) |em| { if (function.err_msg) |em| {
return FnResult{ .fail = em }; return Result{ .fail = em };
} else { } else {
return FnResult{ .appended = {} }; return Result.ok;
} }
} }
@ -2668,12 +2668,13 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
switch (ptr) { switch (ptr) {
.linker_load => |load_struct| { .linker_load => |load_struct| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*)); const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
const mod = self.bin_file.options.module.?; const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl); const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag) break :blk macho_file.getAtom(atom).getSymbolIndex().?;
fn_owner_decl.link.macho.sym_index } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
else const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
fn_owner_decl.link.coff.sym_index; break :blk coff_file.getAtom(atom).getSymbolIndex().?;
} else unreachable;
const flags: u2 = switch (load_struct.type) { const flags: u2 = switch (load_struct.type) {
.got => 0b00, .got => 0b00,
.direct => 0b01, .direct => 0b01,
@ -3835,7 +3836,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
}, },
else => unreachable, // not a valid function parameter else => unreachable, // not a valid function parameter
}; };
try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc); try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc);
}, },
.plan9 => {}, .plan9 => {},
.none => {}, .none => {},
@ -3875,7 +3876,7 @@ fn genVarDbgInfo(
break :blk .nop; break :blk .nop;
}, },
}; };
try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc); try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc);
}, },
.plan9 => {}, .plan9 => {},
.none => {}, .none => {},
@ -3992,49 +3993,26 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends // Due to incremental compilation, how function calls are generated depends
// on linking. // on linking.
const mod = self.bin_file.options.module.?; const mod = self.bin_file.options.module.?;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (self.air.value(callee)) |func_value| { if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| { if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data; const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const fn_owner_decl = mod.declPtr(func.owner_decl); const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const got_addr = blk: { const atom = elf_file.getAtom(atom_index);
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
};
_ = try self.addInst(.{ _ = try self.addInst(.{
.tag = .call, .tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }), .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @truncate(u32, got_addr) }, .data = .{ .imm = got_addr },
}); });
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| { } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
if (self.air.value(callee)) |func_value| { const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
if (func_value.castTag(.function)) |func_payload| { const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.usize), .rax, .{ try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{ .linker_load = .{
.type = .got, .type = .got,
.sym_index = fn_owner_decl.link.coff.sym_index, .sym_index = sym_index,
}, },
}); });
_ = try self.addInst(.{ _ = try self.addInst(.{
@ -4045,6 +4023,37 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}), }),
.data = undefined, .data = undefined,
}); });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
.sym_index = sym_index,
},
});
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
const decl_block_index = try p9.seeDecl(func.owner_decl);
const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @intCast(u32, fn_got_addr) },
});
} else unreachable;
} else if (func_value.castTag(.extern_fn)) |func_payload| { } else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data; const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name; const decl_name = mod.declPtr(extern_fn.owner_decl).name;
@ -4054,6 +4063,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
lib_name, lib_name,
}); });
} }
if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0)); const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
try self.genSetReg(Type.initTag(.usize), .rax, .{ try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{ .linker_load = .{
@ -4069,63 +4080,21 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}), }),
.data = undefined, .data = undefined,
}); });
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| { } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const sym_index = fn_owner_decl.link.macho.sym_index;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
.sym_index = sym_index,
},
});
// callq *%rax
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0)); const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{ _ = try self.addInst(.{
.tag = .call_extern, .tag = .call_extern,
.ops = undefined, .ops = undefined,
.data = .{ .data = .{ .relocation = .{
.relocation = .{ .atom_index = atom_index,
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
.sym_index = sym_index, .sym_index = sym_index,
}, } },
},
}); });
} else {
return self.fail("TODO implement calling extern functions", .{});
}
} else { } else {
return self.fail("TODO implement calling bitcasted functions", .{}); return self.fail("TODO implement calling bitcasted functions", .{});
} }
@ -4142,35 +4111,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined, .data = undefined,
}); });
} }
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @intCast(u32, fn_got_addr) },
});
} else return self.fail("TODO implement calling extern fn on plan9", .{});
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
} else unreachable;
if (info.stack_byte_count > 0) { if (info.stack_byte_count > 0) {
// Readjust the stack // Readjust the stack
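
The x86_64 call lowering is turned inside out here: the old code switched on `self.bin_file.tag` first and resolved the callee inside each arm, while the new code resolves the callee once and then casts the linker file, so each object format only supplies the address of the function. A shape-only sketch of the new dispatch (comments stand in for the bodies shown in the hunks above; not standalone code):

if (func_value.castTag(.function)) |func_payload| {
    const func = func_payload.data;
    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
        _ = elf_file; // GOT address from the decl's atom, then an immediate call
    } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
        _ = coff_file; // load the GOT entry into .rax via .linker_load, call through .rax
    } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
        _ = macho_file; // same .linker_load-through-.rax pattern as COFF
    } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
        _ = p9; // bases.data + got_index * ptr_bytes, then an immediate call
    } else unreachable;
    _ = func;
}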
@ -6781,24 +6721,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
module.markDeclAlive(decl); module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; const atom = elf_file.getAtom(atom_index);
return MCValue{ .memory = got_addr }; return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| { } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
assert(decl.link.macho.sym_index != 0); const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{ return MCValue{ .linker_load = .{
.type = .got, .type = .got,
.sym_index = decl.link.macho.sym_index, .sym_index = sym_index,
} }; } };
} else if (self.bin_file.cast(link.File.Coff)) |_| { } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
assert(decl.link.coff.sym_index != 0); const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{ return MCValue{ .linker_load = .{
.type = .got, .type = .got,
.sym_index = decl.link.coff.sym_index, .sym_index = sym_index,
} }; } };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| { } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index); const decl_block_index = try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; const decl_block = p9.getDeclBlock(decl_block_index);
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr }; return MCValue{ .memory = got_addr };
} else { } else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{}); return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@ -6811,8 +6754,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)}); return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
}; };
if (self.bin_file.cast(link.File.Elf)) |elf_file| { if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value; return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
return MCValue{ .memory = vaddr };
} else if (self.bin_file.cast(link.File.MachO)) |_| { } else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{ return MCValue{ .linker_load = .{
.type = .direct, .type = .direct,

View File

@ -1001,8 +1001,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED), 0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable, else => unreachable,
}; };
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?; const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(macho_file, .{ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type, .type = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null }, .target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4), .offset = @intCast(u32, end_offset - 4),
@ -1011,8 +1011,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2, .length = 2,
}); });
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?; const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(coff_file, .{ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (ops.flags) { .type = switch (ops.flags) {
0b00 => .got, 0b00 => .got,
0b01 => .direct, 0b01 => .direct,
@ -1140,9 +1140,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| { if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl. // Add relocation to the decl.
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?; const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index); const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(macho_file, .{ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH), .type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target, .target = target,
.offset = offset, .offset = offset,
@ -1152,9 +1152,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}); });
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl. // Add relocation to the decl.
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?; const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index); const target = coff_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(coff_file, .{ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct, .type = .direct,
.target = target, .target = target,
.offset = offset, .offset = offset,

View File

@ -21,16 +21,11 @@ const TypedValue = @import("TypedValue.zig");
const Value = @import("value.zig").Value; const Value = @import("value.zig").Value;
const Zir = @import("Zir.zig"); const Zir = @import("Zir.zig");
pub const FnResult = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value appended.
appended: void,
fail: *ErrorMsg,
};
pub const Result = union(enum) { pub const Result = union(enum) {
/// The `code` parameter passed to `generateSymbol` has the value appended. /// The `code` parameter passed to `generateSymbol` has the value ok.
appended: void, ok: void,
/// The value is available externally, `code` is unused.
externally_managed: []const u8, /// There was a codegen error.
fail: *ErrorMsg, fail: *ErrorMsg,
}; };
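The `externally_managed` case disappears because every lowering path now appends its bytes directly into `code`, so the union collapses to a success/failure pair and `FnResult` becomes redundant. A small self-contained sketch of the resulting control flow, using a placeholder `ErrorMsg` rather than the compiler's real type:

const std = @import("std");

const ErrorMsg = struct { msg: []const u8 };

const Result = union(enum) {
    /// The output buffer already has the value appended.
    ok: void,
    /// There was a codegen error.
    fail: *ErrorMsg,
};

fn lowerByte(code: *std.ArrayList(u8), byte: u8) !Result {
    try code.append(byte);
    return Result.ok;
}

fn lowerPair(code: *std.ArrayList(u8), a: u8, b: u8) !Result {
    // Callers only distinguish two cases now; there is no third arm that
    // copies an externally managed slice into `code`.
    switch (try lowerByte(code, a)) {
        .ok => {},
        .fail => |em| return Result{ .fail = em },
    }
    switch (try lowerByte(code, b)) {
        .ok => {},
        .fail => |em| return Result{ .fail = em },
    }
    return Result.ok;
}

test "both bytes are appended on success" {
    var code = std.ArrayList(u8).init(std.testing.allocator);
    defer code.deinit();
    try std.testing.expect((try lowerPair(&code, 1, 2)) == .ok);
    try std.testing.expectEqualSlices(u8, &.{ 1, 2 }, code.items);
}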
@ -89,7 +84,7 @@ pub fn generateFunction(
liveness: Liveness, liveness: Liveness,
code: *std.ArrayList(u8), code: *std.ArrayList(u8),
debug_output: DebugInfoOutput, debug_output: DebugInfoOutput,
) GenerateSymbolError!FnResult { ) GenerateSymbolError!Result {
switch (bin_file.options.target.cpu.arch) { switch (bin_file.options.target.cpu.arch) {
.arm, .arm,
.armeb, .armeb,
@ -145,7 +140,7 @@ pub fn generateSymbol(
if (typed_value.val.isUndefDeep()) { if (typed_value.val.isUndefDeep()) {
const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow; const abi_size = math.cast(usize, typed_value.ty.abiSize(target)) orelse return error.Overflow;
try code.appendNTimes(0xaa, abi_size); try code.appendNTimes(0xaa, abi_size);
return Result{ .appended = {} }; return Result.ok;
} }
switch (typed_value.ty.zigTypeTag()) { switch (typed_value.ty.zigTypeTag()) {
@ -176,7 +171,7 @@ pub fn generateSymbol(
128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)), 128 => writeFloat(f128, typed_value.val.toFloat(f128), target, endian, try code.addManyAsArray(16)),
else => unreachable, else => unreachable,
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.Array => switch (typed_value.val.tag()) { .Array => switch (typed_value.val.tag()) {
.bytes => { .bytes => {
@ -185,7 +180,7 @@ pub fn generateSymbol(
// The bytes payload already includes the sentinel, if any // The bytes payload already includes the sentinel, if any
try code.ensureUnusedCapacity(len); try code.ensureUnusedCapacity(len);
code.appendSliceAssumeCapacity(bytes[0..len]); code.appendSliceAssumeCapacity(bytes[0..len]);
return Result{ .appended = {} }; return Result.ok;
}, },
.str_lit => { .str_lit => {
const str_lit = typed_value.val.castTag(.str_lit).?.data; const str_lit = typed_value.val.castTag(.str_lit).?.data;
@ -197,7 +192,7 @@ pub fn generateSymbol(
const byte = @intCast(u8, sent_val.toUnsignedInt(target)); const byte = @intCast(u8, sent_val.toUnsignedInt(target));
code.appendAssumeCapacity(byte); code.appendAssumeCapacity(byte);
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.aggregate => { .aggregate => {
const elem_vals = typed_value.val.castTag(.aggregate).?.data; const elem_vals = typed_value.val.castTag(.aggregate).?.data;
@ -208,14 +203,11 @@ pub fn generateSymbol(
.ty = elem_ty, .ty = elem_ty,
.val = elem_val, .val = elem_val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.repeated => { .repeated => {
const array = typed_value.val.castTag(.repeated).?.data; const array = typed_value.val.castTag(.repeated).?.data;
@ -229,10 +221,7 @@ pub fn generateSymbol(
.ty = elem_ty, .ty = elem_ty,
.val = array, .val = array,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
@ -242,15 +231,12 @@ pub fn generateSymbol(
.ty = elem_ty, .ty = elem_ty,
.val = sentinel_val, .val = sentinel_val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.empty_array_sentinel => { .empty_array_sentinel => {
const elem_ty = typed_value.ty.childType(); const elem_ty = typed_value.ty.childType();
@ -259,13 +245,10 @@ pub fn generateSymbol(
.ty = elem_ty, .ty = elem_ty,
.val = sentinel_val, .val = sentinel_val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
return Result{ .appended = {} }; return Result.ok;
}, },
else => return Result{ else => return Result{
.fail = try ErrorMsg.create( .fail = try ErrorMsg.create(
@ -289,7 +272,7 @@ pub fn generateSymbol(
}, },
else => unreachable, else => unreachable,
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.variable => { .variable => {
const decl = typed_value.val.castTag(.variable).?.data.owner_decl; const decl = typed_value.val.castTag(.variable).?.data.owner_decl;
@ -309,10 +292,7 @@ pub fn generateSymbol(
.ty = slice_ptr_field_type, .ty = slice_ptr_field_type,
.val = slice.ptr, .val = slice.ptr,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
@ -321,14 +301,11 @@ pub fn generateSymbol(
.ty = Type.initTag(.usize), .ty = Type.initTag(.usize),
.val = slice.len, .val = slice.len,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.field_ptr => { .field_ptr => {
const field_ptr = typed_value.val.castTag(.field_ptr).?.data; const field_ptr = typed_value.val.castTag(.field_ptr).?.data;
@ -375,13 +352,10 @@ pub fn generateSymbol(
.ty = typed_value.ty, .ty = typed_value.ty,
.val = container_ptr, .val = container_ptr,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
return Result{ .appended = {} }; return Result.ok;
}, },
else => return Result{ else => return Result{
.fail = try ErrorMsg.create( .fail = try ErrorMsg.create(
@ -434,7 +408,7 @@ pub fn generateSymbol(
.signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))), .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))),
}; };
try code.append(x); try code.append(x);
return Result{ .appended = {} }; return Result.ok;
} }
if (info.bits > 64) { if (info.bits > 64) {
var bigint_buffer: Value.BigIntSpace = undefined; var bigint_buffer: Value.BigIntSpace = undefined;
@ -443,7 +417,7 @@ pub fn generateSymbol(
const start = code.items.len; const start = code.items.len;
try code.resize(start + abi_size); try code.resize(start + abi_size);
bigint.writeTwosComplement(code.items[start..][0..abi_size], endian); bigint.writeTwosComplement(code.items[start..][0..abi_size], endian);
return Result{ .appended = {} }; return Result.ok;
} }
switch (info.signedness) { switch (info.signedness) {
.unsigned => { .unsigned => {
@ -471,7 +445,7 @@ pub fn generateSymbol(
} }
}, },
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.Enum => { .Enum => {
var int_buffer: Value.Payload.U64 = undefined; var int_buffer: Value.Payload.U64 = undefined;
@ -481,7 +455,7 @@ pub fn generateSymbol(
if (info.bits <= 8) { if (info.bits <= 8) {
const x = @intCast(u8, int_val.toUnsignedInt(target)); const x = @intCast(u8, int_val.toUnsignedInt(target));
try code.append(x); try code.append(x);
return Result{ .appended = {} }; return Result.ok;
} }
if (info.bits > 64) { if (info.bits > 64) {
return Result{ return Result{
@ -519,12 +493,12 @@ pub fn generateSymbol(
} }
}, },
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.Bool => { .Bool => {
const x: u8 = @boolToInt(typed_value.val.toBool()); const x: u8 = @boolToInt(typed_value.val.toBool());
try code.append(x); try code.append(x);
return Result{ .appended = {} }; return Result.ok;
}, },
.Struct => { .Struct => {
if (typed_value.ty.containerLayout() == .Packed) { if (typed_value.ty.containerLayout() == .Packed) {
@ -549,12 +523,7 @@ pub fn generateSymbol(
.ty = field_ty, .ty = field_ty,
.val = field_val, .val = field_val,
}, &tmp_list, debug_output, reloc_info)) { }, &tmp_list, debug_output, reloc_info)) {
.appended => { .ok => mem.copy(u8, code.items[current_pos..], tmp_list.items),
mem.copy(u8, code.items[current_pos..], tmp_list.items);
},
.externally_managed => |external_slice| {
mem.copy(u8, code.items[current_pos..], external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} else { } else {
@ -563,7 +532,7 @@ pub fn generateSymbol(
bits += @intCast(u16, field_ty.bitSize(target)); bits += @intCast(u16, field_ty.bitSize(target));
} }
return Result{ .appended = {} }; return Result.ok;
} }
const struct_begin = code.items.len; const struct_begin = code.items.len;
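In the packed-struct branch above, each field is lowered into a scratch list (`tmp_list`) and then copied into its byte position at `current_pos` inside the pre-sized output, instead of being appended at the end. A self-contained sketch of that copy-into-place pattern; the field offsets are hard-coded purely for illustration:

const std = @import("std");

fn writeFieldAt(code: []u8, tmp: *std.ArrayList(u8), offset: usize, bytes: []const u8) !void {
    // Lower the field into the scratch buffer first...
    tmp.clearRetainingCapacity();
    try tmp.appendSlice(bytes);
    // ...then copy it into its final position inside the packed value.
    std.mem.copy(u8, code[offset..], tmp.items);
}

test "fields land at their packed offsets" {
    var buf = [_]u8{0} ** 4;
    var tmp = std.ArrayList(u8).init(std.testing.allocator);
    defer tmp.deinit();
    try writeFieldAt(&buf, &tmp, 0, &.{ 0x11, 0x22 });
    try writeFieldAt(&buf, &tmp, 2, &.{ 0x33, 0x44 });
    try std.testing.expectEqualSlices(u8, &.{ 0x11, 0x22, 0x33, 0x44 }, &buf);
}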
@ -576,10 +545,7 @@ pub fn generateSymbol(
.ty = field_ty, .ty = field_ty,
.val = field_val, .val = field_val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
const unpadded_field_end = code.items.len - struct_begin; const unpadded_field_end = code.items.len - struct_begin;
@ -593,7 +559,7 @@ pub fn generateSymbol(
} }
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.Union => { .Union => {
const union_obj = typed_value.val.castTag(.@"union").?.data; const union_obj = typed_value.val.castTag(.@"union").?.data;
@ -612,10 +578,7 @@ pub fn generateSymbol(
.ty = typed_value.ty.unionTagType().?, .ty = typed_value.ty.unionTagType().?,
.val = union_obj.tag, .val = union_obj.tag,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
@ -632,10 +595,7 @@ pub fn generateSymbol(
.ty = field_ty, .ty = field_ty,
.val = union_obj.val, .val = union_obj.val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
@ -650,15 +610,12 @@ pub fn generateSymbol(
.ty = union_ty.tag_ty, .ty = union_ty.tag_ty,
.val = union_obj.tag, .val = union_obj.tag,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.Optional => { .Optional => {
var opt_buf: Type.Payload.ElemType = undefined; var opt_buf: Type.Payload.ElemType = undefined;
@ -669,7 +626,7 @@ pub fn generateSymbol(
if (!payload_type.hasRuntimeBits()) { if (!payload_type.hasRuntimeBits()) {
try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size); try code.writer().writeByteNTimes(@boolToInt(is_pl), abi_size);
return Result{ .appended = {} }; return Result.ok;
} }
if (typed_value.ty.optionalReprIsPayload()) { if (typed_value.ty.optionalReprIsPayload()) {
@ -678,10 +635,7 @@ pub fn generateSymbol(
.ty = payload_type, .ty = payload_type,
.val = payload.data, .val = payload.data,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} else if (!typed_value.val.isNull()) { } else if (!typed_value.val.isNull()) {
@ -689,17 +643,14 @@ pub fn generateSymbol(
.ty = payload_type, .ty = payload_type,
.val = typed_value.val, .val = typed_value.val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} else { } else {
try code.writer().writeByteNTimes(0, abi_size); try code.writer().writeByteNTimes(0, abi_size);
} }
return Result{ .appended = {} }; return Result.ok;
} }
const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef); const value = if (typed_value.val.castTag(.opt_payload)) |payload| payload.data else Value.initTag(.undef);
@ -708,14 +659,11 @@ pub fn generateSymbol(
.ty = payload_type, .ty = payload_type,
.val = value, .val = value,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.ErrorUnion => { .ErrorUnion => {
const error_ty = typed_value.ty.errorUnionSet(); const error_ty = typed_value.ty.errorUnionSet();
@ -740,10 +688,7 @@ pub fn generateSymbol(
.ty = error_ty, .ty = error_ty,
.val = if (is_payload) Value.initTag(.zero) else typed_value.val, .val = if (is_payload) Value.initTag(.zero) else typed_value.val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
@ -756,10 +701,7 @@ pub fn generateSymbol(
.ty = payload_ty, .ty = payload_ty,
.val = payload_val, .val = payload_val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
const unpadded_end = code.items.len - begin; const unpadded_end = code.items.len - begin;
@ -778,10 +720,7 @@ pub fn generateSymbol(
.ty = error_ty, .ty = error_ty,
.val = if (is_payload) Value.initTag(.zero) else typed_value.val, .val = if (is_payload) Value.initTag(.zero) else typed_value.val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
const unpadded_end = code.items.len - begin; const unpadded_end = code.items.len - begin;
@ -793,7 +732,7 @@ pub fn generateSymbol(
} }
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.ErrorSet => { .ErrorSet => {
switch (typed_value.val.tag()) { switch (typed_value.val.tag()) {
@ -806,7 +745,7 @@ pub fn generateSymbol(
try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target))); try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target)));
}, },
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.Vector => switch (typed_value.val.tag()) { .Vector => switch (typed_value.val.tag()) {
.bytes => { .bytes => {
@ -814,7 +753,7 @@ pub fn generateSymbol(
const len = @intCast(usize, typed_value.ty.arrayLen()); const len = @intCast(usize, typed_value.ty.arrayLen());
try code.ensureUnusedCapacity(len); try code.ensureUnusedCapacity(len);
code.appendSliceAssumeCapacity(bytes[0..len]); code.appendSliceAssumeCapacity(bytes[0..len]);
return Result{ .appended = {} }; return Result.ok;
}, },
.aggregate => { .aggregate => {
const elem_vals = typed_value.val.castTag(.aggregate).?.data; const elem_vals = typed_value.val.castTag(.aggregate).?.data;
@ -825,14 +764,11 @@ pub fn generateSymbol(
.ty = elem_ty, .ty = elem_ty,
.val = elem_val, .val = elem_val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.repeated => { .repeated => {
const array = typed_value.val.castTag(.repeated).?.data; const array = typed_value.val.castTag(.repeated).?.data;
@ -845,14 +781,11 @@ pub fn generateSymbol(
.ty = elem_ty, .ty = elem_ty,
.val = array, .val = array,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} }
return Result{ .appended = {} }; return Result.ok;
}, },
.str_lit => { .str_lit => {
const str_lit = typed_value.val.castTag(.str_lit).?.data; const str_lit = typed_value.val.castTag(.str_lit).?.data;
@ -860,7 +793,7 @@ pub fn generateSymbol(
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len]; const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
try code.ensureUnusedCapacity(str_lit.len); try code.ensureUnusedCapacity(str_lit.len);
code.appendSliceAssumeCapacity(bytes); code.appendSliceAssumeCapacity(bytes);
return Result{ .appended = {} }; return Result.ok;
}, },
else => unreachable, else => unreachable,
}, },
@ -901,10 +834,7 @@ fn lowerDeclRef(
.ty = slice_ptr_field_type, .ty = slice_ptr_field_type,
.val = typed_value.val, .val = typed_value.val,
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
@ -917,14 +847,11 @@ fn lowerDeclRef(
.ty = Type.usize, .ty = Type.usize,
.val = Value.initPayload(&slice_len.base), .val = Value.initPayload(&slice_len.base),
}, code, debug_output, reloc_info)) { }, code, debug_output, reloc_info)) {
.appended => {}, .ok => {},
.externally_managed => |external_slice| {
code.appendSliceAssumeCapacity(external_slice);
},
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
return Result{ .appended = {} }; return Result.ok;
} }
const ptr_width = target.cpu.arch.ptrBitWidth(); const ptr_width = target.cpu.arch.ptrBitWidth();
@ -932,7 +859,7 @@ fn lowerDeclRef(
const is_fn_body = decl.ty.zigTypeTag() == .Fn; const is_fn_body = decl.ty.zigTypeTag() == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits()) { if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8)); try code.writer().writeByteNTimes(0xaa, @divExact(ptr_width, 8));
return Result{ .appended = {} }; return Result.ok;
} }
module.markDeclAlive(decl); module.markDeclAlive(decl);
@ -950,7 +877,7 @@ fn lowerDeclRef(
else => unreachable, else => unreachable,
} }
return Result{ .appended = {} }; return Result.ok;
} }
pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {

View File

@ -16,7 +16,6 @@ const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc; const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig"); const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig"); const Liveness = @import("../Liveness.zig");
const CType = @import("../type.zig").CType;
const target_util = @import("../target.zig"); const target_util = @import("../target.zig");
const libcFloatPrefix = target_util.libcFloatPrefix; const libcFloatPrefix = target_util.libcFloatPrefix;
@ -1663,6 +1662,22 @@ pub const DeclGen = struct {
defer buffer.deinit(); defer buffer.deinit();
try buffer.appendSlice("struct "); try buffer.appendSlice("struct ");
var needs_pack_attr = false;
{
var it = t.structFields().iterator();
while (it.next()) |field| {
const field_ty = field.value_ptr.ty;
if (!field_ty.hasRuntimeBits()) continue;
const alignment = field.value_ptr.abi_align;
if (alignment != 0 and alignment < field_ty.abiAlignment(dg.module.getTarget())) {
needs_pack_attr = true;
try buffer.appendSlice("zig_packed(");
break;
}
}
}
try buffer.appendSlice(name); try buffer.appendSlice(name);
try buffer.appendSlice(" {\n"); try buffer.appendSlice(" {\n");
{ {
@ -1672,7 +1687,7 @@ pub const DeclGen = struct {
const field_ty = field.value_ptr.ty; const field_ty = field.value_ptr.ty;
if (!field_ty.hasRuntimeBits()) continue; if (!field_ty.hasRuntimeBits()) continue;
const alignment = field.value_ptr.abi_align; const alignment = field.value_ptr.alignment(dg.module.getTarget(), t.containerLayout());
const field_name = CValue{ .identifier = field.key_ptr.* }; const field_name = CValue{ .identifier = field.key_ptr.* };
try buffer.append(' '); try buffer.append(' ');
try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete); try dg.renderTypeAndName(buffer.writer(), field_ty, field_name, .Mut, alignment, .Complete);
@ -1682,7 +1697,7 @@ pub const DeclGen = struct {
} }
if (empty) try buffer.appendSlice(" char empty_struct;\n"); if (empty) try buffer.appendSlice(" char empty_struct;\n");
} }
try buffer.appendSlice("};\n"); if (needs_pack_attr) try buffer.appendSlice("});\n") else try buffer.appendSlice("};\n");
const rendered = try buffer.toOwnedSlice(); const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered); errdefer dg.typedefs.allocator.free(rendered);
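The two-pass trick above first scans the struct's fields to see whether any field is under-aligned relative to its ABI alignment, and only then decides whether to open the C declaration with `zig_packed(` and close it with `});` instead of `};`. A small sketch of the same scan over simplified field descriptions; the `zig_packed` macro name comes from the diff, the field struct is a stand-in:

const std = @import("std");

const Field = struct {
    abi_align: u32, // 0 means "use the type's natural ABI alignment"
    natural_align: u32,
    has_runtime_bits: bool = true,
};

fn rendersAsPacked(fields: []const Field) bool {
    for (fields) |field| {
        if (!field.has_runtime_bits) continue;
        // Any explicitly lowered alignment forces the packed wrapper.
        if (field.abi_align != 0 and field.abi_align < field.natural_align) return true;
    }
    return false;
}

test "only under-aligned fields trigger zig_packed" {
    try std.testing.expect(!rendersAsPacked(&.{.{ .abi_align = 0, .natural_align = 4 }}));
    try std.testing.expect(rendersAsPacked(&.{.{ .abi_align = 1, .natural_align = 4 }}));
}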
@ -2367,9 +2382,14 @@ pub const DeclGen = struct {
depth += 1; depth += 1;
} }
if (alignment != 0 and alignment > ty.abiAlignment(target)) { if (alignment != 0) {
const abi_alignment = ty.abiAlignment(target);
if (alignment < abi_alignment) {
try w.print("zig_under_align({}) ", .{alignment});
} else if (alignment > abi_alignment) {
try w.print("zig_align({}) ", .{alignment}); try w.print("zig_align({}) ", .{alignment});
} }
}
try dg.renderType(w, render_ty, kind); try dg.renderType(w, render_ty, kind);
const const_prefix = switch (mutability) { const const_prefix = switch (mutability) {
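renderTypeAndName now distinguishes three cases instead of two: an explicit alignment below the ABI alignment emits `zig_under_align(n)`, one above it emits `zig_align(n)`, and an alignment of 0 (or exactly ABI) emits nothing. A compact sketch of that decision, returning the qualifier text rather than printing it; the macro names are as in the diff, everything else is a stand-in:

const std = @import("std");

fn alignQualifier(alignment: u32, abi_alignment: u32, buf: []u8) ![]const u8 {
    if (alignment == 0 or alignment == abi_alignment) return "";
    if (alignment < abi_alignment)
        return try std.fmt.bufPrint(buf, "zig_under_align({d}) ", .{alignment});
    return try std.fmt.bufPrint(buf, "zig_align({d}) ", .{alignment});
}

test "alignment qualifier selection" {
    var buf: [32]u8 = undefined;
    try std.testing.expectEqualStrings("", try alignQualifier(0, 8, &buf));
    try std.testing.expectEqualStrings("zig_under_align(4) ", try alignQualifier(4, 8, &buf));
    try std.testing.expectEqualStrings("zig_align(16) ", try alignQualifier(16, 8, &buf));
}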
@ -2860,27 +2880,30 @@ pub fn genDecl(o: *Object) !void {
const w = o.writer(); const w = o.writer();
if (!is_global) try w.writeAll("static "); if (!is_global) try w.writeAll("static ");
if (variable.is_threadlocal) try w.writeAll("zig_threadlocal "); if (variable.is_threadlocal) try w.writeAll("zig_threadlocal ");
if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete); try o.dg.renderTypeAndName(w, o.dg.decl.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
if (o.dg.decl.@"linksection" != null) try w.writeAll(", read, write)");
try w.writeAll(" = "); try w.writeAll(" = ");
try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer); try o.dg.renderValue(w, tv.ty, variable.init, .StaticInitializer);
try w.writeByte(';'); try w.writeByte(';');
try o.indent_writer.insertNewline(); try o.indent_writer.insertNewline();
} else { } else {
const is_global = o.dg.module.decl_exports.contains(o.dg.decl_index);
const fwd_decl_writer = o.dg.fwd_decl.writer();
const decl_c_value: CValue = .{ .decl = o.dg.decl_index }; const decl_c_value: CValue = .{ .decl = o.dg.decl_index };
const fwd_decl_writer = o.dg.fwd_decl.writer(); try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
try fwd_decl_writer.writeAll("static "); try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete);
try o.dg.renderTypeAndName(fwd_decl_writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete);
try fwd_decl_writer.writeAll(";\n"); try fwd_decl_writer.writeAll(";\n");
const writer = o.writer(); const w = o.writer();
try writer.writeAll("static "); if (!is_global) try w.writeAll("static ");
// TODO ask the Decl if it is const if (o.dg.decl.@"linksection") |section| try w.print("zig_linksection(\"{s}\", ", .{section});
// https://github.com/ziglang/zig/issues/7582 try o.dg.renderTypeAndName(w, tv.ty, decl_c_value, .Const, o.dg.decl.@"align", .Complete);
try o.dg.renderTypeAndName(writer, tv.ty, decl_c_value, .Mut, o.dg.decl.@"align", .Complete); if (o.dg.decl.@"linksection" != null) try w.writeAll(", read)");
try writer.writeAll(" = "); try w.writeAll(" = ");
try o.dg.renderValue(writer, tv.ty, tv.val, .StaticInitializer); try o.dg.renderValue(w, tv.ty, tv.val, .StaticInitializer);
try writer.writeAll(";\n"); try w.writeAll(";\n");
} }
} }
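The non-variable branch of genDecl now mirrors the variable branch: a forward declaration that is `zig_extern` for exported decls and `static` otherwise, a `Const` definition, and an optional `zig_linksection("...", <decl>, read)` wrapper around the definition. A rough, self-contained sketch of the text such a sequence of writer calls produces for a hypothetical exported constant `foo`; in the real backend the forward declaration goes to a separate `fwd_decl` buffer, which is simplified away here:

const std = @import("std");

fn renderConstDecl(
    w: anytype,
    is_global: bool,
    section: ?[]const u8,
    c_type_and_name: []const u8,
    init_expr: []const u8,
) !void {
    // Forward declaration: exported decls become zig_extern, private ones static.
    try w.writeAll(if (is_global) "zig_extern " else "static ");
    try w.print("{s};\n", .{c_type_and_name});
    // Definition, optionally wrapped in a read-only zig_linksection(...).
    if (!is_global) try w.writeAll("static ");
    if (section) |s| try w.print("zig_linksection(\"{s}\", ", .{s});
    try w.writeAll(c_type_and_name);
    if (section != null) try w.writeAll(", read)");
    try w.print(" = {s};\n", .{init_expr});
}

test "exported const with a linksection" {
    var out = std.ArrayList(u8).init(std.testing.allocator);
    defer out.deinit();
    try renderConstDecl(out.writer(), true, ".rodata.foo", "const int foo", "42");
    try std.testing.expectEqualStrings(
        "zig_extern const int foo;\n" ++
            "zig_linksection(\".rodata.foo\", const int foo, read) = 42;\n",
        out.items,
    );
}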
@ -3726,16 +3749,15 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
const ptr_val = try f.resolveInst(bin_op.lhs); const ptr_val = try f.resolveInst(bin_op.lhs);
const src_ty = f.air.typeOf(bin_op.rhs); const src_ty = f.air.typeOf(bin_op.rhs);
const src_val = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
// TODO Sema should emit a different instruction when the store should // TODO Sema should emit a different instruction when the store should
// possibly do the safety 0xaa bytes for undefined. // possibly do the safety 0xaa bytes for undefined.
const src_val_is_undefined = const src_val_is_undefined =
if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false; if (f.air.value(bin_op.rhs)) |v| v.isUndefDeep() else false;
if (src_val_is_undefined) if (src_val_is_undefined) {
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return try storeUndefined(f, ptr_info.pointee_type, ptr_val); return try storeUndefined(f, ptr_info.pointee_type, ptr_val);
}
const target = f.object.dg.module.getTarget(); const target = f.object.dg.module.getTarget();
const is_aligned = ptr_info.@"align" == 0 or const is_aligned = ptr_info.@"align" == 0 or
@ -3744,6 +3766,9 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
const need_memcpy = !is_aligned or is_array; const need_memcpy = !is_aligned or is_array;
const writer = f.object.writer(); const writer = f.object.writer();
const src_val = try f.resolveInst(bin_op.rhs);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
if (need_memcpy) { if (need_memcpy) {
// For this memcpy to safely work we need the rhs to have the same // For this memcpy to safely work we need the rhs to have the same
// underlying type as the lhs (i.e. they must both be arrays of the same underlying type). // underlying type as the lhs (i.e. they must both be arrays of the same underlying type).
@ -4344,8 +4369,9 @@ fn airDbgInline(f: *Function, inst: Air.Inst.Index) !CValue {
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op; const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const name = f.air.nullTerminatedString(pl_op.payload); const name = f.air.nullTerminatedString(pl_op.payload);
const operand = try f.resolveInst(pl_op.operand); const operand_is_undef = if (f.air.value(pl_op.operand)) |v| v.isUndefDeep() else false;
_ = operand; if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand}); try reap(f, inst, &.{pl_op.operand});
const writer = f.object.writer(); const writer = f.object.writer();
try writer.print("/* var:{s} */\n", .{name}); try writer.print("/* var:{s} */\n", .{name});
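Both airStore and airDbgVar now classify the operand before resolving it: they consult `f.air.value(...)` for a comptime-known undef and skip `resolveInst` entirely when the operand is undefined, so an undef store or a debug annotation never forces an operand to be materialized as a C value. A deliberately tiny sketch of that ordering with stand-in types (nothing here is the real AIR API):

const std = @import("std");

/// Stand-in for an AIR operand: `comptime_undef` models whether
/// `f.air.value(operand)` would report a comptime-known undef.
const Operand = struct {
    comptime_undef: bool,

    fn isKnownUndef(op: Operand) bool {
        // Mirrors `if (f.air.value(op)) |v| v.isUndefDeep() else false`.
        return op.comptime_undef;
    }
};

/// Classify first, then resolve only operands that are not known-undef.
fn lowerStore(op: Operand, resolved_count: *u32) void {
    if (op.isKnownUndef()) return; // storeUndefined / comment-only path
    resolved_count.* += 1; // stands in for f.resolveInst(...)
}

test "undef operands are never materialized" {
    var resolved: u32 = 0;
    lowerStore(.{ .comptime_undef = true }, &resolved);
    lowerStore(.{ .comptime_undef = false }, &resolved);
    try std.testing.expectEqual(@as(u32, 1), resolved);
}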

View File

@ -19,7 +19,6 @@ const Liveness = @import("../Liveness.zig");
const Value = @import("../value.zig").Value; const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type; const Type = @import("../type.zig").Type;
const LazySrcLoc = Module.LazySrcLoc; const LazySrcLoc = Module.LazySrcLoc;
const CType = @import("../type.zig").CType;
const x86_64_abi = @import("../arch/x86_64/abi.zig"); const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig"); const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig"); const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
@ -11057,8 +11056,8 @@ fn backendSupportsF128(target: std.Target) bool {
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool { fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
return switch (scalar_ty.tag()) { return switch (scalar_ty.tag()) {
.f16 => backendSupportsF16(target), .f16 => backendSupportsF16(target),
.f80 => (CType.longdouble.sizeInBits(target) == 80) and backendSupportsF80(target), .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
.f128 => (CType.longdouble.sizeInBits(target) == 128) and backendSupportsF128(target), .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
else => true, else => true,
}; };
} }
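Here `CType.longdouble.sizeInBits(target)` is replaced by the `std.Target.c_type_bit_size` query, so the LLVM backend asks the target directly how wide C's `long double` is before allowing f80/f128 intrinsics. A small sketch of the same query; which targets actually report 80 bits depends on the ABI:

const std = @import("std");

/// f80 hardware intrinsics only make sense when the C ABI's `long double`
/// is the x87 80-bit format on this target.
fn longDoubleIsF80(target: std.Target) bool {
    return target.c_type_bit_size(.longdouble) == 80;
}

test "query the host's long double width" {
    const builtin = @import("builtin");
    // Typically 80 on x86_64-linux-gnu, 128 on aarch64-linux-gnu.
    _ = longDoubleIsF80(builtin.target);
}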

View File

@ -49,7 +49,7 @@ pub const DeclGen = struct {
spv: *SpvModule, spv: *SpvModule,
/// The decl we are currently generating code for. /// The decl we are currently generating code for.
decl: *Decl, decl_index: Decl.Index,
/// The intermediate code of the declaration we are currently generating. Note: If /// The intermediate code of the declaration we are currently generating. Note: If
/// the declaration is not a function, this value will be undefined! /// the declaration is not a function, this value will be undefined!
@ -59,6 +59,8 @@ pub const DeclGen = struct {
/// Note: If the declaration is not a function, this value will be undefined! /// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness, liveness: Liveness,
ids: *const std.AutoHashMap(Decl.Index, IdResult),
/// An array of function argument result-ids. Each index corresponds with the /// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index. /// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{}, args: std.ArrayListUnmanaged(IdRef) = .{},
@ -133,14 +135,20 @@ pub const DeclGen = struct {
/// Initialize the common resources of a DeclGen. Some fields are left uninitialized, /// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
/// only set when `gen` is called. /// only set when `gen` is called.
pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen { pub fn init(
allocator: Allocator,
module: *Module,
spv: *SpvModule,
ids: *const std.AutoHashMap(Decl.Index, IdResult),
) DeclGen {
return .{ return .{
.gpa = allocator, .gpa = allocator,
.module = module, .module = module,
.spv = spv, .spv = spv,
.decl = undefined, .decl_index = undefined,
.air = undefined, .air = undefined,
.liveness = undefined, .liveness = undefined,
.ids = ids,
.next_arg_index = undefined, .next_arg_index = undefined,
.current_block_label_id = undefined, .current_block_label_id = undefined,
.error_msg = undefined, .error_msg = undefined,
@ -150,9 +158,9 @@ pub const DeclGen = struct {
/// Generate the code for `decl`. If a reportable error occurred during code generation, /// Generate the code for `decl`. If a reportable error occurred during code generation,
/// a message is returned by this function. Callee owns the memory. If this function /// a message is returned by this function. Callee owns the memory. If this function
/// returns such a reportable error, it is valid to be called again for a different decl. /// returns such a reportable error, it is valid to be called again for a different decl.
pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg { pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these. // Reset internal resources, we don't want to re-allocate these.
self.decl = decl; self.decl_index = decl_index;
self.air = air; self.air = air;
self.liveness = liveness; self.liveness = liveness;
self.args.items.len = 0; self.args.items.len = 0;
@ -194,7 +202,7 @@ pub const DeclGen = struct {
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true); @setCold(true);
const src = LazySrcLoc.nodeOffset(0); const src = LazySrcLoc.nodeOffset(0);
const src_loc = src.toSrcLoc(self.decl); const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
assert(self.error_msg == null); assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
return error.CodegenFail; return error.CodegenFail;
@ -332,7 +340,7 @@ pub const DeclGen = struct {
}; };
const decl = self.module.declPtr(fn_decl_index); const decl = self.module.declPtr(fn_decl_index);
self.module.markDeclAlive(decl); self.module.markDeclAlive(decl);
return decl.fn_link.spirv.id.toRef(); return self.ids.get(fn_decl_index).?.toRef();
} }
const target = self.getTarget(); const target = self.getTarget();
@ -553,8 +561,8 @@ pub const DeclGen = struct {
} }
fn genDecl(self: *DeclGen) !void { fn genDecl(self: *DeclGen) !void {
const decl = self.decl; const result_id = self.ids.get(self.decl_index).?;
const result_id = decl.fn_link.spirv.id; const decl = self.module.declPtr(self.decl_index);
if (decl.val.castTag(.function)) |_| { if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn); assert(decl.ty.zigTypeTag() == .Fn);
@ -945,7 +953,7 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
const src_fname_id = try self.spv.resolveSourceFileName(self.decl); const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
try self.func.body.emit(self.spv.gpa, .OpLine, .{ try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id, .file = src_fname_id,
.line = dbg_stmt.line, .line = dbg_stmt.line,
@ -1106,7 +1114,7 @@ pub const DeclGen = struct {
assert(as.errors.items.len != 0); assert(as.errors.items.len != 0);
assert(self.error_msg == null); assert(self.error_msg == null);
const loc = LazySrcLoc.nodeOffset(0); const loc = LazySrcLoc.nodeOffset(0);
const src_loc = loc.toSrcLoc(self.decl); const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{}); self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len); const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
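Instead of stashing a result-id inside each `Decl` (the old `decl.fn_link.spirv.id`), the SPIR-V backend now borrows a `Decl.Index`-keyed hash map owned by the linker and looks ids up on demand; the DeclGen likewise stores only a `Decl.Index` and calls `module.declPtr` when it actually needs the `Decl`. A self-contained sketch of the lookup-by-index shape with stand-in types:

const std = @import("std");

const DeclIndex = u32;
const IdResult = struct { id: u32 };

const FakeDeclGen = struct {
    decl_index: DeclIndex,
    // Borrowed from the linker; maps every seen Decl to its SPIR-V result-id.
    ids: *const std.AutoHashMap(DeclIndex, IdResult),

    fn resultId(self: FakeDeclGen) IdResult {
        // Mirrors `self.ids.get(self.decl_index).?` in the diff: the id must
        // have been registered before codegen for this decl starts.
        return self.ids.get(self.decl_index).?;
    }
};

test "ids are looked up through the shared map" {
    var ids = std.AutoHashMap(DeclIndex, IdResult).init(std.testing.allocator);
    defer ids.deinit();
    try ids.put(7, .{ .id = 42 });

    const dg = FakeDeclGen{ .decl_index = 7, .ids = &ids };
    try std.testing.expectEqual(@as(u32, 42), dg.resultId().id);
}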

View File

@ -261,39 +261,6 @@ pub const File = struct {
/// of this linking operation. /// of this linking operation.
lock: ?Cache.Lock = null, lock: ?Cache.Lock = null,
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.Atom,
macho: MachO.Atom,
plan9: Plan9.DeclBlock,
c: void,
wasm: Wasm.DeclBlock,
spirv: void,
nvptx: void,
};
pub const LinkFn = union {
elf: Dwarf.SrcFn,
coff: Coff.SrcFn,
macho: Dwarf.SrcFn,
plan9: void,
c: void,
wasm: Wasm.FnData,
spirv: SpirV.FnData,
nvptx: void,
};
pub const Export = union {
elf: Elf.Export,
coff: Coff.Export,
macho: MachO.Export,
plan9: Plan9.Export,
c: void,
wasm: Wasm.Export,
spirv: void,
nvptx: void,
};
/// Attempts incremental linking, if the file already exists. If /// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and /// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure /// rewriting it. A malicious file is detected as incremental link failure
@ -533,8 +500,7 @@ pub const File = struct {
} }
} }
/// May be called before or after updateDeclExports but must be called /// May be called before or after updateDeclExports for any given Decl.
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void { pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index); const decl = module.declPtr(decl_index);
log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() }); log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() });
@ -557,8 +523,7 @@ pub const File = struct {
} }
} }
/// May be called before or after updateDeclExports but must be called /// May be called before or after updateDeclExports for any given Decl.
/// after allocateDeclIndexes for any given Decl.
pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void { pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
const owner_decl = module.declPtr(func.owner_decl); const owner_decl = module.declPtr(func.owner_decl);
log.debug("updateFunc {*} ({s}), type={}", .{ log.debug("updateFunc {*} ({s}), type={}", .{
@ -582,48 +547,27 @@ pub const File = struct {
} }
} }
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void { pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{ log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1, decl, decl.name, decl.src_line + 1,
}); });
assert(decl.has_tv); assert(decl.has_tv);
if (build_options.only_c) { if (build_options.only_c) {
assert(base.tag == .c); assert(base.tag == .c);
return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl); return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
} }
switch (base.tag) { switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl), .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl), .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl_index),
.c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl), .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl), .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl_index),
.spirv, .nvptx => {}, .spirv, .nvptx => {},
} }
} }
/// Must be called before any call to updateDecl or updateDeclExports for
/// any given Decl.
/// TODO we're transitioning to deleting this function and instead having
/// each linker backend notice the first time updateDecl or updateFunc is called, or
/// a callee referenced from AIR.
pub fn allocateDeclIndexes(base: *File, decl_index: Module.Decl.Index) error{OutOfMemory}!void {
const decl = base.options.module.?.declPtr(decl_index);
log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name });
if (build_options.only_c) {
assert(base.tag == .c);
return;
}
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl_index),
.wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index),
.c, .spirv, .nvptx => {},
}
}
pub fn releaseLock(self: *File) void { pub fn releaseLock(self: *File) void {
if (self.lock) |*lock| { if (self.lock) |*lock| {
lock.release(); lock.release();
@ -874,8 +818,7 @@ pub const File = struct {
AnalysisFail, AnalysisFail,
}; };
/// May be called before or after updateDecl, but must be called after /// May be called before or after updateDecl for any given Decl.
/// allocateDeclIndexes for any given Decl.
pub fn updateDeclExports( pub fn updateDeclExports(
base: *File, base: *File,
module: *Module, module: *Module,
@ -911,6 +854,8 @@ pub const File = struct {
/// The linker is passed information about the containing atom, `parent_atom_index`, and offset within its /// The linker is passed information about the containing atom, `parent_atom_index`, and offset within its
/// memory buffer, `offset`, so that it can make a note of potential relocation sites, in case the /// memory buffer, `offset`, so that it can make a note of potential relocation sites, in case the
/// `Decl`'s address has not yet been resolved, or the containing atom gets moved in virtual memory. /// `Decl`'s address has not yet been resolved, or the containing atom gets moved in virtual memory.
/// May be called before or after updateFunc/updateDecl therefore it is up to the linker to allocate
/// the block/atom.
pub fn getDeclVAddr(base: *File, decl_index: Module.Decl.Index, reloc_info: RelocInfo) !u64 { pub fn getDeclVAddr(base: *File, decl_index: Module.Decl.Index, reloc_info: RelocInfo) !u64 {
if (build_options.only_c) unreachable; if (build_options.only_c) unreachable;
switch (base.tag) { switch (base.tag) {
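With `allocateDeclIndexes` gone, along with the per-backend `LinkBlock`/`LinkFn`/`Export` unions that used to live inside each `Decl`, every linker backend is expected to create its atom lazily the first time `updateDecl`, `updateFunc`, or `getDeclVAddr` touches a given Decl (the new doc comment on `getDeclVAddr` spells this out). A minimal sketch of that get-or-create pattern with stand-in types; the helper name is hypothetical, not the exact function each backend defines:

const std = @import("std");

const DeclIndex = u32;
const AtomIndex = u32;

const FakeBackend = struct {
    gpa: std.mem.Allocator,
    atoms_created: u32 = 0,
    decl_to_atom: std.AutoHashMapUnmanaged(DeclIndex, AtomIndex) = .{},

    /// Hypothetical helper: called from updateDecl/updateFunc/getDeclVAddr,
    /// so the atom exists no matter which entry point runs first.
    fn getOrCreateAtomForDecl(self: *FakeBackend, decl: DeclIndex) !AtomIndex {
        const gop = try self.decl_to_atom.getOrPut(self.gpa, decl);
        if (!gop.found_existing) {
            gop.value_ptr.* = self.atoms_created;
            self.atoms_created += 1;
        }
        return gop.value_ptr.*;
    }
};

test "atom is created once, regardless of call order" {
    var backend = FakeBackend{ .gpa = std.testing.allocator };
    defer backend.decl_to_atom.deinit(std.testing.allocator);

    const a = try backend.getOrCreateAtomForDecl(3); // e.g. from getDeclVAddr
    const b = try backend.getOrCreateAtomForDecl(3); // e.g. from updateDecl
    try std.testing.expectEqual(a, b);
    try std.testing.expectEqual(@as(u32, 1), backend.atoms_created);
}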

View File

@ -219,12 +219,12 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi
code.shrinkAndFree(module.gpa, code.items.len); code.shrinkAndFree(module.gpa, code.items.len);
} }
pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void { pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
// The C backend does not have the ability to fix line numbers without re-generating // The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl. // the entire Decl.
_ = self; _ = self;
_ = module; _ = module;
_ = decl; _ = decl_index;
} }
pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void { pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {

View File

@ -79,13 +79,13 @@ entry_addr: ?u32 = null,
/// We store them here so that we can properly dispose of any allocated /// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker. /// memory within the atom in the incremental linker.
/// TODO consolidate this. /// TODO consolidate this.
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{}, decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are either synthetic or map directly to the Zig source program. /// List of atoms that are either synthetic or map directly to the Zig source program.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{}, atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index. /// Table of atoms indexed by the symbol index.
atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{}, atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`. /// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl` /// We store them here so that we can free the constants whenever the `Decl`
@ -124,9 +124,9 @@ const Entry = struct {
sym_index: u32, sym_index: u32,
}; };
const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation)); const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32)); const BaseRelocationTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom)); const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const default_file_alignment: u16 = 0x200; const default_file_alignment: u16 = 0x200;
const default_size_of_stack_reserve: u32 = 0x1000000; const default_size_of_stack_reserve: u32 = 0x1000000;
@ -137,7 +137,7 @@ const default_size_of_heap_commit: u32 = 0x1000;
const Section = struct { const Section = struct {
header: coff.SectionHeader, header: coff.SectionHeader,
last_atom: ?*Atom = null, last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false /// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added /// positives, as functions grow and shrink over time, only sometimes being added
@ -154,7 +154,34 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to /// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it /// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity. /// by 1 byte. It will then have -1 overcapacity.
free_list: std.ArrayListUnmanaged(*Atom) = .{}, free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};
const DeclMetadata = struct {
atom: Atom.Index,
section: u16,
/// A list of all exports aliases of this Decl.
exports: std.ArrayListUnmanaged(u32) = .{},
fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 {
for (m.exports.items) |exp| {
if (mem.eql(u8, name, coff_file.getSymbolName(.{
.sym_index = exp,
.file = null,
}))) return exp;
}
return null;
}
fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 {
for (m.exports.items) |*exp| {
if (mem.eql(u8, name, coff_file.getSymbolName(.{
.sym_index = exp.*,
.file = null,
}))) return exp;
}
return null;
}
}; };
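Export symbols are now tracked per Decl in `DeclMetadata.exports` rather than through a global `Export` union, and `getExport`/`getExportPtr` do a linear scan comparing symbol names via `getSymbolName`. A tiny self-contained sketch of that name-keyed scan, with a plain slice of strings standing in for the symbol table:

const std = @import("std");

const DeclMetadata = struct {
    /// Symbol indices of every export alias of this Decl.
    exports: std.ArrayListUnmanaged(u32) = .{},

    fn getExport(m: DeclMetadata, symbol_names: []const []const u8, name: []const u8) ?u32 {
        for (m.exports.items) |exp| {
            if (std.mem.eql(u8, name, symbol_names[exp])) return exp;
        }
        return null;
    }
};

test "look up an export alias by name" {
    const names = [_][]const u8{ "_start", "foo", "bar" };
    var md = DeclMetadata{};
    defer md.exports.deinit(std.testing.allocator);
    try md.exports.append(std.testing.allocator, 1);
    try md.exports.append(std.testing.allocator, 2);

    try std.testing.expectEqual(@as(?u32, 2), md.getExport(&names, "bar"));
    try std.testing.expectEqual(@as(?u32, null), md.getExport(&names, "baz"));
}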
pub const PtrWidth = enum { pub const PtrWidth = enum {
@ -168,11 +195,6 @@ pub const PtrWidth = enum {
}; };
} }
}; };
pub const SrcFn = void;
pub const Export = struct {
sym_index: ?u32 = null,
};
pub const SymbolWithLoc = struct { pub const SymbolWithLoc = struct {
// Index into the respective symbol table. // Index into the respective symbol table.
@ -271,11 +293,7 @@ pub fn deinit(self: *Coff) void {
} }
self.sections.deinit(gpa); self.sections.deinit(gpa);
for (self.managed_atoms.items) |atom| { self.atoms.deinit(gpa);
gpa.destroy(atom);
}
self.managed_atoms.deinit(gpa);
self.locals.deinit(gpa); self.locals.deinit(gpa);
self.globals.deinit(gpa); self.globals.deinit(gpa);
@ -297,7 +315,15 @@ pub fn deinit(self: *Coff) void {
self.imports.deinit(gpa); self.imports.deinit(gpa);
self.imports_free_list.deinit(gpa); self.imports_free_list.deinit(gpa);
self.imports_table.deinit(gpa); self.imports_table.deinit(gpa);
{
var it = self.decls.iterator();
while (it.next()) |entry| {
entry.value_ptr.exports.deinit(gpa);
}
self.decls.deinit(gpa); self.decls.deinit(gpa);
}
self.atom_by_index_table.deinit(gpa); self.atom_by_index_table.deinit(gpa);
{ {
@ -461,17 +487,18 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container. // TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell. // This is required by the loader anyhow as far as I can tell.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| { for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id + 1 + next_sect_id]; const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff; next_header.virtual_address += diff;
if (maybe_last_atom.*) |last_atom| { if (maybe_last_atom_index) |last_atom_index| {
var atom = last_atom; var atom_index = last_atom_index;
while (true) { while (true) {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self); const sym = atom.getSymbolPtr(self);
sym.value += diff; sym.value += diff;
if (atom.prev) |prev| { if (atom.prev_index) |prev_index| {
atom = prev; atom_index = prev_index;
} else break; } else break;
} }
} }
@ -480,24 +507,15 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
header.virtual_size = increased_size; header.virtual_size = increased_size;
} }
pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void { fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
if (self.llvm_object) |_| return;
const decl = self.base.options.module.?.declPtr(decl_index);
if (decl.link.coff.sym_index != 0) return;
decl.link.coff.sym_index = try self.allocateSymbol();
const gpa = self.base.allocator;
try self.atom_by_index_table.putNoClobber(gpa, decl.link.coff.sym_index, &decl.link.coff);
try self.decls.putNoClobber(gpa, decl_index, null);
}
fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
const tracy = trace(@src()); const tracy = trace(@src());
defer tracy.end(); defer tracy.end();
const atom = self.getAtom(atom_index);
const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1; const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1;
const header = &self.sections.items(.header)[sect_id]; const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id]; const free_list = &self.sections.items(.free_list)[sect_id];
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id]; const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size; const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
// We use these to indicate our intention to update metadata, placing the new atom, // We use these to indicate our intention to update metadata, placing the new atom,
@ -505,7 +523,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
// It would be simpler to do it inside the for loop below, but that would cause a // It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action // problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible. // is actually carried out at the end of the function, when errors are no longer possible.
var atom_placement: ?*Atom = null; var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null; var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node. // First we look for an appropriately sized free list node.
@ -513,7 +531,8 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
var vaddr = blk: { var vaddr = blk: {
var i: usize = 0; var i: usize = 0;
while (i < free_list.items.len) { while (i < free_list.items.len) {
const big_atom = free_list.items[i]; const big_atom_index = free_list.items[i];
const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity. // We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom? // Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self); const sym = big_atom.getSymbol(self);
@ -541,34 +560,43 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
const keep_free_list_node = remaining_capacity >= min_text_capacity; const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible. // Set up the metadata to be updated, after errors are no longer possible.
atom_placement = big_atom; atom_placement = big_atom_index;
if (!keep_free_list_node) { if (!keep_free_list_node) {
free_list_removal = i; free_list_removal = i;
} }
break :blk new_start_vaddr; break :blk new_start_vaddr;
} else if (maybe_last_atom.*) |last| { } else if (maybe_last_atom_index.*) |last_index| {
const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self); const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size; const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity; const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment); const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
atom_placement = last; atom_placement = last_index;
break :blk new_start_vaddr; break :blk new_start_vaddr;
} else { } else {
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment); break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
} }
}; };
const expand_section = atom_placement == null or atom_placement.?.next == null; const expand_section = if (atom_placement) |placement_index|
self.getAtom(placement_index).next_index == null
else
true;
if (expand_section) { if (expand_section) {
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data); const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address; const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
if (needed_size > sect_capacity) { if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, default_file_alignment); const new_offset = self.findFreeSpace(needed_size, default_file_alignment);
const current_size = if (maybe_last_atom.*) |last_atom| blk: { const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self); const sym = last_atom.getSymbol(self);
break :blk (sym.value + last_atom.size) - header.virtual_address; break :blk (sym.value + last_atom.size) - header.virtual_address;
} else 0; } else 0;
log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getSectionName(header), header.pointer_to_raw_data, new_offset }); log.debug("moving {s} from 0x{x} to 0x{x}", .{
self.getSectionName(header),
header.pointer_to_raw_data,
new_offset,
});
const amt = try self.base.file.?.copyRangeAll( const amt = try self.base.file.?.copyRangeAll(
header.pointer_to_raw_data, header.pointer_to_raw_data,
self.base.file.?, self.base.file.?,
@ -587,26 +615,34 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
header.virtual_size = @max(header.virtual_size, needed_size); header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size; header.size_of_raw_data = needed_size;
maybe_last_atom.* = atom; maybe_last_atom_index.* = atom_index;
} }
atom.size = new_atom_size; {
atom.alignment = alignment; const atom_ptr = self.getAtomPtr(atom_index);
atom_ptr.size = new_atom_size;
if (atom.prev) |prev| { atom_ptr.alignment = alignment;
prev.next = atom.next;
}
if (atom.next) |next| {
next.prev = atom.prev;
} }
if (atom_placement) |big_atom| { if (atom.prev_index) |prev_index| {
atom.prev = big_atom; const prev = self.getAtomPtr(prev_index);
atom.next = big_atom.next; prev.next_index = atom.next_index;
big_atom.next = atom; }
if (atom.next_index) |next_index| {
const next = self.getAtomPtr(next_index);
next.prev_index = atom.prev_index;
}
if (atom_placement) |big_atom_index| {
const big_atom = self.getAtomPtr(big_atom_index);
const atom_ptr = self.getAtomPtr(atom_index);
atom_ptr.prev_index = big_atom_index;
atom_ptr.next_index = big_atom.next_index;
big_atom.next_index = atom_index;
} else { } else {
atom.prev = null; const atom_ptr = self.getAtomPtr(atom_index);
atom.next = null; atom_ptr.prev_index = null;
atom_ptr.next_index = null;
} }
if (free_list_removal) |i| { if (free_list_removal) |i| {
_ = free_list.swapRemove(i); _ = free_list.swapRemove(i);
@ -615,7 +651,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
return vaddr; return vaddr;
} }
fn allocateSymbol(self: *Coff) !u32 { pub fn allocateSymbol(self: *Coff) !u32 {
const gpa = self.base.allocator; const gpa = self.base.allocator;
try self.locals.ensureUnusedCapacity(gpa, 1); try self.locals.ensureUnusedCapacity(gpa, 1);
@ -711,25 +747,37 @@ pub fn allocateImportEntry(self: *Coff, target: SymbolWithLoc) !u32 {
return index; return index;
} }
fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom { pub fn createAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator; const gpa = self.base.allocator;
const atom = try gpa.create(Atom); const atom_index = @intCast(Atom.Index, self.atoms.items.len);
errdefer gpa.destroy(atom); const atom = try self.atoms.addOne(gpa);
atom.* = Atom.empty; const sym_index = try self.allocateSymbol();
atom.sym_index = try self.allocateSymbol(); try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
atom.* = .{
.sym_index = sym_index,
.file = null,
.size = 0,
.alignment = 0,
.prev_index = null,
.next_index = null,
};
log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
return atom_index;
}
fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
const atom_index = try self.createAtom();
const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64); atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64); atom.alignment = @alignOf(u64);
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
const sym = atom.getSymbolPtr(self); const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1); sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment); sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated GOT atom at 0x{x}", .{sym.value}); log.debug("allocated GOT atom at 0x{x}", .{sym.value});
try atom.addRelocation(self, .{ try Atom.addRelocation(self, atom_index, .{
.type = .direct, .type = .direct,
.target = target, .target = target,
.offset = 0, .offset = 0,
@ -743,67 +791,67 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"), .UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
.ABSOLUTE => {}, .ABSOLUTE => {},
.DEBUG => unreachable, // not possible .DEBUG => unreachable, // not possible
else => try atom.addBaseRelocation(self, 0), else => try Atom.addBaseRelocation(self, atom_index, 0),
} }
return atom; return atom_index;
} }
fn createImportAtom(self: *Coff) !*Atom { fn createImportAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator; const atom_index = try self.createAtom();
const atom = try gpa.create(Atom); const atom = self.getAtomPtr(atom_index);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
atom.sym_index = try self.allocateSymbol();
atom.size = @sizeOf(u64); atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64); atom.alignment = @alignOf(u64);
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
const sym = atom.getSymbolPtr(self); const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1); sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment); sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated import atom at 0x{x}", .{sym.value}); log.debug("allocated import atom at 0x{x}", .{sym.value});
return atom; return atom_index;
} }
fn growAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 { fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self); const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value; const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self); const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.value; if (!need_realloc) return sym.value;
return self.allocateAtom(atom, new_atom_size, alignment); return self.allocateAtom(atom_index, new_atom_size, alignment);
} }
fn shrinkAtom(self: *Coff, atom: *Atom, new_block_size: u32) void { fn shrinkAtom(self: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
_ = self; _ = self;
_ = atom; _ = atom_index;
_ = new_block_size; _ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough // TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it. // capacity, insert a free list node for it.
} }
fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void { fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []const u8) !void {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self); const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1); const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address; const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{ atom.getName(self), file_offset, file_offset + code.len }); log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{
atom.getName(self),
file_offset,
file_offset + code.len,
});
try self.base.file.?.pwriteAll(code, file_offset); try self.base.file.?.pwriteAll(code, file_offset);
try self.resolveRelocs(atom); try self.resolveRelocs(atom_index);
} }
fn writePtrWidthAtom(self: *Coff, atom: *Atom) !void { fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
switch (self.ptr_width) { switch (self.ptr_width) {
.p32 => { .p32 => {
var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32); var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
try self.writeAtom(atom, &buffer); try self.writeAtom(atom_index, &buffer);
}, },
.p64 => { .p64 => {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64); var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
try self.writeAtom(atom, &buffer); try self.writeAtom(atom_index, &buffer);
}, },
} }
} }
@ -823,7 +871,8 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
var it = self.relocs.valueIterator(); var it = self.relocs.valueIterator();
while (it.next()) |relocs| { while (it.next()) |relocs| {
for (relocs.items) |*reloc| { for (relocs.items) |*reloc| {
const target_atom = reloc.getTargetAtom(self) orelse continue; const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self); const target_sym = target_atom.getSymbol(self);
if (target_sym.value < addr) continue; if (target_sym.value < addr) continue;
reloc.dirty = true; reloc.dirty = true;
@ -831,23 +880,26 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
} }
} }
fn resolveRelocs(self: *Coff, atom: *Atom) !void { fn resolveRelocs(self: *Coff, atom_index: Atom.Index) !void {
const relocs = self.relocs.get(atom) orelse return; const relocs = self.relocs.get(atom_index) orelse return;
log.debug("relocating '{s}'", .{atom.getName(self)}); log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});
for (relocs.items) |*reloc| { for (relocs.items) |*reloc| {
if (!reloc.dirty) continue; if (!reloc.dirty) continue;
try reloc.resolve(atom, self); try reloc.resolve(atom_index, self);
} }
} }
fn freeAtom(self: *Coff, atom: *Atom) void { fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
log.debug("freeAtom {*}", .{atom}); log.debug("freeAtom {d}", .{atom_index});
const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom // Remove any relocs and base relocs associated with this Atom
self.freeRelocationsForAtom(atom); Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self); const sym = atom.getSymbol(self);
const sect_id = @enumToInt(sym.section_number) - 1; const sect_id = @enumToInt(sym.section_number) - 1;
const free_list = &self.sections.items(.free_list)[sect_id]; const free_list = &self.sections.items(.free_list)[sect_id];
@ -856,46 +908,69 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
var i: usize = 0; var i: usize = 0;
// TODO turn free_list into a hash map // TODO turn free_list into a hash map
while (i < free_list.items.len) { while (i < free_list.items.len) {
if (free_list.items[i] == atom) { if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i); _ = free_list.swapRemove(i);
continue; continue;
} }
if (free_list.items[i] == atom.prev) { if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true; already_have_free_list_node = true;
} }
i += 1; i += 1;
} }
} }
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id]; const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
if (maybe_last_atom.*) |last_atom| { if (maybe_last_atom_index.*) |last_atom_index| {
if (last_atom == atom) { if (last_atom_index == atom_index) {
if (atom.prev) |prev| { if (atom.prev_index) |prev_index| {
// TODO shrink the section size here // TODO shrink the section size here
maybe_last_atom.* = prev; maybe_last_atom_index.* = prev_index;
} else { } else {
maybe_last_atom.* = null; maybe_last_atom_index.* = null;
} }
} }
} }
if (atom.prev) |prev| { if (atom.prev_index) |prev_index| {
prev.next = atom.next; const prev = self.getAtomPtr(prev_index);
prev.next_index = atom.next_index;
if (!already_have_free_list_node and prev.freeListEligible(self)) { if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can // The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here. // ignore the OOM here.
free_list.append(self.base.allocator, prev) catch {}; free_list.append(gpa, prev_index) catch {};
} }
} else { } else {
atom.prev = null; self.getAtomPtr(atom_index).prev_index = null;
} }
if (atom.next) |next| { if (atom.next_index) |next_index| {
next.prev = atom.prev; self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else { } else {
atom.next = null; self.getAtomPtr(atom_index).next_index = null;
} }
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
const sym_index = atom.getSymbolIndex().?;
self.locals_free_list.append(gpa, sym_index) catch {};
// Try freeing GOT atom if this decl had one
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
if (self.got_entries_table.get(got_target)) |got_index| {
self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
self.got_entries.items[got_index] = .{
.target = .{ .sym_index = 0, .file = null },
.sym_index = 0,
};
_ = self.got_entries_table.remove(got_target);
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
self.getAtomPtr(atom_index).sym_index = 0;
} }
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@ -912,8 +987,10 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const decl_index = func.owner_decl; const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index); const decl = module.declPtr(decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index); self.freeUnnamedConsts(decl_index);
self.freeRelocationsForAtom(&decl.link.coff); Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator); var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit(); defer code_buffer.deinit();
@ -928,7 +1005,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
.none, .none,
); );
const code = switch (res) { const code = switch (res) {
.appended => code_buffer.items, .ok => code_buffer.items,
.fail => |em| { .fail => |em| {
decl.analysis = .codegen_failure; decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em); try module.failed_decls.put(module.gpa, decl_index, em);
@ -957,12 +1034,8 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
} }
const unnamed_consts = gop.value_ptr; const unnamed_consts = gop.value_ptr;
const atom = try gpa.create(Atom); const atom_index = try self.createAtom();
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
atom.sym_index = try self.allocateSymbol();
const sym = atom.getSymbolPtr(self);
const sym_name = blk: { const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod); const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name); defer gpa.free(decl_name);
@ -971,18 +1044,18 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index }); break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
}; };
defer gpa.free(sym_name); defer gpa.free(sym_name);
{
const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, sym_name); try self.setSymbolName(sym, sym_name);
sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1); sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
}
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{ const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
.parent_atom_index = atom.sym_index, .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
}); });
const code = switch (res) { const code = switch (res) {
.externally_managed => |x| x, .ok => code_buffer.items,
.appended => code_buffer.items,
.fail => |em| { .fail => |em| {
decl.analysis = .codegen_failure; decl.analysis = .codegen_failure;
try mod.failed_decls.put(mod.gpa, decl_index, em); try mod.failed_decls.put(mod.gpa, decl_index, em);
@ -992,19 +1065,20 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
}; };
const required_alignment = tv.ty.abiAlignment(self.base.options.target); const required_alignment = tv.ty.abiAlignment(self.base.options.target);
const atom = self.getAtomPtr(atom_index);
atom.alignment = required_alignment; atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len); atom.size = @intCast(u32, code.len);
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment); atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
errdefer self.freeAtom(atom); errdefer self.freeAtom(atom_index);
try unnamed_consts.append(gpa, atom); try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, sym.value }); log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
log.debug(" (required alignment 0x{x})", .{required_alignment}); log.debug(" (required alignment 0x{x})", .{required_alignment});
try self.writeAtom(atom, code); try self.writeAtom(atom_index, code);
return atom.sym_index; return atom.getSymbolIndex().?;
} }
pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void { pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
@ -1029,7 +1103,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
} }
} }
self.freeRelocationsForAtom(&decl.link.coff); const atom_index = try self.getOrCreateAtomForDecl(decl_index);
Atom.freeRelocations(self, atom_index);
const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator); var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit(); defer code_buffer.deinit();
@ -1039,11 +1115,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
.ty = decl.ty, .ty = decl.ty,
.val = decl_val, .val = decl_val,
}, &code_buffer, .none, .{ }, &code_buffer, .none, .{
.parent_atom_index = decl.link.coff.sym_index, .parent_atom_index = atom.getSymbolIndex().?,
}); });
const code = switch (res) { const code = switch (res) {
.externally_managed => |x| x, .ok => code_buffer.items,
.appended => code_buffer.items,
.fail => |em| { .fail => |em| {
decl.analysis = .codegen_failure; decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em); try module.failed_decls.put(module.gpa, decl_index, em);
@ -1058,7 +1133,20 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index)); return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
} }
fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 { pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{
.atom = try self.createAtom(),
.section = self.getDeclOutputSection(decl_index),
.exports = .{},
};
}
return gop.value_ptr.atom;
}
fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty; const ty = decl.ty;
const zig_ty = ty.zigTypeTag(); const zig_ty = ty.zigTypeTag();
const val = decl.val; const val = decl.val;
@ -1093,15 +1181,12 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl }); log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target); const required_alignment = decl.getAlignment(self.base.options.target);
const decl_ptr = self.decls.getPtr(decl_index).?; const decl_metadata = self.decls.get(decl_index).?;
if (decl_ptr.* == null) { const atom_index = decl_metadata.atom;
decl_ptr.* = self.getDeclOutputSection(decl); const atom = self.getAtom(atom_index);
} const sect_index = decl_metadata.section;
const sect_index = decl_ptr.*.?;
const code_len = @intCast(u32, code.len); const code_len = @intCast(u32, code.len);
const atom = &decl.link.coff;
assert(atom.sym_index != 0); // Caller forgot to allocateDeclIndexes()
if (atom.size != 0) { if (atom.size != 0) {
const sym = atom.getSymbolPtr(self); const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name); try self.setSymbolName(sym, decl_name);
@ -1111,62 +1196,51 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
const capacity = atom.capacity(self); const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment); const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
if (need_realloc) { if (need_realloc) {
const vaddr = try self.growAtom(atom, code_len, required_alignment); const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr }); log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment}); log.debug(" (required alignment 0x{x}", .{required_alignment});
if (vaddr != sym.value) { if (vaddr != sym.value) {
sym.value = vaddr; sym.value = vaddr;
log.debug(" (updating GOT entry)", .{}); log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null }; const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_atom = self.getGotAtomForSymbol(got_target).?; const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target); self.markRelocsDirtyByTarget(got_target);
try self.writePtrWidthAtom(got_atom); try self.writePtrWidthAtom(got_atom_index);
} }
} else if (code_len < atom.size) { } else if (code_len < atom.size) {
self.shrinkAtom(atom, code_len); self.shrinkAtom(atom_index, code_len);
} }
atom.size = code_len; self.getAtomPtr(atom_index).size = code_len;
} else { } else {
const sym = atom.getSymbolPtr(self); const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name); try self.setSymbolName(sym, decl_name);
sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1); sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL }; sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom, code_len, required_alignment); const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
errdefer self.freeAtom(atom); errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr }); log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
atom.size = code_len; self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr; sym.value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null }; const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_index = try self.allocateGotEntry(got_target); const got_index = try self.allocateGotEntry(got_target);
const got_atom = try self.createGotAtom(got_target); const got_atom_index = try self.createGotAtom(got_target);
self.got_entries.items[got_index].sym_index = got_atom.sym_index; const got_atom = self.getAtom(got_atom_index);
try self.writePtrWidthAtom(got_atom); self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(got_atom_index);
} }
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc()); self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
try self.writeAtom(atom, code); try self.writeAtom(atom_index, code);
}
fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
var removed_relocs = self.relocs.fetchRemove(atom);
if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
var removed_base_relocs = self.base_relocs.fetchRemove(atom);
if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
} }
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void { fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
const gpa = self.base.allocator; const gpa = self.base.allocator;
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return; const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| { for (unnamed_consts.items) |atom_index| {
self.freeAtom(atom); self.freeAtom(atom_index);
self.locals_free_list.append(gpa, atom.sym_index) catch {};
self.locals.items[atom.sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(atom.sym_index);
log.debug(" adding local symbol index {d} to free list", .{atom.sym_index});
atom.sym_index = 0;
} }
unnamed_consts.clearAndFree(gpa); unnamed_consts.clearAndFree(gpa);
} }
@ -1181,35 +1255,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl}); log.debug("freeDecl {*}", .{decl});
const kv = self.decls.fetchRemove(decl_index); if (self.decls.fetchRemove(decl_index)) |const_kv| {
if (kv.?.value) |_| { var kv = const_kv;
self.freeAtom(&decl.link.coff); self.freeAtom(kv.value.atom);
self.freeUnnamedConsts(decl_index); self.freeUnnamedConsts(decl_index);
} kv.value.exports.deinit(self.base.allocator);
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
const gpa = self.base.allocator;
const sym_index = decl.link.coff.sym_index;
if (sym_index != 0) {
self.locals_free_list.append(gpa, sym_index) catch {};
// Try freeing GOT atom if this decl had one
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
if (self.got_entries_table.get(got_target)) |got_index| {
self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
self.got_entries.items[got_index] = .{
.target = .{ .sym_index = 0, .file = null },
.sym_index = 0,
};
_ = self.got_entries_table.remove(got_target);
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
decl.link.coff.sym_index = 0;
} }
} }
@ -1262,9 +1312,10 @@ pub fn updateDeclExports(
const gpa = self.base.allocator; const gpa = self.base.allocator;
const decl = module.declPtr(decl_index); const decl = module.declPtr(decl_index);
const atom = &decl.link.coff; const atom_index = try self.getOrCreateAtomForDecl(decl_index);
if (atom.sym_index == 0) return; const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self); const decl_sym = atom.getSymbol(self);
const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| { for (exports) |exp| {
log.debug("adding new export '{s}'", .{exp.options.name}); log.debug("adding new export '{s}'", .{exp.options.name});
@ -1299,9 +1350,9 @@ pub fn updateDeclExports(
continue; continue;
} }
const sym_index = exp.link.coff.sym_index orelse blk: { const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
const sym_index = try self.allocateSymbol(); const sym_index = try self.allocateSymbol();
exp.link.coff.sym_index = sym_index; try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index; break :blk sym_index;
}; };
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@ -1324,16 +1375,15 @@ pub fn updateDeclExports(
} }
} }
pub fn deleteExport(self: *Coff, exp: Export) void { pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return; if (self.llvm_object) |_| return;
const sym_index = exp.sym_index orelse return; const metadata = self.decls.getPtr(decl_index) orelse return;
const sym_index = metadata.getExportPtr(self, name) orelse return;
const gpa = self.base.allocator; const gpa = self.base.allocator;
const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const sym = self.getSymbolPtr(sym_loc); const sym = self.getSymbolPtr(sym_loc);
const sym_name = self.getSymbolName(sym_loc); log.debug("deleting export '{s}'", .{name});
log.debug("deleting export '{s}'", .{sym_name});
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED); assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
sym.* = .{ sym.* = .{
.name = [_]u8{0} ** 8, .name = [_]u8{0} ** 8,
@ -1343,9 +1393,9 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.storage_class = .NULL, .storage_class = .NULL,
.number_of_aux_symbols = 0, .number_of_aux_symbols = 0,
}; };
self.locals_free_list.append(gpa, sym_index) catch {}; self.locals_free_list.append(gpa, sym_index.*) catch {};
if (self.resolver.fetchRemove(sym_name)) |entry| { if (self.resolver.fetchRemove(name)) |entry| {
defer gpa.free(entry.key); defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {}; self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{ self.globals.items[entry.value] = .{
@ -1353,6 +1403,8 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.file = null, .file = null,
}; };
} }
sym_index.* = 0;
} }
fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void { fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
@ -1417,9 +1469,10 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (self.imports_table.contains(global)) continue; if (self.imports_table.contains(global)) continue;
const import_index = try self.allocateImportEntry(global); const import_index = try self.allocateImportEntry(global);
const import_atom = try self.createImportAtom(); const import_atom_index = try self.createImportAtom();
self.imports.items[import_index].sym_index = import_atom.sym_index; const import_atom = self.getAtom(import_atom_index);
try self.writePtrWidthAtom(import_atom); self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(import_atom_index);
} }
if (build_options.enable_logging) { if (build_options.enable_logging) {
@ -1453,20 +1506,14 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
} }
} }
pub fn getDeclVAddr( pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
self: *Coff,
decl_index: Module.Decl.Index,
reloc_info: link.File.RelocInfo,
) !u64 {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
assert(self.llvm_object == null); assert(self.llvm_object == null);
assert(decl.link.coff.sym_index != 0);
const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?; const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
const target = SymbolWithLoc{ .sym_index = decl.link.coff.sym_index, .file = null }; const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
try atom.addRelocation(self, .{ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
try Atom.addRelocation(self, atom_index, .{
.type = .direct, .type = .direct,
.target = target, .target = target,
.offset = @intCast(u32, reloc_info.offset), .offset = @intCast(u32, reloc_info.offset),
@ -1474,7 +1521,7 @@ pub fn getDeclVAddr(
.pcrel = false, .pcrel = false,
.length = 3, .length = 3,
}); });
try atom.addBaseRelocation(self, @intCast(u32, reloc_info.offset)); try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
return 0; return 0;
} }
@ -1501,10 +1548,10 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
return global_index; return global_index;
} }
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void { pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
_ = self; _ = self;
_ = module; _ = module;
_ = decl; _ = decl_index;
log.debug("TODO implement updateDeclLineNumber", .{}); log.debug("TODO implement updateDeclLineNumber", .{});
} }
@ -1525,7 +1572,8 @@ fn writeBaseRelocations(self: *Coff) !void {
var it = self.base_relocs.iterator(); var it = self.base_relocs.iterator();
while (it.next()) |entry| { while (it.next()) |entry| {
const atom = entry.key_ptr.*; const atom_index = entry.key_ptr.*;
const atom = self.getAtom(atom_index);
const offsets = entry.value_ptr.*; const offsets = entry.value_ptr.*;
for (offsets.items) |offset| { for (offsets.items) |offset| {
@ -1609,7 +1657,8 @@ fn writeImportTable(self: *Coff) !void {
const gpa = self.base.allocator; const gpa = self.base.allocator;
const section = self.sections.get(self.idata_section_index.?); const section = self.sections.get(self.idata_section_index.?);
const last_atom = section.last_atom orelse return; const last_atom_index = section.last_atom_index orelse return;
const last_atom = self.getAtom(last_atom_index);
const iat_rva = section.header.virtual_address; const iat_rva = section.header.virtual_address;
const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer
@ -2047,27 +2096,37 @@ pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr }; return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
} }
pub fn getAtom(self: *const Coff, atom_index: Atom.Index) Atom {
assert(atom_index < self.atoms.items.len);
return self.atoms.items[atom_index];
}
pub fn getAtomPtr(self: *Coff, atom_index: Atom.Index) *Atom {
assert(atom_index < self.atoms.items.len);
return &self.atoms.items[atom_index];
}
/// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor. /// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor.
/// Returns null on failure. /// Returns null on failure.
pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom { pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_loc.file == null); // TODO linking with object files assert(sym_loc.file == null); // TODO linking with object files
return self.atom_by_index_table.get(sym_loc.sym_index); return self.atom_by_index_table.get(sym_loc.sym_index);
} }
/// Returns GOT atom that references `sym_loc` if one exists. /// Returns GOT atom that references `sym_loc` if one exists.
/// Returns null otherwise. /// Returns null otherwise.
pub fn getGotAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom { pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_loc) orelse return null; const got_index = self.got_entries_table.get(sym_loc) orelse return null;
const got_entry = self.got_entries.items[got_index]; const got_entry = self.got_entries.items[got_index];
return self.getAtomForSymbol(.{ .sym_index = got_entry.sym_index, .file = null }); return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
} }
/// Returns import atom that references `sym_loc` if one exists. /// Returns import atom that references `sym_loc` if one exists.
/// Returns null otherwise. /// Returns null otherwise.
pub fn getImportAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom { pub fn getImportAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const imports_index = self.imports_table.get(sym_loc) orelse return null; const imports_index = self.imports_table.get(sym_loc) orelse return null;
const imports_entry = self.imports.items[imports_index]; const imports_entry = self.imports.items[imports_index];
return self.getAtomForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null }); return self.getAtomIndexForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
} }
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void { fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {


@ -27,42 +27,44 @@ alignment: u32,
/// Points to the previous and next neighbors, based on the `text_offset`. /// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`. /// This can be used to find, for example, the capacity of this `Atom`.
prev: ?*Atom, prev_index: ?Index,
next: ?*Atom, next_index: ?Index,
pub const empty = Atom{ pub const Index = u32;
.sym_index = 0,
.file = null, pub fn getSymbolIndex(self: Atom) ?u32 {
.size = 0, if (self.sym_index == 0) return null;
.alignment = 0, return self.sym_index;
.prev = null, }
.next = null,
};
/// Returns symbol referencing this atom. /// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol { pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol {
const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbol(.{ return coff_file.getSymbol(.{
.sym_index = self.sym_index, .sym_index = sym_index,
.file = self.file, .file = self.file,
}); });
} }
/// Returns pointer-to-symbol referencing this atom. /// Returns pointer-to-symbol referencing this atom.
pub fn getSymbolPtr(self: Atom, coff_file: *Coff) *coff.Symbol { pub fn getSymbolPtr(self: Atom, coff_file: *Coff) *coff.Symbol {
const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbolPtr(.{ return coff_file.getSymbolPtr(.{
.sym_index = self.sym_index, .sym_index = sym_index,
.file = self.file, .file = self.file,
}); });
} }
pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc { pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
return .{ .sym_index = self.sym_index, .file = self.file }; const sym_index = self.getSymbolIndex().?;
return .{ .sym_index = sym_index, .file = self.file };
} }
/// Returns the name of this atom. /// Returns the name of this atom.
pub fn getName(self: Atom, coff_file: *const Coff) []const u8 { pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbolName(.{ return coff_file.getSymbolName(.{
.sym_index = self.sym_index, .sym_index = sym_index,
.file = self.file, .file = self.file,
}); });
} }
@ -70,7 +72,8 @@ pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
/// Returns how much room there is to grow in virtual address space. /// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 { pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file); const self_sym = self.getSymbol(coff_file);
if (self.next) |next| { if (self.next_index) |next_index| {
const next = coff_file.getAtom(next_index);
const next_sym = next.getSymbol(coff_file); const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value; return next_sym.value - self_sym.value;
} else { } else {
@ -82,7 +85,8 @@ pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool { pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom. // No need to keep a free list node for the last atom.
const next = self.next orelse return false; const next_index = self.next_index orelse return false;
const next = coff_file.getAtom(next_index);
const self_sym = self.getSymbol(coff_file); const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file); const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value; const cap = next_sym.value - self_sym.value;
@ -92,22 +96,33 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
return surplus >= Coff.min_text_capacity; return surplus >= Coff.min_text_capacity;
} }
pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Relocation) !void { pub fn addRelocation(coff_file: *Coff, atom_index: Index, reloc: Relocation) !void {
const gpa = coff_file.base.allocator; const gpa = coff_file.base.allocator;
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index }); log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
const gop = try coff_file.relocs.getOrPut(gpa, self); const gop = try coff_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) { if (!gop.found_existing) {
gop.value_ptr.* = .{}; gop.value_ptr.* = .{};
} }
try gop.value_ptr.append(gpa, reloc); try gop.value_ptr.append(gpa, reloc);
} }
pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void { pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void {
const gpa = coff_file.base.allocator; const gpa = coff_file.base.allocator;
log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{ offset, self.sym_index }); log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
const gop = try coff_file.base_relocs.getOrPut(gpa, self); offset,
coff_file.getAtom(atom_index).getSymbolIndex().?,
});
const gop = try coff_file.base_relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) { if (!gop.found_existing) {
gop.value_ptr.* = .{}; gop.value_ptr.* = .{};
} }
try gop.value_ptr.append(gpa, offset); try gop.value_ptr.append(gpa, offset);
} }
pub fn freeRelocations(coff_file: *Coff, atom_index: Index) void {
const gpa = coff_file.base.allocator;
var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
var removed_base_relocs = coff_file.base_relocs.fetchRemove(atom_index);
if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
}
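
The recurring change in this diff is replacing heap-allocated `*Atom` pointers with stable `Atom.Index` handles into a single `atoms` list, re-derived through `getAtom`/`getAtomPtr` at each use. What follows is a minimal, self-contained sketch of that pattern, not code from this commit: `Container`, `createAtom`, and the test are illustrative names only, written in the same older `@intCast(T, x)` builtin form the diff itself uses.

const std = @import("std");

const Atom = struct {
    sym_index: u32,
    size: u32,
    prev_index: ?Index,
    next_index: ?Index,

    pub const Index = u32;
};

const Container = struct {
    gpa: std.mem.Allocator,
    atoms: std.ArrayListUnmanaged(Atom) = .{},

    fn deinit(self: *Container) void {
        self.atoms.deinit(self.gpa);
    }

    fn createAtom(self: *Container) !Atom.Index {
        const atom_index = @intCast(Atom.Index, self.atoms.items.len);
        const atom = try self.atoms.addOne(self.gpa);
        atom.* = .{ .sym_index = 0, .size = 0, .prev_index = null, .next_index = null };
        return atom_index;
    }

    fn getAtom(self: *const Container, atom_index: Atom.Index) Atom {
        return self.atoms.items[atom_index];
    }

    fn getAtomPtr(self: *Container, atom_index: Atom.Index) *Atom {
        return &self.atoms.items[atom_index];
    }
};

test "an Atom.Index stays valid while the backing list grows" {
    var c = Container{ .gpa = std.testing.allocator };
    defer c.deinit();

    const first = try c.createAtom();
    var i: usize = 0;
    while (i < 100) : (i += 1) _ = try c.createAtom(); // growth may reallocate items

    c.getAtomPtr(first).size = 8; // pointer is re-derived from the index on each use
    try std.testing.expectEqual(@as(u32, 8), c.getAtom(first).size);
}

The point of the index-based handle is that appending to `atoms` may reallocate the backing array, which would invalidate a stored `*Atom`; an index survives that and is converted back to a pointer only at the moment of use.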


@ -46,33 +46,35 @@ length: u2,
dirty: bool = true, dirty: bool = true,
/// Returns an Atom which is the target node of this relocation edge (if any). /// Returns an Atom which is the target node of this relocation edge (if any).
pub fn getTargetAtom(self: Relocation, coff_file: *Coff) ?*Atom { pub fn getTargetAtomIndex(self: Relocation, coff_file: *const Coff) ?Atom.Index {
switch (self.type) { switch (self.type) {
.got, .got,
.got_page, .got_page,
.got_pageoff, .got_pageoff,
=> return coff_file.getGotAtomForSymbol(self.target), => return coff_file.getGotAtomIndexForSymbol(self.target),
.direct, .direct,
.page, .page,
.pageoff, .pageoff,
=> return coff_file.getAtomForSymbol(self.target), => return coff_file.getAtomIndexForSymbol(self.target),
.import, .import,
.import_page, .import_page,
.import_pageoff, .import_pageoff,
=> return coff_file.getImportAtomForSymbol(self.target), => return coff_file.getImportAtomIndexForSymbol(self.target),
} }
} }
pub fn resolve(self: *Relocation, atom: *Atom, coff_file: *Coff) !void { pub fn resolve(self: *Relocation, atom_index: Atom.Index, coff_file: *Coff) !void {
const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file); const source_sym = atom.getSymbol(coff_file);
const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header; const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header;
const source_vaddr = source_sym.value + self.offset; const source_vaddr = source_sym.value + self.offset;
const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address; const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address;
const target_atom = self.getTargetAtom(coff_file) orelse return; const target_atom_index = self.getTargetAtomIndex(coff_file) orelse return;
const target_atom = coff_file.getAtom(target_atom_index);
const target_vaddr = target_atom.getSymbol(coff_file).value; const target_vaddr = target_atom.getSymbol(coff_file).value;
const target_vaddr_with_addend = target_vaddr + self.addend; const target_vaddr_with_addend = target_vaddr + self.addend;
@ -107,7 +109,7 @@ const Context = struct {
image_base: u64, image_base: u64,
}; };
fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void { fn resolveAarch64(self: Relocation, ctx: Context, coff_file: *Coff) !void {
var buffer: [@sizeOf(u64)]u8 = undefined; var buffer: [@sizeOf(u64)]u8 = undefined;
switch (self.length) { switch (self.length) {
2 => { 2 => {
@ -197,7 +199,7 @@ fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
} }
} }
fn resolveX86(self: *Relocation, ctx: Context, coff_file: *Coff) !void { fn resolveX86(self: Relocation, ctx: Context, coff_file: *Coff) !void {
switch (self.type) { switch (self.type) {
.got_page => unreachable, .got_page => unreachable,
.got_pageoff => unreachable, .got_pageoff => unreachable,

(Two file diffs suppressed because they are too large.)

src/link/Elf/Atom.zig (new file, 100 lines)

@ -0,0 +1,100 @@
const Atom = @This();
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const Elf = @import("../Elf.zig");
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
/// The file offset is found by computing the vaddr offset from the section vaddr
/// the symbol references, and adding that to the file offset of the section.
/// If this field is 0, it means the codegen size = 0 and there is no symbol or
/// offset table entry.
local_sym_index: u32,
/// This field is undefined for symbols with size = 0.
offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev_index: ?Index,
next_index: ?Index,
pub const Index = u32;
pub const Reloc = struct {
target: u32,
offset: u64,
addend: u32,
prev_vaddr: u64,
};
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.local_sym_index == 0) return null;
return self.local_sym_index;
}
pub fn getSymbol(self: Atom, elf_file: *const Elf) elf.Elf64_Sym {
return elf_file.getSymbol(self.getSymbolIndex().?);
}
pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
return elf_file.getSymbolPtr(self.getSymbolIndex().?);
}
pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
return elf_file.getSymbolName(self.getSymbolIndex().?);
}
pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
assert(self.getSymbolIndex() != null);
const target = elf_file.base.options.target;
const ptr_bits = target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
return got.p_vaddr + self.offset_table_index * ptr_bytes;
}
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *const Elf) u64 {
const self_sym = self.getSymbol(elf_file);
if (self.next_index) |next_index| {
const next = elf_file.getAtom(next_index);
const next_sym = next.getSymbol(elf_file);
return next_sym.st_value - self_sym.st_value;
} else {
// We are the last block. The capacity is limited only by virtual address space.
return std.math.maxInt(u32) - self_sym.st_value;
}
}
pub fn freeListEligible(self: Atom, elf_file: *const Elf) bool {
// No need to keep a free list node for the last block.
const next_index = self.next_index orelse return false;
const next = elf_file.getAtom(next_index);
const self_sym = self.getSymbol(elf_file);
const next_sym = next.getSymbol(elf_file);
const cap = next_sym.st_value - self_sym.st_value;
const ideal_cap = Elf.padToIdeal(self_sym.st_size);
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= Elf.min_text_capacity;
}
pub fn addRelocation(elf_file: *Elf, atom_index: Index, reloc: Reloc) !void {
const gpa = elf_file.base.allocator;
const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}
pub fn freeRelocations(elf_file: *Elf, atom_index: Index) void {
var removed_relocs = elf_file.relocs.fetchRemove(atom_index);
if (removed_relocs) |*relocs| relocs.value.deinit(elf_file.base.allocator);
}
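
The `freeListEligible` check above is plain arithmetic on symbol values: the gap to the next atom must exceed this atom's padded ("ideal") size by at least a minimum-capacity threshold before a free-list node is worth keeping. A standalone illustration follows; `padToIdeal` and `min_text_capacity` here are stand-ins chosen for the sketch, not the real values from Elf.zig or Coff.zig.

const std = @import("std");

// Stand-in threshold and padding policy for this sketch only.
const min_text_capacity = 64;

fn padToIdeal(size: u64) u64 {
    return size + size / 2; // leave roughly 50% headroom
}

fn freeListEligible(self_value: u64, self_size: u64, next_value: u64) bool {
    const cap = next_value - self_value;
    const ideal_cap = padToIdeal(self_size);
    if (cap <= ideal_cap) return false;
    const surplus = cap - ideal_cap;
    return surplus >= min_text_capacity;
}

test "only a hole with real surplus earns a free-list node" {
    // cap = 0x180, ideal = 0x180: no surplus at all.
    try std.testing.expect(!freeListEligible(0x1000, 0x100, 0x1180));
    // cap = 0x400, ideal = 0x180: surplus 0x280 >= 64.
    try std.testing.expect(freeListEligible(0x1000, 0x100, 0x1400));
}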

(Some files were not shown because too many files have changed in this diff.)