diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8a9de7bdc1..f8029fdcde 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -513,7 +513,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/zig/Ast.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/CrossTarget.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/zig/parse.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/zig/Parse.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/render.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/string_literal.zig"
"${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig"
diff --git a/build.zig b/build.zig
index 91aee6062a..2e5c3ddd96 100644
--- a/build.zig
+++ b/build.zig
@@ -1,19 +1,18 @@
const std = @import("std");
const builtin = std.builtin;
-const Builder = std.build.Builder;
const tests = @import("test/tests.zig");
const BufMap = std.BufMap;
const mem = std.mem;
const ArrayList = std.ArrayList;
const io = std.io;
const fs = std.fs;
-const InstallDirectoryOptions = std.build.InstallDirectoryOptions;
+const InstallDirectoryOptions = std.Build.InstallDirectoryOptions;
const assert = std.debug.assert;
const zig_version = std.builtin.Version{ .major = 0, .minor = 11, .patch = 0 };
const stack_size = 32 * 1024 * 1024;
-pub fn build(b: *Builder) !void {
+pub fn build(b: *std.Build) !void {
const release = b.option(bool, "release", "Build in release mode") orelse false;
const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false;
const target = t: {
@@ -23,7 +22,7 @@ pub fn build(b: *Builder) !void {
}
break :t b.standardTargetOptions(.{ .default_target = default_target });
};
- const mode: std.builtin.Mode = if (release) switch (target.getCpuArch()) {
+ const optimize: std.builtin.OptimizeMode = if (release) switch (target.getCpuArch()) {
.wasm32 => .ReleaseSmall,
else => .ReleaseFast,
} else .Debug;
@@ -33,7 +32,12 @@ pub fn build(b: *Builder) !void {
const test_step = b.step("test", "Run all the tests");
- const docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
+ const docgen_exe = b.addExecutable(.{
+ .name = "docgen",
+ .root_source_file = .{ .path = "doc/docgen.zig" },
+ .target = .{},
+ .optimize = .Debug,
+ });
docgen_exe.single_threaded = single_threaded;
const rel_zig_exe = try fs.path.relative(b.allocator, b.build_root, b.zig_exe);
@@ -53,10 +57,12 @@ pub fn build(b: *Builder) !void {
const docs_step = b.step("docs", "Build documentation");
docs_step.dependOn(&docgen_cmd.step);
- const test_cases = b.addTest("src/test.zig");
+ const test_cases = b.addTest(.{
+ .root_source_file = .{ .path = "src/test.zig" },
+ .optimize = optimize,
+ });
test_cases.main_pkg_path = ".";
test_cases.stack_size = stack_size;
- test_cases.setBuildMode(mode);
test_cases.single_threaded = single_threaded;
const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});
@@ -151,17 +157,15 @@ pub fn build(b: *Builder) !void {
const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
if (strip == true) break :blk @as(u32, 0);
- if (mode != .Debug) break :blk 0;
+ if (optimize != .Debug) break :blk 0;
break :blk 4;
};
- const exe = addCompilerStep(b);
+ const exe = addCompilerStep(b, optimize, target);
exe.strip = strip;
exe.sanitize_thread = sanitize_thread;
exe.build_id = b.option(bool, "build-id", "Include a build id note") orelse false;
exe.install();
- exe.setBuildMode(mode);
- exe.setTarget(target);
const compile_step = b.step("compile", "Build the self-hosted compiler");
compile_step.dependOn(&exe.step);
@@ -197,7 +201,7 @@ pub fn build(b: *Builder) !void {
test_cases.linkLibC();
}
- const is_debug = mode == .Debug;
+ const is_debug = optimize == .Debug;
const enable_logging = b.option(bool, "log", "Enable debug logging with --debug-log") orelse is_debug;
const enable_link_snapshots = b.option(bool, "link-snapshot", "Whether to enable linker state snapshots") orelse false;
@@ -362,25 +366,25 @@ pub fn build(b: *Builder) !void {
test_step.dependOn(test_cases_step);
}
- var chosen_modes: [4]builtin.Mode = undefined;
+ var chosen_opt_modes_buf: [4]builtin.Mode = undefined;
var chosen_mode_index: usize = 0;
if (!skip_debug) {
- chosen_modes[chosen_mode_index] = builtin.Mode.Debug;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.Debug;
chosen_mode_index += 1;
}
if (!skip_release_safe) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSafe;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSafe;
chosen_mode_index += 1;
}
if (!skip_release_fast) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseFast;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseFast;
chosen_mode_index += 1;
}
if (!skip_release_small) {
- chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSmall;
+ chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSmall;
chosen_mode_index += 1;
}
- const modes = chosen_modes[0..chosen_mode_index];
+ const optimization_modes = chosen_opt_modes_buf[0..chosen_mode_index];
// run stage1 `zig fmt` on this build.zig file just to make sure it works
test_step.dependOn(&fmt_build_zig.step);
@@ -393,7 +397,7 @@ pub fn build(b: *Builder) !void {
"test/behavior.zig",
"behavior",
"Run the behavior tests",
- modes,
+ optimization_modes,
skip_single_threaded,
skip_non_native,
skip_libc,
@@ -407,7 +411,7 @@ pub fn build(b: *Builder) !void {
"lib/compiler_rt.zig",
"compiler-rt",
"Run the compiler_rt tests",
- modes,
+ optimization_modes,
true, // skip_single_threaded
skip_non_native,
true, // skip_libc
@@ -421,7 +425,7 @@ pub fn build(b: *Builder) !void {
"lib/c.zig",
"universal-libc",
"Run the universal libc tests",
- modes,
+ optimization_modes,
true, // skip_single_threaded
skip_non_native,
true, // skip_libc
@@ -429,11 +433,11 @@ pub fn build(b: *Builder) !void {
skip_stage2_tests or true, // TODO get these all passing
));
- test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
+ test_step.dependOn(tests.addCompareOutputTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addStandaloneTests(
b,
test_filter,
- modes,
+ optimization_modes,
skip_non_native,
enable_macos_sdk,
target,
@@ -446,10 +450,10 @@ pub fn build(b: *Builder) !void {
enable_symlinks_windows,
));
test_step.dependOn(tests.addCAbiTests(b, skip_non_native, skip_release));
- test_step.dependOn(tests.addLinkTests(b, test_filter, modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
- test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
- test_step.dependOn(tests.addCliTests(b, test_filter, modes));
- test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
+ test_step.dependOn(tests.addLinkTests(b, test_filter, optimization_modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
+ test_step.dependOn(tests.addStackTraceTests(b, test_filter, optimization_modes));
+ test_step.dependOn(tests.addCliTests(b, test_filter, optimization_modes));
+ test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, optimization_modes));
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
if (!skip_run_translated_c) {
test_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
@@ -463,7 +467,7 @@ pub fn build(b: *Builder) !void {
"lib/std/std.zig",
"std",
"Run the standard library tests",
- modes,
+ optimization_modes,
skip_single_threaded,
skip_non_native,
skip_libc,
@@ -474,7 +478,7 @@ pub fn build(b: *Builder) !void {
try addWasiUpdateStep(b, version);
}
-fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
+fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
const semver = try std.SemanticVersion.parse(version);
var target: std.zig.CrossTarget = .{
@@ -483,9 +487,7 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
};
target.cpu_features_add.addFeature(@enumToInt(std.Target.wasm.Feature.bulk_memory));
- const exe = addCompilerStep(b);
- exe.setBuildMode(.ReleaseSmall);
- exe.setTarget(target);
+ const exe = addCompilerStep(b, .ReleaseSmall, target);
const exe_options = b.addOptions();
exe.addOptions("build_options", exe_options);
@@ -512,8 +514,17 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
update_zig1_step.dependOn(&run_opt.step);
}
-fn addCompilerStep(b: *Builder) *std.build.LibExeObjStep {
- const exe = b.addExecutable("zig", "src/main.zig");
+fn addCompilerStep(
+ b: *std.Build,
+ optimize: std.builtin.OptimizeMode,
+ target: std.zig.CrossTarget,
+) *std.Build.CompileStep {
+ const exe = b.addExecutable(.{
+ .name = "zig",
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
exe.stack_size = stack_size;
return exe;
}
@@ -533,9 +544,9 @@ const exe_cflags = [_][]const u8{
};
fn addCmakeCfgOptionsToExe(
- b: *Builder,
+ b: *std.Build,
cfg: CMakeConfig,
- exe: *std.build.LibExeObjStep,
+ exe: *std.Build.CompileStep,
use_zig_libcxx: bool,
) !void {
if (exe.target.isDarwin()) {
@@ -614,7 +625,7 @@ fn addCmakeCfgOptionsToExe(
}
}
-fn addStaticLlvmOptionsToExe(exe: *std.build.LibExeObjStep) !void {
+fn addStaticLlvmOptionsToExe(exe: *std.Build.CompileStep) !void {
// Adds the Zig C++ sources which both stage1 and stage2 need.
//
// We need this because otherwise zig_clang_cc1_main.cpp ends up pulling
@@ -651,9 +662,9 @@ fn addStaticLlvmOptionsToExe(exe: *std.build.LibExeObjStep) !void {
}
fn addCxxKnownPath(
- b: *Builder,
+ b: *std.Build,
ctx: CMakeConfig,
- exe: *std.build.LibExeObjStep,
+ exe: *std.Build.CompileStep,
objname: []const u8,
errtxt: ?[]const u8,
need_cpp_includes: bool,
@@ -686,7 +697,7 @@ fn addCxxKnownPath(
}
}
-fn addCMakeLibraryList(exe: *std.build.LibExeObjStep, list: []const u8) void {
+fn addCMakeLibraryList(exe: *std.Build.CompileStep, list: []const u8) void {
var it = mem.tokenize(u8, list, ";");
while (it.next()) |lib| {
if (mem.startsWith(u8, lib, "-l")) {
@@ -700,7 +711,7 @@ fn addCMakeLibraryList(exe: *std.build.LibExeObjStep, list: []const u8) void {
}
const CMakeConfig = struct {
- llvm_linkage: std.build.LibExeObjStep.Linkage,
+ llvm_linkage: std.Build.CompileStep.Linkage,
cmake_binary_dir: []const u8,
cmake_prefix_path: []const u8,
cmake_static_library_prefix: []const u8,
@@ -717,7 +728,7 @@ const CMakeConfig = struct {
const max_config_h_bytes = 1 * 1024 * 1024;
-fn findConfigH(b: *Builder, config_h_path_option: ?[]const u8) ?[]const u8 {
+fn findConfigH(b: *std.Build, config_h_path_option: ?[]const u8) ?[]const u8 {
if (config_h_path_option) |path| {
var config_h_or_err = fs.cwd().openFile(path, .{});
if (config_h_or_err) |*file| {
@@ -763,7 +774,7 @@ fn findConfigH(b: *Builder, config_h_path_option: ?[]const u8) ?[]const u8 {
} else unreachable; // TODO should not need `else unreachable`.
}
-fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
+fn parseConfigH(b: *std.Build, config_h_text: []const u8) ?CMakeConfig {
var ctx: CMakeConfig = .{
.llvm_linkage = undefined,
.cmake_binary_dir = undefined,
@@ -852,7 +863,7 @@ fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
return ctx;
}
-fn toNativePathSep(b: *Builder, s: []const u8) []u8 {
+fn toNativePathSep(b: *std.Build, s: []const u8) []u8 {
const duplicated = b.allocator.dupe(u8, s) catch unreachable;
for (duplicated) |*byte| switch (byte.*) {
'/' => byte.* = fs.path.sep,
@@ -861,166 +872,6 @@ fn toNativePathSep(b: *Builder, s: []const u8) []u8 {
return duplicated;
}
-const softfloat_sources = [_][]const u8{
- "deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c",
- "deps/SoftFloat-3e/source/8086/extF80M_isSignalingNaN.c",
- "deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c",
- "deps/SoftFloat-3e/source/8086/s_commonNaNToExtF80M.c",
- "deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c",
- "deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c",
- "deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c",
- "deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c",
- "deps/SoftFloat-3e/source/8086/s_extF80MToCommonNaN.c",
- "deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c",
- "deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c",
- "deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c",
- "deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c",
- "deps/SoftFloat-3e/source/8086/s_propagateNaNExtF80M.c",
- "deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c",
- "deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c",
- "deps/SoftFloat-3e/source/f128M_add.c",
- "deps/SoftFloat-3e/source/f128M_div.c",
- "deps/SoftFloat-3e/source/f128M_eq.c",
- "deps/SoftFloat-3e/source/f128M_eq_signaling.c",
- "deps/SoftFloat-3e/source/f128M_le.c",
- "deps/SoftFloat-3e/source/f128M_le_quiet.c",
- "deps/SoftFloat-3e/source/f128M_lt.c",
- "deps/SoftFloat-3e/source/f128M_lt_quiet.c",
- "deps/SoftFloat-3e/source/f128M_mul.c",
- "deps/SoftFloat-3e/source/f128M_mulAdd.c",
- "deps/SoftFloat-3e/source/f128M_rem.c",
- "deps/SoftFloat-3e/source/f128M_roundToInt.c",
- "deps/SoftFloat-3e/source/f128M_sqrt.c",
- "deps/SoftFloat-3e/source/f128M_sub.c",
- "deps/SoftFloat-3e/source/f128M_to_f16.c",
- "deps/SoftFloat-3e/source/f128M_to_f32.c",
- "deps/SoftFloat-3e/source/f128M_to_f64.c",
- "deps/SoftFloat-3e/source/f128M_to_extF80M.c",
- "deps/SoftFloat-3e/source/f128M_to_i32.c",
- "deps/SoftFloat-3e/source/f128M_to_i32_r_minMag.c",
- "deps/SoftFloat-3e/source/f128M_to_i64.c",
- "deps/SoftFloat-3e/source/f128M_to_i64_r_minMag.c",
- "deps/SoftFloat-3e/source/f128M_to_ui32.c",
- "deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c",
- "deps/SoftFloat-3e/source/f128M_to_ui64.c",
- "deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c",
- "deps/SoftFloat-3e/source/extF80M_add.c",
- "deps/SoftFloat-3e/source/extF80M_div.c",
- "deps/SoftFloat-3e/source/extF80M_eq.c",
- "deps/SoftFloat-3e/source/extF80M_le.c",
- "deps/SoftFloat-3e/source/extF80M_lt.c",
- "deps/SoftFloat-3e/source/extF80M_mul.c",
- "deps/SoftFloat-3e/source/extF80M_rem.c",
- "deps/SoftFloat-3e/source/extF80M_roundToInt.c",
- "deps/SoftFloat-3e/source/extF80M_sqrt.c",
- "deps/SoftFloat-3e/source/extF80M_sub.c",
- "deps/SoftFloat-3e/source/extF80M_to_f16.c",
- "deps/SoftFloat-3e/source/extF80M_to_f32.c",
- "deps/SoftFloat-3e/source/extF80M_to_f64.c",
- "deps/SoftFloat-3e/source/extF80M_to_f128M.c",
- "deps/SoftFloat-3e/source/f16_add.c",
- "deps/SoftFloat-3e/source/f16_div.c",
- "deps/SoftFloat-3e/source/f16_eq.c",
- "deps/SoftFloat-3e/source/f16_isSignalingNaN.c",
- "deps/SoftFloat-3e/source/f16_lt.c",
- "deps/SoftFloat-3e/source/f16_mul.c",
- "deps/SoftFloat-3e/source/f16_mulAdd.c",
- "deps/SoftFloat-3e/source/f16_rem.c",
- "deps/SoftFloat-3e/source/f16_roundToInt.c",
- "deps/SoftFloat-3e/source/f16_sqrt.c",
- "deps/SoftFloat-3e/source/f16_sub.c",
- "deps/SoftFloat-3e/source/f16_to_extF80M.c",
- "deps/SoftFloat-3e/source/f16_to_f128M.c",
- "deps/SoftFloat-3e/source/f16_to_f64.c",
- "deps/SoftFloat-3e/source/f32_to_extF80M.c",
- "deps/SoftFloat-3e/source/f32_to_f128M.c",
- "deps/SoftFloat-3e/source/f64_to_extF80M.c",
- "deps/SoftFloat-3e/source/f64_to_f128M.c",
- "deps/SoftFloat-3e/source/f64_to_f16.c",
- "deps/SoftFloat-3e/source/i32_to_f128M.c",
- "deps/SoftFloat-3e/source/s_add256M.c",
- "deps/SoftFloat-3e/source/s_addCarryM.c",
- "deps/SoftFloat-3e/source/s_addComplCarryM.c",
- "deps/SoftFloat-3e/source/s_addF128M.c",
- "deps/SoftFloat-3e/source/s_addExtF80M.c",
- "deps/SoftFloat-3e/source/s_addM.c",
- "deps/SoftFloat-3e/source/s_addMagsF16.c",
- "deps/SoftFloat-3e/source/s_addMagsF32.c",
- "deps/SoftFloat-3e/source/s_addMagsF64.c",
- "deps/SoftFloat-3e/source/s_approxRecip32_1.c",
- "deps/SoftFloat-3e/source/s_approxRecipSqrt32_1.c",
- "deps/SoftFloat-3e/source/s_approxRecipSqrt_1Ks.c",
- "deps/SoftFloat-3e/source/s_approxRecip_1Ks.c",
- "deps/SoftFloat-3e/source/s_compare128M.c",
- "deps/SoftFloat-3e/source/s_compare96M.c",
- "deps/SoftFloat-3e/source/s_compareNonnormExtF80M.c",
- "deps/SoftFloat-3e/source/s_countLeadingZeros16.c",
- "deps/SoftFloat-3e/source/s_countLeadingZeros32.c",
- "deps/SoftFloat-3e/source/s_countLeadingZeros64.c",
- "deps/SoftFloat-3e/source/s_countLeadingZeros8.c",
- "deps/SoftFloat-3e/source/s_eq128.c",
- "deps/SoftFloat-3e/source/s_invalidF128M.c",
- "deps/SoftFloat-3e/source/s_invalidExtF80M.c",
- "deps/SoftFloat-3e/source/s_isNaNF128M.c",
- "deps/SoftFloat-3e/source/s_le128.c",
- "deps/SoftFloat-3e/source/s_lt128.c",
- "deps/SoftFloat-3e/source/s_mul128MTo256M.c",
- "deps/SoftFloat-3e/source/s_mul64To128M.c",
- "deps/SoftFloat-3e/source/s_mulAddF128M.c",
- "deps/SoftFloat-3e/source/s_mulAddF16.c",
- "deps/SoftFloat-3e/source/s_mulAddF32.c",
- "deps/SoftFloat-3e/source/s_mulAddF64.c",
- "deps/SoftFloat-3e/source/s_negXM.c",
- "deps/SoftFloat-3e/source/s_normExtF80SigM.c",
- "deps/SoftFloat-3e/source/s_normRoundPackMToF128M.c",
- "deps/SoftFloat-3e/source/s_normRoundPackMToExtF80M.c",
- "deps/SoftFloat-3e/source/s_normRoundPackToF16.c",
- "deps/SoftFloat-3e/source/s_normRoundPackToF32.c",
- "deps/SoftFloat-3e/source/s_normRoundPackToF64.c",
- "deps/SoftFloat-3e/source/s_normSubnormalF128SigM.c",
- "deps/SoftFloat-3e/source/s_normSubnormalF16Sig.c",
- "deps/SoftFloat-3e/source/s_normSubnormalF32Sig.c",
- "deps/SoftFloat-3e/source/s_normSubnormalF64Sig.c",
- "deps/SoftFloat-3e/source/s_remStepMBy32.c",
- "deps/SoftFloat-3e/source/s_roundMToI64.c",
- "deps/SoftFloat-3e/source/s_roundMToUI64.c",
- "deps/SoftFloat-3e/source/s_roundPackMToExtF80M.c",
- "deps/SoftFloat-3e/source/s_roundPackMToF128M.c",
- "deps/SoftFloat-3e/source/s_roundPackToF16.c",
- "deps/SoftFloat-3e/source/s_roundPackToF32.c",
- "deps/SoftFloat-3e/source/s_roundPackToF64.c",
- "deps/SoftFloat-3e/source/s_roundToI32.c",
- "deps/SoftFloat-3e/source/s_roundToI64.c",
- "deps/SoftFloat-3e/source/s_roundToUI32.c",
- "deps/SoftFloat-3e/source/s_roundToUI64.c",
- "deps/SoftFloat-3e/source/s_shiftLeftM.c",
- "deps/SoftFloat-3e/source/s_shiftNormSigF128M.c",
- "deps/SoftFloat-3e/source/s_shiftRightJam256M.c",
- "deps/SoftFloat-3e/source/s_shiftRightJam32.c",
- "deps/SoftFloat-3e/source/s_shiftRightJam64.c",
- "deps/SoftFloat-3e/source/s_shiftRightJamM.c",
- "deps/SoftFloat-3e/source/s_shiftRightM.c",
- "deps/SoftFloat-3e/source/s_shortShiftLeft64To96M.c",
- "deps/SoftFloat-3e/source/s_shortShiftLeftM.c",
- "deps/SoftFloat-3e/source/s_shortShiftRightExtendM.c",
- "deps/SoftFloat-3e/source/s_shortShiftRightJam64.c",
- "deps/SoftFloat-3e/source/s_shortShiftRightJamM.c",
- "deps/SoftFloat-3e/source/s_shortShiftRightM.c",
- "deps/SoftFloat-3e/source/s_sub1XM.c",
- "deps/SoftFloat-3e/source/s_sub256M.c",
- "deps/SoftFloat-3e/source/s_subM.c",
- "deps/SoftFloat-3e/source/s_subMagsF16.c",
- "deps/SoftFloat-3e/source/s_subMagsF32.c",
- "deps/SoftFloat-3e/source/s_subMagsF64.c",
- "deps/SoftFloat-3e/source/s_tryPropagateNaNF128M.c",
- "deps/SoftFloat-3e/source/s_tryPropagateNaNExtF80M.c",
- "deps/SoftFloat-3e/source/softfloat_state.c",
- "deps/SoftFloat-3e/source/ui32_to_f128M.c",
- "deps/SoftFloat-3e/source/ui64_to_f128M.c",
- "deps/SoftFloat-3e/source/ui32_to_extF80M.c",
- "deps/SoftFloat-3e/source/ui64_to_extF80M.c",
-};
-
const zig_cpp_sources = [_][]const u8{
// These are planned to stay even when we are self-hosted.
"src/zig_llvm.cpp",
diff --git a/doc/langref.html.in b/doc/langref.html.in
index fd4aa8ae76..5d087f72a2 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -871,6 +871,13 @@ pub fn main() void {
However, it is possible to embed non-UTF-8 bytes into a string literal using \xNN notation.
+ Indexing into a string containing non-ASCII bytes will return individual bytes, whether valid
+ UTF-8 or not.
+ The {#link|Zig Standard Library#} provides routines for checking the validity of UTF-8 encoded
+ strings, accessing their code points and other encoding/decoding related tasks in
+ {#syntax#}std.unicode{#endsyntax#}.
+
+
Unicode code point literals have type {#syntax#}comptime_int{#endsyntax#}, the same as
{#link|Integer Literals#}. All {#link|Escape Sequences#} are valid in both string literals
and Unicode code point literals.
@@ -894,9 +901,12 @@ pub fn main() void {
print("{}\n", .{'e' == '\x65'}); // true
print("{d}\n", .{'\u{1f4a9}'}); // 128169
print("{d}\n", .{'💯'}); // 128175
- print("{}\n", .{mem.eql(u8, "hello", "h\x65llo")}); // true
- print("0x{x}\n", .{"\xff"[0]}); // non-UTF-8 strings are possible with \xNN notation.
print("{u}\n", .{'âš¡'});
+ print("{}\n", .{mem.eql(u8, "hello", "h\x65llo")}); // true
+ print("{}\n", .{mem.eql(u8, "💯", "\xf0\x9f\x92\xaf")}); // also true
+ const invalid_utf8 = "\xff\xfe"; // non-UTF-8 strings are possible with \xNN notation.
+ print("0x{x}\n", .{invalid_utf8[1]}); // indexing them returns individual bytes...
+ print("0x{x}\n", .{"💯"[1]}); // ...as does indexing part-way through non-ASCII characters
}
{#code_end#}
{#see_also|Arrays|Source Encoding#}
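For instance, a minimal sketch of checking validity and iterating code points with {#syntax#}std.unicode{#endsyntax#} (utf8ValidateSlice and Utf8View are the standard-library names assumed here; this is an illustration, not part of the language reference sample above):

```zig
const std = @import("std");

test "validate and iterate UTF-8" {
    const s = "h\x65llo 💯"; // \xNN escapes and UTF-8 text can be mixed
    try std.testing.expect(std.unicode.utf8ValidateSlice(s));

    // Utf8View rejects invalid UTF-8 at init time, then yields whole code points.
    var it = (try std.unicode.Utf8View.init(s)).iterator();
    var count: usize = 0;
    while (it.nextCodepoint()) |_| count += 1;
    try std.testing.expectEqual(@as(usize, 7), count); // h, e, l, l, o, space, 💯
}
```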
@@ -8799,6 +8809,15 @@ pub const PrefetchOptions = struct {
{#link|Optional Pointers#} are allowed. Casting an optional pointer which is {#link|null#}
to a non-optional pointer invokes safety-checked {#link|Undefined Behavior#}.
+
+ {#syntax#}@ptrCast{#endsyntax#} cannot be used for:
+
+
+ - Removing {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier, use {#link|@qualCast#}.
+ - Changing pointer address space, use {#link|@addrSpaceCast#}.
+ - Increasing pointer alignment, use {#link|@alignCast#}.
+ - Casting a non-slice pointer to a slice, use slicing syntax {#syntax#}ptr[start..end]{#endsyntax#}.
+
{#header_close#}
{#header_open|@ptrToInt#}
@@ -8811,6 +8830,13 @@ pub const PrefetchOptions = struct {
{#header_close#}
+ {#header_open|@qualCast#}
+ {#syntax#}@qualCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}
+
+ Remove {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier from a pointer.
+
+ {#header_close#}
+
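A minimal sketch of the signature documented above, assuming the underlying memory is genuinely mutable (writing through the result of {#syntax#}@qualCast{#endsyntax#} on truly read-only memory would be undefined behavior):

```zig
const std = @import("std");

fn increment(ptr: *const i32) void {
    // The caller guarantees the memory is actually mutable; @qualCast only
    // removes the const qualifier, the pointee type stays the same.
    const writable = @qualCast(*i32, ptr);
    writable.* += 1;
}

pub fn main() void {
    var counter: i32 = 0;
    increment(&counter);
    std.debug.print("{d}\n", .{counter}); // prints 1
}
```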
{#header_open|@rem#}
{#syntax#}@rem(numerator: T, denominator: T) T{#endsyntax#}
@@ -9180,8 +9206,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@sin#}
@@ -9191,8 +9216,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
@@ -9203,8 +9227,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
@@ -9215,8 +9238,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
@@ -9227,8 +9249,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@exp2#}
@@ -9238,8 +9259,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@log#}
@@ -9249,8 +9269,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@log2#}
@@ -9260,8 +9279,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@log10#}
@@ -9271,8 +9289,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@fabs#}
@@ -9282,8 +9299,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@floor#}
@@ -9293,8 +9309,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@ceil#}
@@ -9304,8 +9319,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@trunc#}
@@ -9315,8 +9329,7 @@ fn doTheTest() !void {
Uses a dedicated hardware instruction when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
{#header_open|@round#}
@@ -9326,8 +9339,7 @@ fn doTheTest() !void {
when available.
- Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
- some float operations are not yet implemented for all float types.
+ Supports {#link|Floats#} and {#link|Vectors#} of floats.
{#header_close#}
@@ -9528,11 +9540,15 @@ fn foo(comptime T: type, ptr: *T) T {
To add standard build options to a build.zig file:
{#code_begin|syntax|build#}
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const exe = b.addExecutable("example", "example.zig");
- exe.setBuildMode(b.standardReleaseOptions());
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
+ const exe = b.addExecutable(.{
+ .name = "example",
+ .root_source_file = .{ .path = "example.zig" },
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
}
{#code_end#}
@@ -9588,7 +9604,7 @@ pub fn build(b: *Builder) void {
{#header_close#}
{#header_open|Single Threaded Builds#}
- Zig has a compile option --single-threaded which has the following effects:
+ Zig has a compile option -fsingle-threaded which has the following effects:
- All {#link|Thread Local Variables#} are treated as regular {#link|Container Level Variables#}.
- The overhead of {#link|Async Functions#} becomes equivalent to function call overhead.
@@ -10547,22 +10563,26 @@ const separator = if (builtin.os.tag == .windows) '\\' else '/';
This build.zig file is automatically generated
by zig init-exe.
{#code_begin|syntax|build_executable#}
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
+pub fn build(b: *std.Build) void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
- // Standard release options allow the person running `zig build` to select
- // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
- const mode = b.standardReleaseOptions();
+ // Standard optimization options allow the person running `zig build` to select
+ // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
+ // set a preferred release mode, allowing the user to decide how to optimize.
+ const optimize = b.standardOptimizeOption(.{});
- const exe = b.addExecutable("example", "src/main.zig");
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "example",
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
exe.install();
const run_cmd = exe.run();
@@ -10581,16 +10601,21 @@ pub fn build(b: *Builder) void {
This build.zig file is automatically generated
by zig init-lib.
{#code_begin|syntax|build_library#}
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
- const lib = b.addStaticLibrary("example", "src/main.zig");
- lib.setBuildMode(mode);
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
+ const lib = b.addStaticLibrary(.{
+ .name = "example",
+ .root_source_file = .{ .path = "src/main.zig" },
+ .optimize = optimize,
+ });
lib.install();
- var main_tests = b.addTest("src/main.zig");
- main_tests.setBuildMode(mode);
+ const main_tests = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .optimize = optimize,
+ });
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&main_tests.step);
@@ -10949,12 +10974,17 @@ int main(int argc, char **argv) {
}
{#end_syntax_block#}
{#code_begin|syntax|build_c#}
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
-
- const exe = b.addExecutable("test", null);
+pub fn build(b: *std.Build) void {
+ const lib = b.addSharedLibrary(.{
+ .name = "mathtest",
+ .root_source_file = .{ .path = "mathtest.zig" },
+ .version = .{ .major = 1, .minor = 0, .patch = 0 },
+ });
+ const exe = b.addExecutable(.{
+ .name = "test",
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.linkLibrary(lib);
exe.linkSystemLibrary("c");
@@ -11011,12 +11041,17 @@ int main(int argc, char **argv) {
}
{#end_syntax_block#}
{#code_begin|syntax|build_object#}
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const obj = b.addObject("base64", "base64.zig");
+pub fn build(b: *std.Build) void {
+ const obj = b.addObject(.{
+ .name = "base64",
+ .root_source_file = .{ .path = "base64.zig" },
+ });
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.addObject(obj);
exe.linkSystemLibrary("c");
diff --git a/lib/build_runner.zig b/lib/build_runner.zig
index 4df2eb1d62..f2b2eba950 100644
--- a/lib/build_runner.zig
+++ b/lib/build_runner.zig
@@ -3,7 +3,6 @@ const std = @import("std");
const builtin = @import("builtin");
const io = std.io;
const fmt = std.fmt;
-const Builder = std.build.Builder;
const mem = std.mem;
const process = std.process;
const ArrayList = std.ArrayList;
@@ -42,12 +41,15 @@ pub fn main() !void {
return error.InvalidArgs;
};
- const builder = try Builder.create(
+ const host = try std.zig.system.NativeTargetInfo.detect(.{});
+
+ const builder = try std.Build.create(
allocator,
zig_exe,
build_root,
cache_root,
global_cache_root,
+ host,
);
defer builder.destroy();
@@ -58,7 +60,7 @@ pub fn main() !void {
const stdout_stream = io.getStdOut().writer();
var install_prefix: ?[]const u8 = null;
- var dir_list = Builder.DirList{};
+ var dir_list = std.Build.DirList{};
// before arg parsing, check for the NO_COLOR environment variable
// if it exists, default the color setting to .off
@@ -230,7 +232,7 @@ pub fn main() !void {
};
}
-fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void {
+fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.resolveInstallPrefix(null, .{});
@@ -330,7 +332,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
);
}
-fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: anytype) void {
+fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) void {
usage(builder, already_ran_build, out_stream) catch {};
process.exit(1);
}
diff --git a/lib/c.zig b/lib/c.zig
index 82f9f5b2e1..8581ec4e14 100644
--- a/lib/c.zig
+++ b/lib/c.zig
@@ -354,7 +354,7 @@ fn clone() callconv(.Naked) void {
\\ ecall
);
},
- .mips, .mipsel => {
+ .mips, .mipsel, .mips64, .mips64el => {
// __clone(func, stack, flags, arg, ptid, tls, ctid)
// 3, 4, 5, 6, 7, 8, 9
diff --git a/lib/compiler_rt/README.md b/lib/compiler_rt/README.md
index b2c6a10695..d8df7f5489 100644
--- a/lib/compiler_rt/README.md
+++ b/lib/compiler_rt/README.md
@@ -27,482 +27,545 @@ then statically linked and therefore is a transparent dependency for the
programmer.
For details see `../compiler_rt.zig`.
-The routines in this folder are listed below.
-Routines are annotated as `type source routine // description`, with `routine`
-being the name used in aforementioned `compiler_rt.zig`.
-`dev` means deviating from compiler_rt, `port` ported, `source` is the
-information source for the implementation, `none` means unimplemented.
-Some examples for the naming convention are:
-- dev source name_routine, name_routine2 various implementations for performance, simplicity etc
-- port llvm compiler-rt library routines from [LLVM](http://compiler-rt.llvm.org/)
- * LLVM emits library calls to compiler-rt, if the hardware lacks functionality
-- port musl libc routines from [musl](https://musl.libc.org/)
-If the library or information source is uncommon, use the entry `other` for `source`.
-Please do not break the search by inserting entries in another format than `impl space source`.
-
Bugs should be solved by trying to duplicate the bug upstream, if possible.
* If the bug exists upstream, get it fixed upstream and port the fix downstream to Zig.
* If the bug only exists in Zig, use the corresponding C code and debug
both implementations side by side to figure out what is wrong.
-## Integer library routines
+Routines and their status are given below. Sources were "The Art of Computer Programming"
+by Donald E. Knuth, "Hacker's Delight" by Henry S. Warren, "Bit Twiddling Hacks" collected
+by Sean Eron Anderson, "Berkeley SoftFloat" by John R. Hauser, LLVM "compiler-rt" (while it
+was MIT-licensed), "musl libc", and the thoughts and work of contributors.
-#### Integer Bit operations
+The compiler-rt routines have not yet been audited.
+See https://github.com/ziglang/zig/issues/1504.
-- dev HackersDelight __clzsi2 // count leading zeros
-- dev HackersDelight __clzdi2 // count leading zeros
-- dev HackersDelight __clzti2 // count leading zeros
-- dev HackersDelight __ctzsi2 // count trailing zeros
-- dev HackersDelight __ctzdi2 // count trailing zeros
-- dev HackersDelight __ctzti2 // count trailing zeros
-- dev __ctzsi2 __ffssi2 // find least significant 1 bit
-- dev __ctzsi2 __ffsdi2 // find least significant 1 bit
-- dev __ctzsi2 __ffsti2 // find least significant 1 bit
-- dev BitTwiddlingHacks __paritysi2 // bit parity
-- dev BitTwiddlingHacks __paritydi2 // bit parity
-- dev BitTwiddlingHacks __parityti2 // bit parity
-- dev TAOCP __popcountsi2 // bit population
-- dev TAOCP __popcountdi2 // bit population
-- dev TAOCP __popcountti2 // bit population
-- dev other __bswapsi2 // a byteswapped
-- dev other __bswapdi2 // a byteswapped
-- dev other __bswapti2 // a byteswapped
+From left to right the columns mean: 1. whether the routine is implemented (✗ or ✓),
+2. the name, 3. input `a`, 4. input `b`, 5. the return value,
+6. an explanation of the functionality. `..` repeats the comment from the
+row above and/or notes additional return values.
+Some routines have more extensive comments, supplemented with a reference text.
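As a concrete reading of one row in the table below: __clzsi2 takes a u32 and returns the number of leading zero bits as an i32. A hedged sketch of exercising it from Zig, assuming the symbol resolves because compiler_rt is linked into every Zig test binary:

```zig
const std = @import("std");

extern fn __clzsi2(a: u32) i32;

test "__clzsi2 counts leading zeros" {
    try std.testing.expectEqual(@as(i32, 24), __clzsi2(0xFF));
    try std.testing.expectEqual(@as(i32, 0), __clzsi2(0x8000_0000));
}
```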
-#### Integer Comparison
+Integer and Float Operations
-- port llvm __cmpsi2 // a,b: i32, (a<b) -> 0, (a==b) -> 1, (a>b) -> 2
-- port llvm __cmpdi2 // a,b: i64
-- port llvm __cmpti2 // a,b: i128
-- port llvm __ucmpsi2 // a,b: u32, (a<b) -> 0, (a==b) -> 1, (a>b) -> 2
-- port llvm __ucmpdi2 // a,b: u64
-- port llvm __ucmpti2 // a,b: u128
+| Done | Name | a | b | Out | Comment |
+| ------ | ------------- | ---- | ---- | ---- | ------------------------------ |
+| | | | | | **Integer Bit Operations** |
+| ✓ | __clzsi2 | u32 | ∅ | i32 | count leading zeros |
+| ✓ | __clzdi2 | u64 | ∅ | i32 | count leading zeros |
+| ✓ | __clzti2 | u128 | ∅ | i32 | count leading zeros |
+| ✓ | __ctzsi2 | u32 | ∅ | i32 | count trailing zeros |
+| ✓ | __ctzdi2 | u64 | ∅ | i32 | count trailing zeros |
+| ✓ | __ctzti2 | u128 | ∅ | i32 | count trailing zeros |
+| ✓ | __ffssi2 | u32 | ∅ | i32 | find least significant 1 bit |
+| ✓ | __ffsdi2 | u64 | ∅ | i32 | find least significant 1 bit |
+| ✓ | __ffsti2 | u128 | ∅ | i32 | find least significant 1 bit |
+| ✓ | __paritysi2 | u32 | ∅ | i32 | bit parity |
+| ✓ | __paritydi2 | u64 | ∅ | i32 | bit parity |
+| ✓ | __parityti2 | u128 | ∅ | i32 | bit parity |
+| ✓ | __popcountsi2 | u32 | ∅ | i32 | bit population |
+| ✓ | __popcountdi2 | u64 | ∅ | i32 | bit population |
+| ✓ | __popcountti2 | u128 | ∅ | i32 | bit population |
+| ✓ | __bswapsi2 | u32 | ∅ | i32 | byte swap |
+| ✓ | __bswapdi2 | u64 | ∅ | i32 | byte swap |
+| ✓ | __bswapti2 | u128 | ∅ | i32 | byte swap |
+| | | | | | **Integer Comparison** |
+| ✓ | __cmpsi2 | i32 | i32 | i32 | `(a<b) -> 0, (a==b) -> 1, (a>b) -> 2` |
+| ✓ | __cmpdi2 | i64 | i64 | i32 | .. |
+| ✓ | __cmpti2 | i128 | i128 | i32 | .. |
+| ✓ | __ucmpsi2 | u32 | u32 | i32 | `(a<b) -> 0, (a==b) -> 1, (a>b) -> 2` |
+| ✓ | __ucmpdi2 | u64 | u64 | i32 | .. |
+| ✓ | __ucmpti2 | u128 | u128 | i32 | .. |
+| | | | | | **Integer Arithmetic** |
+| ✗ | __ashlsi3 | i32 | i32 | i32 | `a << b` [^unused_rl78] |
+| ✓ | __ashldi3 | i64 | i32 | i64 | .. |
+| ✓ | __ashlti3 | i128 | i32 | i128 | .. |
+| ✓ | __aeabi_llsl | i32 | i32 | i32 | .. ARM |
+| ✗ | __ashrsi3 | i32 | i32 | i32 | `a >> b` arithmetic (sign fill) [^unused_rl78] |
+| ✓ | __ashrdi3 | i64 | i32 | i64 | .. |
+| ✓ | __ashrti3 | i128 | i32 | i128 | .. |
+| ✓ | __aeabi_lasr | i64 | i32 | i64 | .. ARM |
+| ✗ | __lshrsi3 | i32 | i32 | i32 | `a >> b` logical (zero fill) [^unused_rl78] |
+| ✓ | __lshrdi3 | i64 | i32 | i64 | .. |
+| ✓ | __lshrti3 | i128 | i32 | i128 | .. |
+| ✓ | __aeabi_llsr | i64 | i32 | i64 | .. ARM |
+| ✓ | __negsi2 | i32 | i32 | i32 | `-a` [^libgcc_compat] |
+| ✓ | __negdi2 | i64 | i64 | i64 | .. |
+| ✓ | __negti2 | i128 | i128 | i128 | .. |
+| ✓ | __mulsi3 | i32 | i32 | i32 | `a * b` |
+| ✓ | __muldi3 | i64 | i64 | i64 | .. |
+| ✓ | __multi3 | i128 | i128 | i128 | .. |
+| ✓ | __divsi3 | i32 | i32 | i32 | `a / b` |
+| ✓ | __divdi3 | i64 | i64 | i64 | .. |
+| ✓ | __divti3 | i128 | i128 | i128 | .. |
+| ✓ | __aeabi_idiv | i32 | i32 | i32 | .. ARM |
+| ✓ | __udivsi3 | u32 | u32 | u32 | `a / b` |
+| ✓ | __udivdi3 | u64 | u64 | u64 | .. |
+| ✓ | __udivti3 | u128 | u128 | u128 | .. |
+| ✓ | __aeabi_uidiv | i32 | i32 | i32 | .. ARM |
+| ✓ | __modsi3 | i32 | i32 | i32 | `a % b` |
+| ✓ | __moddi3 | i64 | i64 | i64 | .. |
+| ✓ | __modti3 | i128 | i128 | i128 | .. |
+| ✓ | __umodsi3 | u32 | u32 | u32 | `a % b` |
+| ✓ | __umoddi3 | u64 | u64 | u64 | .. |
+| ✓ | __umodti3 | u128 | u128 | u128 | .. |
+| ✓ | __udivmodsi4 | u32 | u32 | u32 | `a / b, rem.* = a % b` |
+| ✓ | __udivmoddi4 | u64 | u64 | u64 | .. |
+| ✓ | __udivmodti4 | u128 | u128 | u128 | .. |
+| ✓ | __divmodsi4 | i32 | i32 | i32 | `a / b, rem.* = a % b` |
+| ✓ | __divmoddi4 | i64 | i64 | i64 | .. |
+| ✗ | __divmodti4 | i128 | i128 | i128 | .. [^libgcc_compat] |
+| | | | | | **Integer Arithmetic with Trapping Overflow**|
+| ✓ | __absvsi2 | i32 | i32 | i32 | abs(a) |
+| ✓ | __absvdi2 | i64 | i64 | i64 | .. |
+| ✓ | __absvti2 | i128 | i128 | i128 | .. |
+| ✓ | __negvsi2 | i32 | i32 | i32 | `-a` [^libgcc_compat] |
+| ✓ | __negvdi2 | i64 | i64 | i64 | .. |
+| ✓ | __negvti2 | i128 | i128 | i128 | .. |
+| ✗ | __addvsi3 | i32 | i32 | i32 | `a + b` |
+| ✗ | __addvdi3 | i64 | i64 | i64 | .. |
+| ✗ | __addvti3 | i128 | i128 | i128 | .. |
+| ✗ | __subvsi3 | i32 | i32 | i32 | `a - b` |
+| ✗ | __subvdi3 | i64 | i64 | i64 | .. |
+| ✗ | __subvti3 | i128 | i128 | i128 | .. |
+| ✗ | __mulvsi3 | i32 | i32 | i32 | `a * b` |
+| ✗ | __mulvdi3 | i64 | i64 | i64 | .. |
+| ✗ | __mulvti3 | i128 | i128 | i128 | .. |
+| | | | | | **Integer Arithmetic which Return on Overflow** [^noptr_faster] |
+| ✓ | __addosi4 | i32 | i32 | i32 | `a + b`, overflow->ov.*=1 else 0 [^perf_addition] |
+| ✓ | __addodi4 | i64 | i64 | i64 | .. |
+| ✓ | __addoti4 | i128 | i128 | i128 | .. |
+| ✓ | __subosi4 | i32 | i32 | i32 | `a - b`, overflow->ov.*=1 else 0 [^perf_addition] |
+| ✓ | __subodi4 | i64 | i64 | i64 | .. |
+| ✓ | __suboti4 | i128 | i128 | i128 | .. |
+| ✓ | __mulosi4 | i32 | i32 | i32 | `a * b`, overflow->ov.*=1 else 0 |
+| ✓ | __mulodi4 | i64 | i64 | i64 | .. |
+| ✓ | __muloti4 | i128 | i128 | i128 | .. |
+| | | | | | **Float Conversion** |
+| ✓ | __extendsfdf2 | f32 | ∅ | f64 | .. |
+| ✓ | __extendsftf2 | f32 | ∅ | f128 | .. |
+| ✓ | __extendsfxf2 | f32 | ∅ | f80 | .. |
+| ✓ | __extenddftf2 | f64 | ∅ | f128 | .. |
+| ✓ | __extenddfxf2 | f64 | ∅ | f80 | .. |
+| ✓ | __truncsfhf2 | f32 | ∅ | f16 | rounding towards zero |
+| ✓ | __truncdfhf2 | f64 | ∅ | f16 | .. |
+| ✓ | __truncdfsf2 | f64 | ∅ | f32 | .. |
+| ✓ | __trunctfhf2 | f128 | ∅ | f16 | .. |
+| ✓ | __trunctfsf2 | f128 | ∅ | f32 | .. |
+| ✓ | __trunctfdf2 | f128 | ∅ | f64 | .. |
+| ✓ | __trunctfxf2 | f128 | ∅ | f80 | .. |
+| ✓ | __truncxfhf2 | f80 | ∅ | f16 | .. |
+| ✓ | __truncxfsf2 | f80 | ∅ | f32 | .. |
+| ✓ | __truncxfdf2 | f80 | ∅ | f64 | .. |
+| ✓ | __aeabi_f2h | f32 | ∅ | f16 | .. ARM |
+| ✓ | __gnu_f2h_ieee | f32 | ∅ | f16 | ..GNU naming convention |
+| ✓ | __aeabi_d2h | f64 | ∅ | f16 | .. ARM |
+| ✓ | __aeabi_d2f | f64 | ∅ | f32 | .. ARM |
+| ✓ | __trunckfsf2 | f128 | ∅ | f32 | .. PPC |
+| ✓ | _Qp_qtos |*f128 | ∅ | f32 | .. SPARC |
+| ✓ | __trunckfdf2 | f128 | ∅ | f64 | .. PPC |
+| ✓ | _Qp_qtod |*f128 | ∅ | f64 | .. SPARC |
+| ✓ | __fixhfsi | f16 | ∅ | i32 | float to int, rounding towards zero |
+| ✓ | __fixsfsi | f32 | ∅ | i32 | .. |
+| ✓ | __fixdfsi | f64 | ∅ | i32 | .. |
+| ✓ | __fixtfsi | f128 | ∅ | i32 | .. |
+| ✓ | __fixxfsi | f80 | ∅ | i32 | .. |
+| ✓ | __fixhfdi | f16 | ∅ | i64 | .. |
+| ✓ | __fixsfdi | f32 | ∅ | i64 | .. |
+| ✓ | __fixdfdi | f64 | ∅ | i64 | .. |
+| ✓ | __fixtfdi | f128 | ∅ | i64 | .. |
+| ✓ | __fixxfdi | f80 | ∅ | i64 | .. |
+| ✓ | __fixhfti | f16 | ∅ | i128 | .. |
+| ✓ | __fixsfti | f32 | ∅ | i128 | .. |
+| ✓ | __fixdfti | f64 | ∅ | i128 | .. |
+| ✓ | __fixtfti | f128 | ∅ | i128 | .. |
+| ✓ | __fixxfti | f80 | ∅ | i128 | .. |
+| ✓ | __fixunshfsi | f16 | ∅ | u32 | float to uint, rounding towards zero. negative values become 0. |
+| ✓ | __fixunssfsi | f32 | ∅ | u32 | .. |
+| ✓ | __fixunsdfsi | f64 | ∅ | u32 | .. |
+| ✓ | __fixunstfsi | f128 | ∅ | u32 | .. |
+| ✓ | __fixunsxfsi | f80 | ∅ | u32 | .. |
+| ✓ | __fixunshfdi | f16 | ∅ | u64 | .. |
+| ✓ | __fixunssfdi | f32 | ∅ | u64 | .. |
+| ✓ | __fixunsdfdi | f64 | ∅ | u64 | .. |
+| ✓ | __fixunstfdi | f128 | ∅ | u64 | .. |
+| ✓ | __fixunsxfdi | f80 | ∅ | u64 | .. |
+| ✓ | __fixunshfti | f16 | ∅ | u128 | .. |
+| ✓ | __fixunssfti | f32 | ∅ | u128 | .. |
+| ✓ | __fixunsdfti | f64 | ∅ | u128 | .. |
+| ✓ | __fixunstfti | f128 | ∅ | u128 | .. |
+| ✓ | __fixunsxfti | f80 | ∅ | u128 | .. |
+| ✓ | __floatsihf | i32 | ∅ | f16 | int to float |
+| ✓ | __floatsisf | i32 | ∅ | f32 | .. |
+| ✓ | __floatsidf | i32 | ∅ | f64 | .. |
+| ✓ | __floatsitf | i32 | ∅ | f128 | .. |
+| ✓ | __floatsixf | i32 | ∅ | f80 | .. |
+| ✓ | __floatdisf | i64 | ∅ | f32 | .. |
+| ✓ | __floatdidf | i64 | ∅ | f64 | .. |
+| ✓ | __floatditf | i64 | ∅ | f128 | .. |
+| ✓ | __floatdixf | i64 | ∅ | f80 | .. |
+| ✓ | __floattihf | i128 | ∅ | f16 | .. |
+| ✓ | __floattisf | i128 | ∅ | f32 | .. |
+| ✓ | __floattidf | i128 | ∅ | f64 | .. |
+| ✓ | __floattitf | i128 | ∅ | f128 | .. |
+| ✓ | __floattixf | i128 | ∅ | f80 | .. |
+| ✓ | __floatunsihf | u32 | ∅ | f16 | uint to float |
+| ✓ | __floatunsisf | u32 | ∅ | f32 | .. |
+| ✓ | __floatunsidf | u32 | ∅ | f64 | .. |
+| ✓ | __floatunsitf | u32 | ∅ | f128 | .. |
+| ✓ | __floatunsixf | u32 | ∅ | f80 | .. |
+| ✓ | __floatundihf | u64 | ∅ | f16 | .. |
+| ✓ | __floatundisf | u64 | ∅ | f32 | .. |
+| ✓ | __floatundidf | u64 | ∅ | f64 | .. |
+| ✓ | __floatunditf | u64 | ∅ | f128 | .. |
+| ✓ | __floatundixf | u64 | ∅ | f80 | .. |
+| ✓ | __floatuntihf | u128 | ∅ | f16 | .. |
+| ✓ | __floatuntisf | u128 | ∅ | f32 | .. |
+| ✓ | __floatuntidf | u128 | ∅ | f64 | .. |
+| ✓ | __floatuntitf | u128 | ∅ | f128 | .. |
+| ✓ | __floatuntixf | u128 | ∅ | f80 | .. |
+| | | | | | **Float Comparison** |
+| ✓ | __cmphf2 | f16 | f16 | i32 | `(a<b)->-1, (a==b)->0, (a>b)->1, Nan->1` |
+| ✓ | __cmpsf2 | f32 | f32 | i32 | exported from __lesf2, __ledf2, __letf2 (below) |
+| ✓ | __cmpdf2 | f64 | f64 | i32 | But: if NaN is a possibility, use another routine. |
+| ✓ | __cmptf2 | f128 | f128 | i32 | .. |
+| ✓ | __cmpxf2 | f80 | f80 | i32 | .. |
+| ✓ | _Qp_cmp |*f128 |*f128 | i32 | .. SPARC |
+| ✓ | __unordhf2 | f16 | f16 | i32 | `(a==+-NaN or b==+-NaN) -> !=0, else -> 0` |
+| ✓ | __unordsf2 | f32 | f32 | i32 | .. |
+| ✓ | __unorddf2 | f64 | f64 | i32 | Note: only reliable for (input!=NaN) |
+| ✓ | __unordtf2 | f128 | f128 | i32 | .. |
+| ✓ | __unordxf2 | f80 | f80 | i32 | .. |
+| ✓ | __aeabi_fcmpun | f32 | f32 | i32 | .. ARM |
+| ✓ | __aeabi_dcmpun | f32 | f32 | i32 | .. ARM |
+| ✓ | __unordkf2 | f128 | f128 | i32 | .. PPC |
+| ✓ | __eqhf2 | f16 | f16 | i32 | `(a!=NaN) and (b!=Nan) and (a==b) -> output=0` |
+| ✓ | __eqsf2 | f32 | f32 | i32 | .. |
+| ✓ | __eqdf2 | f64 | f64 | i32 | .. |
+| ✓ | __eqtf2 | f128 | f128 | i32 | .. |
+| ✓ | __eqxf2 | f80 | f80 | i32 | .. |
+| ✓ | __aeabi_fcmpeq | f32 | f32 | i32 | .. ARM |
+| ✓ | __aeabi_dcmpeq | f32 | f32 | i32 | .. ARM |
+| ✓ | __eqkf2 | f128 | f128 | i32 | .. PPC |
+| ✓ | _Qp_feq |*f128 |*f128 | bool | .. SPARC |
+| ✓ | __nehf2 | f16 | f16 | i32 | `(a==NaN) or (b==Nan) or (a!=b) -> output!=0` |
+| ✓ | __nesf2 | f32 | f32 | i32 | Note: __eqXf2 and __neXf2 have same return value |
+| ✓ | __nedf2 | f64 | f64 | i32 | .. |
+| ✓ | __netf2 | f128 | f128 | i32 | .. |
+| ✓ | __nexf2 | f80 | f80 | i32 | .. |
+| ✓ | __nekf2 | f128 | f128 | i32 | .. PPC |
+| ✓ | _Qp_fne |*f128 |*f128 | bool | .. SPARC |
+| ✓ | __gehf2 | f16 | f16 | i32 | `(a!=Nan) and (b!=Nan) and (a>=b) -> output>=0` |
+| ✓ | __gesf2 | f32 | f32 | i32 | .. |
+| ✓ | __gedf2 | f64 | f64 | i32 | .. |
+| ✓ | __getf2 | f128 | f128 | i32 | .. |
+| ✓ | __gexf2 | f80 | f80 | i32 | .. |
+| ✓ | __gekf2 | f128 | f128 | i32 | .. PPC |
+| ✓ | _Qp_fge |*f128 |*f128 | bool | .. SPARC |
+| ✓ | __lthf2 | f16 | f16 | i32 | `(a!=Nan) and (b!=Nan) and (a<b) -> output<0` |
+| ✓ | __ltsf2 | f32 | f32 | i32 | .. |
+| ✓ | __ltdf2 | f64 | f64 | i32 | .. |
+| ✓ | __lttf2 | f128 | f128 | i32 | .. |
+| ✓ | __ltxf2 | f80 | f80 | i32 | .. |
+| ✓ | __ltkf2 | f128 | f128 | i32 | .. PPC |
+| ✓ | __aeabi_fcmplt | f32 | f32 | i32 | .. ARM |
+| ✓ | __aeabi_dcmplt | f32 | f32 | i32 | .. ARM |
+| ✓ | _Qp_flt |*f128 |*f128 | bool | .. SPARC |
+| ✓ | __lehf2 | f16 | f16 | i32 | `(a!=Nan) and (b!=Nan) and (a<=b) -> output<=0` |
+| ✓ | __lesf2 | f32 | f32 | i32 | .. |
+| ✓ | __ledf2 | f64 | f64 | i32 | .. |
+| ✓ | __letf2 | f128 | f128 | i32 | .. |
+| ✓ | __lexf2 | f80 | f80 | i32 | .. |
+| ✓ | __aeabi_fcmple | f32 | f32 | i32 | .. ARM |
+| ✓ | __aeabi_dcmple | f32 | f32 | i32 | .. ARM |
+| ✓ | __lekf2 | f128 | f128 | i32 | .. PPC |
+| ✓ | _Qp_fle |*f128 |*f128 | bool | .. SPARC |
+| ✓ | __gthf2 | f16 | f16 | i32 | `(a!=Nan) and (b!=Nan) and (a>b) -> output>0` |
+| ✓ | __gtsf2 | f32 | f32 | i32 | .. |
+| ✓ | __gtdf2 | f64 | f64 | i32 | .. |
+| ✓ | __gttf2 | f128 | f128 | i32 | .. |
+| ✓ | __gtxf2 | f80 | f80 | i32 | .. |
+| ✓ | __gtkf2 | f128 | f128 | i32 | .. PPC |
+| ✓ | _Qp_fgt |*f128 |*f128 | bool | .. SPARC |
+| | | | | | **Float Arithmetic** |
+| ✓ | __addhf3 | f32 | f32 | f32 | `a + b` |
+| ✓ | __addsf3 | f32 | f32 | f32 | .. |
+| ✓ | __adddf3 | f64 | f64 | f64 | .. |
+| ✓ | __addtf3 | f128 | f128 | f128 | .. |
+| ✓ | __addxf3 | f80 | f80 | f80 | .. |
+| ✓ | __aeabi_fadd | f32 | f32 | f32 | .. ARM |
+| ✓ | __aeabi_dadd | f64 | f64 | f64 | .. ARM |
+| ✓ | __addkf3 | f128 | f128 | f128 | .. PPC |
+| ✓ | _Qp_add |*f128 |*f128 | void | .. SPARC args *c,*a,*b c=a+b |
+| ✓ | __subhf3 | f32 | f32 | f32 | `a - b` |
+| ✓ | __subsf3 | f32 | f32 | f32 | .. |
+| ✓ | __subdf3 | f64 | f64 | f64 | .. |
+| ✓ | __subtf3 | f128 | f128 | f128 | .. |
+| ✓ | __subxf3 | f80 | f80 | f80 | .. |
+| ✓ | __aeabi_fsub | f32 | f32 | f32 | .. ARM |
+| ✓ | __aeabi_dsub | f64 | f64 | f64 | .. ARM |
+| ✓ | __subkf3 | f128 | f128 | f128 | .. PPC |
+| ✓ | _Qp_sub |*f128 |*f128 | void | .. SPARC args *c,*a,*b c=a-b |
+| ✓ | __mulhf3 | f32 | f32 | f32 | `a * b` |
+| ✓ | __mulsf3 | f32 | f32 | f32 | .. |
+| ✓ | __muldf3 | f64 | f64 | f64 | .. |
+| ✓ | __multf3 | f128 | f128 | f128 | .. |
+| ✓ | __mulxf3 | f80 | f80 | f80 | .. |
+| ✓ | __aeabi_fmul | f32 | f32 | f32 | .. ARM |
+| ✓ | __aeabi_dmul | f64 | f64 | f64 | .. ARM |
+| ✓ | __mulkf3 | f128 | f128 | f128 | .. PPC |
+| ✓ | _Qp_mul |*f128 |*f128 | void | .. SPARC args *c,*a,*b c=a*b |
+| ✓ | __divsf3 | f32 | f32 | f32 | `a / b` |
+| ✓ | __divdf3 | f64 | f64 | f64 | .. |
+| ✓ | __divtf3 | f128 | f128 | f128 | .. |
+| ✓ | __divxf3 | f80 | f80 | f80 | .. |
+| ✓ | __aeabi_fdiv | f32 | f32 | f32 | .. ARM |
+| ✓ | __aeabi_ddiv | f64 | f64 | f64 | .. ARM |
+| ✓ | __divkf3 | f128 | f128 | f128 | .. PPC |
+| ✓ | _Qp_div |*f128 |*f128 | void | .. SPARC args *c,*a,*b c=a*b |
+| ✓ | __negsf2 | f32 | ∅ | f32[^unused_rl78] | -a (can be lowered directly to a xor) |
+| ✓ | __negdf2 | f64 | ∅ | f64 | .. |
+| ✓ | __negtf2 | f128 | ∅ | f128 | .. |
+| ✓ | __negxf2 | f80 | ∅ | f80 | .. |
+| | | | | | **Floating point raised to integer power** |
+| ✗ | __powihf2 | f16 | f16 | f16 | `a ^ b` |
+| ✗ | __powisf2 | f32 | f32 | f32 | .. |
+| ✗ | __powidf2 | f64 | f64 | f64 | .. |
+| ✗ | __powitf2 | f128 | f128 | f128 | .. |
+| ✗ | __powixf2 | f80 | f80 | f80 | .. |
+| ✓ | __mulhc3 | all4 | f16 | f16 | `(a+ib) * (c+id)` |
+| ✓ | __mulsc3 | all4 | f32 | f32 | .. |
+| ✓ | __muldc3 | all4 | f64 | f64 | .. |
+| ✓ | __multc3 | all4 | f128 | f128 | .. |
+| ✓ | __mulxc3 | all4 | f80 | f80 | .. |
+| ✓ | __divhc3 | all4 | f16 | f16 | `(a+ib) / (c+id)` |
+| ✓ | __divsc3 | all4 | f32 | f32 | .. |
+| ✓ | __divdc3 | all4 | f64 | f64 | .. |
+| ✓ | __divtc3 | all4 | f128 | f128 | .. |
+| ✓ | __divxc3 | all4 | f80 | f80 | .. |
-#### Integer Arithmetic
+[^unused_rl78]: Unused in LLVM, but used for example by rl78.
+[^libgcc_compat]: Unused in backends and for symbol-level compatibility with libgcc.
+[^noptr_faster]: Operations without pointer and without C struct semantics lead to better optimizations.
+[^perf_addition]: Has better performance than standard method due to 2s complement semantics.
+Not provided by LLVM and libgcc.
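The "return on overflow" rows above all follow the same out-pointer convention. A rough sketch of the documented __addosi4 contract (the wrapped sum is returned and the overflow flag is written through the pointer; the exact parameter types here are an assumption):

```zig
const std = @import("std");

extern fn __addosi4(a: i32, b: i32, ov: *c_int) i32;

test "__addosi4 reports overflow through the pointer" {
    var ov: c_int = undefined;

    _ = __addosi4(std.math.maxInt(i32), 1, &ov); // wraps around
    try std.testing.expectEqual(@as(c_int, 1), ov);

    const sum = __addosi4(2, 3, &ov); // no overflow
    try std.testing.expectEqual(@as(c_int, 0), ov);
    try std.testing.expectEqual(@as(i32, 5), sum);
}
```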
-- none none __ashlsi3 // a,b: i32, a << b unused in llvm, TODO (e.g. used by rl78)
-- port llvm __ashldi3 // a,b: u64
-- port llvm __ashlti3 // a,b: u128
-- none none __ashrsi3 // a,b: i32, a >> b arithmetic (sign fill) TODO (e.g. used by rl78)
-- port llvm __ashrdi3 // ..
-- port llvm __ashrti3 //
-- none none __lshrsi3 // a,b: i32, a >> b logical (zero fill) TODO (e.g. used by rl78)
-- port llvm __lshrdi3 //
-- port llvm __lshrti3 //
-- port llvm __negdi2 // a: i32, -a, symbol-level compatibility with libgcc
-- port llvm __negti2 // unnecessary: unused in backends
-- port llvm __mulsi3 // a,b: i32, a * b
-- port llvm __muldi3 //
-- port llvm __multi3 //
-- port llvm __divsi3 // a,b: i32, a / b
-- port llvm __divdi3 //
-- port llvm __divti3 //
-- port llvm __udivsi3 // a,b: u32, a / b
-- port llvm __udivdi3 //
-- port llvm __udivti3 //
-- port llvm __modsi3 // a,b: i32, a % b
-- port llvm __moddi3 //
-- port llvm __modti3 //
-- port llvm __umodsi3 // a,b: u32, a % b
-- port llvm __umoddi3 //
-- port llvm __umodti3 //
-- port llvm __udivmoddi4 // a,b: u32, a / b, rem.* = a % b unsigned
-- port llvm __udivmodti4 //
-- port llvm __udivmodsi4 //
-- port llvm __divmodsi4 // a,b: i32, a / b, rem.* = a % b signed, ARM
-- port llvm __divmoddi4 //
-
-#### Integer Arithmetic with trapping overflow
-
-- dev BitTwiddlingHacks __absvsi2 // abs(a)
-- dev BitTwiddlingHacks __absvdi2 // abs(a)
-- dev BitTwiddlingHacks __absvti2 // abs(a)
-- port llvm __negvsi2 // -a symbol-level compatibility: libgcc
-- port llvm __negvdi2 // -a unnecessary: unused in backends
-- port llvm __negvti2 // -a
-- TODO upstreaming __addvsi3..__mulvti3 after testing panics works
-- dev HackersDelight __addvsi3 // a + b
-- dev HackersDelight __addvdi3 //
-- dev HackersDelight __addvti3 //
-- dev HackersDelight __subvsi3 // a - b
-- dev HackersDelight __subvdi3 //
-- dev HackersDelight __subvti3 //
-- dev HackersDelight __mulvsi3 // a * b
-- dev HackersDelight __mulvdi3 //
-- dev HackersDelight __mulvti3 //
-
-#### Integer Arithmetic which returns if overflow (would be faster without pointer)
-
-- dev HackersDelight __addosi4 // a + b, overflow->ov.*=1 else 0
-- dev HackersDelight __addodi4 // (completeness + performance, llvm does not use them)
-- dev HackersDelight __addoti4 //
-- dev HackersDelight __subosi4 // a - b, overflow->ov.*=1 else 0
-- dev HackersDelight __subodi4 // (completeness + performance, llvm does not use them)
-- dev HackersDelight __suboti4 //
-- dev HackersDelight __mulosi4 // a * b, overflow->ov.*=1 else 0
-- dev HackersDelight __mulodi4 // (required by llvm)
-- dev HackersDelight __muloti4 //
-
-## Float library routines
-
-TODO: review source of implementation
-
-#### Float Conversion
-
-- dev other __extendsfdf2 // a: f32 -> f64, TODO: missing tests
-- dev other __extendsftf2 // a: f32 -> f128
-- dev llvm __extendsfxf2 // a: f32 -> f80, TODO: missing tests
-- dev other __extenddftf2 // a: f64 -> f128
-- dev llvm __extenddfxf2 // a: f64 -> f80
-- dev other __truncdfsf2 // a: f64 -> f32, rounding towards zero
-- dev other __trunctfdf2 // a: f128-> f64
-- dev other __trunctfsf2 // a: f128-> f32
-- dev llvm __truncxfsf2 // a: f80 -> f32, TODO: missing tests
-- dev llvm __truncxfdf2 // a: f80 -> f64, TODO: missing tests
-
-- dev unclear __fixsfsi // a: f32 -> i32, rounding towards zero
-- dev unclear __fixdfsi // a: f64 -> i32
-- dev unclear __fixtfsi // a: f128-> i32
-- dev unclear __fixxfsi // a: f80 -> i32, TODO: missing tests
-- dev unclear __fixsfdi // a: f32 -> i64, rounding towards zero
-- dev unclear __fixdfdi // ..
-- dev unclear __fixtfdi //
-- dev unclear __fixxfdi // TODO: missing tests
-- dev unclear __fixsfti // a: f32 -> i128, rounding towards zero
-- dev unclear __fixdfti // ..
-- dev unclear __fixtfdi //
-- dev unclear __fixxfti // TODO: missing tests
-
-- dev unclear __fixunssfsi // a: f32 -> u32, rounding towards zero. negative values become 0.
-- dev unclear __fixunsdfsi // ..
-- dev unclear __fixunstfsi //
-- dev unclear __fixunsxfsi // TODO: missing tests
-- dev unclear __fixunssfdi // a: f32 -> u64, rounding towards zero. negative values become 0.
-- dev unclear __fixunsdfdi //
-- dev unclear __fixunstfdi //
-- dev unclear __fixunsxfdi // TODO: missing tests
-- dev unclear __fixunssfti // a: f32 -> u128, rounding towards zero. negative values become 0.
-- dev unclear __fixunsdfti //
-- dev unclear __fixunstfdi //
-- dev unclear __fixunsxfti // TODO: some more tests needed for base coverage
-
-- dev unclear __floatsisf // a: i32 -> f32
-- dev unclear __floatsidf // a: i32 -> f64, TODO: missing tests
-- dev unclear __floatsitf // ..
-- dev unclear __floatsixf // TODO: missing tests
-- dev unclear __floatdisf // a: i64 -> f32
-- dev unclear __floatdidf //
-- dev unclear __floatditf //
-- dev unclear __floatdixf // TODO: missing tests
-- dev unclear __floattisf // a: i128-> f32
-- dev unclear __floattidf //
-- dev unclear __floattitf //
-- dev unclear __floattixf // TODO: missing tests
-
-- dev unclear __floatunsisf // a: u32 -> f32
-- dev unclear __floatunsidf // TODO: missing tests
-- dev unclear __floatunsitf //
-- dev unclear __floatunsixf // TODO: missing tests
-- dev unclear __floatundisf // a: u64 -> f32
-- dev unclear __floatundidf //
-- dev unclear __floatunditf //
-- dev unclear __floatundixf // TODO: missing tests
-- dev unclear __floatuntisf // a: u128-> f32
-- dev unclear __floatuntidf //
-- dev unclear __floatuntitf //
-- dev unclear __floatuntixf // TODO: missing tests
-
-#### Float Comparison
-
-- dev other __cmpsf2 // a,b:f32, (a-1,(a==b)->0,(a>b)->1,Nan->1
-- dev other __cmpdf2 // exported from __lesf2, __ledf2, __letf2 (below)
-- dev other __cmptf2 // But: if NaN is a possibility, use another routine.
-- dev other __unordsf2 // a,b:f32, (a==+-NaN or b==+-NaN) -> !=0, else -> 0
-- dev other __unorddf2 // __only reliable for (input!=NaN)__
-- dev other __unordtf2 // TODO: missing tests
-- dev other __eqsf2 // (a!=NaN) and (b!=Nan) and (a==b) -> output=0
-- dev other __eqdf2 //
-- dev other __eqtf2 //
-- dev other __nesf2 // (a==NaN) or (b==Nan) or (a!=b) -> output!=0
-- dev other __nedf2 //
-- dev other __netf2 // __eqtf2 and __netf2 have same return value -> tested with __eqsf2
-- dev other __gesf2 // (a!=Nan) and (b!=Nan) and (a>=b) -> output>=0
-- dev other __gedf2 //
-- dev other __getf2 // TODO: missing tests
-- dev other __ltsf2 // (a!=Nan) and (b!=Nan) and (a output<0
-- dev other __ltdf2 //
-- dev other __lttf2 // TODO: missing tests
-- dev other __lesf2 // (a!=Nan) and (b!=Nan) and (a<=b) -> output<=0
-- dev other __ledf2 //
-- dev other __letf2 // TODO: missing tests
-- dev other __gtsf2 // (a!=Nan) and (b!=Nan) and (a>b) -> output>0
-- dev other __gtdf2 //
-- dev other __gttf2 // TODO: missing tests
-
-#### Float Arithmetic
-
-- dev unclear __addsf3 // a + b f32, TODO: missing tests
-- dev unclear __adddf3 // a + b f64, TODO: missing tests
-- dev unclear __addtf3 // a + b f128
-- dev unclear __addxf3 // a + b f80
-- dev unclear __aeabi_fadd // a + b f64 ARM: AAPCS
-- dev unclear __aeabi_dadd // a + b f64 ARM: AAPCS
-- dev unclear __subsf3 // a - b, TODO: missing tests
-- dev unclear __subdf3 // a - b, TODO: missing tests
-- dev unclear __subtf3 // a - b
-- dev unclear __subxf3 // a - b f80, TODO: missing tests
-- dev unclear __aeabi_fsub // a - b f64 ARM: AAPCS
-- dev unclear __aeabi_dsub // a - b f64 ARM: AAPCS
-- dev unclear __mulsf3 // a * b, TODO: missing tests
-- dev unclear __muldf3 // a * b, TODO: missing tests
-- dev unclear __multf3 // a * b
-- dev unclear __mulxf3 // a * b
-- dev unclear __divsf3 // a / b, TODO: review tests
-- dev unclear __divdf3 // a / b, TODO: review tests
-- dev unclear __divtf3 // a / b
-- dev unclear __divxf3 // a / b
-- dev unclear __negsf2 // -a symbol-level compatibility: libgcc uses this for the rl78
-- dev unclear __negdf2 // -a unnecessary: can be lowered directly to a xor
-- dev unclear __negtf2 // -a, TODO: missing tests
-- dev unclear __negxf2 // -a, TODO: missing tests
-
-#### Floating point raised to integer power
-- dev unclear __powisf2 // a ^ b, TODO
-- dev unclear __powidf2 //
-- dev unclear __powitf2 //
-- dev unclear __powixf2 //
-- dev unclear __mulsc3 // (a+ib) * (c+id)
-- dev unclear __muldc3 //
-- dev unclear __multc3 //
-- dev unclear __mulxc3 //
-- dev unclear __divsc3 // (a+ib) * / (c+id)
-- dev unclear __divdc3 //
-- dev unclear __divtc3 //
-- dev unclear __divxc3 //
-
-## Decimal float library routines
+## Decimal float library routines
BID means Binary Integer Decimal encoding, DPD means Densely Packed Decimal encoding.
BID should only be chosen for binary data, DPD for decimal data (ASCII, Unicode, etc.).
-If possible, use BCD instead of DPD to represent numbers not accurately representable
-in binary like the number 0.2.
+For example, the number 0.2 is not exactly representable in binary floating point.
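+
+As a concrete, minimal illustration (not part of compiler_rt; it only assumes an
+ordinary IEEE binary double), the classic `0.1 + 0.2` case shows the rounding
+error that the decimal encodings avoid:
+
+```zig
+const std = @import("std");
+
+test "0.2 has no exact binary representation" {
+    const a: f64 = 0.1;
+    const b: f64 = 0.2;
+    // In binary floating point the sum rounds to 0.30000000000000004.
+    try std.testing.expect(a + b != 0.3);
+}
+```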
-All routines are TODO.
+| Done | Name | a | b | Out | Comment |
+| ------ | ------------- | --------- | --------- | --------- | ---------------------------- |
+| | | | | | **Decimal Float Conversion** |
+| ✗ | __dpd_extendsddd2 | dec32 | ∅ | dec64 | conversion |
+| ✗ | __bid_extendsddd2 | dec32 | ∅ | dec64 | .. |
+| ✗ | __dpd_extendsdtd2 | dec32 | ∅ | dec128| .. |
+| ✗ | __bid_extendsdtd2 | dec32 | ∅ | dec128| .. |
+| ✗ | __dpd_extendddtd2 | dec64 | ∅ | dec128| .. |
+| ✗ | __bid_extendddtd2 | dec64 | ∅ | dec128| .. |
+| ✗ | __dpd_truncddsd2 | dec64 | ∅ | dec32 | .. |
+| ✗ | __bid_truncddsd2 | dec64 | ∅ | dec32 | .. |
+| ✗ | __dpd_trunctdsd2 | dec128 | ∅ | dec32 | .. |
+| ✗ | __bid_trunctdsd2 | dec128 | ∅ | dec32 | .. |
+| ✗ | __dpd_trunctddd2 | dec128 | ∅ | dec64 | .. |
+| ✗ | __bid_trunctddd2 | dec128 | ∅ | dec64 | .. |
+| ✗ | __dpd_extendsfdd | float | ∅ | dec64 | .. |
+| ✗ | __bid_extendsfdd | float | ∅ | dec64 | .. |
+| ✗ | __dpd_extendsftd | float | ∅ | dec128| .. |
+| ✗ | __bid_extendsftd | float | ∅ | dec128| .. |
+| ✗ | __dpd_extenddftd | double | ∅ | dec128| .. |
+| ✗ | __bid_extenddftd | double | ∅ | dec128| .. |
+| ✗ | __dpd_extendxftd |long double | ∅ | dec128| .. |
+| ✗ | __bid_extendxftd |long double | ∅ | dec128| .. |
+| ✗ | __dpd_truncdfsd | double | ∅ | dec32 | .. |
+| ✗ | __bid_truncdfsd | double | ∅ | dec32 | .. |
+| ✗ | __dpd_truncxfsd |long double | ∅ | dec32 | .. |
+| ✗ | __bid_truncxfsd |long double | ∅ | dec32 | .. |
+| ✗ | __dpd_trunctfsd |long double | ∅ | dec32 | .. |
+| ✗ | __bid_trunctfsd |long double | ∅ | dec32 | .. |
+| ✗ | __dpd_truncxfdd |long double | ∅ | dec64 | .. |
+| ✗ | __bid_truncxfdd |long double | ∅ | dec64 | .. |
+| ✗ | __dpd_trunctfdd |long double | ∅ | dec64 | .. |
+| ✗ | __bid_trunctfdd |long double | ∅ | dec64 | .. |
+| ✗ | __dpd_truncddsf | dec64 | ∅ | float | .. |
+| ✗ | __bid_truncddsf | dec64 | ∅ | float | .. |
+| ✗ | __dpd_trunctdsf | dec128 | ∅ | float | .. |
+| ✗ | __bid_trunctdsf | dec128 | ∅ | float | .. |
+| ✗ | __dpd_extendsddf | dec32 | ∅ | double| .. |
+| ✗ | __bid_extendsddf | dec32 | ∅ | double| .. |
+| ✗ | __dpd_trunctddf | dec128 | ∅ | double| .. |
+| ✗ | __bid_trunctddf | dec128 | ∅ | double| .. |
+| ✗ | __dpd_extendsdxf | dec32 | ∅ |long double| .. |
+| ✗ | __bid_extendsdxf | dec32 | ∅ |long double| .. |
+| ✗ | __dpd_extendddxf | dec64 | ∅ |long double| .. |
+| ✗ | __bid_extendddxf | dec64 | ∅ |long double| .. |
+| ✗ | __dpd_trunctdxf | dec128 | ∅ |long double| .. |
+| ✗ | __bid_trunctdxf | dec128 | ∅ |long double| .. |
+| ✗ | __dpd_extendsdtf | dec32 | ∅ |long double| .. |
+| ✗ | __bid_extendsdtf | dec32 | ∅ |long double| .. |
+| ✗ | __dpd_extendddtf | dec64 | ∅ |long double| .. |
+| ✗ | __bid_extendddtf | dec64 | ∅ |long double| .. |
+| ✗ | __dpd_extendsfsd | float | ∅ | dec32 | same size conversions |
+| ✗ | __bid_extendsfsd | float | ∅ | dec32 | .. |
+| ✗ | __dpd_extenddfdd | double | ∅ | dec64 | .. |
+| ✗ | __bid_extenddfdd | double | ∅ | dec64 | .. |
+| ✗ | __dpd_extendtftd |long double | ∅ | dec128| .. |
+| ✗ | __bid_extendtftd |long double | ∅ | dec128| .. |
+| ✗ | __dpd_truncsdsf | dec32 | ∅ | float | .. |
+| ✗ | __bid_truncsdsf | dec32 | ∅ | float | .. |
+| ✗ | __dpd_truncdddf | dec64 | ∅ | double | .. |
+| ✗ | __bid_truncdddf | dec64 | ∅ | double | .. |
+| ✗ | __dpd_trunctdtf | dec128 | ∅ |long double| .. |
+| ✗ | __bid_trunctdtf | dec128 | ∅ |long double| .. |
+| ✗ | __dpd_fixsdsi | dec32 | ∅ | int | .. |
+| ✗ | __bid_fixsdsi | dec32 | ∅ | int | .. |
+| ✗ | __dpd_fixddsi | dec64 | ∅ | int | .. |
+| ✗ | __bid_fixddsi | dec64 | ∅ | int | .. |
+| ✗ | __dpd_fixtdsi | dec128 | ∅ | int | .. |
+| ✗ | __bid_fixtdsi | dec128 | ∅ | int | .. |
+| ✗ | __dpd_fixsddi | dec32 | ∅ | long | .. |
+| ✗ | __bid_fixsddi | dec32 | ∅ | long | .. |
+| ✗ | __dpd_fixdddi | dec64 | ∅ | long | .. |
+| ✗ | __bid_fixdddi | dec64 | ∅ | long | .. |
+| ✗ | __dpd_fixtddi | dec128 | ∅ | long | .. |
+| ✗ | __bid_fixtddi | dec128 | ∅ | long | .. |
+| ✗ | __dpd_fixunssdsi | dec32 | ∅ |unsigned int | .. All negative values become zero. |
+| ✗ | __bid_fixunssdsi | dec32 | ∅ |unsigned int | .. |
+| ✗ | __dpd_fixunsddsi | dec64 | ∅ |unsigned int | .. |
+| ✗ | __bid_fixunsddsi | dec64 | ∅ |unsigned int | .. |
+| ✗ | __dpd_fixunstdsi | dec128 | ∅ |unsigned int | .. |
+| ✗ | __bid_fixunstdsi | dec128 | ∅ |unsigned int | .. |
+| ✗ | __dpd_fixunssddi | dec32 | ∅ |unsigned long| .. |
+| ✗ | __bid_fixunssddi | dec32 | ∅ |unsigned long| .. |
+| ✗ | __dpd_fixunsdddi | dec64 | ∅ |unsigned long| .. |
+| ✗ | __bid_fixunsdddi | dec64 | ∅ |unsigned long| .. |
+| ✗ | __dpd_fixunstddi | dec128 | ∅ |unsigned long| .. |
+| ✗ | __bid_fixunstddi | dec128 | ∅ |unsigned long| .. |
+| ✗ | __dpd_floatsisd | int | ∅ | dec32 | .. |
+| ✗ | __bid_floatsisd | int | ∅ | dec32 | .. |
+| ✗ | __dpd_floatsidd | int | ∅ | dec64 | .. |
+| ✗ | __bid_floatsidd | int | ∅ | dec64 | .. |
+| ✗ | __dpd_floatsitd | int | ∅ | dec128 | .. |
+| ✗ | __bid_floatsitd | int | ∅ | dec128 | .. |
+| ✗ | __dpd_floatdisd | long | ∅ | dec32 | .. |
+| ✗ | __bid_floatdisd | long | ∅ | dec32 | .. |
+| ✗ | __dpd_floatdidd | long | ∅ | dec64 | .. |
+| ✗ | __bid_floatdidd | long | ∅ | dec64 | .. |
+| ✗ | __dpd_floatditd | long | ∅ | dec128 | .. |
+| ✗ | __bid_floatditd | long | ∅ | dec128 | .. |
+| ✗ | __dpd_floatunssisd | unsigned int| ∅ | dec32 | .. |
+| ✗ | __bid_floatunssisd | unsigned int| ∅ | dec32 | .. |
+| ✗ | __dpd_floatunssidd | unsigned int| ∅ | dec64 | .. |
+| ✗ | __bid_floatunssidd | unsigned int| ∅ | dec64 | .. |
+| ✗ | __dpd_floatunssitd | unsigned int| ∅ | dec128 | .. |
+| ✗ | __bid_floatunssitd | unsigned int| ∅ | dec128 | .. |
+| ✗ | __dpd_floatunsdisd |unsigned long| ∅ | dec32 | .. |
+| ✗ | __bid_floatunsdisd |unsigned long| ∅ | dec32 | .. |
+| ✗ | __dpd_floatunsdidd |unsigned long| ∅ | dec64 | .. |
+| ✗ | __bid_floatunsdidd |unsigned long| ∅ | dec64 | .. |
+| ✗ | __dpd_floatunsditd |unsigned long| ∅ | dec128 | .. |
+| ✗ | __bid_floatunsditd |unsigned long| ∅ | dec128 | .. |
+| | | | | | **Decimal Float Comparison** |
+| ✗ | __dpd_unordsd2 | dec32 | dec32 | c_int | `a==+-NaN or b==+-NaN -> 1(nonzero), else -> 0` |
+| ✗ | __bid_unordsd2 | dec32 | dec32 | c_int | .. |
+| ✗ | __dpd_unorddd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __bid_unorddd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __dpd_unordtd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __bid_unordtd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __dpd_eqsd2 | dec32 | dec32 | c_int |`a!=+-NaN and b!=+-NaN and a==b -> 0, else -> 1(nonzero)`|
+| ✗ | __bid_eqsd2 | dec32 | dec32 | c_int | .. |
+| ✗ | __dpd_eqdd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __bid_eqdd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __dpd_eqtd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __bid_eqtd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __dpd_nesd2 | dec32 | dec32 | c_int | `a==+-NaN or b==+-NaN or a!=b -> 1(nonzero), else -> 0` |
+| ✗ | __bid_nesd2 | dec32 | dec32 | c_int | .. |
+| ✗ | __dpd_nedd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __bid_nedd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __dpd_netd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __bid_netd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __dpd_gesd2 | dec32 | dec32 | c_int | `a!=+-NaN and b!=+-NaN and a>=b -> >=0, else -> <0` |
+| ✗ | __bid_gesd2 | dec32 | dec32 | c_int | .. |
+| ✗ | __dpd_gedd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __bid_gedd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __dpd_getd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __bid_getd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __dpd_ltsd2 | dec32 | dec32 | c_int | `a!=+-NaN and b!=+-NaN and a<b -> <0, else -> >=0` |
+| ✗ | __bid_ltsd2 | dec32 | dec32 | c_int | .. |
+| ✗ | __dpd_ltdd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __bid_ltdd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __dpd_lttd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __bid_lttd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __dpd_lesd2 | dec32 | dec32 | c_int | `a!=+-NaN and b!=+-NaN and a<=b -> <=0, else -> >=0` |
+| ✗ | __bid_lesd2 | dec32 | dec32 | c_int | .. |
+| ✗ | __dpd_ledd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __bid_ledd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __dpd_letd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __bid_letd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __dpd_gtsd2 | dec32 | dec32 | c_int | `a!=+-NaN and b!=+-NaN and a>b -> >0, else -> <=0` |
+| ✗ | __bid_gtsd2 | dec32 | dec32 | c_int | .. |
+| ✗ | __dpd_gtdd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __bid_gtdd2 | dec64 | dec64 | c_int | .. |
+| ✗ | __dpd_gttd2 | dec128 | dec128 | c_int | .. |
+| ✗ | __bid_gttd2 | dec128 | dec128 | c_int | .. |
+| | | | | | **Decimal Float Arithmetic**[^options] |
+| ✗ | __dpd_addsd3 | dec32 | dec32 | dec32 |`a + b`|
+| ✗ | __bid_addsd3 | dec32 | dec32 | dec32 | .. |
+| ✗ | __dpd_adddd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __bid_adddd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __dpd_addtd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __bid_addtd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __dpd_subsd3 | dec32 | dec32 | dec32 |`a - b`|
+| ✗ | __bid_subsd3 | dec32 | dec32 | dec32 | .. |
+| ✗ | __dpd_subdd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __bid_subdd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __dpd_subtd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __bid_subtd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __dpd_mulsd3 | dec32 | dec32 | dec32 |`a * b`|
+| ✗ | __bid_mulsd3 | dec32 | dec32 | dec32 | .. |
+| ✗ | __dpd_muldd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __bid_muldd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __dpd_multd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __bid_multd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __dpd_divsd3 | dec32 | dec32 | dec32 |`a / b`|
+| ✗ | __bid_divsd3 | dec32 | dec32 | dec32 | .. |
+| ✗ | __dpd_divdd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __bid_divdd3 | dec64 | dec64 | dec64 | .. |
+| ✗ | __dpd_divtd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __bid_divtd3 | dec128 | dec128 | dec128 | .. |
+| ✗ | __dpd_negsd2 | dec32 | ∅ | dec32 | `-a` |
+| ✗ | __bid_negsd2 | dec32 | ∅ | dec32 | .. |
+| ✗ | __dpd_negdd2 | dec64 | ∅ | dec64 | .. |
+| ✗ | __bid_negdd2 | dec64 | ∅ | dec64 | .. |
+| ✗ | __dpd_negtd2 | dec128 | ∅ | dec128 | .. |
+| ✗ | __bid_negtd2 | dec128 | ∅ | dec128 | .. |
-#### Decimal float Conversion
+[^options]: These numbers include optional variants of the routines for +-0 and +-NaN.
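+
+For orientation, here is a minimal sketch (not one of the routines listed above,
+and the helper name is ours) of how a caller consumes the comparison return-value
+convention in the table. Since the decimal routines are still unimplemented, it
+uses the binary-float counterpart `__eqdf2`, which follows the same
+"0 means neither operand is NaN and a == b" convention:
+
+```zig
+// Provided by compiler_rt; returns 0 only when neither operand is NaN and a == b.
+extern fn __eqdf2(a: f64, b: f64) i32;
+
+fn doublesEqual(a: f64, b: f64) bool {
+    return __eqdf2(a, b) == 0;
+}
+```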
-- __dpd_extendsddd2 // dec32->dec64
-- __bid_extendsddd2 // dec32->dec64
-- __dpd_extendsdtd2 // dec32->dec128
-- __bid_extendsdtd2 // dec32->dec128
-- __dpd_extendddtd2 // dec64->dec128
-- __bid_extendddtd2 // dec64->dec128
-- __dpd_truncddsd2 // dec64->dec32
-- __bid_truncddsd2 // dec64->dec32
-- __dpd_trunctdsd2 // dec128->dec32
-- __bid_trunctdsd2 // dec128->dec32
-- __dpd_trunctddd2 // dec128->dec64
-- __bid_trunctddd2 // dec128->dec64
+## Fixed-point fractional library routines
-- __dpd_extendsfdd // float->dec64
-- __bid_extendsfdd // float->dec64
-- __dpd_extendsftd // float->dec128
-- __bid_extendsftd // float->dec128
-- __dpd_extenddftd // double->dec128
-- __bid_extenddftd // double->dec128
-- __dpd_extendxftd // long double->dec128
-- __bid_extendxftd // long double->dec128
-- __dpd_truncdfsd // double->dec32
-- __bid_truncdfsd // double->dec32
-- __dpd_truncxfsd // long double->dec32
-- __bid_truncxfsd // long double->dec32
-- __dpd_trunctfsd // long double->dec32
-- __bid_trunctfsd // long double->dec32
-- __dpd_truncxfdd // long double->dec64
-- __bid_truncxfdd // long double->dec64
-- __dpd_trunctfdd // long double->dec64
-- __bid_trunctfdd // long double->dec64
+TODO: brief explanation and implementation.
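+
+As a rough sketch of the kind of operation these routines provide (an
+illustrative helper, not a library entry point, assuming an unsigned Q0.16
+"short _Fract"-style format):
+
+```zig
+// Multiply two unsigned Q0.16 fixed-point fractions: widen, multiply,
+// then keep the upper 16 bits as the fractional result.
+fn mulUQ16(a: u16, b: u16) u16 {
+    const wide = @as(u32, a) * @as(u32, b);
+    return @intCast(u16, wide >> 16);
+}
+```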
-- __dpd_truncddsf // dec64->float
-- __bid_truncddsf // dec64->float
-- __dpd_trunctdsf // dec128->float
-- __bid_trunctdsf // dec128->float
-- __dpd_extendsddf // dec32->double
-- __bid_extendsddf // dec32->double
-- __dpd_trunctddf // dec128->double
-- __bid_trunctddf // dec128->double
-- __dpd_extendsdxf // dec32->long double
-- __bid_extendsdxf // dec32->long double
-- __dpd_extendddxf // dec64->long double
-- __bid_extendddxf // dec64->long double
-- __dpd_trunctdxf // dec128->long double
-- __bid_trunctdxf // dec128->long double
-- __dpd_extendsdtf // dec32->long double
-- __bid_extendsdtf // dec32->long double
-- __dpd_extendddtf // dec64->long double
-- __bid_extendddtf // dec64->long double
+| Done | Name | a | b | Out | Comment |
+| ------ | ------------- | --------- | --------- | --------- | -------------------------- |
+| | | | | | **Fixed-Point Fractional** |
-Same size conversion:
-- __dpd_extendsfsd // float->dec32
-- __bid_extendsfsd // float->dec32
-- __dpd_extenddfdd // double->dec64
-- __bid_extenddfdd // double->dec64
-- __dpd_extendtftd //long double->dec128
-- __bid_extendtftd //long double->dec128
-- __dpd_truncsdsf // dec32->float
-- __bid_truncsdsf // dec32->float
-- __dpd_truncdddf // dec64->float
-- __bid_truncdddf // dec64->float
-- __dpd_trunctdtf // dec128->long double
-- __bid_trunctdtf // dec128->long double
+Further content:
+- aarch64 outline atomics
+- atomics
+- msvc things like _alldiv, _aulldiv, _allrem
+- clear cache
+- tls emulation
+- math routines (cos, sin, tan, ceil, floor, exp, exp2, fabs, log, log10, log2, sincos, sqrt)
+- bcmp
+- ieee float routines (fma, fmax, fmin, fmod, fabs, float rounding)
+- arm routines (memory routines + memclr [setting to 0], divmod routines and stubs for unwind_cpp)
+- memory routines (memcmp, memcpy, memset, memmove)
+- objective-c __isPlatformVersionAtLeast check
+- stack probe routines
-- __dpd_fixsdsi // dec32->int
-- __bid_fixsdsi // dec32->int
-- __dpd_fixddsi // dec64->int
-- __bid_fixddsi // dec64->int
-- __dpd_fixtdsi // dec128->int
-- __bid_fixtdsi // dec128->int
+## Future work
-- __dpd_fixsddi // dec32->long
-- __bid_fixsddi // dec32->long
-- __dpd_fixdddi // dec64->long
-- __bid_fixdddi // dec64->long
-- __dpd_fixtddi // dec128->long
-- __bid_fixtddi // dec128->long
-
-- __dpd_fixunssdsi // dec32->unsigned int, All negative values become zero.
-- __bid_fixunssdsi // dec32->unsigned int
-- __dpd_fixunsddsi // dec64->unsigned int
-- __bid_fixunsddsi // dec64->unsigned int
-- __dpd_fixunstdsi // dec128->unsigned int
-- __bid_fixunstdsi // dec128->unsigned int
-
-- __dpd_fixunssddi // dec32->unsigned long, All negative values become zero.
-- __bid_fixunssddi // dec32->unsigned long
-- __dpd_fixunsdddi // dec64->unsigned long
-- __bid_fixunsdddi // dec64->unsigned long
-- __dpd_fixunstddi // dec128->unsigned long
-- __bid_fixunstddi // dec128->unsigned long
-
-- __dpd_floatsisd // int->dec32
-- __bid_floatsisd // int->dec32
-- __dpd_floatsidd // int->dec64
-- __bid_floatsidd // int->dec64
-- __dpd_floatsitd // int->dec128
-- __bid_floatsitd // int->dec128
-
-- __dpd_floatdisd // long->dec32
-- __bid_floatdisd // long->dec32
-- __dpd_floatdidd // long->dec64
-- __bid_floatdidd // long->dec64
-- __dpd_floatditd // long->dec128
-- __bid_floatditd // long->dec128
-
-- __dpd_floatunssisd // unsigned int->dec32
-- __bid_floatunssisd // unsigned int->dec32
-- __dpd_floatunssidd // unsigned int->dec64
-- __bid_floatunssidd // unsigned int->dec64
-- __dpd_floatunssitd // unsigned int->dec128
-- __bid_floatunssitd // unsigned int->dec128
-
-- __dpd_floatunsdisd // unsigned long->dec32
-- __bid_floatunsdisd // unsigned long->dec32
-- __dpd_floatunsdidd // unsigned long->dec64
-- __bid_floatunsdidd // unsigned long->dec64
-- __dpd_floatunsditd // unsigned long->dec128
-- __bid_floatunsditd // unsigned long->dec128
-
-#### Decimal float Comparison
-
-All decimal float comparison routines return c_int.
-
-- __dpd_unordsd2 // a,b: dec32, a +-NaN or a +-NaN -> 1(nonzero), else -> 0
-- __bid_unordsd2 // a,b: dec32
-- __dpd_unorddd2 // a,b: dec64
-- __bid_unorddd2 // a,b: dec64
-- __dpd_unordtd2 // a,b: dec128
-- __bid_unordtd2 // a,b: dec128
-
-- __dpd_eqsd2 // a,b: dec32, a!=+-NaN and b!=+-Nan and a==b -> 0, else -> 1(nonzero)
-- __bid_eqsd2 // a,b: dec32
-- __dpd_eqdd2 // a,b: dec64
-- __bid_eqdd2 // a,b: dec64
-- __dpd_eqtd2 // a,b: dec128
-- __bid_eqtd2 // a,b: dec128
-
-- __dpd_nesd2 // a,b: dec32, a==+-NaN or b==+-NaN or a!=b -> 1(nonzero), else -> 0
-- __bid_nesd2 // a,b: dec32
-- __dpd_nedd2 // a,b: dec64
-- __bid_nedd2 // a,b: dec64
-- __dpd_netd2 // a,b: dec128
-- __bid_netd2 // a,b: dec128
-
-- __dpd_gesd2 // a,b: dec32, a!=+-NaN and b!=+-NaN and a>=b -> >=0, else -> <0
-- __bid_gesd2 // a,b: dec32
-- __dpd_gedd2 // a,b: dec64
-- __bid_gedd2 // a,b: dec64
-- __dpd_getd2 // a,b: dec128
-- __bid_getd2 // a,b: dec128
-
-- __dpd_ltsd2 // a,b: dec32, a!=+-NaN and b!=+-NaN and a <0, else -> >=0
-- __bid_ltsd2 // a,b: dec32
-- __dpd_ltdd2 // a,b: dec64
-- __bid_ltdd2 // a,b: dec64
-- __dpd_lttd2 // a,b: dec128
-- __bid_lttd2 // a,b: dec128
-
-- __dpd_lesd2 // a,b: dec32, a!=+-NaN and b!=+-NaN and a<=b -> <=0, else -> >=0
-- __bid_lesd2 // a,b: dec32
-- __dpd_ledd2 // a,b: dec64
-- __bid_ledd2 // a,b: dec64
-- __dpd_letd2 // a,b: dec128
-- __bid_letd2 // a,b: dec128
-
-- __dpd_gtsd2 // a,b: dec32, a!=+-NaN and b!=+-NaN and a>b -> >0, else -> <=0
-- __bid_gtsd2 // a,b: dec32
-- __dpd_gtdd2 // a,b: dec64
-- __bid_gtdd2 // a,b: dec64
-- __dpd_gttd2 // a,b: dec128
-- __bid_gttd2 // a,b: dec128
-
-#### Decimal float Arithmetic
-
-These numbers include options with routines for +-0 and +-Nan.
-
-- __dpd_addsd3 // a,b: dec32 -> dec32, a + b
-- __bid_addsd3 // a,b: dec32 -> dec32
-- __dpd_adddd3 // a,b: dec64 -> dec64
-- __bid_adddd3 // a,b: dec64 -> dec64
-- __dpd_addtd3 // a,b: dec128-> dec128
-- __bid_addtd3 // a,b: dec128-> dec128
-- __dpd_subsd3 // a,b: dec32, a - b
-- __bid_subsd3 // a,b: dec32 -> dec32
-- __dpd_subdd3 // a,b: dec64 ..
-- __bid_subdd3 // a,b: dec64
-- __dpd_subtd3 // a,b: dec128
-- __bid_subtd3 // a,b: dec128
-- __dpd_mulsd3 // a,b: dec32, a * b
-- __bid_mulsd3 // a,b: dec32 -> dec32
-- __dpd_muldd3 // a,b: dec64 ..
-- __bid_muldd3 // a,b: dec64
-- __dpd_multd3 // a,b: dec128
-- __bid_multd3 // a,b: dec128
-- __dpd_divsd3 // a,b: dec32, a / b
-- __bid_divsd3 // a,b: dec32 -> dec32
-- __dpd_divdd3 // a,b: dec64 ..
-- __bid_divdd3 // a,b: dec64
-- __dpd_divtd3 // a,b: dec128
-- __bid_divtd3 // a,b: dec128
-- __dpd_negsd2 // a,b: dec32, -a
-- __bid_negsd2 // a,b: dec32 -> dec32
-- __dpd_negdd2 // a,b: dec64 ..
-- __bid_negdd2 // a,b: dec64
-- __dpd_negtd2 // a,b: dec128
-- __bid_negtd2 // a,b: dec128
-
-## Fixed-point fractional library routines
-
-TODO
-
-Too unclear for work items:
-- Miscellaneous routines => unclear, if supported (cache control and stack functions)
-- Zig-specific language runtime features, for example "Arbitrary length integer library routines"
+## Arbitrary length integer library routines
diff --git a/lib/docs/main.js b/lib/docs/main.js
index d488d018a6..fae39c5fba 100644
--- a/lib/docs/main.js
+++ b/lib/docs/main.js
@@ -1354,6 +1354,10 @@ const NAV_MODES = {
payloadHtml += "ptrCast";
break;
}
+ case "qual_cast": {
+ payloadHtml += "qualCast";
+ break;
+ }
case "truncate": {
payloadHtml += "truncate";
break;
@@ -3158,7 +3162,6 @@ const NAV_MODES = {
canonTypeDecls = new Array(zigAnalysis.types.length);
for (let pkgI = 0; pkgI < zigAnalysis.packages.length; pkgI += 1) {
- if (pkgI === zigAnalysis.rootPkg && rootIsStd) continue;
let pkg = zigAnalysis.packages[pkgI];
let pkgNames = canonPkgPaths[pkgI];
if (pkgNames === undefined) continue;
diff --git a/lib/init-exe/build.zig b/lib/init-exe/build.zig
index 29b50b5cc4..2ef5b21fe9 100644
--- a/lib/init-exe/build.zig
+++ b/lib/init-exe/build.zig
@@ -1,34 +1,67 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
+// Although this function looks imperative, note that its job is to
+// declaratively construct a build graph that will be executed by an external
+// runner.
+pub fn build(b: *std.Build) void {
// Standard target options allows the person running `zig build` to choose
// what target to build for. Here we do not override the defaults, which
// means any target is allowed, and the default is native. Other options
// for restricting supported target set are available.
const target = b.standardTargetOptions(.{});
- // Standard release options allow the person running `zig build` to select
- // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
- const mode = b.standardReleaseOptions();
+ // Standard optimization options allow the person running `zig build` to select
+ // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
+ // set a preferred release mode, allowing the user to decide how to optimize.
+ const optimize = b.standardOptimizeOption(.{});
- const exe = b.addExecutable("$", "src/main.zig");
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "$",
+ // In this case the main source file is merely a path, however, in more
+ // complicated build scripts, this could be a generated file.
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+
+ // This declares intent for the executable to be installed into the
+ // standard location when the user invokes the "install" step (the default
+ // step when running `zig build`).
exe.install();
+ // This *creates* a RunStep in the build graph, to be executed when another
+ // step is evaluated that depends on it. The next line below will establish
+ // such a dependency.
const run_cmd = exe.run();
+
+ // By making the run step depend on the install step, it will be run from the
+ // installation directory rather than directly from within the cache directory.
+ // This is not necessary, however, if the application depends on other installed
+ // files, this ensures they will be present and in the expected location.
run_cmd.step.dependOn(b.getInstallStep());
+
+ // This allows the user to pass arguments to the application in the build
+ // command itself, like this: `zig build run -- arg1 arg2 etc`
if (b.args) |args| {
run_cmd.addArgs(args);
}
+ // This creates a build step. It will be visible in the `zig build --help` menu,
+ // and can be selected like this: `zig build run`
+ // This will evaluate the `run` step rather than the default, which is "install".
const run_step = b.step("run", "Run the app");
run_step.dependOn(&run_cmd.step);
- const exe_tests = b.addTest("src/main.zig");
- exe_tests.setTarget(target);
- exe_tests.setBuildMode(mode);
+ // Creates a step for unit testing.
+ const exe_tests = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+ // Similar to creating the run step earlier, this exposes a `test` step to
+ // the `zig build --help` menu, providing a way for the user to request
+ // running the unit tests.
const test_step = b.step("test", "Run unit tests");
test_step.dependOn(&exe_tests.step);
}
diff --git a/lib/init-lib/build.zig b/lib/init-lib/build.zig
index b3876691a2..2887c170e6 100644
--- a/lib/init-lib/build.zig
+++ b/lib/init-lib/build.zig
@@ -1,17 +1,44 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
- // Standard release options allow the person running `zig build` to select
- // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
- const mode = b.standardReleaseOptions();
+// Although this function looks imperative, note that its job is to
+// declaratively construct a build graph that will be executed by an external
+// runner.
+pub fn build(b: *std.Build) void {
+ // Standard target options allows the person running `zig build` to choose
+ // what target to build for. Here we do not override the defaults, which
+ // means any target is allowed, and the default is native. Other options
+ // for restricting supported target set are available.
+ const target = b.standardTargetOptions(.{});
- const lib = b.addStaticLibrary("$", "src/main.zig");
- lib.setBuildMode(mode);
+ // Standard optimization options allow the person running `zig build` to select
+ // between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
+ // set a preferred release mode, allowing the user to decide how to optimize.
+ const optimize = b.standardOptimizeOption(.{});
+
+ const lib = b.addStaticLibrary(.{
+ .name = "$",
+ // In this case the main source file is merely a path, however, in more
+ // complicated build scripts, this could be a generated file.
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+
+ // This declares intent for the library to be installed into the standard
+ // location when the user invokes the "install" step (the default step when
+ // running `zig build`).
lib.install();
- const main_tests = b.addTest("src/main.zig");
- main_tests.setBuildMode(mode);
+ // Creates a step for unit testing.
+ const main_tests = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
+ // This creates a build step. It will be visible in the `zig build --help` menu,
+ // and can be selected like this: `zig build test`
+ // This will evaluate the `test` step rather than the default, which is "install".
const test_step = b.step("test", "Run library tests");
test_step.dependOn(&main_tests.step);
}
diff --git a/lib/libc/mingw/misc/strtoimax.c b/lib/libc/mingw/misc/strtoimax.c
index eef5da97cc..9e75f8a275 100644
--- a/lib/libc/mingw/misc/strtoimax.c
+++ b/lib/libc/mingw/misc/strtoimax.c
@@ -31,10 +31,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b))
intmax_t
-strtoimax(nptr, endptr, base)
- register const char * __restrict__ nptr;
- char ** __restrict__ endptr;
- register int base;
+strtoimax(const char * __restrict__ nptr, char ** __restrict__ endptr, int base)
{
register uintmax_t accum; /* accumulates converted value */
register int n; /* numeral from digit character */
diff --git a/lib/libc/mingw/misc/strtoumax.c b/lib/libc/mingw/misc/strtoumax.c
index e86cd76132..2c24db14d8 100644
--- a/lib/libc/mingw/misc/strtoumax.c
+++ b/lib/libc/mingw/misc/strtoumax.c
@@ -31,10 +31,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b))
uintmax_t
-strtoumax(nptr, endptr, base)
- register const char * __restrict__ nptr;
- char ** __restrict__ endptr;
- register int base;
+strtoumax(const char * __restrict__ nptr, char ** __restrict__ endptr, int base)
{
register uintmax_t accum; /* accumulates converted value */
register uintmax_t next; /* for computing next value of accum */
diff --git a/lib/libc/mingw/misc/wcstoimax.c b/lib/libc/mingw/misc/wcstoimax.c
index 9821cf07cc..99e97b9214 100644
--- a/lib/libc/mingw/misc/wcstoimax.c
+++ b/lib/libc/mingw/misc/wcstoimax.c
@@ -33,10 +33,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b))
intmax_t
-wcstoimax(nptr, endptr, base)
- register const wchar_t * __restrict__ nptr;
- wchar_t ** __restrict__ endptr;
- register int base;
+wcstoimax(const wchar_t * __restrict__ nptr, wchar_t ** __restrict__ endptr, int base)
{
register uintmax_t accum; /* accumulates converted value */
register int n; /* numeral from digit character */
diff --git a/lib/libc/mingw/misc/wcstoumax.c b/lib/libc/mingw/misc/wcstoumax.c
index 0f1ed540c2..97f2c30833 100644
--- a/lib/libc/mingw/misc/wcstoumax.c
+++ b/lib/libc/mingw/misc/wcstoumax.c
@@ -33,10 +33,7 @@
#define valid(n, b) ((n) >= 0 && (n) < (b))
uintmax_t
-wcstoumax(nptr, endptr, base)
- register const wchar_t * __restrict__ nptr;
- wchar_t ** __restrict__ endptr;
- register int base;
+wcstoumax(const wchar_t * __restrict__ nptr, wchar_t ** __restrict__ endptr, int base)
{
register uintmax_t accum; /* accumulates converted value */
register uintmax_t next; /* for computing next value of accum */
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
new file mode 100644
index 0000000000..86b16d234c
--- /dev/null
+++ b/lib/std/Build.zig
@@ -0,0 +1,1774 @@
+const std = @import("std.zig");
+const builtin = @import("builtin");
+const io = std.io;
+const fs = std.fs;
+const mem = std.mem;
+const debug = std.debug;
+const panic = std.debug.panic;
+const assert = debug.assert;
+const log = std.log;
+const ArrayList = std.ArrayList;
+const StringHashMap = std.StringHashMap;
+const Allocator = mem.Allocator;
+const process = std.process;
+const EnvMap = std.process.EnvMap;
+const fmt_lib = std.fmt;
+const File = std.fs.File;
+const CrossTarget = std.zig.CrossTarget;
+const NativeTargetInfo = std.zig.system.NativeTargetInfo;
+const Sha256 = std.crypto.hash.sha2.Sha256;
+const Build = @This();
+
+/// deprecated: use `CompileStep`.
+pub const LibExeObjStep = CompileStep;
+/// deprecated: use `Build`.
+pub const Builder = Build;
+/// deprecated: use `InstallDirStep.Options`
+pub const InstallDirectoryOptions = InstallDirStep.Options;
+
+pub const Step = @import("Build/Step.zig");
+pub const CheckFileStep = @import("Build/CheckFileStep.zig");
+pub const CheckObjectStep = @import("Build/CheckObjectStep.zig");
+pub const ConfigHeaderStep = @import("Build/ConfigHeaderStep.zig");
+pub const EmulatableRunStep = @import("Build/EmulatableRunStep.zig");
+pub const FmtStep = @import("Build/FmtStep.zig");
+pub const InstallArtifactStep = @import("Build/InstallArtifactStep.zig");
+pub const InstallDirStep = @import("Build/InstallDirStep.zig");
+pub const InstallFileStep = @import("Build/InstallFileStep.zig");
+pub const InstallRawStep = @import("Build/InstallRawStep.zig");
+pub const CompileStep = @import("Build/CompileStep.zig");
+pub const LogStep = @import("Build/LogStep.zig");
+pub const OptionsStep = @import("Build/OptionsStep.zig");
+pub const RemoveDirStep = @import("Build/RemoveDirStep.zig");
+pub const RunStep = @import("Build/RunStep.zig");
+pub const TranslateCStep = @import("Build/TranslateCStep.zig");
+pub const WriteFileStep = @import("Build/WriteFileStep.zig");
+
+install_tls: TopLevelStep,
+uninstall_tls: TopLevelStep,
+allocator: Allocator,
+user_input_options: UserInputOptionsMap,
+available_options_map: AvailableOptionsMap,
+available_options_list: ArrayList(AvailableOption),
+verbose: bool,
+verbose_link: bool,
+verbose_cc: bool,
+verbose_air: bool,
+verbose_llvm_ir: bool,
+verbose_cimport: bool,
+verbose_llvm_cpu_features: bool,
+/// The purpose of executing the command is for a human to read compile errors from the terminal
+prominent_compile_errors: bool,
+color: enum { auto, on, off } = .auto,
+reference_trace: ?u32 = null,
+invalid_user_input: bool,
+zig_exe: []const u8,
+default_step: *Step,
+env_map: *EnvMap,
+top_level_steps: ArrayList(*TopLevelStep),
+install_prefix: []const u8,
+dest_dir: ?[]const u8,
+lib_dir: []const u8,
+exe_dir: []const u8,
+h_dir: []const u8,
+install_path: []const u8,
+sysroot: ?[]const u8 = null,
+search_prefixes: ArrayList([]const u8),
+libc_file: ?[]const u8 = null,
+installed_files: ArrayList(InstalledFile),
+/// Path to the directory containing build.zig.
+build_root: []const u8,
+cache_root: []const u8,
+global_cache_root: []const u8,
+/// zig lib dir
+override_lib_dir: ?[]const u8,
+vcpkg_root: VcpkgRoot = .unattempted,
+pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null,
+args: ?[][]const u8 = null,
+debug_log_scopes: []const []const u8 = &.{},
+debug_compile_errors: bool = false,
+
+/// Experimental. Use system Darling installation to run cross compiled macOS build artifacts.
+enable_darling: bool = false,
+/// Use system QEMU installation to run cross compiled foreign architecture build artifacts.
+enable_qemu: bool = false,
+/// Darwin. Use Rosetta to run x86_64 macOS build artifacts on arm64 macOS.
+enable_rosetta: bool = false,
+/// Use system Wasmtime installation to run cross compiled wasm/wasi build artifacts.
+enable_wasmtime: bool = false,
+/// Use system Wine installation to run cross compiled Windows build artifacts.
+enable_wine: bool = false,
+/// After following the steps in https://github.com/ziglang/zig/wiki/Updating-libc#glibc,
+/// this will be the directory $glibc-build-dir/install/glibcs
+/// Given the example of the aarch64 target, this is the directory
+/// that contains the path `aarch64-linux-gnu/lib/ld-linux-aarch64.so.1`.
+glibc_runtimes_dir: ?[]const u8 = null,
+
+/// Information about the native target. Computed before build() is invoked.
+host: NativeTargetInfo,
+
+dep_prefix: []const u8 = "",
+
+modules: std.StringArrayHashMap(*Module),
+
+pub const ExecError = error{
+ ReadFailure,
+ ExitCodeFailure,
+ ProcessTerminated,
+ ExecNotSupported,
+} || std.ChildProcess.SpawnError;
+
+pub const PkgConfigError = error{
+ PkgConfigCrashed,
+ PkgConfigFailed,
+ PkgConfigNotInstalled,
+ PkgConfigInvalidOutput,
+};
+
+pub const PkgConfigPkg = struct {
+ name: []const u8,
+ desc: []const u8,
+};
+
+pub const CStd = enum {
+ C89,
+ C99,
+ C11,
+};
+
+const UserInputOptionsMap = StringHashMap(UserInputOption);
+const AvailableOptionsMap = StringHashMap(AvailableOption);
+
+const AvailableOption = struct {
+ name: []const u8,
+ type_id: TypeId,
+ description: []const u8,
+ /// If the `type_id` is `enum` this provides the list of enum options
+ enum_options: ?[]const []const u8,
+};
+
+const UserInputOption = struct {
+ name: []const u8,
+ value: UserValue,
+ used: bool,
+};
+
+const UserValue = union(enum) {
+ flag: void,
+ scalar: []const u8,
+ list: ArrayList([]const u8),
+ map: StringHashMap(*const UserValue),
+};
+
+const TypeId = enum {
+ bool,
+ int,
+ float,
+ @"enum",
+ string,
+ list,
+};
+
+const TopLevelStep = struct {
+ pub const base_id = .top_level;
+
+ step: Step,
+ description: []const u8,
+};
+
+pub const DirList = struct {
+ lib_dir: ?[]const u8 = null,
+ exe_dir: ?[]const u8 = null,
+ include_dir: ?[]const u8 = null,
+};
+
+pub fn create(
+ allocator: Allocator,
+ zig_exe: []const u8,
+ build_root: []const u8,
+ cache_root: []const u8,
+ global_cache_root: []const u8,
+ host: NativeTargetInfo,
+) !*Build {
+ const env_map = try allocator.create(EnvMap);
+ env_map.* = try process.getEnvMap(allocator);
+
+ const self = try allocator.create(Build);
+ self.* = Build{
+ .zig_exe = zig_exe,
+ .build_root = build_root,
+ .cache_root = try fs.path.relative(allocator, build_root, cache_root),
+ .global_cache_root = global_cache_root,
+ .verbose = false,
+ .verbose_link = false,
+ .verbose_cc = false,
+ .verbose_air = false,
+ .verbose_llvm_ir = false,
+ .verbose_cimport = false,
+ .verbose_llvm_cpu_features = false,
+ .prominent_compile_errors = false,
+ .invalid_user_input = false,
+ .allocator = allocator,
+ .user_input_options = UserInputOptionsMap.init(allocator),
+ .available_options_map = AvailableOptionsMap.init(allocator),
+ .available_options_list = ArrayList(AvailableOption).init(allocator),
+ .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
+ .default_step = undefined,
+ .env_map = env_map,
+ .search_prefixes = ArrayList([]const u8).init(allocator),
+ .install_prefix = undefined,
+ .lib_dir = undefined,
+ .exe_dir = undefined,
+ .h_dir = undefined,
+ .dest_dir = env_map.get("DESTDIR"),
+ .installed_files = ArrayList(InstalledFile).init(allocator),
+ .install_tls = TopLevelStep{
+ .step = Step.initNoOp(.top_level, "install", allocator),
+ .description = "Copy build artifacts to prefix path",
+ },
+ .uninstall_tls = TopLevelStep{
+ .step = Step.init(.top_level, "uninstall", allocator, makeUninstall),
+ .description = "Remove build artifacts from prefix path",
+ },
+ .override_lib_dir = null,
+ .install_path = undefined,
+ .args = null,
+ .host = host,
+ .modules = std.StringArrayHashMap(*Module).init(allocator),
+ };
+ try self.top_level_steps.append(&self.install_tls);
+ try self.top_level_steps.append(&self.uninstall_tls);
+ self.default_step = &self.install_tls.step;
+ return self;
+}
+
+fn createChild(
+ parent: *Build,
+ dep_name: []const u8,
+ build_root: []const u8,
+ args: anytype,
+) !*Build {
+ const child = try createChildOnly(parent, dep_name, build_root);
+ try applyArgs(child, args);
+ return child;
+}
+
+fn createChildOnly(parent: *Build, dep_name: []const u8, build_root: []const u8) !*Build {
+ const allocator = parent.allocator;
+ const child = try allocator.create(Build);
+ child.* = .{
+ .allocator = allocator,
+ .install_tls = .{
+ .step = Step.initNoOp(.top_level, "install", allocator),
+ .description = "Copy build artifacts to prefix path",
+ },
+ .uninstall_tls = .{
+ .step = Step.init(.top_level, "uninstall", allocator, makeUninstall),
+ .description = "Remove build artifacts from prefix path",
+ },
+ .user_input_options = UserInputOptionsMap.init(allocator),
+ .available_options_map = AvailableOptionsMap.init(allocator),
+ .available_options_list = ArrayList(AvailableOption).init(allocator),
+ .verbose = parent.verbose,
+ .verbose_link = parent.verbose_link,
+ .verbose_cc = parent.verbose_cc,
+ .verbose_air = parent.verbose_air,
+ .verbose_llvm_ir = parent.verbose_llvm_ir,
+ .verbose_cimport = parent.verbose_cimport,
+ .verbose_llvm_cpu_features = parent.verbose_llvm_cpu_features,
+ .prominent_compile_errors = parent.prominent_compile_errors,
+ .color = parent.color,
+ .reference_trace = parent.reference_trace,
+ .invalid_user_input = false,
+ .zig_exe = parent.zig_exe,
+ .default_step = undefined,
+ .env_map = parent.env_map,
+ .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
+ .install_prefix = undefined,
+ .dest_dir = parent.dest_dir,
+ .lib_dir = parent.lib_dir,
+ .exe_dir = parent.exe_dir,
+ .h_dir = parent.h_dir,
+ .install_path = parent.install_path,
+ .sysroot = parent.sysroot,
+ .search_prefixes = ArrayList([]const u8).init(allocator),
+ .libc_file = parent.libc_file,
+ .installed_files = ArrayList(InstalledFile).init(allocator),
+ .build_root = build_root,
+ .cache_root = parent.cache_root,
+ .global_cache_root = parent.global_cache_root,
+ .override_lib_dir = parent.override_lib_dir,
+ .debug_log_scopes = parent.debug_log_scopes,
+ .debug_compile_errors = parent.debug_compile_errors,
+ .enable_darling = parent.enable_darling,
+ .enable_qemu = parent.enable_qemu,
+ .enable_rosetta = parent.enable_rosetta,
+ .enable_wasmtime = parent.enable_wasmtime,
+ .enable_wine = parent.enable_wine,
+ .glibc_runtimes_dir = parent.glibc_runtimes_dir,
+ .host = parent.host,
+ .dep_prefix = parent.fmt("{s}{s}.", .{ parent.dep_prefix, dep_name }),
+ .modules = std.StringArrayHashMap(*Module).init(allocator),
+ };
+ try child.top_level_steps.append(&child.install_tls);
+ try child.top_level_steps.append(&child.uninstall_tls);
+ child.default_step = &child.install_tls.step;
+ return child;
+}
+
+fn applyArgs(b: *Build, args: anytype) !void {
+ inline for (@typeInfo(@TypeOf(args)).Struct.fields) |field| {
+ const v = @field(args, field.name);
+ const T = @TypeOf(v);
+ switch (T) {
+ CrossTarget => {
+ try b.user_input_options.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = try v.zigTriple(b.allocator) },
+ .used = false,
+ });
+ try b.user_input_options.put("cpu", .{
+ .name = "cpu",
+ .value = .{ .scalar = try serializeCpu(b.allocator, v.getCpu()) },
+ .used = false,
+ });
+ },
+ []const u8 => {
+ try b.user_input_options.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = v },
+ .used = false,
+ });
+ },
+ else => switch (@typeInfo(T)) {
+ .Bool => {
+ try b.user_input_options.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = if (v) "true" else "false" },
+ .used = false,
+ });
+ },
+ .Enum => {
+ try b.user_input_options.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = @tagName(v) },
+ .used = false,
+ });
+ },
+ .Int => {
+ try b.user_input_options.put(field.name, .{
+ .name = field.name,
+ .value = .{ .scalar = try std.fmt.allocPrint(b.allocator, "{d}", .{v}) },
+ .used = false,
+ });
+ },
+ else => @compileError("option '" ++ field.name ++ "' has unsupported type: " ++ @typeName(T)),
+ },
+ }
+ }
+ const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
+ // Random bytes to make unique. Refresh this with new random bytes when
+ // implementation is modified in a non-backwards-compatible way.
+ var hash = Hasher.init("ZaEsvQ5ClaA2IdH9");
+ hash.update(b.dep_prefix);
+ // TODO additionally update the hash with `args`.
+
+ var digest: [16]u8 = undefined;
+ hash.final(&digest);
+ var hash_basename: [digest.len * 2]u8 = undefined;
+ _ = std.fmt.bufPrint(&hash_basename, "{s}", .{std.fmt.fmtSliceHexLower(&digest)}) catch
+ unreachable;
+
+ const install_prefix = b.pathJoin(&.{ b.cache_root, "i", &hash_basename });
+ b.resolveInstallPrefix(install_prefix, .{});
+}
+
+pub fn destroy(self: *Build) void {
+ self.env_map.deinit();
+ self.top_level_steps.deinit();
+ self.allocator.destroy(self);
+}
+
+/// This function is intended to be called by lib/build_runner.zig, not a build.zig file.
+pub fn resolveInstallPrefix(self: *Build, install_prefix: ?[]const u8, dir_list: DirList) void {
+ if (self.dest_dir) |dest_dir| {
+ self.install_prefix = install_prefix orelse "/usr";
+ self.install_path = self.pathJoin(&.{ dest_dir, self.install_prefix });
+ } else {
+ self.install_prefix = install_prefix orelse
+ (self.pathJoin(&.{ self.build_root, "zig-out" }));
+ self.install_path = self.install_prefix;
+ }
+
+ var lib_list = [_][]const u8{ self.install_path, "lib" };
+ var exe_list = [_][]const u8{ self.install_path, "bin" };
+ var h_list = [_][]const u8{ self.install_path, "include" };
+
+ if (dir_list.lib_dir) |dir| {
+ if (std.fs.path.isAbsolute(dir)) lib_list[0] = self.dest_dir orelse "";
+ lib_list[1] = dir;
+ }
+
+ if (dir_list.exe_dir) |dir| {
+ if (std.fs.path.isAbsolute(dir)) exe_list[0] = self.dest_dir orelse "";
+ exe_list[1] = dir;
+ }
+
+ if (dir_list.include_dir) |dir| {
+ if (std.fs.path.isAbsolute(dir)) h_list[0] = self.dest_dir orelse "";
+ h_list[1] = dir;
+ }
+
+ self.lib_dir = self.pathJoin(&lib_list);
+ self.exe_dir = self.pathJoin(&exe_list);
+ self.h_dir = self.pathJoin(&h_list);
+}
+
+pub fn addOptions(self: *Build) *OptionsStep {
+ return OptionsStep.create(self);
+}
+
+pub const ExecutableOptions = struct {
+ name: []const u8,
+ root_source_file: ?FileSource = null,
+ version: ?std.builtin.Version = null,
+ target: CrossTarget = .{},
+ optimize: std.builtin.Mode = .Debug,
+ linkage: ?CompileStep.Linkage = null,
+};
+
+pub fn addExecutable(b: *Build, options: ExecutableOptions) *CompileStep {
+ return CompileStep.create(b, .{
+ .name = options.name,
+ .root_source_file = options.root_source_file,
+ .version = options.version,
+ .target = options.target,
+ .optimize = options.optimize,
+ .kind = .exe,
+ .linkage = options.linkage,
+ });
+}
+
+pub const ObjectOptions = struct {
+ name: []const u8,
+ root_source_file: ?FileSource = null,
+ target: CrossTarget,
+ optimize: std.builtin.Mode,
+};
+
+pub fn addObject(b: *Build, options: ObjectOptions) *CompileStep {
+ return CompileStep.create(b, .{
+ .name = options.name,
+ .root_source_file = options.root_source_file,
+ .target = options.target,
+ .optimize = options.optimize,
+ .kind = .obj,
+ });
+}
+
+pub const SharedLibraryOptions = struct {
+ name: []const u8,
+ root_source_file: ?FileSource = null,
+ version: ?std.builtin.Version = null,
+ target: CrossTarget,
+ optimize: std.builtin.Mode,
+};
+
+pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *CompileStep {
+ return CompileStep.create(b, .{
+ .name = options.name,
+ .root_source_file = options.root_source_file,
+ .kind = .lib,
+ .linkage = .dynamic,
+ .version = options.version,
+ .target = options.target,
+ .optimize = options.optimize,
+ });
+}
+
+pub const StaticLibraryOptions = struct {
+ name: []const u8,
+ root_source_file: ?FileSource = null,
+ target: CrossTarget,
+ optimize: std.builtin.Mode,
+ version: ?std.builtin.Version = null,
+};
+
+pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *CompileStep {
+ return CompileStep.create(b, .{
+ .name = options.name,
+ .root_source_file = options.root_source_file,
+ .kind = .lib,
+ .linkage = .static,
+ .version = options.version,
+ .target = options.target,
+ .optimize = options.optimize,
+ });
+}
+
+pub const TestOptions = struct {
+ name: []const u8 = "test",
+ kind: CompileStep.Kind = .@"test",
+ root_source_file: FileSource,
+ target: CrossTarget = .{},
+ optimize: std.builtin.Mode = .Debug,
+ version: ?std.builtin.Version = null,
+};
+
+pub fn addTest(b: *Build, options: TestOptions) *CompileStep {
+ return CompileStep.create(b, .{
+ .name = options.name,
+ .kind = options.kind,
+ .root_source_file = options.root_source_file,
+ .target = options.target,
+ .optimize = options.optimize,
+ });
+}
+
+pub const AssemblyOptions = struct {
+ name: []const u8,
+ source_file: FileSource,
+ target: CrossTarget,
+ optimize: std.builtin.Mode,
+};
+
+pub fn addAssembly(b: *Build, options: AssemblyOptions) *CompileStep {
+ const obj_step = CompileStep.create(b, .{
+ .name = options.name,
+ .root_source_file = null,
+ .target = options.target,
+ .optimize = options.optimize,
+ });
+ obj_step.addAssemblyFileSource(options.source_file.dupe(b));
+ return obj_step;
+}
+
+pub const AddModuleOptions = struct {
+ name: []const u8,
+ source_file: FileSource,
+ dependencies: []const ModuleDependency = &.{},
+};
+
+pub fn addModule(b: *Build, options: AddModuleOptions) void {
+ b.modules.put(b.dupe(options.name), b.createModule(.{
+ .source_file = options.source_file,
+ .dependencies = options.dependencies,
+ })) catch @panic("OOM");
+}
+
+pub const ModuleDependency = struct {
+ name: []const u8,
+ module: *Module,
+};
+
+pub const CreateModuleOptions = struct {
+ source_file: FileSource,
+ dependencies: []const ModuleDependency = &.{},
+};
+
+/// Prefer to use `addModule` which will make the module available to other
+/// packages which depend on this package.
+pub fn createModule(b: *Build, options: CreateModuleOptions) *Module {
+ const module = b.allocator.create(Module) catch @panic("OOM");
+ module.* = .{
+ .builder = b,
+ .source_file = options.source_file,
+ .dependencies = moduleDependenciesToArrayHashMap(b.allocator, options.dependencies),
+ };
+ return module;
+}
+
+fn moduleDependenciesToArrayHashMap(arena: Allocator, deps: []const ModuleDependency) std.StringArrayHashMap(*Module) {
+ var result = std.StringArrayHashMap(*Module).init(arena);
+ for (deps) |dep| {
+ result.put(dep.name, dep.module) catch @panic("OOM");
+ }
+ return result;
+}
+
+/// Initializes a RunStep with argv, which must at least have the path to the
+/// executable. More command line arguments can be added with `addArg`,
+/// `addArgs`, and `addArtifactArg`.
+/// Be careful using this function, as it introduces a system dependency.
+/// To run an executable built with zig build, see `CompileStep.run`.
+pub fn addSystemCommand(self: *Build, argv: []const []const u8) *RunStep {
+ assert(argv.len >= 1);
+ const run_step = RunStep.create(self, self.fmt("run {s}", .{argv[0]}));
+ run_step.addArgs(argv);
+ return run_step;
+}
+
+/// Using the `values` provided, produces a C header file, possibly based on a
+/// template input file (e.g. config.h.in).
+/// When an input template file is provided, this function will fail the build
+/// when an option not found in the input file is provided in `values`, and
+/// when an option found in the input file is missing from `values`.
+pub fn addConfigHeader(
+ b: *Build,
+ options: ConfigHeaderStep.Options,
+ values: anytype,
+) *ConfigHeaderStep {
+ const config_header_step = ConfigHeaderStep.create(b, options);
+ config_header_step.addValues(values);
+ return config_header_step;
+}
+
+/// Allocator.dupe without the need to handle out of memory.
+pub fn dupe(self: *Build, bytes: []const u8) []u8 {
+ return self.allocator.dupe(u8, bytes) catch @panic("OOM");
+}
+
+/// Duplicates an array of strings without the need to handle out of memory.
+pub fn dupeStrings(self: *Build, strings: []const []const u8) [][]u8 {
+ const array = self.allocator.alloc([]u8, strings.len) catch @panic("OOM");
+ for (strings) |s, i| {
+ array[i] = self.dupe(s);
+ }
+ return array;
+}
+
+/// Duplicates a path and converts all slashes to the OS's canonical path separator.
+pub fn dupePath(self: *Build, bytes: []const u8) []u8 {
+ const the_copy = self.dupe(bytes);
+ for (the_copy) |*byte| {
+ switch (byte.*) {
+ '/', '\\' => byte.* = fs.path.sep,
+ else => {},
+ }
+ }
+ return the_copy;
+}
+
+pub fn addWriteFile(self: *Build, file_path: []const u8, data: []const u8) *WriteFileStep {
+ const write_file_step = self.addWriteFiles();
+ write_file_step.add(file_path, data);
+ return write_file_step;
+}
+
+pub fn addWriteFiles(self: *Build) *WriteFileStep {
+ const write_file_step = self.allocator.create(WriteFileStep) catch @panic("OOM");
+ write_file_step.* = WriteFileStep.init(self);
+ return write_file_step;
+}
+
+pub fn addLog(self: *Build, comptime format: []const u8, args: anytype) *LogStep {
+ const data = self.fmt(format, args);
+ const log_step = self.allocator.create(LogStep) catch @panic("OOM");
+ log_step.* = LogStep.init(self, data);
+ return log_step;
+}
+
+pub fn addRemoveDirTree(self: *Build, dir_path: []const u8) *RemoveDirStep {
+ const remove_dir_step = self.allocator.create(RemoveDirStep) catch @panic("OOM");
+ remove_dir_step.* = RemoveDirStep.init(self, dir_path);
+ return remove_dir_step;
+}
+
+pub fn addFmt(self: *Build, paths: []const []const u8) *FmtStep {
+ return FmtStep.create(self, paths);
+}
+
+pub fn addTranslateC(self: *Build, options: TranslateCStep.Options) *TranslateCStep {
+ return TranslateCStep.create(self, options);
+}
+
+pub fn make(self: *Build, step_names: []const []const u8) !void {
+ try self.makePath(self.cache_root);
+
+ var wanted_steps = ArrayList(*Step).init(self.allocator);
+ defer wanted_steps.deinit();
+
+ if (step_names.len == 0) {
+ try wanted_steps.append(self.default_step);
+ } else {
+ for (step_names) |step_name| {
+ const s = try self.getTopLevelStepByName(step_name);
+ try wanted_steps.append(s);
+ }
+ }
+
+ for (wanted_steps.items) |s| {
+ try self.makeOneStep(s);
+ }
+}
+
+pub fn getInstallStep(self: *Build) *Step {
+ return &self.install_tls.step;
+}
+
+pub fn getUninstallStep(self: *Build) *Step {
+ return &self.uninstall_tls.step;
+}
+
+fn makeUninstall(uninstall_step: *Step) anyerror!void {
+ const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
+ const self = @fieldParentPtr(Build, "uninstall_tls", uninstall_tls);
+
+ for (self.installed_files.items) |installed_file| {
+ const full_path = self.getInstallPath(installed_file.dir, installed_file.path);
+ if (self.verbose) {
+ log.info("rm {s}", .{full_path});
+ }
+ fs.cwd().deleteTree(full_path) catch {};
+ }
+
+ // TODO remove empty directories
+}
+
+fn makeOneStep(self: *Build, s: *Step) anyerror!void {
+ if (s.loop_flag) {
+ log.err("Dependency loop detected:\n {s}", .{s.name});
+ return error.DependencyLoopDetected;
+ }
+ s.loop_flag = true;
+
+ for (s.dependencies.items) |dep| {
+ self.makeOneStep(dep) catch |err| {
+ if (err == error.DependencyLoopDetected) {
+ log.err(" {s}", .{s.name});
+ }
+ return err;
+ };
+ }
+
+ s.loop_flag = false;
+
+ try s.make();
+}
+
+fn getTopLevelStepByName(self: *Build, name: []const u8) !*Step {
+ for (self.top_level_steps.items) |top_level_step| {
+ if (mem.eql(u8, top_level_step.step.name, name)) {
+ return &top_level_step.step;
+ }
+ }
+ log.err("Cannot run step '{s}' because it does not exist", .{name});
+ return error.InvalidStepName;
+}
+
+pub fn option(self: *Build, comptime T: type, name_raw: []const u8, description_raw: []const u8) ?T {
+ const name = self.dupe(name_raw);
+ const description = self.dupe(description_raw);
+ const type_id = comptime typeToEnum(T);
+ const enum_options = if (type_id == .@"enum") blk: {
+ const fields = comptime std.meta.fields(T);
+ var options = ArrayList([]const u8).initCapacity(self.allocator, fields.len) catch @panic("OOM");
+
+ inline for (fields) |field| {
+ options.appendAssumeCapacity(field.name);
+ }
+
+ break :blk options.toOwnedSlice() catch @panic("OOM");
+ } else null;
+ const available_option = AvailableOption{
+ .name = name,
+ .type_id = type_id,
+ .description = description,
+ .enum_options = enum_options,
+ };
+ if ((self.available_options_map.fetchPut(name, available_option) catch @panic("OOM")) != null) {
+ panic("Option '{s}' declared twice", .{name});
+ }
+ self.available_options_list.append(available_option) catch @panic("OOM");
+
+ const option_ptr = self.user_input_options.getPtr(name) orelse return null;
+ option_ptr.used = true;
+ switch (type_id) {
+ .bool => switch (option_ptr.value) {
+ .flag => return true,
+ .scalar => |s| {
+ if (mem.eql(u8, s, "true")) {
+ return true;
+ } else if (mem.eql(u8, s, "false")) {
+ return false;
+ } else {
+ log.err("Expected -D{s} to be a boolean, but received '{s}'\n", .{ name, s });
+ self.markInvalidUserInput();
+ return null;
+ }
+ },
+ .list, .map => {
+ log.err("Expected -D{s} to be a boolean, but received a {s}.\n", .{
+ name, @tagName(option_ptr.value),
+ });
+ self.markInvalidUserInput();
+ return null;
+ },
+ },
+ .int => switch (option_ptr.value) {
+ .flag, .list, .map => {
+ log.err("Expected -D{s} to be an integer, but received a {s}.\n", .{
+ name, @tagName(option_ptr.value),
+ });
+ self.markInvalidUserInput();
+ return null;
+ },
+ .scalar => |s| {
+ const n = std.fmt.parseInt(T, s, 10) catch |err| switch (err) {
+ error.Overflow => {
+ log.err("-D{s} value {s} cannot fit into type {s}.\n", .{ name, s, @typeName(T) });
+ self.markInvalidUserInput();
+ return null;
+ },
+ else => {
+ log.err("Expected -D{s} to be an integer of type {s}.\n", .{ name, @typeName(T) });
+ self.markInvalidUserInput();
+ return null;
+ },
+ };
+ return n;
+ },
+ },
+ .float => switch (option_ptr.value) {
+ .flag, .map, .list => {
+ log.err("Expected -D{s} to be a float, but received a {s}.\n", .{
+ name, @tagName(option_ptr.value),
+ });
+ self.markInvalidUserInput();
+ return null;
+ },
+ .scalar => |s| {
+ const n = std.fmt.parseFloat(T, s) catch {
+ log.err("Expected -D{s} to be a float of type {s}.\n", .{ name, @typeName(T) });
+ self.markInvalidUserInput();
+ return null;
+ };
+ return n;
+ },
+ },
+ .@"enum" => switch (option_ptr.value) {
+ .flag, .map, .list => {
+ log.err("Expected -D{s} to be an enum, but received a {s}.\n", .{
+ name, @tagName(option_ptr.value),
+ });
+ self.markInvalidUserInput();
+ return null;
+ },
+ .scalar => |s| {
+ if (std.meta.stringToEnum(T, s)) |enum_lit| {
+ return enum_lit;
+ } else {
+ log.err("Expected -D{s} to be of type {s}.\n", .{ name, @typeName(T) });
+ self.markInvalidUserInput();
+ return null;
+ }
+ },
+ },
+ .string => switch (option_ptr.value) {
+ .flag, .list, .map => {
+ log.err("Expected -D{s} to be a string, but received a {s}.\n", .{
+ name, @tagName(option_ptr.value),
+ });
+ self.markInvalidUserInput();
+ return null;
+ },
+ .scalar => |s| return s,
+ },
+ .list => switch (option_ptr.value) {
+ .flag, .map => {
+ log.err("Expected -D{s} to be a list, but received a {s}.\n", .{
+ name, @tagName(option_ptr.value),
+ });
+ self.markInvalidUserInput();
+ return null;
+ },
+ .scalar => |s| {
+ return self.allocator.dupe([]const u8, &[_][]const u8{s}) catch @panic("OOM");
+ },
+ .list => |lst| return lst.items,
+ },
+ }
+}
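+
+// Illustrative use of `option` from a build script (the option names are placeholders):
+//
+//     const tracy = b.option(bool, "tracy", "Enable Tracy integration") orelse false;
+//     const level = b.option([]const u8, "log-level", "Runtime log level") orelse "info";
+//
+// Values arrive on the command line as `-Dtracy=true -Dlog-level=debug`; a value of the
+// wrong shape marks the user input as invalid rather than failing immediately.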
+
+pub fn step(self: *Build, name: []const u8, description: []const u8) *Step {
+ const step_info = self.allocator.create(TopLevelStep) catch @panic("OOM");
+ step_info.* = TopLevelStep{
+ .step = Step.initNoOp(.top_level, name, self.allocator),
+ .description = self.dupe(description),
+ };
+ self.top_level_steps.append(step_info) catch @panic("OOM");
+ return &step_info.step;
+}
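+
+// Illustrative use of `step` from a build script ("lint" and `lint_cmd` are placeholders):
+//
+//     const lint_step = b.step("lint", "Run the linter");
+//     lint_step.dependOn(&lint_cmd.step);
+//
+// Top-level steps are selectable by name on the command line, e.g. `zig build lint`.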
+
+pub const StandardOptimizeOptionOptions = struct {
+ preferred_optimize_mode: ?std.builtin.Mode = null,
+};
+
+pub fn standardOptimizeOption(self: *Build, options: StandardOptimizeOptionOptions) std.builtin.Mode {
+ if (options.preferred_optimize_mode) |mode| {
+ if (self.option(bool, "release", "optimize for end users") orelse false) {
+ return mode;
+ } else {
+ return .Debug;
+ }
+ } else {
+ return self.option(
+ std.builtin.Mode,
+ "optimize",
+ "prioritize performance, safety, or binary size (-O flag)",
+ ) orelse .Debug;
+ }
+}
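+
+// Illustrative use from a build script:
+//
+//     const optimize = b.standardOptimizeOption(.{});
+//
+// exposes `-Doptimize=Debug|ReleaseSafe|ReleaseFast|ReleaseSmall`, while passing
+// `.preferred_optimize_mode = .ReleaseFast` exposes a boolean `-Drelease` flag instead.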
+
+pub const StandardTargetOptionsArgs = struct {
+ whitelist: ?[]const CrossTarget = null,
+
+ default_target: CrossTarget = CrossTarget{},
+};
+
+/// Exposes standard `zig build` options for choosing a target.
+pub fn standardTargetOptions(self: *Build, args: StandardTargetOptionsArgs) CrossTarget {
+ const maybe_triple = self.option(
+ []const u8,
+ "target",
+ "The CPU architecture, OS, and ABI to build for",
+ );
+ const mcpu = self.option([]const u8, "cpu", "Target CPU features to add or subtract");
+
+ if (maybe_triple == null and mcpu == null) {
+ return args.default_target;
+ }
+
+ const triple = maybe_triple orelse "native";
+
+ var diags: CrossTarget.ParseOptions.Diagnostics = .{};
+ const selected_target = CrossTarget.parse(.{
+ .arch_os_abi = triple,
+ .cpu_features = mcpu,
+ .diagnostics = &diags,
+ }) catch |err| switch (err) {
+ error.UnknownCpuModel => {
+ log.err("Unknown CPU: '{s}'\nAvailable CPUs for architecture '{s}':", .{
+ diags.cpu_name.?,
+ @tagName(diags.arch.?),
+ });
+ for (diags.arch.?.allCpuModels()) |cpu| {
+ log.err(" {s}", .{cpu.name});
+ }
+ self.markInvalidUserInput();
+ return args.default_target;
+ },
+ error.UnknownCpuFeature => {
+ log.err(
+ \\Unknown CPU feature: '{s}'
+ \\Available CPU features for architecture '{s}':
+ \\
+ , .{
+ diags.unknown_feature_name.?,
+ @tagName(diags.arch.?),
+ });
+ for (diags.arch.?.allFeaturesList()) |feature| {
+ log.err(" {s}: {s}", .{ feature.name, feature.description });
+ }
+ self.markInvalidUserInput();
+ return args.default_target;
+ },
+ error.UnknownOperatingSystem => {
+ log.err(
+ \\Unknown OS: '{s}'
+ \\Available operating systems:
+ \\
+ , .{diags.os_name.?});
+ inline for (std.meta.fields(std.Target.Os.Tag)) |field| {
+ log.err(" {s}", .{field.name});
+ }
+ self.markInvalidUserInput();
+ return args.default_target;
+ },
+ else => |e| {
+ log.err("Unable to parse target '{s}': {s}\n", .{ triple, @errorName(e) });
+ self.markInvalidUserInput();
+ return args.default_target;
+ },
+ };
+
+ const selected_canonicalized_triple = selected_target.zigTriple(self.allocator) catch @panic("OOM");
+
+ if (args.whitelist) |list| whitelist_check: {
+        // Make sure the selected target matches one of the whitelisted targets.
+ var mismatch_triple = true;
+ var mismatch_cpu_features = true;
+ var whitelist_item = CrossTarget{};
+ for (list) |t| {
+ mismatch_cpu_features = true;
+ mismatch_triple = true;
+
+ const t_triple = t.zigTriple(self.allocator) catch @panic("OOM");
+ if (mem.eql(u8, t_triple, selected_canonicalized_triple)) {
+ mismatch_triple = false;
+ whitelist_item = t;
+ if (t.getCpuFeatures().isSuperSetOf(selected_target.getCpuFeatures())) {
+ mismatch_cpu_features = false;
+ break :whitelist_check;
+ } else {
+ break;
+ }
+ }
+ }
+ if (mismatch_triple) {
+ log.err("Chosen target '{s}' does not match one of the supported targets:", .{
+ selected_canonicalized_triple,
+ });
+ for (list) |t| {
+ const t_triple = t.zigTriple(self.allocator) catch @panic("OOM");
+ log.err(" {s}", .{t_triple});
+ }
+ } else {
+ assert(mismatch_cpu_features);
+ const whitelist_cpu = whitelist_item.getCpu();
+ const selected_cpu = selected_target.getCpu();
+ log.err("Chosen CPU model '{s}' does not match one of the supported targets:", .{
+ selected_cpu.model.name,
+ });
+            log.err("  Supported feature set: ", .{});
+ const all_features = whitelist_cpu.arch.allFeaturesList();
+ var populated_cpu_features = whitelist_cpu.model.features;
+ populated_cpu_features.populateDependencies(all_features);
+ for (all_features) |feature, i_usize| {
+ const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+ const in_cpu_set = populated_cpu_features.isEnabled(i);
+ if (in_cpu_set) {
+ log.err("{s} ", .{feature.name});
+ }
+ }
+ log.err(" Remove: ", .{});
+ for (all_features) |feature, i_usize| {
+ const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+ const in_cpu_set = populated_cpu_features.isEnabled(i);
+ const in_actual_set = selected_cpu.features.isEnabled(i);
+ if (in_actual_set and !in_cpu_set) {
+ log.err("{s} ", .{feature.name});
+ }
+ }
+ }
+ self.markInvalidUserInput();
+ return args.default_target;
+ }
+
+ return selected_target;
+}
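+
+// Illustrative use from a build script:
+//
+//     const target = b.standardTargetOptions(.{});
+//
+// lets users pass e.g. `-Dtarget=x86_64-linux-gnu -Dcpu=baseline`; unknown architectures,
+// operating systems, or CPU features are reported and the default target is returned.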
+
+pub fn addUserInputOption(self: *Build, name_raw: []const u8, value_raw: []const u8) !bool {
+ const name = self.dupe(name_raw);
+ const value = self.dupe(value_raw);
+ const gop = try self.user_input_options.getOrPut(name);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = UserInputOption{
+ .name = name,
+ .value = .{ .scalar = value },
+ .used = false,
+ };
+ return false;
+ }
+
+ // option already exists
+ switch (gop.value_ptr.value) {
+ .scalar => |s| {
+ // turn it into a list
+ var list = ArrayList([]const u8).init(self.allocator);
+ try list.append(s);
+ try list.append(value);
+ try self.user_input_options.put(name, .{
+ .name = name,
+ .value = .{ .list = list },
+ .used = false,
+ });
+ },
+ .list => |*list| {
+ // append to the list
+ try list.append(value);
+ try self.user_input_options.put(name, .{
+ .name = name,
+ .value = .{ .list = list.* },
+ .used = false,
+ });
+ },
+ .flag => {
+ log.warn("Option '-D{s}={s}' conflicts with flag '-D{s}'.", .{ name, value, name });
+ return true;
+ },
+ .map => |*map| {
+ _ = map;
+            log.warn("TODO maps as command line arguments are not implemented yet.", .{});
+ return true;
+ },
+ }
+ return false;
+}
+
+pub fn addUserInputFlag(self: *Build, name_raw: []const u8) !bool {
+ const name = self.dupe(name_raw);
+ const gop = try self.user_input_options.getOrPut(name);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .name = name,
+ .value = .{ .flag = {} },
+ .used = false,
+ };
+ return false;
+ }
+
+ // option already exists
+ switch (gop.value_ptr.value) {
+ .scalar => |s| {
+ log.err("Flag '-D{s}' conflicts with option '-D{s}={s}'.", .{ name, name, s });
+ return true;
+ },
+ .list, .map => {
+ log.err("Flag '-D{s}' conflicts with multiple options of the same name.", .{name});
+ return true;
+ },
+ .flag => {},
+ }
+ return false;
+}
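+
+// `addUserInputOption` and `addUserInputFlag` are how `-D` arguments are recorded before
+// `option` is consulted: `-Dfoo=bar` typically becomes addUserInputOption("foo", "bar"),
+// while a bare `-Dfoo` becomes addUserInputFlag("foo"). Repeating `-Dfoo=a -Dfoo=b`
+// promotes the stored value to a list.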
+
+fn typeToEnum(comptime T: type) TypeId {
+ return switch (@typeInfo(T)) {
+ .Int => .int,
+ .Float => .float,
+ .Bool => .bool,
+ .Enum => .@"enum",
+ else => switch (T) {
+ []const u8 => .string,
+ []const []const u8 => .list,
+ else => @compileError("Unsupported type: " ++ @typeName(T)),
+ },
+ };
+}
+
+fn markInvalidUserInput(self: *Build) void {
+ self.invalid_user_input = true;
+}
+
+pub fn validateUserInputDidItFail(self: *Build) bool {
+ // make sure all args are used
+ var it = self.user_input_options.iterator();
+ while (it.next()) |entry| {
+ if (!entry.value_ptr.used) {
+ log.err("Invalid option: -D{s}", .{entry.key_ptr.*});
+ self.markInvalidUserInput();
+ }
+ }
+
+ return self.invalid_user_input;
+}
+
+pub fn spawnChild(self: *Build, argv: []const []const u8) !void {
+ return self.spawnChildEnvMap(null, self.env_map, argv);
+}
+
+fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
+ if (cwd) |yes_cwd| std.debug.print("cd {s} && ", .{yes_cwd});
+ for (argv) |arg| {
+ std.debug.print("{s} ", .{arg});
+ }
+ std.debug.print("\n", .{});
+}
+
+pub fn spawnChildEnvMap(self: *Build, cwd: ?[]const u8, env_map: *const EnvMap, argv: []const []const u8) !void {
+ if (self.verbose) {
+ printCmd(cwd, argv);
+ }
+
+ if (!std.process.can_spawn)
+ return error.ExecNotSupported;
+
+ var child = std.ChildProcess.init(argv, self.allocator);
+ child.cwd = cwd;
+ child.env_map = env_map;
+
+ const term = child.spawnAndWait() catch |err| {
+ log.err("Unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
+ return err;
+ };
+
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ log.err("The following command exited with error code {}:", .{code});
+ printCmd(cwd, argv);
+ return error.UncleanExit;
+ }
+ },
+ else => {
+ log.err("The following command terminated unexpectedly:", .{});
+ printCmd(cwd, argv);
+
+ return error.UncleanExit;
+ },
+ }
+}
+
+pub fn makePath(self: *Build, path: []const u8) !void {
+ fs.cwd().makePath(self.pathFromRoot(path)) catch |err| {
+ log.err("Unable to create path {s}: {s}", .{ path, @errorName(err) });
+ return err;
+ };
+}
+
+pub fn installArtifact(self: *Build, artifact: *CompileStep) void {
+ self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step);
+}
+
+pub fn addInstallArtifact(self: *Build, artifact: *CompileStep) *InstallArtifactStep {
+ return InstallArtifactStep.create(self, artifact);
+}
+
+/// `dest_rel_path` is relative to the prefix path.
+pub fn installFile(self: *Build, src_path: []const u8, dest_rel_path: []const u8) void {
+ self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .prefix, dest_rel_path).step);
+}
+
+pub fn installDirectory(self: *Build, options: InstallDirectoryOptions) void {
+ self.getInstallStep().dependOn(&self.addInstallDirectory(options).step);
+}
+
+/// `dest_rel_path` is relative to the bin path.
+pub fn installBinFile(self: *Build, src_path: []const u8, dest_rel_path: []const u8) void {
+ self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .bin, dest_rel_path).step);
+}
+
+/// `dest_rel_path` is relative to the lib path.
+pub fn installLibFile(self: *Build, src_path: []const u8, dest_rel_path: []const u8) void {
+ self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .lib, dest_rel_path).step);
+}
+
+/// Output format (BIN vs Intel HEX) is determined by the filename.
+pub fn installRaw(self: *Build, artifact: *CompileStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
+ const raw = self.addInstallRaw(artifact, dest_filename, options);
+ self.getInstallStep().dependOn(&raw.step);
+ return raw;
+}
+
+/// `dest_rel_path` is relative to the install prefix path.
+pub fn addInstallFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *InstallFileStep {
+ return self.addInstallFileWithDir(source.dupe(self), .prefix, dest_rel_path);
+}
+
+/// `dest_rel_path` is relative to the bin path.
+pub fn addInstallBinFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *InstallFileStep {
+ return self.addInstallFileWithDir(source.dupe(self), .bin, dest_rel_path);
+}
+
+/// `dest_rel_path` is relative to the lib path.
+pub fn addInstallLibFile(self: *Build, source: FileSource, dest_rel_path: []const u8) *InstallFileStep {
+ return self.addInstallFileWithDir(source.dupe(self), .lib, dest_rel_path);
+}
+
+pub fn addInstallHeaderFile(b: *Build, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep {
+ return b.addInstallFileWithDir(.{ .path = src_path }, .header, dest_rel_path);
+}
+
+pub fn addInstallRaw(self: *Build, artifact: *CompileStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
+ return InstallRawStep.create(self, artifact, dest_filename, options);
+}
+
+pub fn addInstallFileWithDir(
+ self: *Build,
+ source: FileSource,
+ install_dir: InstallDir,
+ dest_rel_path: []const u8,
+) *InstallFileStep {
+ if (dest_rel_path.len == 0) {
+ panic("dest_rel_path must be non-empty", .{});
+ }
+ const install_step = self.allocator.create(InstallFileStep) catch @panic("OOM");
+ install_step.* = InstallFileStep.init(self, source.dupe(self), install_dir, dest_rel_path);
+ return install_step;
+}
+
+pub fn addInstallDirectory(self: *Build, options: InstallDirectoryOptions) *InstallDirStep {
+ const install_step = self.allocator.create(InstallDirStep) catch @panic("OOM");
+ install_step.* = InstallDirStep.init(self, options);
+ return install_step;
+}
+
+pub fn pushInstalledFile(self: *Build, dir: InstallDir, dest_rel_path: []const u8) void {
+ const file = InstalledFile{
+ .dir = dir,
+ .path = dest_rel_path,
+ };
+ self.installed_files.append(file.dupe(self)) catch @panic("OOM");
+}
+
+pub fn updateFile(self: *Build, source_path: []const u8, dest_path: []const u8) !void {
+ if (self.verbose) {
+ log.info("cp {s} {s} ", .{ source_path, dest_path });
+ }
+ const cwd = fs.cwd();
+ const prev_status = try fs.Dir.updateFile(cwd, source_path, cwd, dest_path, .{});
+ if (self.verbose) switch (prev_status) {
+ .stale => log.info("# installed", .{}),
+ .fresh => log.info("# up-to-date", .{}),
+ };
+}
+
+pub fn truncateFile(self: *Build, dest_path: []const u8) !void {
+ if (self.verbose) {
+ log.info("truncate {s}", .{dest_path});
+ }
+ const cwd = fs.cwd();
+ var src_file = cwd.createFile(dest_path, .{}) catch |err| switch (err) {
+ error.FileNotFound => blk: {
+ if (fs.path.dirname(dest_path)) |dirname| {
+ try cwd.makePath(dirname);
+ }
+ break :blk try cwd.createFile(dest_path, .{});
+ },
+ else => |e| return e,
+ };
+ src_file.close();
+}
+
+pub fn pathFromRoot(self: *Build, rel_path: []const u8) []u8 {
+ return fs.path.resolve(self.allocator, &[_][]const u8{ self.build_root, rel_path }) catch @panic("OOM");
+}
+
+pub fn pathJoin(self: *Build, paths: []const []const u8) []u8 {
+ return fs.path.join(self.allocator, paths) catch @panic("OOM");
+}
+
+pub fn fmt(self: *Build, comptime format: []const u8, args: anytype) []u8 {
+ return fmt_lib.allocPrint(self.allocator, format, args) catch @panic("OOM");
+}
+
+pub fn findProgram(self: *Build, names: []const []const u8, paths: []const []const u8) ![]const u8 {
+ // TODO report error for ambiguous situations
+ const exe_extension = @as(CrossTarget, .{}).exeFileExt();
+ for (self.search_prefixes.items) |search_prefix| {
+ for (names) |name| {
+ if (fs.path.isAbsolute(name)) {
+ return name;
+ }
+ const full_path = self.pathJoin(&.{
+ search_prefix,
+ "bin",
+ self.fmt("{s}{s}", .{ name, exe_extension }),
+ });
+ return fs.realpathAlloc(self.allocator, full_path) catch continue;
+ }
+ }
+ if (self.env_map.get("PATH")) |PATH| {
+ for (names) |name| {
+ if (fs.path.isAbsolute(name)) {
+ return name;
+ }
+ var it = mem.tokenize(u8, PATH, &[_]u8{fs.path.delimiter});
+ while (it.next()) |path| {
+ const full_path = self.pathJoin(&.{
+ path,
+ self.fmt("{s}{s}", .{ name, exe_extension }),
+ });
+ return fs.realpathAlloc(self.allocator, full_path) catch continue;
+ }
+ }
+ }
+ for (names) |name| {
+ if (fs.path.isAbsolute(name)) {
+ return name;
+ }
+ for (paths) |path| {
+ const full_path = self.pathJoin(&.{
+ path,
+ self.fmt("{s}{s}", .{ name, exe_extension }),
+ });
+ return fs.realpathAlloc(self.allocator, full_path) catch continue;
+ }
+ }
+ return error.FileNotFound;
+}
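+
+// Illustrative use (the program names are placeholders):
+//
+//     const python = b.findProgram(&.{ "python3", "python" }, &.{}) catch null;
+//
+// The search order is: registered search prefixes, then PATH, then the extra `paths` argument.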
+
+pub fn execAllowFail(
+ self: *Build,
+ argv: []const []const u8,
+ out_code: *u8,
+ stderr_behavior: std.ChildProcess.StdIo,
+) ExecError![]u8 {
+ assert(argv.len != 0);
+
+ if (!std.process.can_spawn)
+ return error.ExecNotSupported;
+
+ const max_output_size = 400 * 1024;
+ var child = std.ChildProcess.init(argv, self.allocator);
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Pipe;
+ child.stderr_behavior = stderr_behavior;
+ child.env_map = self.env_map;
+
+ try child.spawn();
+
+ const stdout = child.stdout.?.reader().readAllAlloc(self.allocator, max_output_size) catch {
+ return error.ReadFailure;
+ };
+ errdefer self.allocator.free(stdout);
+
+ const term = try child.wait();
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ out_code.* = @truncate(u8, code);
+ return error.ExitCodeFailure;
+ }
+ return stdout;
+ },
+ .Signal, .Stopped, .Unknown => |code| {
+ out_code.* = @truncate(u8, code);
+ return error.ProcessTerminated;
+ },
+ }
+}
+
+pub fn execFromStep(self: *Build, argv: []const []const u8, src_step: ?*Step) ![]u8 {
+ assert(argv.len != 0);
+
+ if (self.verbose) {
+ printCmd(null, argv);
+ }
+
+ if (!std.process.can_spawn) {
+ if (src_step) |s| log.err("{s}...", .{s.name});
+ log.err("Unable to spawn the following command: cannot spawn child process", .{});
+ printCmd(null, argv);
+ std.os.abort();
+ }
+
+ var code: u8 = undefined;
+ return self.execAllowFail(argv, &code, .Inherit) catch |err| switch (err) {
+ error.ExecNotSupported => {
+ if (src_step) |s| log.err("{s}...", .{s.name});
+ log.err("Unable to spawn the following command: cannot spawn child process", .{});
+ printCmd(null, argv);
+ std.os.abort();
+ },
+ error.FileNotFound => {
+ if (src_step) |s| log.err("{s}...", .{s.name});
+ log.err("Unable to spawn the following command: file not found", .{});
+ printCmd(null, argv);
+ std.os.exit(@truncate(u8, code));
+ },
+ error.ExitCodeFailure => {
+ if (src_step) |s| log.err("{s}...", .{s.name});
+ if (self.prominent_compile_errors) {
+ log.err("The step exited with error code {d}", .{code});
+ } else {
+ log.err("The following command exited with error code {d}:", .{code});
+ printCmd(null, argv);
+ }
+
+ std.os.exit(@truncate(u8, code));
+ },
+ error.ProcessTerminated => {
+ if (src_step) |s| log.err("{s}...", .{s.name});
+ log.err("The following command terminated unexpectedly:", .{});
+ printCmd(null, argv);
+ std.os.exit(@truncate(u8, code));
+ },
+ else => |e| return e,
+ };
+}
+
+pub fn exec(self: *Build, argv: []const []const u8) ![]u8 {
+ return self.execFromStep(argv, null);
+}
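+
+// Illustrative use (the command is a placeholder):
+//
+//     const sha = try b.exec(&.{ "git", "rev-parse", "HEAD" });
+//
+// `exec` prints the failing command and terminates the build on error; use `execAllowFail`
+// to inspect the exit code instead.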
+
+pub fn addSearchPrefix(self: *Build, search_prefix: []const u8) void {
+ self.search_prefixes.append(self.dupePath(search_prefix)) catch @panic("OOM");
+}
+
+pub fn getInstallPath(self: *Build, dir: InstallDir, dest_rel_path: []const u8) []const u8 {
+ assert(!fs.path.isAbsolute(dest_rel_path)); // Install paths must be relative to the prefix
+ const base_dir = switch (dir) {
+ .prefix => self.install_path,
+ .bin => self.exe_dir,
+ .lib => self.lib_dir,
+ .header => self.h_dir,
+ .custom => |path| self.pathJoin(&.{ self.install_path, path }),
+ };
+ return fs.path.resolve(
+ self.allocator,
+ &[_][]const u8{ base_dir, dest_rel_path },
+ ) catch @panic("OOM");
+}
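+
+// For example, `b.getInstallPath(.{ .custom = "share/doc" }, "manual.html")` resolves to
+// "<install prefix>/share/doc/manual.html", while `.bin` and `.lib` resolve into the
+// executable and library directories respectively.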
+
+pub const Dependency = struct {
+ builder: *Build,
+
+ pub fn artifact(d: *Dependency, name: []const u8) *CompileStep {
+ var found: ?*CompileStep = null;
+ for (d.builder.install_tls.step.dependencies.items) |dep_step| {
+ const inst = dep_step.cast(InstallArtifactStep) orelse continue;
+ if (mem.eql(u8, inst.artifact.name, name)) {
+ if (found != null) panic("artifact name '{s}' is ambiguous", .{name});
+ found = inst.artifact;
+ }
+ }
+ return found orelse {
+ for (d.builder.install_tls.step.dependencies.items) |dep_step| {
+ const inst = dep_step.cast(InstallArtifactStep) orelse continue;
+ log.info("available artifact: '{s}'", .{inst.artifact.name});
+ }
+ panic("unable to find artifact '{s}'", .{name});
+ };
+ }
+
+ pub fn module(d: *Dependency, name: []const u8) *Module {
+ return d.builder.modules.get(name) orelse {
+ panic("unable to find module '{s}'", .{name});
+ };
+ }
+};
+
+pub fn dependency(b: *Build, name: []const u8, args: anytype) *Dependency {
+ const build_runner = @import("root");
+ const deps = build_runner.dependencies;
+
+ inline for (@typeInfo(deps.imports).Struct.decls) |decl| {
+ if (mem.startsWith(u8, decl.name, b.dep_prefix) and
+ mem.endsWith(u8, decl.name, name) and
+ decl.name.len == b.dep_prefix.len + name.len)
+ {
+ const build_zig = @field(deps.imports, decl.name);
+ const build_root = @field(deps.build_root, decl.name);
+ return dependencyInner(b, name, build_root, build_zig, args);
+ }
+ }
+
+ const full_path = b.pathFromRoot("build.zig.zon");
+ std.debug.print("no dependency named '{s}' in '{s}'. All packages used in build.zig must be declared in this file.\n", .{ name, full_path });
+ std.process.exit(1);
+}
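+
+// Illustrative use from a build script ("foo" stands for a package declared in
+// build.zig.zon, and `exe` for a CompileStep):
+//
+//     const foo_dep = b.dependency("foo", .{ .target = target, .optimize = optimize });
+//     exe.linkLibrary(foo_dep.artifact("foo"));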
+
+fn dependencyInner(
+ b: *Build,
+ name: []const u8,
+ build_root: []const u8,
+ comptime build_zig: type,
+ args: anytype,
+) *Dependency {
+ const sub_builder = b.createChild(name, build_root, args) catch @panic("unhandled error");
+ sub_builder.runBuild(build_zig) catch @panic("unhandled error");
+
+ if (sub_builder.validateUserInputDidItFail()) {
+ std.debug.dumpCurrentStackTrace(@returnAddress());
+ }
+
+ const dep = b.allocator.create(Dependency) catch @panic("OOM");
+ dep.* = .{ .builder = sub_builder };
+ return dep;
+}
+
+pub fn runBuild(b: *Build, build_zig: anytype) anyerror!void {
+ switch (@typeInfo(@typeInfo(@TypeOf(build_zig.build)).Fn.return_type.?)) {
+ .Void => build_zig.build(b),
+ .ErrorUnion => try build_zig.build(b),
+ else => @compileError("expected return type of build to be 'void' or '!void'"),
+ }
+}
+
+test "builder.findProgram compiles" {
+ if (builtin.os.tag == .wasi) return error.SkipZigTest;
+
+ var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+ defer arena.deinit();
+
+ const host = try NativeTargetInfo.detect(.{});
+
+ const builder = try Build.create(
+ arena.allocator(),
+ "zig",
+ "zig-cache",
+ "zig-cache",
+ "zig-cache",
+ host,
+ );
+ defer builder.destroy();
+ _ = builder.findProgram(&[_][]const u8{}, &[_][]const u8{}) catch null;
+}
+
+pub const Module = struct {
+ builder: *Build,
+ /// This could either be a generated file, in which case the module
+ /// contains exactly one file, or it could be a path to the root source
+/// file of a directory of files which constitute the module.
+ source_file: FileSource,
+ dependencies: std.StringArrayHashMap(*Module),
+};
+
+/// A file that is generated by a build step.
+/// This struct is an interface that is meant to be used with `@fieldParentPtr` to implement the actual path logic.
+pub const GeneratedFile = struct {
+ /// The step that generates the file
+ step: *Step,
+
+ /// The path to the generated file. Must be either absolute or relative to the build root.
+ /// This value must be set in the `fn make()` of the `step` and must not be `null` afterwards.
+ path: ?[]const u8 = null,
+
+ pub fn getPath(self: GeneratedFile) []const u8 {
+        return self.path orelse std.debug.panic(
+            "getPath() was called on a GeneratedFile that wasn't built yet. Is there a missing Step dependency on step '{s}'?",
+            .{self.step.name},
+        );
+ }
+};
+
+/// A file source is a reference to an existing or future file.
+pub const FileSource = union(enum) {
+ /// A plain file path, relative to build root or absolute.
+ path: []const u8,
+
+    /// A file that is generated by another step (see `GeneratedFile`). Such files
+    /// are usually not available until the step that produces them has run.
+ generated: *const GeneratedFile,
+
+    /// Returns a new file source whose path is guaranteed to be relative to the build root.
+ /// This should be preferred over setting `.path` directly as it documents that the files are in the project directory.
+ pub fn relative(path: []const u8) FileSource {
+ std.debug.assert(!std.fs.path.isAbsolute(path));
+ return FileSource{ .path = path };
+ }
+
+ /// Returns a string that can be shown to represent the file source.
+ /// Either returns the path or `"generated"`.
+ pub fn getDisplayName(self: FileSource) []const u8 {
+ return switch (self) {
+ .path => self.path,
+ .generated => "generated",
+ };
+ }
+
+ /// Adds dependencies this file source implies to the given step.
+ pub fn addStepDependencies(self: FileSource, other_step: *Step) void {
+ switch (self) {
+ .path => {},
+ .generated => |gen| other_step.dependOn(gen.step),
+ }
+ }
+
+    /// Should only be called during make(). Returns a path that is either absolute or relative to the build root.
+ pub fn getPath(self: FileSource, builder: *Build) []const u8 {
+ const path = switch (self) {
+ .path => |p| builder.pathFromRoot(p),
+ .generated => |gen| gen.getPath(),
+ };
+ return path;
+ }
+
+ /// Duplicates the file source for a given builder.
+ pub fn dupe(self: FileSource, b: *Build) FileSource {
+ return switch (self) {
+ .path => |p| .{ .path = b.dupePath(p) },
+ .generated => |gen| .{ .generated = gen },
+ };
+ }
+};
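+
+// Illustrative `FileSource` values:
+//
+//     .{ .path = "src/main.zig" }                   // a plain path, relative to the build root
+//     .{ .generated = &config_header.output_file }  // produced by another step, e.g. a ConfigHeaderStep
+//
+// The generated variant implies a step dependency; see `addStepDependencies`.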
+
+/// Allocates a new string for assigning a value to a named macro.
+/// If the value is omitted, it is set to 1.
+/// `name` and `value` need not live longer than the function call.
+pub fn constructCMacro(allocator: Allocator, name: []const u8, value: ?[]const u8) []const u8 {
+ var macro = allocator.alloc(
+ u8,
+ name.len + if (value) |value_slice| value_slice.len + 1 else 0,
+ ) catch |err| if (err == error.OutOfMemory) @panic("Out of memory") else unreachable;
+ mem.copy(u8, macro, name);
+ if (value) |value_slice| {
+ macro[name.len] = '=';
+ mem.copy(u8, macro[name.len + 1 ..], value_slice);
+ }
+ return macro;
+}
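+
+// For example, constructCMacro(allocator, "FOO", "1") yields "FOO=1", and
+// constructCMacro(allocator, "NDEBUG", null) yields "NDEBUG".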
+
+pub const VcpkgRoot = union(VcpkgRootStatus) {
+ unattempted: void,
+ not_found: void,
+ found: []const u8,
+};
+
+pub const VcpkgRootStatus = enum {
+ unattempted,
+ not_found,
+ found,
+};
+
+pub const InstallDir = union(enum) {
+ prefix: void,
+ lib: void,
+ bin: void,
+ header: void,
+ /// A path relative to the prefix
+ custom: []const u8,
+
+ /// Duplicates the install directory including the path if set to custom.
+ pub fn dupe(self: InstallDir, builder: *Build) InstallDir {
+ if (self == .custom) {
+            // Written with this temporary to avoid result location semantics (RLS) problems
+ const duped_path = builder.dupe(self.custom);
+ return .{ .custom = duped_path };
+ } else {
+ return self;
+ }
+ }
+};
+
+pub const InstalledFile = struct {
+ dir: InstallDir,
+ path: []const u8,
+
+ /// Duplicates the installed file path and directory.
+ pub fn dupe(self: InstalledFile, builder: *Build) InstalledFile {
+ return .{
+ .dir = self.dir.dupe(builder),
+ .path = builder.dupe(self.path),
+ };
+ }
+};
+
+pub fn serializeCpu(allocator: Allocator, cpu: std.Target.Cpu) ![]const u8 {
+    // TODO this logic can disappear if cpu model + features become part of the target triple
+ const all_features = cpu.arch.allFeaturesList();
+ var populated_cpu_features = cpu.model.features;
+ populated_cpu_features.populateDependencies(all_features);
+
+ if (populated_cpu_features.eql(cpu.features)) {
+ // The CPU name alone is sufficient.
+ return cpu.model.name;
+ } else {
+ var mcpu_buffer = ArrayList(u8).init(allocator);
+ try mcpu_buffer.appendSlice(cpu.model.name);
+
+ for (all_features) |feature, i_usize| {
+ const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
+ const in_cpu_set = populated_cpu_features.isEnabled(i);
+ const in_actual_set = cpu.features.isEnabled(i);
+ if (in_cpu_set and !in_actual_set) {
+ try mcpu_buffer.writer().print("-{s}", .{feature.name});
+ } else if (!in_cpu_set and in_actual_set) {
+ try mcpu_buffer.writer().print("+{s}", .{feature.name});
+ }
+ }
+
+ return try mcpu_buffer.toOwnedSlice();
+ }
+}
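+
+// For example, a CPU whose feature set matches its model exactly serializes to just the
+// model name (e.g. "baseline"), while deviations are appended as "+feature"/"-feature",
+// producing an `-mcpu`-style string such as "baseline+sse4_1-avx" (illustrative).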
+
+test {
+ _ = CheckFileStep;
+ _ = CheckObjectStep;
+ _ = EmulatableRunStep;
+ _ = FmtStep;
+ _ = InstallArtifactStep;
+ _ = InstallDirStep;
+ _ = InstallFileStep;
+ _ = InstallRawStep;
+ _ = CompileStep;
+ _ = LogStep;
+ _ = OptionsStep;
+ _ = RemoveDirStep;
+ _ = RunStep;
+ _ = TranslateCStep;
+ _ = WriteFileStep;
+}
diff --git a/lib/std/build/CheckFileStep.zig b/lib/std/Build/CheckFileStep.zig
similarity index 83%
rename from lib/std/build/CheckFileStep.zig
rename to lib/std/Build/CheckFileStep.zig
index 2c06ab9279..b08a797e84 100644
--- a/lib/std/build/CheckFileStep.zig
+++ b/lib/std/Build/CheckFileStep.zig
@@ -1,7 +1,5 @@
const std = @import("../std.zig");
-const build = std.build;
-const Step = build.Step;
-const Builder = build.Builder;
+const Step = std.Build.Step;
const fs = std.fs;
const mem = std.mem;
@@ -10,17 +8,17 @@ const CheckFileStep = @This();
pub const base_id = .check_file;
step: Step,
-builder: *Builder,
+builder: *std.Build,
expected_matches: []const []const u8,
-source: build.FileSource,
+source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024,
pub fn create(
- builder: *Builder,
- source: build.FileSource,
+ builder: *std.Build,
+ source: std.Build.FileSource,
expected_matches: []const []const u8,
) *CheckFileStep {
- const self = builder.allocator.create(CheckFileStep) catch unreachable;
+ const self = builder.allocator.create(CheckFileStep) catch @panic("OOM");
self.* = CheckFileStep{
.builder = builder,
.step = Step.init(.check_file, "CheckFile", builder.allocator, make),
diff --git a/lib/std/build/CheckObjectStep.zig b/lib/std/Build/CheckObjectStep.zig
similarity index 98%
rename from lib/std/build/CheckObjectStep.zig
rename to lib/std/Build/CheckObjectStep.zig
index 4ef350b418..5cb096581f 100644
--- a/lib/std/build/CheckObjectStep.zig
+++ b/lib/std/Build/CheckObjectStep.zig
@@ -1,6 +1,5 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
-const build = std.build;
const fs = std.fs;
const macho = std.macho;
const math = std.math;
@@ -10,23 +9,22 @@ const testing = std.testing;
const CheckObjectStep = @This();
const Allocator = mem.Allocator;
-const Builder = build.Builder;
-const Step = build.Step;
-const EmulatableRunStep = build.EmulatableRunStep;
+const Step = std.Build.Step;
+const EmulatableRunStep = std.Build.EmulatableRunStep;
pub const base_id = .check_object;
step: Step,
-builder: *Builder,
-source: build.FileSource,
+builder: *std.Build,
+source: std.Build.FileSource,
max_bytes: usize = 20 * 1024 * 1024,
checks: std.ArrayList(Check),
dump_symtab: bool = false,
obj_format: std.Target.ObjectFormat,
-pub fn create(builder: *Builder, source: build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
+pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
const gpa = builder.allocator;
- const self = gpa.create(CheckObjectStep) catch unreachable;
+ const self = gpa.create(CheckObjectStep) catch @panic("OOM");
self.* = .{
.builder = builder,
.step = Step.init(.check_file, "CheckObject", gpa, make),
@@ -44,7 +42,7 @@ pub fn runAndCompare(self: *CheckObjectStep) *EmulatableRunStep {
const dependencies_len = self.step.dependencies.items.len;
assert(dependencies_len > 0);
const exe_step = self.step.dependencies.items[dependencies_len - 1];
- const exe = exe_step.cast(std.build.LibExeObjStep).?;
+ const exe = exe_step.cast(std.Build.CompileStep).?;
const emulatable_step = EmulatableRunStep.create(self.builder, "EmulatableRun", exe);
emulatable_step.step.dependOn(&self.step);
return emulatable_step;
@@ -216,10 +214,10 @@ const ComputeCompareExpected = struct {
};
const Check = struct {
- builder: *Builder,
+ builder: *std.Build,
actions: std.ArrayList(Action),
- fn create(b: *Builder) Check {
+ fn create(b: *std.Build) Check {
return .{
.builder = b,
.actions = std.ArrayList(Action).init(b.allocator),
@@ -230,14 +228,14 @@ const Check = struct {
self.actions.append(.{
.tag = .match,
.phrase = self.builder.dupe(phrase),
- }) catch unreachable;
+ }) catch @panic("OOM");
}
fn notPresent(self: *Check, phrase: []const u8) void {
self.actions.append(.{
.tag = .not_present,
.phrase = self.builder.dupe(phrase),
- }) catch unreachable;
+ }) catch @panic("OOM");
}
fn computeCmp(self: *Check, phrase: []const u8, expected: ComputeCompareExpected) void {
@@ -245,7 +243,7 @@ const Check = struct {
.tag = .compute_cmp,
.phrase = self.builder.dupe(phrase),
.expected = expected,
- }) catch unreachable;
+ }) catch @panic("OOM");
}
};
@@ -253,7 +251,7 @@ const Check = struct {
pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void {
var new_check = Check.create(self.builder);
new_check.match(phrase);
- self.checks.append(new_check) catch unreachable;
+ self.checks.append(new_check) catch @panic("OOM");
}
/// Adds another searched phrase to the latest created Check with `CheckObjectStep.checkStart(...)`.
@@ -295,7 +293,7 @@ pub fn checkComputeCompare(
) void {
var new_check = Check.create(self.builder);
new_check.computeCmp(program, expected);
- self.checks.append(new_check) catch unreachable;
+ self.checks.append(new_check) catch @panic("OOM");
}
fn make(step: *Step) !void {
diff --git a/lib/std/build/LibExeObjStep.zig b/lib/std/Build/CompileStep.zig
similarity index 81%
rename from lib/std/build/LibExeObjStep.zig
rename to lib/std/Build/CompileStep.zig
index cb37b24885..e0d90add3c 100644
--- a/lib/std/build/LibExeObjStep.zig
+++ b/lib/std/Build/CompileStep.zig
@@ -9,41 +9,39 @@ const ArrayList = std.ArrayList;
const StringHashMap = std.StringHashMap;
const Sha256 = std.crypto.hash.sha2.Sha256;
const Allocator = mem.Allocator;
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
+const Step = std.Build.Step;
const CrossTarget = std.zig.CrossTarget;
const NativeTargetInfo = std.zig.system.NativeTargetInfo;
-const FileSource = std.build.FileSource;
-const PkgConfigPkg = Builder.PkgConfigPkg;
-const PkgConfigError = Builder.PkgConfigError;
-const ExecError = Builder.ExecError;
-const Pkg = std.build.Pkg;
-const VcpkgRoot = std.build.VcpkgRoot;
-const InstallDir = std.build.InstallDir;
-const InstallArtifactStep = std.build.InstallArtifactStep;
-const GeneratedFile = std.build.GeneratedFile;
-const InstallRawStep = std.build.InstallRawStep;
-const EmulatableRunStep = std.build.EmulatableRunStep;
-const CheckObjectStep = std.build.CheckObjectStep;
-const RunStep = std.build.RunStep;
-const OptionsStep = std.build.OptionsStep;
-const ConfigHeaderStep = std.build.ConfigHeaderStep;
-const LibExeObjStep = @This();
+const FileSource = std.Build.FileSource;
+const PkgConfigPkg = std.Build.PkgConfigPkg;
+const PkgConfigError = std.Build.PkgConfigError;
+const ExecError = std.Build.ExecError;
+const Module = std.Build.Module;
+const VcpkgRoot = std.Build.VcpkgRoot;
+const InstallDir = std.Build.InstallDir;
+const InstallArtifactStep = std.Build.InstallArtifactStep;
+const GeneratedFile = std.Build.GeneratedFile;
+const InstallRawStep = std.Build.InstallRawStep;
+const EmulatableRunStep = std.Build.EmulatableRunStep;
+const CheckObjectStep = std.Build.CheckObjectStep;
+const RunStep = std.Build.RunStep;
+const OptionsStep = std.Build.OptionsStep;
+const ConfigHeaderStep = std.Build.ConfigHeaderStep;
+const CompileStep = @This();
-pub const base_id = .lib_exe_obj;
+pub const base_id: Step.Id = .compile;
step: Step,
-builder: *Builder,
+builder: *std.Build,
name: []const u8,
-target: CrossTarget = CrossTarget{},
+target: CrossTarget,
target_info: NativeTargetInfo,
+optimize: std.builtin.Mode,
linker_script: ?FileSource = null,
version_script: ?[]const u8 = null,
out_filename: []const u8,
linkage: ?Linkage = null,
version: ?std.builtin.Version,
-build_mode: std.builtin.Mode,
kind: Kind,
major_only_filename: ?[]const u8,
name_only_filename: ?[]const u8,
@@ -84,7 +82,7 @@ initial_memory: ?u64 = null,
max_memory: ?u64 = null,
shared_memory: bool = false,
global_base: ?u64 = null,
-c_std: Builder.CStd,
+c_std: std.Build.CStd,
override_lib_dir: ?[]const u8,
main_pkg_path: ?[]const u8,
exec_cmd_args: ?[]const ?[]const u8,
@@ -101,14 +99,14 @@ root_src: ?FileSource,
out_h_filename: []const u8,
out_lib_filename: []const u8,
out_pdb_filename: []const u8,
-packages: ArrayList(Pkg),
+modules: std.StringArrayHashMap(*Module),
object_src: []const u8,
link_objects: ArrayList(LinkObject),
include_dirs: ArrayList(IncludeDir),
c_macros: ArrayList([]const u8),
-installed_headers: ArrayList(*std.build.Step),
+installed_headers: ArrayList(*Step),
output_dir: ?[]const u8,
is_linking_libc: bool = false,
is_linking_libcpp: bool = false,
@@ -226,7 +224,7 @@ pub const CSourceFile = struct {
source: FileSource,
args: []const []const u8,
- pub fn dupe(self: CSourceFile, b: *Builder) CSourceFile {
+ pub fn dupe(self: CSourceFile, b: *std.Build) CSourceFile {
return .{
.source = self.source.dupe(b),
.args = b.dupeStrings(self.args),
@@ -236,7 +234,7 @@ pub const CSourceFile = struct {
pub const LinkObject = union(enum) {
static_path: FileSource,
- other_step: *LibExeObjStep,
+ other_step: *CompileStep,
system_lib: SystemLib,
assembly_file: FileSource,
c_source_file: *CSourceFile,
@@ -267,10 +265,20 @@ const FrameworkLinkInfo = struct {
pub const IncludeDir = union(enum) {
raw_path: []const u8,
raw_path_system: []const u8,
- other_step: *LibExeObjStep,
+ other_step: *CompileStep,
config_header_step: *ConfigHeaderStep,
};
+pub const Options = struct {
+ name: []const u8,
+ root_source_file: ?FileSource = null,
+ target: CrossTarget,
+ optimize: std.builtin.Mode,
+ kind: Kind,
+ linkage: ?Linkage = null,
+ version: ?std.builtin.Version = null,
+};
+
pub const Kind = enum {
exe,
lib,
@@ -279,11 +287,6 @@ pub const Kind = enum {
test_exe,
};
-pub const SharedLibKind = union(enum) {
- versioned: std.builtin.Version,
- unversioned: void,
-};
-
pub const Linkage = enum { dynamic, static };
pub const EmitOption = union(enum) {
@@ -292,7 +295,7 @@ pub const EmitOption = union(enum) {
emit: void,
emit_to: []const u8,
- fn getArg(self: @This(), b: *Builder, arg_name: []const u8) ?[]const u8 {
+ fn getArg(self: @This(), b: *std.Build, arg_name: []const u8) ?[]const u8 {
return switch (self) {
.no_emit => b.fmt("-fno-{s}", .{arg_name}),
.default => null,
@@ -302,78 +305,45 @@ pub const EmitOption = union(enum) {
}
};
-pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?FileSource, kind: SharedLibKind) *LibExeObjStep {
- return initExtraArgs(builder, name, root_src, .lib, .dynamic, switch (kind) {
- .versioned => |ver| ver,
- .unversioned => null,
- });
-}
-
-pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep {
- return initExtraArgs(builder, name, root_src, .lib, .static, null);
-}
-
-pub fn createObject(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep {
- return initExtraArgs(builder, name, root_src, .obj, null, null);
-}
-
-pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep {
- return initExtraArgs(builder, name, root_src, .exe, null, null);
-}
-
-pub fn createTest(builder: *Builder, name: []const u8, root_src: FileSource) *LibExeObjStep {
- return initExtraArgs(builder, name, root_src, .@"test", null, null);
-}
-
-pub fn createTestExe(builder: *Builder, name: []const u8, root_src: FileSource) *LibExeObjStep {
- return initExtraArgs(builder, name, root_src, .test_exe, null, null);
-}
-
-fn initExtraArgs(
- builder: *Builder,
- name_raw: []const u8,
- root_src_raw: ?FileSource,
- kind: Kind,
- linkage: ?Linkage,
- ver: ?std.builtin.Version,
-) *LibExeObjStep {
- const name = builder.dupe(name_raw);
- const root_src: ?FileSource = if (root_src_raw) |rsrc| rsrc.dupe(builder) else null;
+pub fn create(builder: *std.Build, options: Options) *CompileStep {
+ const name = builder.dupe(options.name);
+ const root_src: ?FileSource = if (options.root_source_file) |rsrc| rsrc.dupe(builder) else null;
if (mem.indexOf(u8, name, "/") != null or mem.indexOf(u8, name, "\\") != null) {
panic("invalid name: '{s}'. It looks like a file path, but it is supposed to be the library or application name.", .{name});
}
- const self = builder.allocator.create(LibExeObjStep) catch unreachable;
- self.* = LibExeObjStep{
+ const self = builder.allocator.create(CompileStep) catch @panic("OOM");
+ self.* = CompileStep{
.strip = null,
.unwind_tables = null,
.builder = builder,
.verbose_link = false,
.verbose_cc = false,
- .build_mode = std.builtin.Mode.Debug,
- .linkage = linkage,
- .kind = kind,
+ .optimize = options.optimize,
+ .target = options.target,
+ .linkage = options.linkage,
+ .kind = options.kind,
.root_src = root_src,
.name = name,
.frameworks = StringHashMap(FrameworkLinkInfo).init(builder.allocator),
.step = Step.init(base_id, name, builder.allocator, make),
- .version = ver,
+ .version = options.version,
.out_filename = undefined,
.out_h_filename = builder.fmt("{s}.h", .{name}),
.out_lib_filename = undefined,
.out_pdb_filename = builder.fmt("{s}.pdb", .{name}),
.major_only_filename = null,
.name_only_filename = null,
- .packages = ArrayList(Pkg).init(builder.allocator),
+ .modules = std.StringArrayHashMap(*Module).init(builder.allocator),
.include_dirs = ArrayList(IncludeDir).init(builder.allocator),
.link_objects = ArrayList(LinkObject).init(builder.allocator),
.c_macros = ArrayList([]const u8).init(builder.allocator),
.lib_paths = ArrayList([]const u8).init(builder.allocator),
.rpaths = ArrayList([]const u8).init(builder.allocator),
.framework_dirs = ArrayList([]const u8).init(builder.allocator),
- .installed_headers = ArrayList(*std.build.Step).init(builder.allocator),
+ .installed_headers = ArrayList(*Step).init(builder.allocator),
.object_src = undefined,
- .c_std = Builder.CStd.C99,
+ .c_std = std.Build.CStd.C99,
.override_lib_dir = null,
.main_pkg_path = null,
.exec_cmd_args = null,
@@ -394,17 +364,14 @@ fn initExtraArgs(
.output_h_path_source = GeneratedFile{ .step = &self.step },
.output_pdb_path_source = GeneratedFile{ .step = &self.step },
- .target_info = undefined, // populated in computeOutFileNames
+ .target_info = NativeTargetInfo.detect(self.target) catch @panic("unhandled error"),
};
self.computeOutFileNames();
if (root_src) |rs| rs.addStepDependencies(&self.step);
return self;
}
-fn computeOutFileNames(self: *LibExeObjStep) void {
- self.target_info = NativeTargetInfo.detect(self.target) catch
- unreachable;
-
+fn computeOutFileNames(self: *CompileStep) void {
const target = self.target_info.target;
self.out_filename = std.zig.binNameAlloc(self.builder.allocator, .{
@@ -420,7 +387,7 @@ fn computeOutFileNames(self: *LibExeObjStep) void {
.static => .Static,
}) else null,
.version = self.version,
- }) catch unreachable;
+ }) catch @panic("OOM");
if (self.kind == .lib) {
if (self.linkage != null and self.linkage.? == .static) {
@@ -457,31 +424,46 @@ fn computeOutFileNames(self: *LibExeObjStep) void {
}
}
-pub fn setTarget(self: *LibExeObjStep, target: CrossTarget) void {
- self.target = target;
- self.computeOutFileNames();
-}
-
-pub fn setOutputDir(self: *LibExeObjStep, dir: []const u8) void {
+pub fn setOutputDir(self: *CompileStep, dir: []const u8) void {
self.output_dir = self.builder.dupePath(dir);
}
-pub fn install(self: *LibExeObjStep) void {
+pub fn install(self: *CompileStep) void {
self.builder.installArtifact(self);
}
-pub fn installRaw(self: *LibExeObjStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
+pub fn installRaw(self: *CompileStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
return self.builder.installRaw(self, dest_filename, options);
}
-pub fn installHeader(a: *LibExeObjStep, src_path: []const u8, dest_rel_path: []const u8) void {
+pub fn installHeader(a: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void {
const install_file = a.builder.addInstallHeaderFile(src_path, dest_rel_path);
a.builder.getInstallStep().dependOn(&install_file.step);
- a.installed_headers.append(&install_file.step) catch unreachable;
+ a.installed_headers.append(&install_file.step) catch @panic("OOM");
+}
+
+pub const InstallConfigHeaderOptions = struct {
+ install_dir: InstallDir = .header,
+ dest_rel_path: ?[]const u8 = null,
+};
+
+pub fn installConfigHeader(
+ cs: *CompileStep,
+ config_header: *ConfigHeaderStep,
+ options: InstallConfigHeaderOptions,
+) void {
+ const dest_rel_path = options.dest_rel_path orelse config_header.include_path;
+ const install_file = cs.builder.addInstallFileWithDir(
+ .{ .generated = &config_header.output_file },
+ options.install_dir,
+ dest_rel_path,
+ );
+ cs.builder.getInstallStep().dependOn(&install_file.step);
+ cs.installed_headers.append(&install_file.step) catch @panic("OOM");
}
pub fn installHeadersDirectory(
- a: *LibExeObjStep,
+ a: *CompileStep,
src_dir_path: []const u8,
dest_rel_path: []const u8,
) void {
@@ -493,15 +475,15 @@ pub fn installHeadersDirectory(
}
pub fn installHeadersDirectoryOptions(
- a: *LibExeObjStep,
- options: std.build.InstallDirStep.Options,
+ a: *CompileStep,
+ options: std.Build.InstallDirStep.Options,
) void {
const install_dir = a.builder.addInstallDirectory(options);
a.builder.getInstallStep().dependOn(&install_dir.step);
- a.installed_headers.append(&install_dir.step) catch unreachable;
+ a.installed_headers.append(&install_dir.step) catch @panic("OOM");
}
-pub fn installLibraryHeaders(a: *LibExeObjStep, l: *LibExeObjStep) void {
+pub fn installLibraryHeaders(a: *CompileStep, l: *CompileStep) void {
assert(l.kind == .lib);
const install_step = a.builder.getInstallStep();
// Copy each element from installed_headers, modifying the builder
@@ -510,7 +492,7 @@ pub fn installLibraryHeaders(a: *LibExeObjStep, l: *LibExeObjStep) void {
const step_copy = switch (step.id) {
inline .install_file, .install_dir => |id| blk: {
const T = id.Type();
- const ptr = a.builder.allocator.create(T) catch unreachable;
+ const ptr = a.builder.allocator.create(T) catch @panic("OOM");
ptr.* = step.cast(T).?.*;
ptr.override_source_builder = ptr.builder;
ptr.builder = a.builder;
@@ -518,15 +500,15 @@ pub fn installLibraryHeaders(a: *LibExeObjStep, l: *LibExeObjStep) void {
},
else => unreachable,
};
- a.installed_headers.append(step_copy) catch unreachable;
+ a.installed_headers.append(step_copy) catch @panic("OOM");
install_step.dependOn(step_copy);
}
- a.installed_headers.appendSlice(l.installed_headers.items) catch unreachable;
+ a.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM");
}
/// Creates a `RunStep` with an executable built with `addExecutable`.
/// Add command line arguments with `addArg`.
-pub fn run(exe: *LibExeObjStep) *RunStep {
+pub fn run(exe: *CompileStep) *RunStep {
assert(exe.kind == .exe or exe.kind == .test_exe);
// It doesn't have to be native. We catch that if you actually try to run it.
@@ -550,7 +532,7 @@ pub fn run(exe: *LibExeObjStep) *RunStep {
/// Allows running foreign binaries through emulation platforms such as Qemu or Rosetta.
 /// When a binary cannot be run through emulation or the option is disabled, a warning
 /// will be printed and the binary will *NOT* be run.
-pub fn runEmulatable(exe: *LibExeObjStep) *EmulatableRunStep {
+pub fn runEmulatable(exe: *CompileStep) *EmulatableRunStep {
assert(exe.kind == .exe or exe.kind == .test_exe);
const run_step = EmulatableRunStep.create(exe.builder, exe.builder.fmt("run {s}", .{exe.step.name}), exe);
@@ -560,33 +542,33 @@ pub fn runEmulatable(exe: *LibExeObjStep) *EmulatableRunStep {
return run_step;
}
-pub fn checkObject(self: *LibExeObjStep, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
+pub fn checkObject(self: *CompileStep, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
return CheckObjectStep.create(self.builder, self.getOutputSource(), obj_format);
}
-pub fn setLinkerScriptPath(self: *LibExeObjStep, source: FileSource) void {
+pub fn setLinkerScriptPath(self: *CompileStep, source: FileSource) void {
self.linker_script = source.dupe(self.builder);
source.addStepDependencies(&self.step);
}
-pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void {
- self.frameworks.put(self.builder.dupe(framework_name), .{}) catch unreachable;
+pub fn linkFramework(self: *CompileStep, framework_name: []const u8) void {
+ self.frameworks.put(self.builder.dupe(framework_name), .{}) catch @panic("OOM");
}
-pub fn linkFrameworkNeeded(self: *LibExeObjStep, framework_name: []const u8) void {
+pub fn linkFrameworkNeeded(self: *CompileStep, framework_name: []const u8) void {
self.frameworks.put(self.builder.dupe(framework_name), .{
.needed = true,
- }) catch unreachable;
+ }) catch @panic("OOM");
}
-pub fn linkFrameworkWeak(self: *LibExeObjStep, framework_name: []const u8) void {
+pub fn linkFrameworkWeak(self: *CompileStep, framework_name: []const u8) void {
self.frameworks.put(self.builder.dupe(framework_name), .{
.weak = true,
- }) catch unreachable;
+ }) catch @panic("OOM");
}
/// Returns whether the library, executable, or object depends on a particular system library.
-pub fn dependsOnSystemLibrary(self: LibExeObjStep, name: []const u8) bool {
+pub fn dependsOnSystemLibrary(self: CompileStep, name: []const u8) bool {
if (isLibCLibrary(name)) {
return self.is_linking_libc;
}
@@ -602,49 +584,49 @@ pub fn dependsOnSystemLibrary(self: LibExeObjStep, name: []const u8) bool {
return false;
}
-pub fn linkLibrary(self: *LibExeObjStep, lib: *LibExeObjStep) void {
+pub fn linkLibrary(self: *CompileStep, lib: *CompileStep) void {
assert(lib.kind == .lib);
self.linkLibraryOrObject(lib);
}
-pub fn isDynamicLibrary(self: *LibExeObjStep) bool {
+pub fn isDynamicLibrary(self: *CompileStep) bool {
return self.kind == .lib and self.linkage == Linkage.dynamic;
}
-pub fn isStaticLibrary(self: *LibExeObjStep) bool {
+pub fn isStaticLibrary(self: *CompileStep) bool {
return self.kind == .lib and self.linkage != Linkage.dynamic;
}
-pub fn producesPdbFile(self: *LibExeObjStep) bool {
+pub fn producesPdbFile(self: *CompileStep) bool {
if (!self.target.isWindows() and !self.target.isUefi()) return false;
if (self.target.getObjectFormat() == .c) return false;
if (self.strip == true) return false;
return self.isDynamicLibrary() or self.kind == .exe or self.kind == .test_exe;
}
-pub fn linkLibC(self: *LibExeObjStep) void {
+pub fn linkLibC(self: *CompileStep) void {
self.is_linking_libc = true;
}
-pub fn linkLibCpp(self: *LibExeObjStep) void {
+pub fn linkLibCpp(self: *CompileStep) void {
self.is_linking_libcpp = true;
}
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
-pub fn defineCMacro(self: *LibExeObjStep, name: []const u8, value: ?[]const u8) void {
- const macro = std.build.constructCMacro(self.builder.allocator, name, value);
- self.c_macros.append(macro) catch unreachable;
+pub fn defineCMacro(self: *CompileStep, name: []const u8, value: ?[]const u8) void {
+ const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
+ self.c_macros.append(macro) catch @panic("OOM");
}
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
-pub fn defineCMacroRaw(self: *LibExeObjStep, name_and_value: []const u8) void {
- self.c_macros.append(self.builder.dupe(name_and_value)) catch unreachable;
+pub fn defineCMacroRaw(self: *CompileStep, name_and_value: []const u8) void {
+ self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
}
/// This one has no integration with anything, it just puts -lname on the command line.
/// Prefer to use `linkSystemLibrary` instead.
-pub fn linkSystemLibraryName(self: *LibExeObjStep, name: []const u8) void {
+pub fn linkSystemLibraryName(self: *CompileStep, name: []const u8) void {
self.link_objects.append(.{
.system_lib = .{
.name = self.builder.dupe(name),
@@ -652,12 +634,12 @@ pub fn linkSystemLibraryName(self: *LibExeObjStep, name: []const u8) void {
.weak = false,
.use_pkg_config = .no,
},
- }) catch unreachable;
+ }) catch @panic("OOM");
}
/// This one has no integration with anything, it just puts -needed-lname on the command line.
/// Prefer to use `linkSystemLibraryNeeded` instead.
-pub fn linkSystemLibraryNeededName(self: *LibExeObjStep, name: []const u8) void {
+pub fn linkSystemLibraryNeededName(self: *CompileStep, name: []const u8) void {
self.link_objects.append(.{
.system_lib = .{
.name = self.builder.dupe(name),
@@ -665,12 +647,12 @@ pub fn linkSystemLibraryNeededName(self: *LibExeObjStep, name: []const u8) void
.weak = false,
.use_pkg_config = .no,
},
- }) catch unreachable;
+ }) catch @panic("OOM");
}
/// Darwin-only. This one has no integration with anything, it just puts -weak-lname on the
/// command line. Prefer to use `linkSystemLibraryWeak` instead.
-pub fn linkSystemLibraryWeakName(self: *LibExeObjStep, name: []const u8) void {
+pub fn linkSystemLibraryWeakName(self: *CompileStep, name: []const u8) void {
self.link_objects.append(.{
.system_lib = .{
.name = self.builder.dupe(name),
@@ -678,12 +660,12 @@ pub fn linkSystemLibraryWeakName(self: *LibExeObjStep, name: []const u8) void {
.weak = true,
.use_pkg_config = .no,
},
- }) catch unreachable;
+ }) catch @panic("OOM");
}
/// This links against a system library, exclusively using pkg-config to find the library.
/// Prefer to use `linkSystemLibrary` instead.
-pub fn linkSystemLibraryPkgConfigOnly(self: *LibExeObjStep, lib_name: []const u8) void {
+pub fn linkSystemLibraryPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void {
self.link_objects.append(.{
.system_lib = .{
.name = self.builder.dupe(lib_name),
@@ -691,12 +673,12 @@ pub fn linkSystemLibraryPkgConfigOnly(self: *LibExeObjStep, lib_name: []const u8
.weak = false,
.use_pkg_config = .force,
},
- }) catch unreachable;
+ }) catch @panic("OOM");
}
/// This links against a system library, exclusively using pkg-config to find the library.
/// Prefer to use `linkSystemLibraryNeeded` instead.
-pub fn linkSystemLibraryNeededPkgConfigOnly(self: *LibExeObjStep, lib_name: []const u8) void {
+pub fn linkSystemLibraryNeededPkgConfigOnly(self: *CompileStep, lib_name: []const u8) void {
self.link_objects.append(.{
.system_lib = .{
.name = self.builder.dupe(lib_name),
@@ -704,12 +686,12 @@ pub fn linkSystemLibraryNeededPkgConfigOnly(self: *LibExeObjStep, lib_name: []co
.weak = false,
.use_pkg_config = .force,
},
- }) catch unreachable;
+ }) catch @panic("OOM");
}
/// Run pkg-config for the given library name and parse the output, returning the arguments
/// that should be passed to zig to link the given library.
-pub fn runPkgConfig(self: *LibExeObjStep, lib_name: []const u8) ![]const []const u8 {
+pub fn runPkgConfig(self: *CompileStep, lib_name: []const u8) ![]const []const u8 {
const pkg_name = match: {
// First we have to map the library name to pkg config name. Unfortunately,
// there are several examples where this is not straightforward:
@@ -803,19 +785,19 @@ pub fn runPkgConfig(self: *LibExeObjStep, lib_name: []const u8) ![]const []const
return zig_args.toOwnedSlice();
}
-pub fn linkSystemLibrary(self: *LibExeObjStep, name: []const u8) void {
+pub fn linkSystemLibrary(self: *CompileStep, name: []const u8) void {
self.linkSystemLibraryInner(name, .{});
}
-pub fn linkSystemLibraryNeeded(self: *LibExeObjStep, name: []const u8) void {
+pub fn linkSystemLibraryNeeded(self: *CompileStep, name: []const u8) void {
self.linkSystemLibraryInner(name, .{ .needed = true });
}
-pub fn linkSystemLibraryWeak(self: *LibExeObjStep, name: []const u8) void {
+pub fn linkSystemLibraryWeak(self: *CompileStep, name: []const u8) void {
self.linkSystemLibraryInner(name, .{ .weak = true });
}
-fn linkSystemLibraryInner(self: *LibExeObjStep, name: []const u8, opts: struct {
+fn linkSystemLibraryInner(self: *CompileStep, name: []const u8, opts: struct {
needed: bool = false,
weak: bool = false,
}) void {
@@ -835,27 +817,27 @@ fn linkSystemLibraryInner(self: *LibExeObjStep, name: []const u8, opts: struct {
.weak = opts.weak,
.use_pkg_config = .yes,
},
- }) catch unreachable;
+ }) catch @panic("OOM");
}
-pub fn setNamePrefix(self: *LibExeObjStep, text: []const u8) void {
+pub fn setNamePrefix(self: *CompileStep, text: []const u8) void {
assert(self.kind == .@"test" or self.kind == .test_exe);
self.name_prefix = self.builder.dupe(text);
}
-pub fn setFilter(self: *LibExeObjStep, text: ?[]const u8) void {
+pub fn setFilter(self: *CompileStep, text: ?[]const u8) void {
assert(self.kind == .@"test" or self.kind == .test_exe);
self.filter = if (text) |t| self.builder.dupe(t) else null;
}
-pub fn setTestRunner(self: *LibExeObjStep, path: ?[]const u8) void {
+pub fn setTestRunner(self: *CompileStep, path: ?[]const u8) void {
assert(self.kind == .@"test" or self.kind == .test_exe);
self.test_runner = if (path) |p| self.builder.dupePath(p) else null;
}
/// Handy when you have many C/C++ source files and want them all to have the same flags.
-pub fn addCSourceFiles(self: *LibExeObjStep, files: []const []const u8, flags: []const []const u8) void {
- const c_source_files = self.builder.allocator.create(CSourceFiles) catch unreachable;
+pub fn addCSourceFiles(self: *CompileStep, files: []const []const u8, flags: []const []const u8) void {
+ const c_source_files = self.builder.allocator.create(CSourceFiles) catch @panic("OOM");
const files_copy = self.builder.dupeStrings(files);
const flags_copy = self.builder.dupeStrings(flags);
@@ -864,96 +846,92 @@ pub fn addCSourceFiles(self: *LibExeObjStep, files: []const []const u8, flags: [
.files = files_copy,
.flags = flags_copy,
};
- self.link_objects.append(.{ .c_source_files = c_source_files }) catch unreachable;
+ self.link_objects.append(.{ .c_source_files = c_source_files }) catch @panic("OOM");
}
-pub fn addCSourceFile(self: *LibExeObjStep, file: []const u8, flags: []const []const u8) void {
+pub fn addCSourceFile(self: *CompileStep, file: []const u8, flags: []const []const u8) void {
self.addCSourceFileSource(.{
.args = flags,
.source = .{ .path = file },
});
}
-pub fn addCSourceFileSource(self: *LibExeObjStep, source: CSourceFile) void {
- const c_source_file = self.builder.allocator.create(CSourceFile) catch unreachable;
+pub fn addCSourceFileSource(self: *CompileStep, source: CSourceFile) void {
+ const c_source_file = self.builder.allocator.create(CSourceFile) catch @panic("OOM");
c_source_file.* = source.dupe(self.builder);
- self.link_objects.append(.{ .c_source_file = c_source_file }) catch unreachable;
+ self.link_objects.append(.{ .c_source_file = c_source_file }) catch @panic("OOM");
source.source.addStepDependencies(&self.step);
}
-pub fn setVerboseLink(self: *LibExeObjStep, value: bool) void {
+pub fn setVerboseLink(self: *CompileStep, value: bool) void {
self.verbose_link = value;
}
-pub fn setVerboseCC(self: *LibExeObjStep, value: bool) void {
+pub fn setVerboseCC(self: *CompileStep, value: bool) void {
self.verbose_cc = value;
}
-pub fn setBuildMode(self: *LibExeObjStep, mode: std.builtin.Mode) void {
- self.build_mode = mode;
-}
-
-pub fn overrideZigLibDir(self: *LibExeObjStep, dir_path: []const u8) void {
+pub fn overrideZigLibDir(self: *CompileStep, dir_path: []const u8) void {
self.override_lib_dir = self.builder.dupePath(dir_path);
}
-pub fn setMainPkgPath(self: *LibExeObjStep, dir_path: []const u8) void {
+pub fn setMainPkgPath(self: *CompileStep, dir_path: []const u8) void {
self.main_pkg_path = self.builder.dupePath(dir_path);
}
-pub fn setLibCFile(self: *LibExeObjStep, libc_file: ?FileSource) void {
+pub fn setLibCFile(self: *CompileStep, libc_file: ?FileSource) void {
self.libc_file = if (libc_file) |f| f.dupe(self.builder) else null;
}
/// Returns the generated executable, library or object file.
/// To run an executable built with zig build, use `run`, or create an install step and invoke it.
-pub fn getOutputSource(self: *LibExeObjStep) FileSource {
+pub fn getOutputSource(self: *CompileStep) FileSource {
return FileSource{ .generated = &self.output_path_source };
}
/// Returns the generated import library. This function can only be called for libraries.
-pub fn getOutputLibSource(self: *LibExeObjStep) FileSource {
+pub fn getOutputLibSource(self: *CompileStep) FileSource {
assert(self.kind == .lib);
return FileSource{ .generated = &self.output_lib_path_source };
}
/// Returns the generated header file.
/// This function can only be called for libraries or object files which have `emit_h` set.
-pub fn getOutputHSource(self: *LibExeObjStep) FileSource {
+pub fn getOutputHSource(self: *CompileStep) FileSource {
assert(self.kind != .exe and self.kind != .test_exe and self.kind != .@"test");
assert(self.emit_h);
return FileSource{ .generated = &self.output_h_path_source };
}
/// Returns the generated PDB file. This function can only be called for Windows and UEFI.
-pub fn getOutputPdbSource(self: *LibExeObjStep) FileSource {
+pub fn getOutputPdbSource(self: *CompileStep) FileSource {
// TODO: Is this right? Isn't PDB for *any* PE/COFF file?
assert(self.target.isWindows() or self.target.isUefi());
return FileSource{ .generated = &self.output_pdb_path_source };
}
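The `getOutput*Source` getters return `FileSource` values backed by generated files, so other steps can consume an artifact's output before its path exists. A small illustrative sketch, assuming both steps were created elsewhere in `build.zig` (`addFileSourceArg` is touched later in this same patch):

```zig
const std = @import("std");

// Illustrative only. Assumes `exe: *std.Build.CompileStep` and
// `tool_run: *std.Build.RunStep` were created elsewhere in build.zig.
fn passArtifactOutput(exe: *std.Build.CompileStep, tool_run: *std.Build.RunStep) void {
    // A FileSource whose concrete path is only known once `exe` has been built.
    const exe_output = exe.getOutputSource();
    // RunStep.addFileSourceArg (also updated by this patch) consumes it and
    // records the step dependency.
    tool_run.addFileSourceArg(exe_output);
}
```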
-pub fn addAssemblyFile(self: *LibExeObjStep, path: []const u8) void {
+pub fn addAssemblyFile(self: *CompileStep, path: []const u8) void {
self.link_objects.append(.{
.assembly_file = .{ .path = self.builder.dupe(path) },
- }) catch unreachable;
+ }) catch @panic("OOM");
}
-pub fn addAssemblyFileSource(self: *LibExeObjStep, source: FileSource) void {
+pub fn addAssemblyFileSource(self: *CompileStep, source: FileSource) void {
const source_duped = source.dupe(self.builder);
- self.link_objects.append(.{ .assembly_file = source_duped }) catch unreachable;
+ self.link_objects.append(.{ .assembly_file = source_duped }) catch @panic("OOM");
source_duped.addStepDependencies(&self.step);
}
-pub fn addObjectFile(self: *LibExeObjStep, source_file: []const u8) void {
+pub fn addObjectFile(self: *CompileStep, source_file: []const u8) void {
self.addObjectFileSource(.{ .path = source_file });
}
-pub fn addObjectFileSource(self: *LibExeObjStep, source: FileSource) void {
- self.link_objects.append(.{ .static_path = source.dupe(self.builder) }) catch unreachable;
+pub fn addObjectFileSource(self: *CompileStep, source: FileSource) void {
+ self.link_objects.append(.{ .static_path = source.dupe(self.builder) }) catch @panic("OOM");
source.addStepDependencies(&self.step);
}
-pub fn addObject(self: *LibExeObjStep, obj: *LibExeObjStep) void {
+pub fn addObject(self: *CompileStep, obj: *CompileStep) void {
assert(obj.kind == .obj);
self.linkLibraryOrObject(obj);
}
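The C-source, object, and assembly helpers keep their signatures apart from the type rename. A hedged sketch with placeholder file names and flags, assuming `exe` already exists:

```zig
const std = @import("std");

// Hypothetical fragment; file names and flags are placeholders.
fn addNativeInputs(exe: *std.Build.CompileStep) void {
    // A single C file with its own flags.
    exe.addCSourceFile("src/compat.c", &.{"-std=c99"});
    // Several C files sharing one flag set.
    exe.addCSourceFiles(&.{ "src/a.c", "src/b.c" }, &.{ "-Wall", "-Wextra" });
    // A prebuilt object file and a hand-written assembly file.
    exe.addObjectFile("vendor/prebuilt.o");
    exe.addAssemblyFile("src/start.s");
}
```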
@@ -963,59 +941,59 @@ pub const addIncludeDir = @compileError("deprecated; use addIncludePath");
pub const addLibPath = @compileError("deprecated, use addLibraryPath");
pub const addFrameworkDir = @compileError("deprecated, use addFrameworkPath");
-pub fn addSystemIncludePath(self: *LibExeObjStep, path: []const u8) void {
- self.include_dirs.append(IncludeDir{ .raw_path_system = self.builder.dupe(path) }) catch unreachable;
+pub fn addSystemIncludePath(self: *CompileStep, path: []const u8) void {
+ self.include_dirs.append(IncludeDir{ .raw_path_system = self.builder.dupe(path) }) catch @panic("OOM");
}
-pub fn addIncludePath(self: *LibExeObjStep, path: []const u8) void {
- self.include_dirs.append(IncludeDir{ .raw_path = self.builder.dupe(path) }) catch unreachable;
+pub fn addIncludePath(self: *CompileStep, path: []const u8) void {
+ self.include_dirs.append(IncludeDir{ .raw_path = self.builder.dupe(path) }) catch @panic("OOM");
}
-pub fn addConfigHeader(self: *LibExeObjStep, config_header: *ConfigHeaderStep) void {
+pub fn addConfigHeader(self: *CompileStep, config_header: *ConfigHeaderStep) void {
self.step.dependOn(&config_header.step);
self.include_dirs.append(.{ .config_header_step = config_header }) catch @panic("OOM");
}
-pub fn addLibraryPath(self: *LibExeObjStep, path: []const u8) void {
- self.lib_paths.append(self.builder.dupe(path)) catch unreachable;
+pub fn addLibraryPath(self: *CompileStep, path: []const u8) void {
+ self.lib_paths.append(self.builder.dupe(path)) catch @panic("OOM");
}
-pub fn addRPath(self: *LibExeObjStep, path: []const u8) void {
- self.rpaths.append(self.builder.dupe(path)) catch unreachable;
+pub fn addRPath(self: *CompileStep, path: []const u8) void {
+ self.rpaths.append(self.builder.dupe(path)) catch @panic("OOM");
}
-pub fn addFrameworkPath(self: *LibExeObjStep, dir_path: []const u8) void {
- self.framework_dirs.append(self.builder.dupe(dir_path)) catch unreachable;
+pub fn addFrameworkPath(self: *CompileStep, dir_path: []const u8) void {
+ self.framework_dirs.append(self.builder.dupe(dir_path)) catch @panic("OOM");
}
-pub fn addPackage(self: *LibExeObjStep, package: Pkg) void {
- self.packages.append(self.builder.dupePkg(package)) catch unreachable;
- self.addRecursiveBuildDeps(package);
+/// Adds a module to be used with `@import`, exposing it in the current
+/// package's module table under `name`.
+pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void {
+ cs.modules.put(cs.builder.dupe(name), module) catch @panic("OOM");
+ cs.addRecursiveBuildDeps(module);
}
-pub fn addOptions(self: *LibExeObjStep, package_name: []const u8, options: *OptionsStep) void {
- self.addPackage(options.getPackage(package_name));
+/// Adds a module to be used with `@import` without exposing it in the current
+/// package's module table.
+pub fn addAnonymousModule(cs: *CompileStep, name: []const u8, options: std.Build.CreateModuleOptions) void {
+ const module = cs.builder.createModule(options);
+ return addModule(cs, name, module);
}
-fn addRecursiveBuildDeps(self: *LibExeObjStep, package: Pkg) void {
- package.source.addStepDependencies(&self.step);
- if (package.dependencies) |deps| {
- for (deps) |dep| {
- self.addRecursiveBuildDeps(dep);
- }
+pub fn addOptions(cs: *CompileStep, module_name: []const u8, options: *OptionsStep) void {
+ addModule(cs, module_name, options.createModule());
+}
+
+fn addRecursiveBuildDeps(cs: *CompileStep, module: *Module) void {
+ module.source_file.addStepDependencies(&cs.step);
+ for (module.dependencies.values()) |dep| {
+ cs.addRecursiveBuildDeps(dep);
}
}
-pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
- self.addPackage(Pkg{
- .name = self.builder.dupe(name),
- .source = .{ .path = self.builder.dupe(pkg_index_path) },
- });
-}
-
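The removed `addPackage`/`addPackagePath` API is superseded by modules. A sketch of the replacement flow, assuming `b: *std.Build` and `exe: *std.Build.CompileStep` exist and that `b.createModule` accepts the same `CreateModuleOptions` fields used by `OptionsStep.createModule` later in this patch (`source_file`, `dependencies`); module names and paths are illustrative:

```zig
const std = @import("std");

// Sketch of the module API that replaces addPackage/addPackagePath.
fn addModules(b: *std.Build, exe: *std.Build.CompileStep) void {
    // A named module, importable from exe's root source file as @import("util").
    const util = b.createModule(.{
        .source_file = .{ .path = "src/util.zig" },
        .dependencies = &.{},
    });
    exe.addModule("util", util);

    // Same effect without keeping the *Module handle around.
    exe.addAnonymousModule("extras", .{
        .source_file = .{ .path = "src/extras.zig" },
        .dependencies = &.{},
    });
}
```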
/// If Vcpkg was found on the system, it will be added to include and lib
/// paths for the specified target.
-pub fn addVcpkgPaths(self: *LibExeObjStep, linkage: LibExeObjStep.Linkage) !void {
+pub fn addVcpkgPaths(self: *CompileStep, linkage: CompileStep.Linkage) !void {
// Ideally in the Unattempted case we would call the function recursively
// after findVcpkgRoot and have only one switch statement, but the compiler
// cannot resolve the error set.
@@ -1050,31 +1028,36 @@ pub fn addVcpkgPaths(self: *LibExeObjStep, linkage: LibExeObjStep.Linkage) !void
}
}
-pub fn setExecCmd(self: *LibExeObjStep, args: []const ?[]const u8) void {
+pub fn setExecCmd(self: *CompileStep, args: []const ?[]const u8) void {
assert(self.kind == .@"test");
- const duped_args = self.builder.allocator.alloc(?[]u8, args.len) catch unreachable;
+ const duped_args = self.builder.allocator.alloc(?[]u8, args.len) catch @panic("OOM");
for (args) |arg, i| {
duped_args[i] = if (arg) |a| self.builder.dupe(a) else null;
}
self.exec_cmd_args = duped_args;
}
-fn linkLibraryOrObject(self: *LibExeObjStep, other: *LibExeObjStep) void {
+fn linkLibraryOrObject(self: *CompileStep, other: *CompileStep) void {
self.step.dependOn(&other.step);
- self.link_objects.append(.{ .other_step = other }) catch unreachable;
- self.include_dirs.append(.{ .other_step = other }) catch unreachable;
+ self.link_objects.append(.{ .other_step = other }) catch @panic("OOM");
+ self.include_dirs.append(.{ .other_step = other }) catch @panic("OOM");
}
-fn makePackageCmd(self: *LibExeObjStep, pkg: Pkg, zig_args: *ArrayList([]const u8)) error{OutOfMemory}!void {
- const builder = self.builder;
-
+fn appendModuleArgs(
+ cs: *CompileStep,
+ zig_args: *ArrayList([]const u8),
+ name: []const u8,
+ module: *Module,
+) error{OutOfMemory}!void {
try zig_args.append("--pkg-begin");
- try zig_args.append(pkg.name);
- try zig_args.append(builder.pathFromRoot(pkg.source.getPath(self.builder)));
+ try zig_args.append(name);
+ try zig_args.append(module.builder.pathFromRoot(module.source_file.getPath(module.builder)));
- if (pkg.dependencies) |dependencies| {
- for (dependencies) |sub_pkg| {
- try self.makePackageCmd(sub_pkg, zig_args);
+ {
+ const keys = module.dependencies.keys();
+ for (module.dependencies.values()) |sub_module, i| {
+ const sub_name = keys[i];
+ try cs.appendModuleArgs(zig_args, sub_name, sub_module);
}
}
@@ -1082,7 +1065,7 @@ fn makePackageCmd(self: *LibExeObjStep, pkg: Pkg, zig_args: *ArrayList([]const u
}
fn make(step: *Step) !void {
- const self = @fieldParentPtr(LibExeObjStep, "step", step);
+ const self = @fieldParentPtr(CompileStep, "step", step);
const builder = self.builder;
if (self.root_src == null and self.link_objects.items.len == 0) {
@@ -1093,7 +1076,7 @@ fn make(step: *Step) !void {
var zig_args = ArrayList([]const u8).init(builder.allocator);
defer zig_args.deinit();
- zig_args.append(builder.zig_exe) catch unreachable;
+ try zig_args.append(builder.zig_exe);
const cmd = switch (self.kind) {
.lib => "build-lib",
@@ -1102,7 +1085,7 @@ fn make(step: *Step) !void {
.@"test" => "test",
.test_exe => "test",
};
- zig_args.append(cmd) catch unreachable;
+ try zig_args.append(cmd);
if (builder.color != .auto) {
try zig_args.append("--color");
@@ -1307,12 +1290,12 @@ fn make(step: *Step) !void {
try zig_args.append("--debug-compile-errors");
}
- if (builder.verbose_cimport) zig_args.append("--verbose-cimport") catch unreachable;
- if (builder.verbose_air) zig_args.append("--verbose-air") catch unreachable;
- if (builder.verbose_llvm_ir) zig_args.append("--verbose-llvm-ir") catch unreachable;
- if (builder.verbose_link or self.verbose_link) zig_args.append("--verbose-link") catch unreachable;
- if (builder.verbose_cc or self.verbose_cc) zig_args.append("--verbose-cc") catch unreachable;
- if (builder.verbose_llvm_cpu_features) zig_args.append("--verbose-llvm-cpu-features") catch unreachable;
+ if (builder.verbose_cimport) try zig_args.append("--verbose-cimport");
+ if (builder.verbose_air) try zig_args.append("--verbose-air");
+ if (builder.verbose_llvm_ir) try zig_args.append("--verbose-llvm-ir");
+ if (builder.verbose_link or self.verbose_link) try zig_args.append("--verbose-link");
+ if (builder.verbose_cc or self.verbose_cc) try zig_args.append("--verbose-cc");
+ if (builder.verbose_llvm_cpu_features) try zig_args.append("--verbose-llvm-cpu-features");
if (self.emit_analysis.getArg(builder, "emit-analysis")) |arg| try zig_args.append(arg);
if (self.emit_asm.getArg(builder, "emit-asm")) |arg| try zig_args.append(arg);
@@ -1376,9 +1359,9 @@ fn make(step: *Step) !void {
try zig_args.append(libc_file);
}
- switch (self.build_mode) {
+ switch (self.optimize) {
.Debug => {}, // Skip since it's the default.
- else => zig_args.append(builder.fmt("-O{s}", .{@tagName(self.build_mode)})) catch unreachable,
+ else => try zig_args.append(builder.fmt("-O{s}", .{@tagName(self.optimize)})),
}
try zig_args.append("--cache-dir");
@@ -1387,8 +1370,8 @@ fn make(step: *Step) !void {
try zig_args.append("--global-cache-dir");
try zig_args.append(builder.pathFromRoot(builder.global_cache_root));
- zig_args.append("--name") catch unreachable;
- zig_args.append(self.name) catch unreachable;
+ try zig_args.append("--name");
+ try zig_args.append(self.name);
if (self.linkage) |some| switch (some) {
.dynamic => try zig_args.append("-dynamic"),
@@ -1396,8 +1379,8 @@ fn make(step: *Step) !void {
};
if (self.kind == .lib and self.linkage != null and self.linkage.? == .dynamic) {
if (self.version) |version| {
- zig_args.append("--version") catch unreachable;
- zig_args.append(builder.fmt("{}", .{version})) catch unreachable;
+ try zig_args.append("--version");
+ try zig_args.append(builder.fmt("{}", .{version}));
}
if (self.target.isDarwin()) {
@@ -1495,37 +1478,10 @@ fn make(step: *Step) !void {
}
if (!self.target.isNative()) {
- try zig_args.append("-target");
- try zig_args.append(try self.target.zigTriple(builder.allocator));
-
- // TODO this logic can disappear if cpu model + features becomes part of the target triple
- const cross = self.target.toTarget();
- const all_features = cross.cpu.arch.allFeaturesList();
- var populated_cpu_features = cross.cpu.model.features;
- populated_cpu_features.populateDependencies(all_features);
-
- if (populated_cpu_features.eql(cross.cpu.features)) {
- // The CPU name alone is sufficient.
- try zig_args.append("-mcpu");
- try zig_args.append(cross.cpu.model.name);
- } else {
- var mcpu_buffer = ArrayList(u8).init(builder.allocator);
-
- try mcpu_buffer.writer().print("-mcpu={s}", .{cross.cpu.model.name});
-
- for (all_features) |feature, i_usize| {
- const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
- const in_cpu_set = populated_cpu_features.isEnabled(i);
- const in_actual_set = cross.cpu.features.isEnabled(i);
- if (in_cpu_set and !in_actual_set) {
- try mcpu_buffer.writer().print("-{s}", .{feature.name});
- } else if (!in_cpu_set and in_actual_set) {
- try mcpu_buffer.writer().print("+{s}", .{feature.name});
- }
- }
-
- try zig_args.append(try mcpu_buffer.toOwnedSlice());
- }
+ try zig_args.appendSlice(&.{
+ "-target", try self.target.zigTriple(builder.allocator),
+ "-mcpu", try std.Build.serializeCpu(builder.allocator, self.target.getCpu()),
+ });
if (self.target.dynamic_linker.get()) |dynamic_linker| {
try zig_args.append("--dynamic-linker");
@@ -1632,8 +1588,12 @@ fn make(step: *Step) !void {
try zig_args.append("--test-no-exec");
}
- for (self.packages.items) |pkg| {
- try self.makePackageCmd(pkg, &zig_args);
+ {
+ const keys = self.modules.keys();
+ for (self.modules.values()) |module, i| {
+ const name = keys[i];
+ try self.appendModuleArgs(&zig_args, name, module);
+ }
}
for (self.include_dirs.items) |include_dir| {
@@ -1682,8 +1642,9 @@ fn make(step: *Step) !void {
}
},
.config_header_step => |config_header| {
- try zig_args.append("-I");
- try zig_args.append(config_header.output_dir);
+ const full_file_path = config_header.output_file.path.?;
+ const header_dir_path = full_file_path[0 .. full_file_path.len - config_header.include_path.len];
+ try zig_args.appendSlice(&.{ "-I", header_dir_path });
},
}
}
@@ -1720,13 +1681,13 @@ fn make(step: *Step) !void {
const name = entry.key_ptr.*;
const info = entry.value_ptr.*;
if (info.needed) {
- zig_args.append("-needed_framework") catch unreachable;
+ try zig_args.append("-needed_framework");
} else if (info.weak) {
- zig_args.append("-weak_framework") catch unreachable;
+ try zig_args.append("-weak_framework");
} else {
- zig_args.append("-framework") catch unreachable;
+ try zig_args.append("-framework");
}
- zig_args.append(name) catch unreachable;
+ try zig_args.append(name);
}
} else {
if (self.framework_dirs.items.len > 0) {
@@ -1817,7 +1778,7 @@ fn make(step: *Step) !void {
// Slow path for arguments that need to be escaped. We'll need to allocate and copy
var escaped = try ArrayList(u8).initCapacity(args_arena.allocator(), arg.len + 1);
const writer = escaped.writer();
- writer.writeAll(arg[0..arg_idx]) catch unreachable;
+ try writer.writeAll(arg[0..arg_idx]);
for (arg[arg_idx..]) |to_escape| {
if (to_escape == '\\' or to_escape == '"') try writer.writeByte('\\');
try writer.writeByte(to_escape);
@@ -1943,30 +1904,35 @@ fn findVcpkgRoot(allocator: Allocator) !?[]const u8 {
return vcpkg_path;
}
-pub fn doAtomicSymLinks(allocator: Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
+pub fn doAtomicSymLinks(
+ allocator: Allocator,
+ output_path: []const u8,
+ filename_major_only: []const u8,
+ filename_name_only: []const u8,
+) !void {
const out_dir = fs.path.dirname(output_path) orelse ".";
const out_basename = fs.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
- const major_only_path = fs.path.join(
+ const major_only_path = try fs.path.join(
allocator,
&[_][]const u8{ out_dir, filename_major_only },
- ) catch unreachable;
+ );
fs.atomicSymLink(allocator, out_basename, major_only_path) catch |err| {
log.err("Unable to symlink {s} -> {s}", .{ major_only_path, out_basename });
return err;
};
// sym link for libfoo.so to libfoo.so.1
- const name_only_path = fs.path.join(
+ const name_only_path = try fs.path.join(
allocator,
&[_][]const u8{ out_dir, filename_name_only },
- ) catch unreachable;
+ );
fs.atomicSymLink(allocator, filename_major_only, name_only_path) catch |err| {
log.err("Unable to symlink {s} -> {s}", .{ name_only_path, filename_major_only });
return err;
};
}
-fn execPkgConfigList(self: *Builder, out_code: *u8) (PkgConfigError || ExecError)![]const PkgConfigPkg {
+fn execPkgConfigList(self: *std.Build, out_code: *u8) (PkgConfigError || ExecError)![]const PkgConfigPkg {
const stdout = try self.execAllowFail(&[_][]const u8{ "pkg-config", "--list-all" }, out_code, .Ignore);
var list = ArrayList(PkgConfigPkg).init(self.allocator);
errdefer list.deinit();
@@ -1982,7 +1948,7 @@ fn execPkgConfigList(self: *Builder, out_code: *u8) (PkgConfigError || ExecError
return list.toOwnedSlice();
}
-fn getPkgConfigList(self: *Builder) ![]const PkgConfigPkg {
+fn getPkgConfigList(self: *std.Build) ![]const PkgConfigPkg {
if (self.pkg_config_pkg_list) |res| {
return res;
}
@@ -2006,40 +1972,6 @@ fn getPkgConfigList(self: *Builder) ![]const PkgConfigPkg {
}
}
-test "addPackage" {
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
- defer arena.deinit();
-
- var builder = try Builder.create(
- arena.allocator(),
- "test",
- "test",
- "test",
- "test",
- );
- defer builder.destroy();
-
- const pkg_dep = Pkg{
- .name = "pkg_dep",
- .source = .{ .path = "/not/a/pkg_dep.zig" },
- };
- const pkg_top = Pkg{
- .name = "pkg_dep",
- .source = .{ .path = "/not/a/pkg_top.zig" },
- .dependencies = &[_]Pkg{pkg_dep},
- };
-
- var exe = builder.addExecutable("not_an_executable", "/not/an/executable.zig");
- exe.addPackage(pkg_top);
-
- try std.testing.expectEqual(@as(usize, 1), exe.packages.items.len);
-
- const dupe = exe.packages.items[0];
- try std.testing.expectEqualStrings(pkg_top.name, dupe.name);
-}
-
fn addFlag(args: *ArrayList([]const u8), comptime name: []const u8, opt: ?bool) !void {
const cond = opt orelse return;
try args.ensureUnusedCapacity(1);
@@ -2070,7 +2002,7 @@ const TransitiveDeps = struct {
}
}
- fn addInner(td: *TransitiveDeps, other: *LibExeObjStep, dyn: bool) !void {
+ fn addInner(td: *TransitiveDeps, other: *CompileStep, dyn: bool) !void {
// Inherit dependency on libc and libc++
td.is_linking_libcpp = td.is_linking_libcpp or other.is_linking_libcpp;
td.is_linking_libc = td.is_linking_libc or other.is_linking_libc;
diff --git a/lib/std/build/ConfigHeaderStep.zig b/lib/std/Build/ConfigHeaderStep.zig
similarity index 51%
rename from lib/std/build/ConfigHeaderStep.zig
rename to lib/std/Build/ConfigHeaderStep.zig
index 400c06525e..ca4d69dfa9 100644
--- a/lib/std/build/ConfigHeaderStep.zig
+++ b/lib/std/Build/ConfigHeaderStep.zig
@@ -1,17 +1,25 @@
const std = @import("../std.zig");
const ConfigHeaderStep = @This();
-const Step = std.build.Step;
-const Builder = std.build.Builder;
+const Step = std.Build.Step;
pub const base_id: Step.Id = .config_header;
-pub const Style = enum {
+pub const Style = union(enum) {
/// The configure format supported by autotools. It uses `#undef foo` to
/// mark lines that can be substituted with different values.
- autoconf,
+ autoconf: std.Build.FileSource,
/// The configure format supported by CMake. It uses `@@FOO@@` and
/// `#cmakedefine` for template substitution.
- cmake,
+ cmake: std.Build.FileSource,
+ /// Instead of starting with an input file, start with nothing.
+ blank,
+
+ pub fn getFileSource(style: Style) ?std.Build.FileSource {
+ switch (style) {
+ .autoconf, .cmake => |s| return s,
+ .blank => return null,
+ }
+ }
};
pub const Value = union(enum) {
@@ -24,35 +32,51 @@ pub const Value = union(enum) {
};
step: Step,
-builder: *Builder,
-source: std.build.FileSource,
-style: Style,
-values: std.StringHashMap(Value),
-max_bytes: usize = 2 * 1024 * 1024,
-output_dir: []const u8,
-output_basename: []const u8,
+builder: *std.Build,
+values: std.StringArrayHashMap(Value),
+output_file: std.Build.GeneratedFile,
-pub fn create(builder: *Builder, source: std.build.FileSource, style: Style) *ConfigHeaderStep {
+style: Style,
+max_bytes: usize,
+include_path: []const u8,
+
+pub const Options = struct {
+ style: Style = .blank,
+ max_bytes: usize = 2 * 1024 * 1024,
+ include_path: ?[]const u8 = null,
+};
+
+pub fn create(builder: *std.Build, options: Options) *ConfigHeaderStep {
const self = builder.allocator.create(ConfigHeaderStep) catch @panic("OOM");
- const name = builder.fmt("configure header {s}", .{source.getDisplayName()});
+ const name = if (options.style.getFileSource()) |s|
+ builder.fmt("configure {s} header {s}", .{ @tagName(options.style), s.getDisplayName() })
+ else
+ builder.fmt("configure {s} header", .{@tagName(options.style)});
self.* = .{
.builder = builder,
.step = Step.init(base_id, name, builder.allocator, make),
- .source = source,
- .style = style,
- .values = std.StringHashMap(Value).init(builder.allocator),
- .output_dir = undefined,
- .output_basename = "config.h",
+ .style = options.style,
+ .values = std.StringArrayHashMap(Value).init(builder.allocator),
+
+ .max_bytes = options.max_bytes,
+ .include_path = "config.h",
+ .output_file = .{ .step = &self.step },
};
- switch (source) {
+
+ if (options.style.getFileSource()) |s| switch (s) {
.path => |p| {
const basename = std.fs.path.basename(p);
if (std.mem.endsWith(u8, basename, ".h.in")) {
- self.output_basename = basename[0 .. basename.len - 3];
+ self.include_path = basename[0 .. basename.len - 3];
}
},
else => {},
+ };
+
+ if (options.include_path) |include_path| {
+ self.include_path = include_path;
}
+
return self;
}
@@ -62,47 +86,57 @@ pub fn addValues(self: *ConfigHeaderStep, values: anytype) void {
fn addValuesInner(self: *ConfigHeaderStep, values: anytype) !void {
inline for (@typeInfo(@TypeOf(values)).Struct.fields) |field| {
- switch (@typeInfo(field.type)) {
- .Null => {
- try self.values.put(field.name, .undef);
- },
- .Void => {
- try self.values.put(field.name, .defined);
- },
- .Bool => {
- try self.values.put(field.name, .{ .boolean = @field(values, field.name) });
- },
- .ComptimeInt => {
- try self.values.put(field.name, .{ .int = @field(values, field.name) });
- },
- .EnumLiteral => {
- try self.values.put(field.name, .{ .ident = @tagName(@field(values, field.name)) });
- },
- .Pointer => |ptr| {
- switch (@typeInfo(ptr.child)) {
- .Array => |array| {
- if (ptr.size == .One and array.child == u8) {
- try self.values.put(field.name, .{ .string = @field(values, field.name) });
- continue;
- }
- },
- else => {},
- }
+ try putValue(self, field.name, field.type, @field(values, field.name));
+ }
+}
- @compileError("unsupported ConfigHeaderStep value type: " ++
- @typeName(field.type));
- },
- else => @compileError("unsupported ConfigHeaderStep value type: " ++
- @typeName(field.type)),
- }
+fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v: T) !void {
+ switch (@typeInfo(T)) {
+ .Null => {
+ try self.values.put(field_name, .undef);
+ },
+ .Void => {
+ try self.values.put(field_name, .defined);
+ },
+ .Bool => {
+ try self.values.put(field_name, .{ .boolean = v });
+ },
+ .Int => {
+ try self.values.put(field_name, .{ .int = v });
+ },
+ .ComptimeInt => {
+ try self.values.put(field_name, .{ .int = v });
+ },
+ .EnumLiteral => {
+ try self.values.put(field_name, .{ .ident = @tagName(v) });
+ },
+ .Optional => {
+ if (v) |x| {
+ return putValue(self, field_name, @TypeOf(x), x);
+ } else {
+ try self.values.put(field_name, .undef);
+ }
+ },
+ .Pointer => |ptr| {
+ switch (@typeInfo(ptr.child)) {
+ .Array => |array| {
+ if (ptr.size == .One and array.child == u8) {
+ try self.values.put(field_name, .{ .string = v });
+ return;
+ }
+ },
+ else => {},
+ }
+
+ @compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T));
+ },
+ else => @compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T)),
}
}
fn make(step: *Step) !void {
const self = @fieldParentPtr(ConfigHeaderStep, "step", step);
const gpa = self.builder.allocator;
- const src_path = self.source.getPath(self.builder);
- const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
// The cache is used here not really as a way to speed things up - because writing
// the data to a file would probably be very fast - but as a way to find a canonical
@@ -119,9 +153,30 @@ fn make(step: *Step) !void {
// Random bytes to make ConfigHeaderStep unique. Refresh this with new
// random bytes when ConfigHeaderStep implementation is modified in a
// non-backwards-compatible way.
- var hash = Hasher.init("X1pQzdDt91Zlh7Eh");
- hash.update(self.source.getDisplayName());
- hash.update(contents);
+ var hash = Hasher.init("PGuDTpidxyMqnkGM");
+
+ var output = std.ArrayList(u8).init(gpa);
+ defer output.deinit();
+
+ try output.appendSlice("/* This file was generated by ConfigHeaderStep using the Zig Build System. */\n");
+
+ switch (self.style) {
+ .autoconf => |file_source| {
+ const src_path = file_source.getPath(self.builder);
+ const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
+ try render_autoconf(contents, &output, self.values, src_path);
+ },
+ .cmake => |file_source| {
+ const src_path = file_source.getPath(self.builder);
+ const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
+ try render_cmake(contents, &output, self.values, src_path);
+ },
+ .blank => {
+ try render_blank(&output, self.values, self.include_path);
+ },
+ }
+
+ hash.update(output.items);
var digest: [16]u8 = undefined;
hash.final(&digest);
@@ -132,38 +187,42 @@ fn make(step: *Step) !void {
.{std.fmt.fmtSliceHexLower(&digest)},
) catch unreachable;
- self.output_dir = try std.fs.path.join(gpa, &[_][]const u8{
+ const output_dir = try std.fs.path.join(gpa, &[_][]const u8{
self.builder.cache_root, "o", &hash_basename,
});
- var dir = std.fs.cwd().makeOpenPath(self.output_dir, .{}) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ self.output_dir, @errorName(err) });
+
+ // If include_path has directory parts, deal with them. Example:
+ // output_dir is zig-cache/o/HASH
+ // include_path is libavutil/avconfig.h
+ // We want to open directory zig-cache/o/HASH/libavutil/
+ // but keep output_dir as zig-cache/o/HASH for -I include
+ const sub_dir_path = if (std.fs.path.dirname(self.include_path)) |d|
+ try std.fs.path.join(gpa, &.{ output_dir, d })
+ else
+ output_dir;
+
+ var dir = std.fs.cwd().makeOpenPath(sub_dir_path, .{}) catch |err| {
+ std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
return err;
};
defer dir.close();
- var values_copy = try self.values.clone();
- defer values_copy.deinit();
+ try dir.writeFile(std.fs.path.basename(self.include_path), output.items);
- var output = std.ArrayList(u8).init(gpa);
- defer output.deinit();
- try output.ensureTotalCapacity(contents.len);
-
- try output.appendSlice("/* This file was generated by ConfigHeaderStep using the Zig Build System. */\n");
-
- switch (self.style) {
- .autoconf => try render_autoconf(contents, &output, &values_copy, src_path),
- .cmake => try render_cmake(contents, &output, &values_copy, src_path),
- }
-
- try dir.writeFile(self.output_basename, output.items);
+ self.output_file.path = try std.fs.path.join(self.builder.allocator, &.{
+ output_dir, self.include_path,
+ });
}
fn render_autoconf(
contents: []const u8,
output: *std.ArrayList(u8),
- values_copy: *std.StringHashMap(Value),
+ values: std.StringArrayHashMap(Value),
src_path: []const u8,
) !void {
+ var values_copy = try values.clone();
+ defer values_copy.deinit();
+
var any_errors = false;
var line_index: u32 = 0;
var line_it = std.mem.split(u8, contents, "\n");
@@ -181,7 +240,7 @@ fn render_autoconf(
continue;
}
const name = it.rest();
- const kv = values_copy.fetchRemove(name) orelse {
+ const kv = values_copy.fetchSwapRemove(name) orelse {
std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
src_path, line_index + 1, name,
});
@@ -191,12 +250,8 @@ fn render_autoconf(
try renderValue(output, name, kv.value);
}
- {
- var it = values_copy.iterator();
- while (it.next()) |entry| {
- const name = entry.key_ptr.*;
- std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
- }
+ for (values_copy.keys()) |name| {
+ std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
}
if (any_errors) {
@@ -207,9 +262,12 @@ fn render_autoconf(
fn render_cmake(
contents: []const u8,
output: *std.ArrayList(u8),
- values_copy: *std.StringHashMap(Value),
+ values: std.StringArrayHashMap(Value),
src_path: []const u8,
) !void {
+ var values_copy = try values.clone();
+ defer values_copy.deinit();
+
var any_errors = false;
var line_index: u32 = 0;
var line_it = std.mem.split(u8, contents, "\n");
@@ -233,7 +291,7 @@ fn render_cmake(
any_errors = true;
continue;
};
- const kv = values_copy.fetchRemove(name) orelse {
+ const kv = values_copy.fetchSwapRemove(name) orelse {
std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
src_path, line_index + 1, name,
});
@@ -243,12 +301,8 @@ fn render_cmake(
try renderValue(output, name, kv.value);
}
- {
- var it = values_copy.iterator();
- while (it.next()) |entry| {
- const name = entry.key_ptr.*;
- std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
- }
+ for (values_copy.keys()) |name| {
+ std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
}
if (any_errors) {
@@ -256,6 +310,36 @@ fn render_cmake(
}
}
+fn render_blank(
+ output: *std.ArrayList(u8),
+ defines: std.StringArrayHashMap(Value),
+ include_path: []const u8,
+) !void {
+ const include_guard_name = try output.allocator.dupe(u8, include_path);
+ for (include_guard_name) |*byte| {
+ switch (byte.*) {
+ 'a'...'z' => byte.* = byte.* - 'a' + 'A',
+ 'A'...'Z', '0'...'9' => continue,
+ else => byte.* = '_',
+ }
+ }
+
+ try output.appendSlice("#ifndef ");
+ try output.appendSlice(include_guard_name);
+ try output.appendSlice("\n#define ");
+ try output.appendSlice(include_guard_name);
+ try output.appendSlice("\n");
+
+ const values = defines.values();
+ for (defines.keys()) |name, i| {
+ try renderValue(output, name, values[i]);
+ }
+
+ try output.appendSlice("#endif /* ");
+ try output.appendSlice(include_guard_name);
+ try output.appendSlice(" */\n");
+}
+
fn renderValue(output: *std.ArrayList(u8), name: []const u8, value: Value) !void {
switch (value) {
.undef => {
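With this rework, `ConfigHeaderStep` is configured through an `Options` struct whose `style` carries the template `FileSource` (or `.blank`), and the generated header is addressed by `include_path`. A hedged usage sketch, assuming `b` and `exe` exist and the step is exposed as `std.Build.ConfigHeaderStep`; names, paths, and values are placeholders:

```zig
const std = @import("std");

// Hedged usage sketch; paths and values are placeholders.
fn addConfigHeaders(b: *std.Build, exe: *std.Build.CompileStep) void {
    // CMake-style substitution driven by a template file.
    const cmake_header = std.Build.ConfigHeaderStep.create(b, .{
        .style = .{ .cmake = .{ .path = "config.h.in" } },
    });
    // Accepted value types include bool, int, string, enum literal,
    // and null (left undefined in the rendered header).
    cmake_header.addValues(.{
        .HAVE_THREADS = true,
        .PROJECT_VERSION = "1.2.3",
        .USE_LEGACY_API = null,
    });
    exe.addConfigHeader(cmake_header);

    // The new .blank style starts from nothing and writes the header
    // under the requested include path.
    const blank_header = std.Build.ConfigHeaderStep.create(b, .{
        .style = .blank,
        .include_path = "mylib/config.h",
    });
    blank_header.addValues(.{ .ENABLE_FOO = true });
    exe.addConfigHeader(blank_header);
}
```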
diff --git a/lib/std/build/EmulatableRunStep.zig b/lib/std/Build/EmulatableRunStep.zig
similarity index 95%
rename from lib/std/build/EmulatableRunStep.zig
rename to lib/std/Build/EmulatableRunStep.zig
index 52ce8edfac..5517f7f9aa 100644
--- a/lib/std/build/EmulatableRunStep.zig
+++ b/lib/std/Build/EmulatableRunStep.zig
@@ -5,11 +5,9 @@
//! without having to verify if it's possible to be run against.
const std = @import("../std.zig");
-const build = std.build;
-const Step = std.build.Step;
-const Builder = std.build.Builder;
-const LibExeObjStep = std.build.LibExeObjStep;
-const RunStep = std.build.RunStep;
+const Step = std.Build.Step;
+const CompileStep = std.Build.CompileStep;
+const RunStep = std.Build.RunStep;
const fs = std.fs;
const process = std.process;
@@ -22,10 +20,10 @@ pub const base_id = .emulatable_run;
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
step: Step,
-builder: *Builder,
+builder: *std.Build,
/// The artifact (executable) to be run by this step
-exe: *LibExeObjStep,
+exe: *CompileStep,
/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
expected_exit_code: ?u8 = 0,
@@ -47,9 +45,9 @@ hide_foreign_binaries_warning: bool,
/// binary through emulation when any of the emulation options such as `enable_rosetta` are set to true.
/// When set to false, and the binary is foreign, running the executable is skipped.
/// Asserts given artifact is an executable.
-pub fn create(builder: *Builder, name: []const u8, artifact: *LibExeObjStep) *EmulatableRunStep {
+pub fn create(builder: *std.Build, name: []const u8, artifact: *CompileStep) *EmulatableRunStep {
std.debug.assert(artifact.kind == .exe or artifact.kind == .test_exe);
- const self = builder.allocator.create(EmulatableRunStep) catch unreachable;
+ const self = builder.allocator.create(EmulatableRunStep) catch @panic("OOM");
const option_name = "hide-foreign-warnings";
const hide_warnings = if (builder.available_options_map.get(option_name) == null) warn: {
@@ -156,9 +154,9 @@ fn warnAboutForeignBinaries(step: *EmulatableRunStep) void {
const builder = step.builder;
const artifact = step.exe;
- const host_name = builder.host.target.zigTriple(builder.allocator) catch unreachable;
- const foreign_name = artifact.target.zigTriple(builder.allocator) catch unreachable;
- const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch unreachable;
+ const host_name = builder.host.target.zigTriple(builder.allocator) catch @panic("unhandled error");
+ const foreign_name = artifact.target.zigTriple(builder.allocator) catch @panic("unhandled error");
+ const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch @panic("unhandled error");
const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc;
switch (builder.host.getExternalExecutor(target_info, .{
.qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,
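A minimal sketch of `EmulatableRunStep` usage under the renamed types, assuming `b: *std.Build` and an executable artifact exist and the step is exposed as `std.Build.EmulatableRunStep`:

```zig
const std = @import("std");

// Minimal sketch; `b` and `test_exe` are assumed to exist, and `test_exe`
// must be an executable (.exe or .test_exe), as asserted by create().
fn addEmulatedRun(b: *std.Build, test_exe: *std.Build.CompileStep) *std.Build.EmulatableRunStep {
    const run = std.Build.EmulatableRunStep.create(b, "run tests under emulation", test_exe);
    // Ignore the exit code when judging whether the run succeeded.
    run.expected_exit_code = null;
    return run;
}
```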
diff --git a/lib/std/build/FmtStep.zig b/lib/std/Build/FmtStep.zig
similarity index 66%
rename from lib/std/build/FmtStep.zig
rename to lib/std/Build/FmtStep.zig
index 62923623f2..6404d22f13 100644
--- a/lib/std/build/FmtStep.zig
+++ b/lib/std/Build/FmtStep.zig
@@ -1,25 +1,20 @@
const std = @import("../std.zig");
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
-const BufMap = std.BufMap;
-const mem = std.mem;
-
+const Step = std.Build.Step;
const FmtStep = @This();
pub const base_id = .fmt;
step: Step,
-builder: *Builder,
+builder: *std.Build,
argv: [][]const u8,
-pub fn create(builder: *Builder, paths: []const []const u8) *FmtStep {
- const self = builder.allocator.create(FmtStep) catch unreachable;
+pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
+ const self = builder.allocator.create(FmtStep) catch @panic("OOM");
const name = "zig fmt";
self.* = FmtStep{
.step = Step.init(.fmt, name, builder.allocator, make),
.builder = builder,
- .argv = builder.allocator.alloc([]u8, paths.len + 2) catch unreachable,
+ .argv = builder.allocator.alloc([]u8, paths.len + 2) catch @panic("OOM"),
};
self.argv[0] = builder.zig_exe;
diff --git a/lib/std/build/InstallArtifactStep.zig b/lib/std/Build/InstallArtifactStep.zig
similarity index 80%
rename from lib/std/build/InstallArtifactStep.zig
rename to lib/std/Build/InstallArtifactStep.zig
index 537b8c8fd9..c419c85fdf 100644
--- a/lib/std/build/InstallArtifactStep.zig
+++ b/lib/std/Build/InstallArtifactStep.zig
@@ -1,32 +1,29 @@
const std = @import("../std.zig");
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
-const LibExeObjStep = std.build.LibExeObjStep;
-const InstallDir = std.build.InstallDir;
+const Step = std.Build.Step;
+const CompileStep = std.Build.CompileStep;
+const InstallDir = std.Build.InstallDir;
+const InstallArtifactStep = @This();
pub const base_id = .install_artifact;
step: Step,
-builder: *Builder,
-artifact: *LibExeObjStep,
+builder: *std.Build,
+artifact: *CompileStep,
dest_dir: InstallDir,
pdb_dir: ?InstallDir,
h_dir: ?InstallDir,
-const Self = @This();
-
-pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
+pub fn create(builder: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
if (artifact.install_step) |s| return s;
- const self = builder.allocator.create(Self) catch unreachable;
- self.* = Self{
+ const self = builder.allocator.create(InstallArtifactStep) catch @panic("OOM");
+ self.* = InstallArtifactStep{
.builder = builder,
.step = Step.init(.install_artifact, builder.fmt("install {s}", .{artifact.step.name}), builder.allocator, make),
.artifact = artifact,
.dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) {
.obj => @panic("Cannot install a .obj build artifact."),
- .@"test" => @panic("Cannot install a test build artifact, use addTestExe instead."),
+ .@"test" => @panic("Cannot install a .test build artifact, use .test_exe instead."),
.exe, .test_exe => InstallDir{ .bin = {} },
.lib => InstallDir{ .lib = {} },
},
@@ -64,13 +61,13 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
}
fn make(step: *Step) !void {
- const self = @fieldParentPtr(Self, "step", step);
+ const self = @fieldParentPtr(InstallArtifactStep, "step", step);
const builder = self.builder;
const full_dest_path = builder.getInstallPath(self.dest_dir, self.artifact.out_filename);
try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path);
if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) {
- try LibExeObjStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
+ try CompileStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
}
if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and self.artifact.emit_implib != .no_emit) {
const full_implib_path = builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
diff --git a/lib/std/build/InstallDirStep.zig b/lib/std/Build/InstallDirStep.zig
similarity index 92%
rename from lib/std/build/InstallDirStep.zig
rename to lib/std/Build/InstallDirStep.zig
index 0a41e1aaef..41dbb3e35a 100644
--- a/lib/std/build/InstallDirStep.zig
+++ b/lib/std/Build/InstallDirStep.zig
@@ -1,19 +1,17 @@
const std = @import("../std.zig");
const mem = std.mem;
const fs = std.fs;
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
-const InstallDir = std.build.InstallDir;
+const Step = std.Build.Step;
+const InstallDir = std.Build.InstallDir;
const InstallDirStep = @This();
const log = std.log;
step: Step,
-builder: *Builder,
+builder: *std.Build,
options: Options,
/// This is used by the build system when a file being installed comes from one
/// package but is being installed by another.
-override_source_builder: ?*Builder = null,
+override_source_builder: ?*std.Build = null,
pub const base_id = .install_dir;
@@ -31,7 +29,7 @@ pub const Options = struct {
/// `@import("test.zig")` would be a compile error.
blank_extensions: []const []const u8 = &.{},
- fn dupe(self: Options, b: *Builder) Options {
+ fn dupe(self: Options, b: *std.Build) Options {
return .{
.source_dir = b.dupe(self.source_dir),
.install_dir = self.install_dir.dupe(b),
@@ -43,7 +41,7 @@ pub const Options = struct {
};
pub fn init(
- builder: *Builder,
+ builder: *std.Build,
options: Options,
) InstallDirStep {
builder.pushInstalledFile(options.install_dir, options.install_subdir);
diff --git a/lib/std/build/InstallFileStep.zig b/lib/std/Build/InstallFileStep.zig
similarity index 82%
rename from lib/std/build/InstallFileStep.zig
rename to lib/std/Build/InstallFileStep.zig
index 37203e64c5..8c8d8ad2d4 100644
--- a/lib/std/build/InstallFileStep.zig
+++ b/lib/std/Build/InstallFileStep.zig
@@ -1,24 +1,22 @@
const std = @import("../std.zig");
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
-const FileSource = std.build.FileSource;
-const InstallDir = std.build.InstallDir;
+const Step = std.Build.Step;
+const FileSource = std.Build.FileSource;
+const InstallDir = std.Build.InstallDir;
const InstallFileStep = @This();
pub const base_id = .install_file;
step: Step,
-builder: *Builder,
+builder: *std.Build,
source: FileSource,
dir: InstallDir,
dest_rel_path: []const u8,
/// This is used by the build system when a file being installed comes from one
/// package but is being installed by another.
-override_source_builder: ?*Builder = null,
+override_source_builder: ?*std.Build = null,
pub fn init(
- builder: *Builder,
+ builder: *std.Build,
source: FileSource,
dir: InstallDir,
dest_rel_path: []const u8,
diff --git a/lib/std/build/InstallRawStep.zig b/lib/std/Build/InstallRawStep.zig
similarity index 81%
rename from lib/std/build/InstallRawStep.zig
rename to lib/std/Build/InstallRawStep.zig
index e8266dff5a..014c44f287 100644
--- a/lib/std/build/InstallRawStep.zig
+++ b/lib/std/Build/InstallRawStep.zig
@@ -7,11 +7,10 @@ const InstallRawStep = @This();
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
-const Builder = std.build.Builder;
const File = std.fs.File;
-const InstallDir = std.build.InstallDir;
-const LibExeObjStep = std.build.LibExeObjStep;
-const Step = std.build.Step;
+const InstallDir = std.Build.InstallDir;
+const CompileStep = std.Build.CompileStep;
+const Step = std.Build.Step;
const elf = std.elf;
const fs = std.fs;
const io = std.io;
@@ -25,12 +24,12 @@ pub const RawFormat = enum {
};
step: Step,
-builder: *Builder,
-artifact: *LibExeObjStep,
+builder: *std.Build,
+artifact: *CompileStep,
dest_dir: InstallDir,
dest_filename: []const u8,
options: CreateOptions,
-output_file: std.build.GeneratedFile,
+output_file: std.Build.GeneratedFile,
pub const CreateOptions = struct {
format: ?RawFormat = null,
@@ -39,8 +38,13 @@ pub const CreateOptions = struct {
pad_to: ?u64 = null,
};
-pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8, options: CreateOptions) *InstallRawStep {
- const self = builder.allocator.create(InstallRawStep) catch unreachable;
+pub fn create(
+ builder: *std.Build,
+ artifact: *CompileStep,
+ dest_filename: []const u8,
+ options: CreateOptions,
+) *InstallRawStep {
+ const self = builder.allocator.create(InstallRawStep) catch @panic("OOM");
self.* = InstallRawStep{
.step = Step.init(.install_raw, builder.fmt("install raw binary {s}", .{artifact.step.name}), builder.allocator, make),
.builder = builder,
@@ -53,7 +57,7 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []cons
},
.dest_filename = dest_filename,
.options = options,
- .output_file = std.build.GeneratedFile{ .step = &self.step },
+ .output_file = std.Build.GeneratedFile{ .step = &self.step },
};
self.step.dependOn(&artifact.step);
@@ -61,8 +65,8 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []cons
return self;
}
-pub fn getOutputSource(self: *const InstallRawStep) std.build.FileSource {
- return std.build.FileSource{ .generated = &self.output_file };
+pub fn getOutputSource(self: *const InstallRawStep) std.Build.FileSource {
+ return std.Build.FileSource{ .generated = &self.output_file };
}
fn make(step: *Step) !void {
@@ -78,7 +82,7 @@ fn make(step: *Step) !void {
const full_dest_path = b.getInstallPath(self.dest_dir, self.dest_filename);
self.output_file.path = full_dest_path;
- fs.cwd().makePath(b.getInstallPath(self.dest_dir, "")) catch unreachable;
+ try fs.cwd().makePath(b.getInstallPath(self.dest_dir, ""));
var argv_list = std.ArrayList([]const u8).init(b.allocator);
try argv_list.appendSlice(&.{ b.zig_exe, "objcopy" });
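An illustrative sketch of `InstallRawStep` after the rename, assuming `b` and `exe` exist and the step is exposed as `std.Build.InstallRawStep`; the options are left at their defaults and the destination file name is a placeholder:

```zig
const std = @import("std");

// Illustrative only; options are left at their defaults.
fn installRawBinary(b: *std.Build, exe: *std.Build.CompileStep) std.Build.FileSource {
    const raw = std.Build.InstallRawStep.create(b, exe, "firmware.bin", .{});
    // The installed path is exposed as a generated FileSource for later steps.
    return raw.getOutputSource();
}
```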
diff --git a/lib/std/build/LogStep.zig b/lib/std/Build/LogStep.zig
similarity index 72%
rename from lib/std/build/LogStep.zig
rename to lib/std/Build/LogStep.zig
index fd937b00f9..6d51df8cbd 100644
--- a/lib/std/build/LogStep.zig
+++ b/lib/std/Build/LogStep.zig
@@ -1,17 +1,15 @@
const std = @import("../std.zig");
const log = std.log;
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
+const Step = std.Build.Step;
const LogStep = @This();
pub const base_id = .log;
step: Step,
-builder: *Builder,
+builder: *std.Build,
data: []const u8,
-pub fn init(builder: *Builder, data: []const u8) LogStep {
+pub fn init(builder: *std.Build, data: []const u8) LogStep {
return LogStep{
.builder = builder,
.step = Step.init(.log, builder.fmt("log {s}", .{data}), builder.allocator, make),
diff --git a/lib/std/build/OptionsStep.zig b/lib/std/Build/OptionsStep.zig
similarity index 78%
rename from lib/std/build/OptionsStep.zig
rename to lib/std/Build/OptionsStep.zig
index fb06cc2179..8a50456539 100644
--- a/lib/std/build/OptionsStep.zig
+++ b/lib/std/Build/OptionsStep.zig
@@ -1,12 +1,10 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
-const build = std.build;
const fs = std.fs;
-const Step = build.Step;
-const Builder = build.Builder;
-const GeneratedFile = build.GeneratedFile;
-const LibExeObjStep = build.LibExeObjStep;
-const FileSource = build.FileSource;
+const Step = std.Build.Step;
+const GeneratedFile = std.Build.GeneratedFile;
+const CompileStep = std.Build.CompileStep;
+const FileSource = std.Build.FileSource;
const OptionsStep = @This();
@@ -14,14 +12,14 @@ pub const base_id = .options;
step: Step,
generated_file: GeneratedFile,
-builder: *Builder,
+builder: *std.Build,
contents: std.ArrayList(u8),
artifact_args: std.ArrayList(OptionArtifactArg),
file_source_args: std.ArrayList(OptionFileSourceArg),
-pub fn create(builder: *Builder) *OptionsStep {
- const self = builder.allocator.create(OptionsStep) catch unreachable;
+pub fn create(builder: *std.Build) *OptionsStep {
+ const self = builder.allocator.create(OptionsStep) catch @panic("OOM");
self.* = .{
.builder = builder,
.step = Step.init(.options, "options", builder.allocator, make),
@@ -36,44 +34,48 @@ pub fn create(builder: *Builder) *OptionsStep {
}
pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value: T) void {
+ return addOptionFallible(self, T, name, value) catch @panic("unhandled error");
+}
+
+fn addOptionFallible(self: *OptionsStep, comptime T: type, name: []const u8, value: T) !void {
const out = self.contents.writer();
switch (T) {
[]const []const u8 => {
- out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{std.zig.fmtId(name)}) catch unreachable;
+ try out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{std.zig.fmtId(name)});
for (value) |slice| {
- out.print(" \"{}\",\n", .{std.zig.fmtEscapes(slice)}) catch unreachable;
+ try out.print(" \"{}\",\n", .{std.zig.fmtEscapes(slice)});
}
- out.writeAll("};\n") catch unreachable;
+ try out.writeAll("};\n");
return;
},
[:0]const u8 => {
- out.print("pub const {}: [:0]const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) }) catch unreachable;
+ try out.print("pub const {}: [:0]const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) });
return;
},
[]const u8 => {
- out.print("pub const {}: []const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) }) catch unreachable;
+ try out.print("pub const {}: []const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) });
return;
},
?[:0]const u8 => {
- out.print("pub const {}: ?[:0]const u8 = ", .{std.zig.fmtId(name)}) catch unreachable;
+ try out.print("pub const {}: ?[:0]const u8 = ", .{std.zig.fmtId(name)});
if (value) |payload| {
- out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)}) catch unreachable;
+ try out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)});
} else {
- out.writeAll("null;\n") catch unreachable;
+ try out.writeAll("null;\n");
}
return;
},
?[]const u8 => {
- out.print("pub const {}: ?[]const u8 = ", .{std.zig.fmtId(name)}) catch unreachable;
+ try out.print("pub const {}: ?[]const u8 = ", .{std.zig.fmtId(name)});
if (value) |payload| {
- out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)}) catch unreachable;
+ try out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)});
} else {
- out.writeAll("null;\n") catch unreachable;
+ try out.writeAll("null;\n");
}
return;
},
std.builtin.Version => {
- out.print(
+ try out.print(
\\pub const {}: @import("std").builtin.Version = .{{
\\ .major = {d},
\\ .minor = {d},
@@ -86,11 +88,11 @@ pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value:
value.major,
value.minor,
value.patch,
- }) catch unreachable;
+ });
return;
},
std.SemanticVersion => {
- out.print(
+ try out.print(
\\pub const {}: @import("std").SemanticVersion = .{{
\\ .major = {d},
\\ .minor = {d},
@@ -102,38 +104,38 @@ pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value:
value.major,
value.minor,
value.patch,
- }) catch unreachable;
+ });
if (value.pre) |some| {
- out.print(" .pre = \"{}\",\n", .{std.zig.fmtEscapes(some)}) catch unreachable;
+ try out.print(" .pre = \"{}\",\n", .{std.zig.fmtEscapes(some)});
}
if (value.build) |some| {
- out.print(" .build = \"{}\",\n", .{std.zig.fmtEscapes(some)}) catch unreachable;
+ try out.print(" .build = \"{}\",\n", .{std.zig.fmtEscapes(some)});
}
- out.writeAll("};\n") catch unreachable;
+ try out.writeAll("};\n");
return;
},
else => {},
}
switch (@typeInfo(T)) {
.Enum => |enum_info| {
- out.print("pub const {} = enum {{\n", .{std.zig.fmtId(@typeName(T))}) catch unreachable;
+ try out.print("pub const {} = enum {{\n", .{std.zig.fmtId(@typeName(T))});
inline for (enum_info.fields) |field| {
- out.print(" {},\n", .{std.zig.fmtId(field.name)}) catch unreachable;
+ try out.print(" {},\n", .{std.zig.fmtId(field.name)});
}
- out.writeAll("};\n") catch unreachable;
- out.print("pub const {}: {s} = {s}.{s};\n", .{
+ try out.writeAll("};\n");
+ try out.print("pub const {}: {s} = {s}.{s};\n", .{
std.zig.fmtId(name),
std.zig.fmtId(@typeName(T)),
std.zig.fmtId(@typeName(T)),
std.zig.fmtId(@tagName(value)),
- }) catch unreachable;
+ });
return;
},
else => {},
}
- out.print("pub const {}: {s} = ", .{ std.zig.fmtId(name), @typeName(T) }) catch unreachable;
- printLiteral(out, value, 0) catch unreachable;
- out.writeAll(";\n") catch unreachable;
+ try out.print("pub const {}: {s} = ", .{ std.zig.fmtId(name), @typeName(T) });
+ try printLiteral(out, value, 0);
+ try out.writeAll(";\n");
}
// TODO: non-recursive?
@@ -191,19 +193,22 @@ pub fn addOptionFileSource(
self.file_source_args.append(.{
.name = name,
.source = source.dupe(self.builder),
- }) catch unreachable;
+ }) catch @panic("OOM");
source.addStepDependencies(&self.step);
}
/// The value is the path in the cache dir.
/// Adds a dependency automatically.
-pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *LibExeObjStep) void {
- self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch unreachable;
+pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *CompileStep) void {
+ self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step);
}
-pub fn getPackage(self: *OptionsStep, package_name: []const u8) build.Pkg {
- return .{ .name = package_name, .source = self.getSource() };
+pub fn createModule(self: *OptionsStep) *std.Build.Module {
+ return self.builder.createModule(.{
+ .source_file = self.getSource(),
+ .dependencies = &.{},
+ });
}
pub fn getSource(self: *OptionsStep) FileSource {
@@ -268,7 +273,7 @@ fn hashContentsToFileName(self: *OptionsStep) [64]u8 {
const OptionArtifactArg = struct {
name: []const u8,
- artifact: *LibExeObjStep,
+ artifact: *CompileStep,
};
const OptionFileSourceArg = struct {
@@ -281,12 +286,16 @@ test "OptionsStep" {
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
- var builder = try Builder.create(
+
+ const host = try std.zig.system.NativeTargetInfo.detect(.{});
+
+ var builder = try std.Build.create(
arena.allocator(),
"test",
"test",
"test",
"test",
+ host,
);
defer builder.destroy();
@@ -361,5 +370,5 @@ test "OptionsStep" {
\\
, options.contents.items);
- _ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0));
+ _ = try std.zig.Ast.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0), .zig);
}
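Together with the `CompileStep.addOptions` change earlier in this patch, `OptionsStep` now hands its generated file to consumers as a module. A sketch of that flow, assuming `b` and `exe` exist and the step is exposed as `std.Build.OptionsStep`; the option names and values are illustrative:

```zig
const std = @import("std");

// Sketch of the OptionsStep-as-module flow; names and values are illustrative.
fn addBuildOptions(b: *std.Build, exe: *std.Build.CompileStep) void {
    const options = std.Build.OptionsStep.create(b);
    options.addOption(bool, "enable_tracing", false);
    options.addOption(u32, "max_clients", 64);
    options.addOption([]const u8, "build_flavor", "debug");

    // CompileStep.addOptions (changed earlier in this diff) now registers
    // options.createModule() under the given name, so the generated file is
    // reachable as @import("build_options").
    exe.addOptions("build_options", options);
}
```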
diff --git a/lib/std/build/RemoveDirStep.zig b/lib/std/Build/RemoveDirStep.zig
similarity index 79%
rename from lib/std/build/RemoveDirStep.zig
rename to lib/std/Build/RemoveDirStep.zig
index 959414e54f..f3b71dcec1 100644
--- a/lib/std/build/RemoveDirStep.zig
+++ b/lib/std/Build/RemoveDirStep.zig
@@ -1,18 +1,16 @@
const std = @import("../std.zig");
const log = std.log;
const fs = std.fs;
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
+const Step = std.Build.Step;
const RemoveDirStep = @This();
pub const base_id = .remove_dir;
step: Step,
-builder: *Builder,
+builder: *std.Build,
dir_path: []const u8,
-pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep {
+pub fn init(builder: *std.Build, dir_path: []const u8) RemoveDirStep {
return RemoveDirStep{
.builder = builder,
.step = Step.init(.remove_dir, builder.fmt("RemoveDir {s}", .{dir_path}), builder.allocator, make),
diff --git a/lib/std/build/RunStep.zig b/lib/std/Build/RunStep.zig
similarity index 87%
rename from lib/std/build/RunStep.zig
rename to lib/std/Build/RunStep.zig
index 5183a328cd..07f2363623 100644
--- a/lib/std/build/RunStep.zig
+++ b/lib/std/Build/RunStep.zig
@@ -1,17 +1,15 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
-const build = std.build;
-const Step = build.Step;
-const Builder = build.Builder;
-const LibExeObjStep = build.LibExeObjStep;
-const WriteFileStep = build.WriteFileStep;
+const Step = std.Build.Step;
+const CompileStep = std.Build.CompileStep;
+const WriteFileStep = std.Build.WriteFileStep;
const fs = std.fs;
const mem = std.mem;
const process = std.process;
const ArrayList = std.ArrayList;
const EnvMap = process.EnvMap;
const Allocator = mem.Allocator;
-const ExecError = build.Builder.ExecError;
+const ExecError = std.Build.ExecError;
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
@@ -20,7 +18,7 @@ const RunStep = @This();
pub const base_id: Step.Id = .run;
step: Step,
-builder: *Builder,
+builder: *std.Build,
/// See also addArg and addArgs to modify this directly
argv: ArrayList(Arg),
@@ -50,13 +48,13 @@ pub const StdIoAction = union(enum) {
};
pub const Arg = union(enum) {
- artifact: *LibExeObjStep,
- file_source: build.FileSource,
+ artifact: *CompileStep,
+ file_source: std.Build.FileSource,
bytes: []u8,
};
-pub fn create(builder: *Builder, name: []const u8) *RunStep {
- const self = builder.allocator.create(RunStep) catch unreachable;
+pub fn create(builder: *std.Build, name: []const u8) *RunStep {
+ const self = builder.allocator.create(RunStep) catch @panic("OOM");
self.* = RunStep{
.builder = builder,
.step = Step.init(base_id, name, builder.allocator, make),
@@ -68,20 +66,20 @@ pub fn create(builder: *Builder, name: []const u8) *RunStep {
return self;
}
-pub fn addArtifactArg(self: *RunStep, artifact: *LibExeObjStep) void {
- self.argv.append(Arg{ .artifact = artifact }) catch unreachable;
+pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
+ self.argv.append(Arg{ .artifact = artifact }) catch @panic("OOM");
self.step.dependOn(&artifact.step);
}
-pub fn addFileSourceArg(self: *RunStep, file_source: build.FileSource) void {
+pub fn addFileSourceArg(self: *RunStep, file_source: std.Build.FileSource) void {
self.argv.append(Arg{
.file_source = file_source.dupe(self.builder),
- }) catch unreachable;
+ }) catch @panic("OOM");
file_source.addStepDependencies(&self.step);
}
pub fn addArg(self: *RunStep, arg: []const u8) void {
- self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch unreachable;
+ self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch @panic("OOM");
}
pub fn addArgs(self: *RunStep, args: []const []const u8) void {
@@ -91,7 +89,7 @@ pub fn addArgs(self: *RunStep, args: []const []const u8) void {
}
pub fn clearEnvironment(self: *RunStep) void {
- const new_env_map = self.builder.allocator.create(EnvMap) catch unreachable;
+ const new_env_map = self.builder.allocator.create(EnvMap) catch @panic("OOM");
new_env_map.* = EnvMap.init(self.builder.allocator);
self.env_map = new_env_map;
}
@@ -101,7 +99,7 @@ pub fn addPathDir(self: *RunStep, search_path: []const u8) void {
}
/// For internal use only; users of `RunStep` should use `addPathDir` directly.
-pub fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u8) void {
+pub fn addPathDirInternal(step: *Step, builder: *std.Build, search_path: []const u8) void {
const env_map = getEnvMapInternal(step, builder.allocator);
const key = "PATH";
@@ -109,9 +107,9 @@ pub fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u
if (prev_path) |pp| {
const new_path = builder.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
- env_map.put(key, new_path) catch unreachable;
+ env_map.put(key, new_path) catch @panic("OOM");
} else {
- env_map.put(key, builder.dupePath(search_path)) catch unreachable;
+ env_map.put(key, builder.dupePath(search_path)) catch @panic("OOM");
}
}
@@ -122,12 +120,12 @@ pub fn getEnvMap(self: *RunStep) *EnvMap {
fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
const maybe_env_map = switch (step.id) {
.run => step.cast(RunStep).?.env_map,
- .emulatable_run => step.cast(build.EmulatableRunStep).?.env_map,
+ .emulatable_run => step.cast(std.Build.EmulatableRunStep).?.env_map,
else => unreachable,
};
return maybe_env_map orelse {
- const env_map = allocator.create(EnvMap) catch unreachable;
- env_map.* = process.getEnvMap(allocator) catch unreachable;
+ const env_map = allocator.create(EnvMap) catch @panic("OOM");
+ env_map.* = process.getEnvMap(allocator) catch @panic("unhandled error");
switch (step.id) {
.run => step.cast(RunStep).?.env_map = env_map,
.emulatable_run => step.cast(RunStep).?.env_map = env_map,
@@ -142,7 +140,7 @@ pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8
env_map.put(
self.builder.dupe(key),
self.builder.dupe(value),
- ) catch unreachable;
+ ) catch @panic("unhandled error");
}
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
@@ -195,7 +193,7 @@ fn make(step: *Step) !void {
pub fn runCommand(
argv: []const []const u8,
- builder: *Builder,
+ builder: *std.Build,
expected_exit_code: ?u8,
stdout_action: StdIoAction,
stderr_action: StdIoAction,
@@ -236,7 +234,7 @@ pub fn runCommand(
switch (stdout_action) {
.expect_exact, .expect_matches => {
- stdout = child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size) catch unreachable;
+ stdout = try child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
},
.inherit, .ignore => {},
}
@@ -246,7 +244,7 @@ pub fn runCommand(
switch (stderr_action) {
.expect_exact, .expect_matches => {
- stderr = child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size) catch unreachable;
+ stderr = try child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
},
.inherit, .ignore => {},
}
@@ -357,13 +355,13 @@ fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
std.debug.print("\n", .{});
}
-fn addPathForDynLibs(self: *RunStep, artifact: *LibExeObjStep) void {
+fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void {
addPathForDynLibsInternal(&self.step, self.builder, artifact);
}
/// For internal use only; this is called automatically
/// for the user.
-pub fn addPathForDynLibsInternal(step: *Step, builder: *Builder, artifact: *LibExeObjStep) void {
+pub fn addPathForDynLibsInternal(step: *Step, builder: *std.Build, artifact: *CompileStep) void {
for (artifact.link_objects.items) |link_object| {
switch (link_object) {
.other_step => |other| {
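A short usage sketch of the renamed RunStep API shown above; the addExecutable, install, and step calls are assumptions about the rest of the std.Build surface, and the option values are illustrative:

const std = @import("std");

pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = .{},
        .optimize = .Debug,
    });
    exe.install();

    // RunStep now takes *std.Build and stores *CompileStep artifacts.
    const run = std.Build.RunStep.create(b, "run demo");
    run.addArtifactArg(exe);
    run.addArg("--verbose");

    b.step("run", "Run the demo").dependOn(&run.step);
}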
diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig
new file mode 100644
index 0000000000..ff0ceb2a51
--- /dev/null
+++ b/lib/std/Build/Step.zig
@@ -0,0 +1,97 @@
+id: Id,
+name: []const u8,
+makeFn: *const fn (self: *Step) anyerror!void,
+dependencies: std.ArrayList(*Step),
+loop_flag: bool,
+done_flag: bool,
+
+pub const Id = enum {
+ top_level,
+ compile,
+ install_artifact,
+ install_file,
+ install_dir,
+ log,
+ remove_dir,
+ fmt,
+ translate_c,
+ write_file,
+ run,
+ emulatable_run,
+ check_file,
+ check_object,
+ config_header,
+ install_raw,
+ options,
+ custom,
+
+ pub fn Type(comptime id: Id) type {
+ return switch (id) {
+ .top_level => Build.TopLevelStep,
+ .compile => Build.CompileStep,
+ .install_artifact => Build.InstallArtifactStep,
+ .install_file => Build.InstallFileStep,
+ .install_dir => Build.InstallDirStep,
+ .log => Build.LogStep,
+ .remove_dir => Build.RemoveDirStep,
+ .fmt => Build.FmtStep,
+ .translate_c => Build.TranslateCStep,
+ .write_file => Build.WriteFileStep,
+ .run => Build.RunStep,
+ .emulatable_run => Build.EmulatableRunStep,
+ .check_file => Build.CheckFileStep,
+ .check_object => Build.CheckObjectStep,
+ .config_header => Build.ConfigHeaderStep,
+ .install_raw => Build.InstallRawStep,
+ .options => Build.OptionsStep,
+ .custom => @compileError("no type available for custom step"),
+ };
+ }
+};
+
+pub fn init(
+ id: Id,
+ name: []const u8,
+ allocator: Allocator,
+ makeFn: *const fn (self: *Step) anyerror!void,
+) Step {
+ return Step{
+ .id = id,
+ .name = allocator.dupe(u8, name) catch @panic("OOM"),
+ .makeFn = makeFn,
+ .dependencies = std.ArrayList(*Step).init(allocator),
+ .loop_flag = false,
+ .done_flag = false,
+ };
+}
+
+pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
+ return init(id, name, allocator, makeNoOp);
+}
+
+pub fn make(self: *Step) !void {
+ if (self.done_flag) return;
+
+ try self.makeFn(self);
+ self.done_flag = true;
+}
+
+pub fn dependOn(self: *Step, other: *Step) void {
+ self.dependencies.append(other) catch @panic("OOM");
+}
+
+fn makeNoOp(self: *Step) anyerror!void {
+ _ = self;
+}
+
+pub fn cast(step: *Step, comptime T: type) ?*T {
+ if (step.id == T.base_id) {
+ return @fieldParentPtr(T, "step", step);
+ }
+ return null;
+}
+
+const Step = @This();
+const std = @import("../std.zig");
+const Build = std.Build;
+const Allocator = std.mem.Allocator;
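Since Step.zig is a new file, here is a minimal custom-step sketch built on it; Step.init and the @fieldParentPtr pattern used by cast come from the code above, while the names, the message field, and the b.dupe call are illustrative assumptions:

const std = @import("std");
const Step = std.Build.Step;

const HelloStep = struct {
    pub const base_id = .custom;

    step: Step,
    message: []const u8,

    fn create(b: *std.Build, message: []const u8) *HelloStep {
        const self = b.allocator.create(HelloStep) catch @panic("OOM");
        self.* = .{
            // .custom steps have no Type() mapping, but init still works.
            .step = Step.init(.custom, "hello", b.allocator, make),
            .message = b.dupe(message),
        };
        return self;
    }

    fn make(step: *Step) anyerror!void {
        // Recover the parent struct from the embedded Step, as Step.cast does.
        const self = @fieldParentPtr(HelloStep, "step", step);
        std.debug.print("{s}\n", .{self.message});
    }
};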
diff --git a/lib/std/build/TranslateCStep.zig b/lib/std/Build/TranslateCStep.zig
similarity index 59%
rename from lib/std/build/TranslateCStep.zig
rename to lib/std/Build/TranslateCStep.zig
index 1f9bee463c..fb0adfd0ae 100644
--- a/lib/std/build/TranslateCStep.zig
+++ b/lib/std/Build/TranslateCStep.zig
@@ -1,9 +1,7 @@
const std = @import("../std.zig");
-const build = std.build;
-const Step = build.Step;
-const Builder = build.Builder;
-const LibExeObjStep = build.LibExeObjStep;
-const CheckFileStep = build.CheckFileStep;
+const Step = std.Build.Step;
+const CompileStep = std.Build.CompileStep;
+const CheckFileStep = std.Build.CheckFileStep;
const fs = std.fs;
const mem = std.mem;
const CrossTarget = std.zig.CrossTarget;
@@ -13,42 +11,61 @@ const TranslateCStep = @This();
pub const base_id = .translate_c;
step: Step,
-builder: *Builder,
-source: build.FileSource,
+builder: *std.Build,
+source: std.Build.FileSource,
include_dirs: std.ArrayList([]const u8),
c_macros: std.ArrayList([]const u8),
-output_dir: ?[]const u8,
out_basename: []const u8,
-target: CrossTarget = CrossTarget{},
-output_file: build.GeneratedFile,
+target: CrossTarget,
+optimize: std.builtin.OptimizeMode,
+output_file: std.Build.GeneratedFile,
-pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep {
- const self = builder.allocator.create(TranslateCStep) catch unreachable;
+pub const Options = struct {
+ source_file: std.Build.FileSource,
+ target: CrossTarget,
+ optimize: std.builtin.OptimizeMode,
+};
+
+pub fn create(builder: *std.Build, options: Options) *TranslateCStep {
+ const self = builder.allocator.create(TranslateCStep) catch @panic("OOM");
+ const source = options.source_file.dupe(builder);
self.* = TranslateCStep{
.step = Step.init(.translate_c, "translate-c", builder.allocator, make),
.builder = builder,
.source = source,
.include_dirs = std.ArrayList([]const u8).init(builder.allocator),
.c_macros = std.ArrayList([]const u8).init(builder.allocator),
- .output_dir = null,
.out_basename = undefined,
- .output_file = build.GeneratedFile{ .step = &self.step },
+ .target = options.target,
+ .optimize = options.optimize,
+ .output_file = std.Build.GeneratedFile{ .step = &self.step },
};
source.addStepDependencies(&self.step);
return self;
}
-pub fn setTarget(self: *TranslateCStep, target: CrossTarget) void {
- self.target = target;
-}
+pub const AddExecutableOptions = struct {
+ name: ?[]const u8 = null,
+ version: ?std.builtin.Version = null,
+ target: ?CrossTarget = null,
+ optimize: ?std.builtin.Mode = null,
+ linkage: ?CompileStep.Linkage = null,
+};
/// Creates a step to build an executable from the translated source.
-pub fn addExecutable(self: *TranslateCStep) *LibExeObjStep {
- return self.builder.addExecutableSource("translated_c", build.FileSource{ .generated = &self.output_file });
+pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *CompileStep {
+ return self.builder.addExecutable(.{
+ .root_source_file = .{ .generated = &self.output_file },
+ .name = options.name orelse "translated_c",
+ .version = options.version,
+ .target = options.target orelse self.target,
+ .optimize = options.optimize orelse self.optimize,
+ .linkage = options.linkage,
+ });
}
pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void {
- self.include_dirs.append(self.builder.dupePath(include_dir)) catch unreachable;
+ self.include_dirs.append(self.builder.dupePath(include_dir)) catch @panic("OOM");
}
pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep {
@@ -58,13 +75,13 @@ pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8)
/// If the value is omitted, it is set to 1.
/// `name` and `value` need not live longer than the function call.
pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void {
- const macro = build.constructCMacro(self.builder.allocator, name, value);
- self.c_macros.append(macro) catch unreachable;
+ const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
+ self.c_macros.append(macro) catch @panic("OOM");
}
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void {
- self.c_macros.append(self.builder.dupe(name_and_value)) catch unreachable;
+ self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
}
fn make(step: *Step) !void {
@@ -82,6 +99,11 @@ fn make(step: *Step) !void {
try argv_list.append(try self.target.zigTriple(self.builder.allocator));
}
+ switch (self.optimize) {
+ .Debug => {}, // Skip since it's the default.
+ else => try argv_list.append(self.builder.fmt("-O{s}", .{@tagName(self.optimize)})),
+ }
+
for (self.include_dirs.items) |include_dir| {
try argv_list.append("-I");
try argv_list.append(include_dir);
@@ -98,15 +120,10 @@ fn make(step: *Step) !void {
const output_path = mem.trimRight(u8, output_path_nl, "\r\n");
self.out_basename = fs.path.basename(output_path);
- if (self.output_dir) |output_dir| {
- const full_dest = try fs.path.join(self.builder.allocator, &[_][]const u8{ output_dir, self.out_basename });
- try self.builder.updateFile(output_path, full_dest);
- } else {
- self.output_dir = fs.path.dirname(output_path).?;
- }
+ const output_dir = fs.path.dirname(output_path).?;
- self.output_file.path = fs.path.join(
+ self.output_file.path = try fs.path.join(
self.builder.allocator,
- &[_][]const u8{ self.output_dir.?, self.out_basename },
- ) catch unreachable;
+ &[_][]const u8{ output_dir, self.out_basename },
+ );
}
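A sketch of driving the reworked TranslateCStep; the create call, Options fields, addIncludeDir, defineCMacro, and addExecutable mirror the code above, while the hard-coded target/optimize values and the final install call are placeholders:

const std = @import("std");
const TranslateCStep = std.Build.TranslateCStep;

pub fn build(b: *std.Build) void {
    const tc = TranslateCStep.create(b, .{
        .source_file = .{ .path = "src/wrapper.h" },
        .target = .{},
        .optimize = .ReleaseSafe,
    });
    tc.addIncludeDir("vendor/include");
    tc.defineCMacro("USE_FOO", null); // omitted value expands to 1

    // The produced executable inherits the step's target/optimize unless overridden.
    const exe = tc.addExecutable(.{ .name = "wrapper-demo" });
    exe.install();
}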
diff --git a/lib/std/build/WriteFileStep.zig b/lib/std/Build/WriteFileStep.zig
similarity index 78%
rename from lib/std/build/WriteFileStep.zig
rename to lib/std/Build/WriteFileStep.zig
index 4faae8f74e..3cd447e4b8 100644
--- a/lib/std/build/WriteFileStep.zig
+++ b/lib/std/Build/WriteFileStep.zig
@@ -1,7 +1,5 @@
const std = @import("../std.zig");
-const build = @import("../build.zig");
-const Step = build.Step;
-const Builder = build.Builder;
+const Step = std.Build.Step;
const fs = std.fs;
const ArrayList = std.ArrayList;
@@ -10,30 +8,28 @@ const WriteFileStep = @This();
pub const base_id = .write_file;
step: Step,
-builder: *Builder,
-output_dir: []const u8,
+builder: *std.Build,
files: std.TailQueue(File),
pub const File = struct {
- source: build.GeneratedFile,
+ source: std.Build.GeneratedFile,
basename: []const u8,
bytes: []const u8,
};
-pub fn init(builder: *Builder) WriteFileStep {
+pub fn init(builder: *std.Build) WriteFileStep {
return WriteFileStep{
.builder = builder,
.step = Step.init(.write_file, "writefile", builder.allocator, make),
.files = .{},
- .output_dir = undefined,
};
}
pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void {
- const node = self.builder.allocator.create(std.TailQueue(File).Node) catch unreachable;
+ const node = self.builder.allocator.create(std.TailQueue(File).Node) catch @panic("OOM");
node.* = .{
.data = .{
- .source = build.GeneratedFile{ .step = &self.step },
+ .source = std.Build.GeneratedFile{ .step = &self.step },
.basename = self.builder.dupePath(basename),
.bytes = self.builder.dupe(bytes),
},
@@ -43,11 +39,11 @@ pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void {
}
/// Gets a file source for the given basename. If the file does not exist, returns `null`.
-pub fn getFileSource(step: *WriteFileStep, basename: []const u8) ?build.FileSource {
+pub fn getFileSource(step: *WriteFileStep, basename: []const u8) ?std.Build.FileSource {
var it = step.files.first;
while (it) |node| : (it = node.next) {
if (std.mem.eql(u8, node.data.basename, basename))
- return build.FileSource{ .generated = &node.data.source };
+ return std.Build.FileSource{ .generated = &node.data.source };
}
return null;
}
@@ -89,11 +85,11 @@ fn make(step: *Step) !void {
.{std.fmt.fmtSliceHexLower(&digest)},
) catch unreachable;
- self.output_dir = try fs.path.join(self.builder.allocator, &[_][]const u8{
+ const output_dir = try fs.path.join(self.builder.allocator, &[_][]const u8{
self.builder.cache_root, "o", &hash_basename,
});
- var dir = fs.cwd().makeOpenPath(self.output_dir, .{}) catch |err| {
- std.debug.print("unable to make path {s}: {s}\n", .{ self.output_dir, @errorName(err) });
+ var dir = fs.cwd().makeOpenPath(output_dir, .{}) catch |err| {
+ std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
return err;
};
defer dir.close();
@@ -103,15 +99,15 @@ fn make(step: *Step) !void {
dir.writeFile(node.data.basename, node.data.bytes) catch |err| {
std.debug.print("unable to write {s} into {s}: {s}\n", .{
node.data.basename,
- self.output_dir,
+ output_dir,
@errorName(err),
});
return err;
};
- node.data.source.path = fs.path.join(
+ node.data.source.path = try fs.path.join(
self.builder.allocator,
- &[_][]const u8{ self.output_dir, node.data.basename },
- ) catch unreachable;
+ &[_][]const u8{ output_dir, node.data.basename },
+ );
}
}
}
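A sketch of the generated-file flow now that WriteFileStep no longer exposes output_dir; the addWriteFile and addExecutable wrappers are assumed to keep their current shapes on std.Build:

const std = @import("std");

pub fn build(b: *std.Build) void {
    const wf = b.addWriteFile("generated.zig", "pub const answer: u32 = 42;\n");

    // Consumers reach the output through a GeneratedFile-backed FileSource;
    // the concrete cache path is only known once the step has run.
    const generated: std.Build.FileSource = wf.getFileSource("generated.zig").?;

    const exe = b.addExecutable(.{
        .name = "uses-generated",
        .root_source_file = generated,
        .target = .{},
        .optimize = .Debug,
    });
    exe.install();
}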
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index cf04a54116..57821d1b51 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -1145,7 +1145,8 @@ pub fn ArrayHashMapUnmanaged(
}
/// Create a copy of the hash map which can be modified separately.
- /// The copy uses the same context and allocator as this instance.
+ /// The copy uses the same context as this instance, but is allocated
+ /// with the provided allocator.
pub fn clone(self: Self, allocator: Allocator) !Self {
if (@sizeOf(ByIndexContext) != 0)
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
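To illustrate the clarified doc comment, a small test-style sketch of cloning an unmanaged map into a different allocator:

const std = @import("std");

test "clone into a different allocator" {
    const gpa = std.testing.allocator;
    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    var map = std.AutoArrayHashMapUnmanaged(u32, []const u8){};
    defer map.deinit(gpa);
    try map.put(gpa, 1, "one");

    // Same hashing context, but the copy's memory comes from the arena.
    const copy = try map.clone(arena);
    try std.testing.expectEqualStrings("one", copy.get(1).?);
    // No copy.deinit needed: the arena owns and frees its allocations.
}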
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index b6e78b07bd..2485668417 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -482,14 +482,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Return the last element from the list.
/// Asserts the list has at least one item.
- pub fn getLast(self: *Self) T {
+ pub fn getLast(self: Self) T {
const val = self.items[self.items.len - 1];
return val;
}
/// Return the last element from the list, or
/// return `null` if list is empty.
- pub fn getLastOrNull(self: *Self) ?T {
+ pub fn getLastOrNull(self: Self) ?T {
if (self.items.len == 0) return null;
return self.getLast();
}
@@ -961,14 +961,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Return the last element from the list.
/// Asserts the list has at least one item.
- pub fn getLast(self: *Self) T {
+ pub fn getLast(self: Self) T {
const val = self.items[self.items.len - 1];
return val;
}
/// Return the last element from the list, or
/// return `null` if list is empty.
- pub fn getLastOrNull(self: *Self) ?T {
+ pub fn getLastOrNull(self: Self) ?T {
if (self.items.len == 0) return null;
return self.getLast();
}
@@ -1719,3 +1719,27 @@ test "std.ArrayList(?u32).popOrNull()" {
try testing.expect(list.popOrNull().? == null);
try testing.expect(list.popOrNull() == null);
}
+
+test "std.ArrayList(u32).getLast()" {
+ const a = testing.allocator;
+
+ var list = ArrayList(u32).init(a);
+ defer list.deinit();
+
+ try list.append(2);
+ const const_list = list;
+ try testing.expectEqual(const_list.getLast(), 2);
+}
+
+test "std.ArrayList(u32).getLastOrNull()" {
+ const a = testing.allocator;
+
+ var list = ArrayList(u32).init(a);
+ defer list.deinit();
+
+ try testing.expectEqual(list.getLastOrNull(), null);
+
+ try list.append(2);
+ const const_list = list;
+ try testing.expectEqual(const_list.getLastOrNull().?, 2);
+}
diff --git a/lib/std/build.zig b/lib/std/build.zig
deleted file mode 100644
index 8137b76846..0000000000
--- a/lib/std/build.zig
+++ /dev/null
@@ -1,1781 +0,0 @@
-const std = @import("std.zig");
-const builtin = @import("builtin");
-const io = std.io;
-const fs = std.fs;
-const mem = std.mem;
-const debug = std.debug;
-const panic = std.debug.panic;
-const assert = debug.assert;
-const log = std.log;
-const ArrayList = std.ArrayList;
-const StringHashMap = std.StringHashMap;
-const Allocator = mem.Allocator;
-const process = std.process;
-const EnvMap = std.process.EnvMap;
-const fmt_lib = std.fmt;
-const File = std.fs.File;
-const CrossTarget = std.zig.CrossTarget;
-const NativeTargetInfo = std.zig.system.NativeTargetInfo;
-const Sha256 = std.crypto.hash.sha2.Sha256;
-const ThisModule = @This();
-
-pub const CheckFileStep = @import("build/CheckFileStep.zig");
-pub const CheckObjectStep = @import("build/CheckObjectStep.zig");
-pub const ConfigHeaderStep = @import("build/ConfigHeaderStep.zig");
-pub const EmulatableRunStep = @import("build/EmulatableRunStep.zig");
-pub const FmtStep = @import("build/FmtStep.zig");
-pub const InstallArtifactStep = @import("build/InstallArtifactStep.zig");
-pub const InstallDirStep = @import("build/InstallDirStep.zig");
-pub const InstallFileStep = @import("build/InstallFileStep.zig");
-pub const InstallRawStep = @import("build/InstallRawStep.zig");
-pub const LibExeObjStep = @import("build/LibExeObjStep.zig");
-pub const LogStep = @import("build/LogStep.zig");
-pub const OptionsStep = @import("build/OptionsStep.zig");
-pub const RemoveDirStep = @import("build/RemoveDirStep.zig");
-pub const RunStep = @import("build/RunStep.zig");
-pub const TranslateCStep = @import("build/TranslateCStep.zig");
-pub const WriteFileStep = @import("build/WriteFileStep.zig");
-
-pub const Builder = struct {
- install_tls: TopLevelStep,
- uninstall_tls: TopLevelStep,
- allocator: Allocator,
- user_input_options: UserInputOptionsMap,
- available_options_map: AvailableOptionsMap,
- available_options_list: ArrayList(AvailableOption),
- verbose: bool,
- verbose_link: bool,
- verbose_cc: bool,
- verbose_air: bool,
- verbose_llvm_ir: bool,
- verbose_cimport: bool,
- verbose_llvm_cpu_features: bool,
- /// The purpose of executing the command is for a human to read compile errors from the terminal
- prominent_compile_errors: bool,
- color: enum { auto, on, off } = .auto,
- reference_trace: ?u32 = null,
- invalid_user_input: bool,
- zig_exe: []const u8,
- default_step: *Step,
- env_map: *EnvMap,
- top_level_steps: ArrayList(*TopLevelStep),
- install_prefix: []const u8,
- dest_dir: ?[]const u8,
- lib_dir: []const u8,
- exe_dir: []const u8,
- h_dir: []const u8,
- install_path: []const u8,
- sysroot: ?[]const u8 = null,
- search_prefixes: ArrayList([]const u8),
- libc_file: ?[]const u8 = null,
- installed_files: ArrayList(InstalledFile),
- /// Path to the directory containing build.zig.
- build_root: []const u8,
- cache_root: []const u8,
- global_cache_root: []const u8,
- release_mode: ?std.builtin.Mode,
- is_release: bool,
- /// zig lib dir
- override_lib_dir: ?[]const u8,
- vcpkg_root: VcpkgRoot = .unattempted,
- pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null,
- args: ?[][]const u8 = null,
- debug_log_scopes: []const []const u8 = &.{},
- debug_compile_errors: bool = false,
-
- /// Experimental. Use system Darling installation to run cross compiled macOS build artifacts.
- enable_darling: bool = false,
- /// Use system QEMU installation to run cross compiled foreign architecture build artifacts.
- enable_qemu: bool = false,
- /// Darwin. Use Rosetta to run x86_64 macOS build artifacts on arm64 macOS.
- enable_rosetta: bool = false,
- /// Use system Wasmtime installation to run cross compiled wasm/wasi build artifacts.
- enable_wasmtime: bool = false,
- /// Use system Wine installation to run cross compiled Windows build artifacts.
- enable_wine: bool = false,
- /// After following the steps in https://github.com/ziglang/zig/wiki/Updating-libc#glibc,
- /// this will be the directory $glibc-build-dir/install/glibcs
- /// Given the example of the aarch64 target, this is the directory
- /// that contains the path `aarch64-linux-gnu/lib/ld-linux-aarch64.so.1`.
- glibc_runtimes_dir: ?[]const u8 = null,
-
- /// Information about the native target. Computed before build() is invoked.
- host: NativeTargetInfo,
-
- dep_prefix: []const u8 = "",
-
- pub const ExecError = error{
- ReadFailure,
- ExitCodeFailure,
- ProcessTerminated,
- ExecNotSupported,
- } || std.ChildProcess.SpawnError;
-
- pub const PkgConfigError = error{
- PkgConfigCrashed,
- PkgConfigFailed,
- PkgConfigNotInstalled,
- PkgConfigInvalidOutput,
- };
-
- pub const PkgConfigPkg = struct {
- name: []const u8,
- desc: []const u8,
- };
-
- pub const CStd = enum {
- C89,
- C99,
- C11,
- };
-
- const UserInputOptionsMap = StringHashMap(UserInputOption);
- const AvailableOptionsMap = StringHashMap(AvailableOption);
-
- const AvailableOption = struct {
- name: []const u8,
- type_id: TypeId,
- description: []const u8,
- /// If the `type_id` is `enum` this provides the list of enum options
- enum_options: ?[]const []const u8,
- };
-
- const UserInputOption = struct {
- name: []const u8,
- value: UserValue,
- used: bool,
- };
-
- const UserValue = union(enum) {
- flag: void,
- scalar: []const u8,
- list: ArrayList([]const u8),
- };
-
- const TypeId = enum {
- bool,
- int,
- float,
- @"enum",
- string,
- list,
- };
-
- const TopLevelStep = struct {
- pub const base_id = .top_level;
-
- step: Step,
- description: []const u8,
- };
-
- pub const DirList = struct {
- lib_dir: ?[]const u8 = null,
- exe_dir: ?[]const u8 = null,
- include_dir: ?[]const u8 = null,
- };
-
- pub fn create(
- allocator: Allocator,
- zig_exe: []const u8,
- build_root: []const u8,
- cache_root: []const u8,
- global_cache_root: []const u8,
- ) !*Builder {
- const env_map = try allocator.create(EnvMap);
- env_map.* = try process.getEnvMap(allocator);
-
- const host = try NativeTargetInfo.detect(.{});
-
- const self = try allocator.create(Builder);
- self.* = Builder{
- .zig_exe = zig_exe,
- .build_root = build_root,
- .cache_root = try fs.path.relative(allocator, build_root, cache_root),
- .global_cache_root = global_cache_root,
- .verbose = false,
- .verbose_link = false,
- .verbose_cc = false,
- .verbose_air = false,
- .verbose_llvm_ir = false,
- .verbose_cimport = false,
- .verbose_llvm_cpu_features = false,
- .prominent_compile_errors = false,
- .invalid_user_input = false,
- .allocator = allocator,
- .user_input_options = UserInputOptionsMap.init(allocator),
- .available_options_map = AvailableOptionsMap.init(allocator),
- .available_options_list = ArrayList(AvailableOption).init(allocator),
- .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
- .default_step = undefined,
- .env_map = env_map,
- .search_prefixes = ArrayList([]const u8).init(allocator),
- .install_prefix = undefined,
- .lib_dir = undefined,
- .exe_dir = undefined,
- .h_dir = undefined,
- .dest_dir = env_map.get("DESTDIR"),
- .installed_files = ArrayList(InstalledFile).init(allocator),
- .install_tls = TopLevelStep{
- .step = Step.initNoOp(.top_level, "install", allocator),
- .description = "Copy build artifacts to prefix path",
- },
- .uninstall_tls = TopLevelStep{
- .step = Step.init(.top_level, "uninstall", allocator, makeUninstall),
- .description = "Remove build artifacts from prefix path",
- },
- .release_mode = null,
- .is_release = false,
- .override_lib_dir = null,
- .install_path = undefined,
- .args = null,
- .host = host,
- };
- try self.top_level_steps.append(&self.install_tls);
- try self.top_level_steps.append(&self.uninstall_tls);
- self.default_step = &self.install_tls.step;
- return self;
- }
-
- fn createChild(
- parent: *Builder,
- dep_name: []const u8,
- build_root: []const u8,
- args: anytype,
- ) !*Builder {
- const child = try createChildOnly(parent, dep_name, build_root);
- try applyArgs(child, args);
- return child;
- }
-
- fn createChildOnly(parent: *Builder, dep_name: []const u8, build_root: []const u8) !*Builder {
- const allocator = parent.allocator;
- const child = try allocator.create(Builder);
- child.* = .{
- .allocator = allocator,
- .install_tls = .{
- .step = Step.initNoOp(.top_level, "install", allocator),
- .description = "Copy build artifacts to prefix path",
- },
- .uninstall_tls = .{
- .step = Step.init(.top_level, "uninstall", allocator, makeUninstall),
- .description = "Remove build artifacts from prefix path",
- },
- .user_input_options = UserInputOptionsMap.init(allocator),
- .available_options_map = AvailableOptionsMap.init(allocator),
- .available_options_list = ArrayList(AvailableOption).init(allocator),
- .verbose = parent.verbose,
- .verbose_link = parent.verbose_link,
- .verbose_cc = parent.verbose_cc,
- .verbose_air = parent.verbose_air,
- .verbose_llvm_ir = parent.verbose_llvm_ir,
- .verbose_cimport = parent.verbose_cimport,
- .verbose_llvm_cpu_features = parent.verbose_llvm_cpu_features,
- .prominent_compile_errors = parent.prominent_compile_errors,
- .color = parent.color,
- .reference_trace = parent.reference_trace,
- .invalid_user_input = false,
- .zig_exe = parent.zig_exe,
- .default_step = undefined,
- .env_map = parent.env_map,
- .top_level_steps = ArrayList(*TopLevelStep).init(allocator),
- .install_prefix = undefined,
- .dest_dir = parent.dest_dir,
- .lib_dir = parent.lib_dir,
- .exe_dir = parent.exe_dir,
- .h_dir = parent.h_dir,
- .install_path = parent.install_path,
- .sysroot = parent.sysroot,
- .search_prefixes = ArrayList([]const u8).init(allocator),
- .libc_file = parent.libc_file,
- .installed_files = ArrayList(InstalledFile).init(allocator),
- .build_root = build_root,
- .cache_root = parent.cache_root,
- .global_cache_root = parent.global_cache_root,
- .release_mode = parent.release_mode,
- .is_release = parent.is_release,
- .override_lib_dir = parent.override_lib_dir,
- .debug_log_scopes = parent.debug_log_scopes,
- .debug_compile_errors = parent.debug_compile_errors,
- .enable_darling = parent.enable_darling,
- .enable_qemu = parent.enable_qemu,
- .enable_rosetta = parent.enable_rosetta,
- .enable_wasmtime = parent.enable_wasmtime,
- .enable_wine = parent.enable_wine,
- .glibc_runtimes_dir = parent.glibc_runtimes_dir,
- .host = parent.host,
- .dep_prefix = parent.fmt("{s}{s}.", .{ parent.dep_prefix, dep_name }),
- };
- try child.top_level_steps.append(&child.install_tls);
- try child.top_level_steps.append(&child.uninstall_tls);
- child.default_step = &child.install_tls.step;
- return child;
- }
-
- fn applyArgs(b: *Builder, args: anytype) !void {
- // TODO this function is the way that a build.zig file communicates
- // options to its dependencies. It is the programmatic way to give
- // command line arguments to a build.zig script.
- _ = args;
- const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
- // Random bytes to make unique. Refresh this with new random bytes when
- // implementation is modified in a non-backwards-compatible way.
- var hash = Hasher.init("ZaEsvQ5ClaA2IdH9");
- hash.update(b.dep_prefix);
- // TODO additionally update the hash with `args`.
-
- var digest: [16]u8 = undefined;
- hash.final(&digest);
- var hash_basename: [digest.len * 2]u8 = undefined;
- _ = std.fmt.bufPrint(&hash_basename, "{s}", .{std.fmt.fmtSliceHexLower(&digest)}) catch
- unreachable;
-
- const install_prefix = b.pathJoin(&.{ b.cache_root, "i", &hash_basename });
- b.resolveInstallPrefix(install_prefix, .{});
- }
-
- pub fn destroy(self: *Builder) void {
- self.env_map.deinit();
- self.top_level_steps.deinit();
- self.allocator.destroy(self);
- }
-
- /// This function is intended to be called by lib/build_runner.zig, not a build.zig file.
- pub fn resolveInstallPrefix(self: *Builder, install_prefix: ?[]const u8, dir_list: DirList) void {
- if (self.dest_dir) |dest_dir| {
- self.install_prefix = install_prefix orelse "/usr";
- self.install_path = self.pathJoin(&.{ dest_dir, self.install_prefix });
- } else {
- self.install_prefix = install_prefix orelse
- (self.pathJoin(&.{ self.build_root, "zig-out" }));
- self.install_path = self.install_prefix;
- }
-
- var lib_list = [_][]const u8{ self.install_path, "lib" };
- var exe_list = [_][]const u8{ self.install_path, "bin" };
- var h_list = [_][]const u8{ self.install_path, "include" };
-
- if (dir_list.lib_dir) |dir| {
- if (std.fs.path.isAbsolute(dir)) lib_list[0] = self.dest_dir orelse "";
- lib_list[1] = dir;
- }
-
- if (dir_list.exe_dir) |dir| {
- if (std.fs.path.isAbsolute(dir)) exe_list[0] = self.dest_dir orelse "";
- exe_list[1] = dir;
- }
-
- if (dir_list.include_dir) |dir| {
- if (std.fs.path.isAbsolute(dir)) h_list[0] = self.dest_dir orelse "";
- h_list[1] = dir;
- }
-
- self.lib_dir = self.pathJoin(&lib_list);
- self.exe_dir = self.pathJoin(&exe_list);
- self.h_dir = self.pathJoin(&h_list);
- }
-
- fn convertOptionalPathToFileSource(path: ?[]const u8) ?FileSource {
- return if (path) |p|
- FileSource{ .path = p }
- else
- null;
- }
-
- pub fn addExecutable(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
- return addExecutableSource(self, name, convertOptionalPathToFileSource(root_src));
- }
-
- pub fn addExecutableSource(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep {
- return LibExeObjStep.createExecutable(builder, name, root_src);
- }
-
- pub fn addOptions(self: *Builder) *OptionsStep {
- return OptionsStep.create(self);
- }
-
- pub fn addObject(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
- return addObjectSource(self, name, convertOptionalPathToFileSource(root_src));
- }
-
- pub fn addObjectSource(builder: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep {
- return LibExeObjStep.createObject(builder, name, root_src);
- }
-
- pub fn addSharedLibrary(
- self: *Builder,
- name: []const u8,
- root_src: ?[]const u8,
- kind: LibExeObjStep.SharedLibKind,
- ) *LibExeObjStep {
- return addSharedLibrarySource(self, name, convertOptionalPathToFileSource(root_src), kind);
- }
-
- pub fn addSharedLibrarySource(
- self: *Builder,
- name: []const u8,
- root_src: ?FileSource,
- kind: LibExeObjStep.SharedLibKind,
- ) *LibExeObjStep {
- return LibExeObjStep.createSharedLibrary(self, name, root_src, kind);
- }
-
- pub fn addStaticLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
- return addStaticLibrarySource(self, name, convertOptionalPathToFileSource(root_src));
- }
-
- pub fn addStaticLibrarySource(self: *Builder, name: []const u8, root_src: ?FileSource) *LibExeObjStep {
- return LibExeObjStep.createStaticLibrary(self, name, root_src);
- }
-
- pub fn addTest(self: *Builder, root_src: []const u8) *LibExeObjStep {
- return LibExeObjStep.createTest(self, "test", .{ .path = root_src });
- }
-
- pub fn addTestSource(self: *Builder, root_src: FileSource) *LibExeObjStep {
- return LibExeObjStep.createTest(self, "test", root_src.dupe(self));
- }
-
- pub fn addTestExe(self: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
- return LibExeObjStep.createTestExe(self, name, .{ .path = root_src });
- }
-
- pub fn addTestExeSource(self: *Builder, name: []const u8, root_src: FileSource) *LibExeObjStep {
- return LibExeObjStep.createTestExe(self, name, root_src.dupe(self));
- }
-
- pub fn addAssemble(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
- return addAssembleSource(self, name, .{ .path = src });
- }
-
- pub fn addAssembleSource(self: *Builder, name: []const u8, src: FileSource) *LibExeObjStep {
- const obj_step = LibExeObjStep.createObject(self, name, null);
- obj_step.addAssemblyFileSource(src.dupe(self));
- return obj_step;
- }
-
- /// Initializes a RunStep with argv, which must at least have the path to the
- /// executable. More command line arguments can be added with `addArg`,
- /// `addArgs`, and `addArtifactArg`.
- /// Be careful using this function, as it introduces a system dependency.
- /// To run an executable built with zig build, see `LibExeObjStep.run`.
- pub fn addSystemCommand(self: *Builder, argv: []const []const u8) *RunStep {
- assert(argv.len >= 1);
- const run_step = RunStep.create(self, self.fmt("run {s}", .{argv[0]}));
- run_step.addArgs(argv);
- return run_step;
- }
-
- pub fn addConfigHeader(
- b: *Builder,
- source: FileSource,
- style: ConfigHeaderStep.Style,
- values: anytype,
- ) *ConfigHeaderStep {
- const config_header_step = ConfigHeaderStep.create(b, source, style);
- config_header_step.addValues(values);
- return config_header_step;
- }
-
- /// Allocator.dupe without the need to handle out of memory.
- pub fn dupe(self: *Builder, bytes: []const u8) []u8 {
- return self.allocator.dupe(u8, bytes) catch unreachable;
- }
-
- /// Duplicates an array of strings without the need to handle out of memory.
- pub fn dupeStrings(self: *Builder, strings: []const []const u8) [][]u8 {
- const array = self.allocator.alloc([]u8, strings.len) catch unreachable;
- for (strings) |s, i| {
- array[i] = self.dupe(s);
- }
- return array;
- }
-
- /// Duplicates a path and converts all slashes to the OS's canonical path separator.
- pub fn dupePath(self: *Builder, bytes: []const u8) []u8 {
- const the_copy = self.dupe(bytes);
- for (the_copy) |*byte| {
- switch (byte.*) {
- '/', '\\' => byte.* = fs.path.sep,
- else => {},
- }
- }
- return the_copy;
- }
-
- /// Duplicates a package recursively.
- pub fn dupePkg(self: *Builder, package: Pkg) Pkg {
- var the_copy = Pkg{
- .name = self.dupe(package.name),
- .source = package.source.dupe(self),
- };
-
- if (package.dependencies) |dependencies| {
- const new_dependencies = self.allocator.alloc(Pkg, dependencies.len) catch unreachable;
- the_copy.dependencies = new_dependencies;
-
- for (dependencies) |dep_package, i| {
- new_dependencies[i] = self.dupePkg(dep_package);
- }
- }
- return the_copy;
- }
-
- pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
- const write_file_step = self.addWriteFiles();
- write_file_step.add(file_path, data);
- return write_file_step;
- }
-
- pub fn addWriteFiles(self: *Builder) *WriteFileStep {
- const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
- write_file_step.* = WriteFileStep.init(self);
- return write_file_step;
- }
-
- pub fn addLog(self: *Builder, comptime format: []const u8, args: anytype) *LogStep {
- const data = self.fmt(format, args);
- const log_step = self.allocator.create(LogStep) catch unreachable;
- log_step.* = LogStep.init(self, data);
- return log_step;
- }
-
- pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
- const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
- remove_dir_step.* = RemoveDirStep.init(self, dir_path);
- return remove_dir_step;
- }
-
- pub fn addFmt(self: *Builder, paths: []const []const u8) *FmtStep {
- return FmtStep.create(self, paths);
- }
-
- pub fn addTranslateC(self: *Builder, source: FileSource) *TranslateCStep {
- return TranslateCStep.create(self, source.dupe(self));
- }
-
- pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) LibExeObjStep.SharedLibKind {
- _ = self;
- return .{
- .versioned = .{
- .major = major,
- .minor = minor,
- .patch = patch,
- },
- };
- }
-
- pub fn make(self: *Builder, step_names: []const []const u8) !void {
- try self.makePath(self.cache_root);
-
- var wanted_steps = ArrayList(*Step).init(self.allocator);
- defer wanted_steps.deinit();
-
- if (step_names.len == 0) {
- try wanted_steps.append(self.default_step);
- } else {
- for (step_names) |step_name| {
- const s = try self.getTopLevelStepByName(step_name);
- try wanted_steps.append(s);
- }
- }
-
- for (wanted_steps.items) |s| {
- try self.makeOneStep(s);
- }
- }
-
- pub fn getInstallStep(self: *Builder) *Step {
- return &self.install_tls.step;
- }
-
- pub fn getUninstallStep(self: *Builder) *Step {
- return &self.uninstall_tls.step;
- }
-
- fn makeUninstall(uninstall_step: *Step) anyerror!void {
- const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
- const self = @fieldParentPtr(Builder, "uninstall_tls", uninstall_tls);
-
- for (self.installed_files.items) |installed_file| {
- const full_path = self.getInstallPath(installed_file.dir, installed_file.path);
- if (self.verbose) {
- log.info("rm {s}", .{full_path});
- }
- fs.cwd().deleteTree(full_path) catch {};
- }
-
- // TODO remove empty directories
- }
-
- fn makeOneStep(self: *Builder, s: *Step) anyerror!void {
- if (s.loop_flag) {
- log.err("Dependency loop detected:\n {s}", .{s.name});
- return error.DependencyLoopDetected;
- }
- s.loop_flag = true;
-
- for (s.dependencies.items) |dep| {
- self.makeOneStep(dep) catch |err| {
- if (err == error.DependencyLoopDetected) {
- log.err(" {s}", .{s.name});
- }
- return err;
- };
- }
-
- s.loop_flag = false;
-
- try s.make();
- }
-
- fn getTopLevelStepByName(self: *Builder, name: []const u8) !*Step {
- for (self.top_level_steps.items) |top_level_step| {
- if (mem.eql(u8, top_level_step.step.name, name)) {
- return &top_level_step.step;
- }
- }
- log.err("Cannot run step '{s}' because it does not exist", .{name});
- return error.InvalidStepName;
- }
-
- pub fn option(self: *Builder, comptime T: type, name_raw: []const u8, description_raw: []const u8) ?T {
- const name = self.dupe(name_raw);
- const description = self.dupe(description_raw);
- const type_id = comptime typeToEnum(T);
- const enum_options = if (type_id == .@"enum") blk: {
- const fields = comptime std.meta.fields(T);
- var options = ArrayList([]const u8).initCapacity(self.allocator, fields.len) catch unreachable;
-
- inline for (fields) |field| {
- options.appendAssumeCapacity(field.name);
- }
-
- break :blk options.toOwnedSlice() catch unreachable;
- } else null;
- const available_option = AvailableOption{
- .name = name,
- .type_id = type_id,
- .description = description,
- .enum_options = enum_options,
- };
- if ((self.available_options_map.fetchPut(name, available_option) catch unreachable) != null) {
- panic("Option '{s}' declared twice", .{name});
- }
- self.available_options_list.append(available_option) catch unreachable;
-
- const option_ptr = self.user_input_options.getPtr(name) orelse return null;
- option_ptr.used = true;
- switch (type_id) {
- .bool => switch (option_ptr.value) {
- .flag => return true,
- .scalar => |s| {
- if (mem.eql(u8, s, "true")) {
- return true;
- } else if (mem.eql(u8, s, "false")) {
- return false;
- } else {
- log.err("Expected -D{s} to be a boolean, but received '{s}'\n", .{ name, s });
- self.markInvalidUserInput();
- return null;
- }
- },
- .list => {
- log.err("Expected -D{s} to be a boolean, but received a list.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- },
- .int => switch (option_ptr.value) {
- .flag => {
- log.err("Expected -D{s} to be an integer, but received a boolean.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- .scalar => |s| {
- const n = std.fmt.parseInt(T, s, 10) catch |err| switch (err) {
- error.Overflow => {
- log.err("-D{s} value {s} cannot fit into type {s}.\n", .{ name, s, @typeName(T) });
- self.markInvalidUserInput();
- return null;
- },
- else => {
- log.err("Expected -D{s} to be an integer of type {s}.\n", .{ name, @typeName(T) });
- self.markInvalidUserInput();
- return null;
- },
- };
- return n;
- },
- .list => {
- log.err("Expected -D{s} to be an integer, but received a list.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- },
- .float => switch (option_ptr.value) {
- .flag => {
- log.err("Expected -D{s} to be a float, but received a boolean.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- .scalar => |s| {
- const n = std.fmt.parseFloat(T, s) catch {
- log.err("Expected -D{s} to be a float of type {s}.\n", .{ name, @typeName(T) });
- self.markInvalidUserInput();
- return null;
- };
- return n;
- },
- .list => {
- log.err("Expected -D{s} to be a float, but received a list.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- },
- .@"enum" => switch (option_ptr.value) {
- .flag => {
- log.err("Expected -D{s} to be a string, but received a boolean.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- .scalar => |s| {
- if (std.meta.stringToEnum(T, s)) |enum_lit| {
- return enum_lit;
- } else {
- log.err("Expected -D{s} to be of type {s}.\n", .{ name, @typeName(T) });
- self.markInvalidUserInput();
- return null;
- }
- },
- .list => {
- log.err("Expected -D{s} to be a string, but received a list.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- },
- .string => switch (option_ptr.value) {
- .flag => {
- log.err("Expected -D{s} to be a string, but received a boolean.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- .list => {
- log.err("Expected -D{s} to be a string, but received a list.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- .scalar => |s| return s,
- },
- .list => switch (option_ptr.value) {
- .flag => {
- log.err("Expected -D{s} to be a list, but received a boolean.\n", .{name});
- self.markInvalidUserInput();
- return null;
- },
- .scalar => |s| {
- return self.allocator.dupe([]const u8, &[_][]const u8{s}) catch unreachable;
- },
- .list => |lst| return lst.items,
- },
- }
- }
-
- pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
- const step_info = self.allocator.create(TopLevelStep) catch unreachable;
- step_info.* = TopLevelStep{
- .step = Step.initNoOp(.top_level, name, self.allocator),
- .description = self.dupe(description),
- };
- self.top_level_steps.append(step_info) catch unreachable;
- return &step_info.step;
- }
-
- /// This provides the -Drelease option to the build user and does not give them the choice.
- pub fn setPreferredReleaseMode(self: *Builder, mode: std.builtin.Mode) void {
- if (self.release_mode != null) {
- @panic("setPreferredReleaseMode must be called before standardReleaseOptions and may not be called twice");
- }
- const description = self.fmt("Create a release build ({s})", .{@tagName(mode)});
- self.is_release = self.option(bool, "release", description) orelse false;
- self.release_mode = if (self.is_release) mode else std.builtin.Mode.Debug;
- }
-
- /// If you call this without first calling `setPreferredReleaseMode` then it gives the build user
- /// the choice of what kind of release.
- pub fn standardReleaseOptions(self: *Builder) std.builtin.Mode {
- if (self.release_mode) |mode| return mode;
-
- const release_safe = self.option(bool, "release-safe", "Optimizations on and safety on") orelse false;
- const release_fast = self.option(bool, "release-fast", "Optimizations on and safety off") orelse false;
- const release_small = self.option(bool, "release-small", "Size optimizations on and safety off") orelse false;
-
- const mode = if (release_safe and !release_fast and !release_small)
- std.builtin.Mode.ReleaseSafe
- else if (release_fast and !release_safe and !release_small)
- std.builtin.Mode.ReleaseFast
- else if (release_small and !release_fast and !release_safe)
- std.builtin.Mode.ReleaseSmall
- else if (!release_fast and !release_safe and !release_small)
- std.builtin.Mode.Debug
- else x: {
- log.err("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)\n", .{});
- self.markInvalidUserInput();
- break :x std.builtin.Mode.Debug;
- };
- self.is_release = mode != .Debug;
- self.release_mode = mode;
- return mode;
- }
-
- pub const StandardTargetOptionsArgs = struct {
- whitelist: ?[]const CrossTarget = null,
-
- default_target: CrossTarget = CrossTarget{},
- };
-
- /// Exposes standard `zig build` options for choosing a target.
- pub fn standardTargetOptions(self: *Builder, args: StandardTargetOptionsArgs) CrossTarget {
- const maybe_triple = self.option(
- []const u8,
- "target",
- "The CPU architecture, OS, and ABI to build for",
- );
- const mcpu = self.option([]const u8, "cpu", "Target CPU features to add or subtract");
-
- if (maybe_triple == null and mcpu == null) {
- return args.default_target;
- }
-
- const triple = maybe_triple orelse "native";
-
- var diags: CrossTarget.ParseOptions.Diagnostics = .{};
- const selected_target = CrossTarget.parse(.{
- .arch_os_abi = triple,
- .cpu_features = mcpu,
- .diagnostics = &diags,
- }) catch |err| switch (err) {
- error.UnknownCpuModel => {
- log.err("Unknown CPU: '{s}'\nAvailable CPUs for architecture '{s}':", .{
- diags.cpu_name.?,
- @tagName(diags.arch.?),
- });
- for (diags.arch.?.allCpuModels()) |cpu| {
- log.err(" {s}", .{cpu.name});
- }
- self.markInvalidUserInput();
- return args.default_target;
- },
- error.UnknownCpuFeature => {
- log.err(
- \\Unknown CPU feature: '{s}'
- \\Available CPU features for architecture '{s}':
- \\
- , .{
- diags.unknown_feature_name.?,
- @tagName(diags.arch.?),
- });
- for (diags.arch.?.allFeaturesList()) |feature| {
- log.err(" {s}: {s}", .{ feature.name, feature.description });
- }
- self.markInvalidUserInput();
- return args.default_target;
- },
- error.UnknownOperatingSystem => {
- log.err(
- \\Unknown OS: '{s}'
- \\Available operating systems:
- \\
- , .{diags.os_name.?});
- inline for (std.meta.fields(std.Target.Os.Tag)) |field| {
- log.err(" {s}", .{field.name});
- }
- self.markInvalidUserInput();
- return args.default_target;
- },
- else => |e| {
- log.err("Unable to parse target '{s}': {s}\n", .{ triple, @errorName(e) });
- self.markInvalidUserInput();
- return args.default_target;
- },
- };
-
- const selected_canonicalized_triple = selected_target.zigTriple(self.allocator) catch unreachable;
-
- if (args.whitelist) |list| whitelist_check: {
- // Make sure it's a match of one of the list.
- var mismatch_triple = true;
- var mismatch_cpu_features = true;
- var whitelist_item = CrossTarget{};
- for (list) |t| {
- mismatch_cpu_features = true;
- mismatch_triple = true;
-
- const t_triple = t.zigTriple(self.allocator) catch unreachable;
- if (mem.eql(u8, t_triple, selected_canonicalized_triple)) {
- mismatch_triple = false;
- whitelist_item = t;
- if (t.getCpuFeatures().isSuperSetOf(selected_target.getCpuFeatures())) {
- mismatch_cpu_features = false;
- break :whitelist_check;
- } else {
- break;
- }
- }
- }
- if (mismatch_triple) {
- log.err("Chosen target '{s}' does not match one of the supported targets:", .{
- selected_canonicalized_triple,
- });
- for (list) |t| {
- const t_triple = t.zigTriple(self.allocator) catch unreachable;
- log.err(" {s}", .{t_triple});
- }
- } else {
- assert(mismatch_cpu_features);
- const whitelist_cpu = whitelist_item.getCpu();
- const selected_cpu = selected_target.getCpu();
- log.err("Chosen CPU model '{s}' does not match one of the supported targets:", .{
- selected_cpu.model.name,
- });
- log.err(" Supported feature Set: ", .{});
- const all_features = whitelist_cpu.arch.allFeaturesList();
- var populated_cpu_features = whitelist_cpu.model.features;
- populated_cpu_features.populateDependencies(all_features);
- for (all_features) |feature, i_usize| {
- const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
- const in_cpu_set = populated_cpu_features.isEnabled(i);
- if (in_cpu_set) {
- log.err("{s} ", .{feature.name});
- }
- }
- log.err(" Remove: ", .{});
- for (all_features) |feature, i_usize| {
- const i = @intCast(std.Target.Cpu.Feature.Set.Index, i_usize);
- const in_cpu_set = populated_cpu_features.isEnabled(i);
- const in_actual_set = selected_cpu.features.isEnabled(i);
- if (in_actual_set and !in_cpu_set) {
- log.err("{s} ", .{feature.name});
- }
- }
- }
- self.markInvalidUserInput();
- return args.default_target;
- }
-
- return selected_target;
- }
-
- pub fn addUserInputOption(self: *Builder, name_raw: []const u8, value_raw: []const u8) !bool {
- const name = self.dupe(name_raw);
- const value = self.dupe(value_raw);
- const gop = try self.user_input_options.getOrPut(name);
- if (!gop.found_existing) {
- gop.value_ptr.* = UserInputOption{
- .name = name,
- .value = .{ .scalar = value },
- .used = false,
- };
- return false;
- }
-
- // option already exists
- switch (gop.value_ptr.value) {
- .scalar => |s| {
- // turn it into a list
- var list = ArrayList([]const u8).init(self.allocator);
- list.append(s) catch unreachable;
- list.append(value) catch unreachable;
- self.user_input_options.put(name, .{
- .name = name,
- .value = .{ .list = list },
- .used = false,
- }) catch unreachable;
- },
- .list => |*list| {
- // append to the list
- list.append(value) catch unreachable;
- self.user_input_options.put(name, .{
- .name = name,
- .value = .{ .list = list.* },
- .used = false,
- }) catch unreachable;
- },
- .flag => {
- log.warn("Option '-D{s}={s}' conflicts with flag '-D{s}'.", .{ name, value, name });
- return true;
- },
- }
- return false;
- }
-
- pub fn addUserInputFlag(self: *Builder, name_raw: []const u8) !bool {
- const name = self.dupe(name_raw);
- const gop = try self.user_input_options.getOrPut(name);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{
- .name = name,
- .value = .{ .flag = {} },
- .used = false,
- };
- return false;
- }
-
- // option already exists
- switch (gop.value_ptr.value) {
- .scalar => |s| {
- log.err("Flag '-D{s}' conflicts with option '-D{s}={s}'.", .{ name, name, s });
- return true;
- },
- .list => {
- log.err("Flag '-D{s}' conflicts with multiple options of the same name.", .{name});
- return true;
- },
- .flag => {},
- }
- return false;
- }
-
- fn typeToEnum(comptime T: type) TypeId {
- return switch (@typeInfo(T)) {
- .Int => .int,
- .Float => .float,
- .Bool => .bool,
- .Enum => .@"enum",
- else => switch (T) {
- []const u8 => .string,
- []const []const u8 => .list,
- else => @compileError("Unsupported type: " ++ @typeName(T)),
- },
- };
- }
-
- fn markInvalidUserInput(self: *Builder) void {
- self.invalid_user_input = true;
- }
-
- pub fn validateUserInputDidItFail(self: *Builder) bool {
- // make sure all args are used
- var it = self.user_input_options.iterator();
- while (it.next()) |entry| {
- if (!entry.value_ptr.used) {
- log.err("Invalid option: -D{s}\n", .{entry.key_ptr.*});
- self.markInvalidUserInput();
- }
- }
-
- return self.invalid_user_input;
- }
-
- pub fn spawnChild(self: *Builder, argv: []const []const u8) !void {
- return self.spawnChildEnvMap(null, self.env_map, argv);
- }
-
- fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
- if (cwd) |yes_cwd| std.debug.print("cd {s} && ", .{yes_cwd});
- for (argv) |arg| {
- std.debug.print("{s} ", .{arg});
- }
- std.debug.print("\n", .{});
- }
-
- pub fn spawnChildEnvMap(self: *Builder, cwd: ?[]const u8, env_map: *const EnvMap, argv: []const []const u8) !void {
- if (self.verbose) {
- printCmd(cwd, argv);
- }
-
- if (!std.process.can_spawn)
- return error.ExecNotSupported;
-
- var child = std.ChildProcess.init(argv, self.allocator);
- child.cwd = cwd;
- child.env_map = env_map;
-
- const term = child.spawnAndWait() catch |err| {
- log.err("Unable to spawn {s}: {s}", .{ argv[0], @errorName(err) });
- return err;
- };
-
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- log.err("The following command exited with error code {}:", .{code});
- printCmd(cwd, argv);
- return error.UncleanExit;
- }
- },
- else => {
- log.err("The following command terminated unexpectedly:", .{});
- printCmd(cwd, argv);
-
- return error.UncleanExit;
- },
- }
- }
-
- pub fn makePath(self: *Builder, path: []const u8) !void {
- fs.cwd().makePath(self.pathFromRoot(path)) catch |err| {
- log.err("Unable to create path {s}: {s}", .{ path, @errorName(err) });
- return err;
- };
- }
-
- pub fn installArtifact(self: *Builder, artifact: *LibExeObjStep) void {
- self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step);
- }
-
- pub fn addInstallArtifact(self: *Builder, artifact: *LibExeObjStep) *InstallArtifactStep {
- return InstallArtifactStep.create(self, artifact);
- }
-
- ///`dest_rel_path` is relative to prefix path
- pub fn installFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void {
- self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .prefix, dest_rel_path).step);
- }
-
- pub fn installDirectory(self: *Builder, options: InstallDirectoryOptions) void {
- self.getInstallStep().dependOn(&self.addInstallDirectory(options).step);
- }
-
- ///`dest_rel_path` is relative to bin path
- pub fn installBinFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void {
- self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .bin, dest_rel_path).step);
- }
-
- ///`dest_rel_path` is relative to lib path
- pub fn installLibFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void {
- self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .lib, dest_rel_path).step);
- }
-
- /// Output format (BIN vs Intel HEX) determined by filename
- pub fn installRaw(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
- const raw = self.addInstallRaw(artifact, dest_filename, options);
- self.getInstallStep().dependOn(&raw.step);
- return raw;
- }
-
- ///`dest_rel_path` is relative to install prefix path
- pub fn addInstallFile(self: *Builder, source: FileSource, dest_rel_path: []const u8) *InstallFileStep {
- return self.addInstallFileWithDir(source.dupe(self), .prefix, dest_rel_path);
- }
-
- ///`dest_rel_path` is relative to bin path
- pub fn addInstallBinFile(self: *Builder, source: FileSource, dest_rel_path: []const u8) *InstallFileStep {
- return self.addInstallFileWithDir(source.dupe(self), .bin, dest_rel_path);
- }
-
- ///`dest_rel_path` is relative to lib path
- pub fn addInstallLibFile(self: *Builder, source: FileSource, dest_rel_path: []const u8) *InstallFileStep {
- return self.addInstallFileWithDir(source.dupe(self), .lib, dest_rel_path);
- }
-
- pub fn addInstallHeaderFile(b: *Builder, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep {
- return b.addInstallFileWithDir(.{ .path = src_path }, .header, dest_rel_path);
- }
-
- pub fn addInstallRaw(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
- return InstallRawStep.create(self, artifact, dest_filename, options);
- }
-
- pub fn addInstallFileWithDir(
- self: *Builder,
- source: FileSource,
- install_dir: InstallDir,
- dest_rel_path: []const u8,
- ) *InstallFileStep {
- if (dest_rel_path.len == 0) {
- panic("dest_rel_path must be non-empty", .{});
- }
- const install_step = self.allocator.create(InstallFileStep) catch unreachable;
- install_step.* = InstallFileStep.init(self, source.dupe(self), install_dir, dest_rel_path);
- return install_step;
- }
-
- pub fn addInstallDirectory(self: *Builder, options: InstallDirectoryOptions) *InstallDirStep {
- const install_step = self.allocator.create(InstallDirStep) catch unreachable;
- install_step.* = InstallDirStep.init(self, options);
- return install_step;
- }
-
- pub fn pushInstalledFile(self: *Builder, dir: InstallDir, dest_rel_path: []const u8) void {
- const file = InstalledFile{
- .dir = dir,
- .path = dest_rel_path,
- };
- self.installed_files.append(file.dupe(self)) catch unreachable;
- }
-
- pub fn updateFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
- if (self.verbose) {
- log.info("cp {s} {s} ", .{ source_path, dest_path });
- }
- const cwd = fs.cwd();
- const prev_status = try fs.Dir.updateFile(cwd, source_path, cwd, dest_path, .{});
- if (self.verbose) switch (prev_status) {
- .stale => log.info("# installed", .{}),
- .fresh => log.info("# up-to-date", .{}),
- };
- }
-
- pub fn truncateFile(self: *Builder, dest_path: []const u8) !void {
- if (self.verbose) {
- log.info("truncate {s}", .{dest_path});
- }
- const cwd = fs.cwd();
- var src_file = cwd.createFile(dest_path, .{}) catch |err| switch (err) {
- error.FileNotFound => blk: {
- if (fs.path.dirname(dest_path)) |dirname| {
- try cwd.makePath(dirname);
- }
- break :blk try cwd.createFile(dest_path, .{});
- },
- else => |e| return e,
- };
- src_file.close();
- }
-
- pub fn pathFromRoot(self: *Builder, rel_path: []const u8) []u8 {
- return fs.path.resolve(self.allocator, &[_][]const u8{ self.build_root, rel_path }) catch unreachable;
- }
-
- /// Shorthand for `std.fs.path.join(builder.allocator, paths) catch unreachable`
- pub fn pathJoin(self: *Builder, paths: []const []const u8) []u8 {
- return fs.path.join(self.allocator, paths) catch unreachable;
- }
-
- pub fn fmt(self: *Builder, comptime format: []const u8, args: anytype) []u8 {
- return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable;
- }
-
- pub fn findProgram(self: *Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
- // TODO report error for ambiguous situations
- const exe_extension = @as(CrossTarget, .{}).exeFileExt();
- for (self.search_prefixes.items) |search_prefix| {
- for (names) |name| {
- if (fs.path.isAbsolute(name)) {
- return name;
- }
- const full_path = self.pathJoin(&.{
- search_prefix,
- "bin",
- self.fmt("{s}{s}", .{ name, exe_extension }),
- });
- return fs.realpathAlloc(self.allocator, full_path) catch continue;
- }
- }
- if (self.env_map.get("PATH")) |PATH| {
- for (names) |name| {
- if (fs.path.isAbsolute(name)) {
- return name;
- }
- var it = mem.tokenize(u8, PATH, &[_]u8{fs.path.delimiter});
- while (it.next()) |path| {
- const full_path = self.pathJoin(&.{
- path,
- self.fmt("{s}{s}", .{ name, exe_extension }),
- });
- return fs.realpathAlloc(self.allocator, full_path) catch continue;
- }
- }
- }
- for (names) |name| {
- if (fs.path.isAbsolute(name)) {
- return name;
- }
- for (paths) |path| {
- const full_path = self.pathJoin(&.{
- path,
- self.fmt("{s}{s}", .{ name, exe_extension }),
- });
- return fs.realpathAlloc(self.allocator, full_path) catch continue;
- }
- }
- return error.FileNotFound;
- }
-
- pub fn execAllowFail(
- self: *Builder,
- argv: []const []const u8,
- out_code: *u8,
- stderr_behavior: std.ChildProcess.StdIo,
- ) ExecError![]u8 {
- assert(argv.len != 0);
-
- if (!std.process.can_spawn)
- return error.ExecNotSupported;
-
- const max_output_size = 400 * 1024;
- var child = std.ChildProcess.init(argv, self.allocator);
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Pipe;
- child.stderr_behavior = stderr_behavior;
- child.env_map = self.env_map;
-
- try child.spawn();
-
- const stdout = child.stdout.?.reader().readAllAlloc(self.allocator, max_output_size) catch {
- return error.ReadFailure;
- };
- errdefer self.allocator.free(stdout);
-
- const term = try child.wait();
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- out_code.* = @truncate(u8, code);
- return error.ExitCodeFailure;
- }
- return stdout;
- },
- .Signal, .Stopped, .Unknown => |code| {
- out_code.* = @truncate(u8, code);
- return error.ProcessTerminated;
- },
- }
- }
-
- pub fn execFromStep(self: *Builder, argv: []const []const u8, src_step: ?*Step) ![]u8 {
- assert(argv.len != 0);
-
- if (self.verbose) {
- printCmd(null, argv);
- }
-
- if (!std.process.can_spawn) {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("Unable to spawn the following command: cannot spawn child process", .{});
- printCmd(null, argv);
- std.os.abort();
- }
-
- var code: u8 = undefined;
- return self.execAllowFail(argv, &code, .Inherit) catch |err| switch (err) {
- error.ExecNotSupported => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("Unable to spawn the following command: cannot spawn child process", .{});
- printCmd(null, argv);
- std.os.abort();
- },
- error.FileNotFound => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("Unable to spawn the following command: file not found", .{});
- printCmd(null, argv);
- std.os.exit(@truncate(u8, code));
- },
- error.ExitCodeFailure => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- if (self.prominent_compile_errors) {
- log.err("The step exited with error code {d}", .{code});
- } else {
- log.err("The following command exited with error code {d}:", .{code});
- printCmd(null, argv);
- }
-
- std.os.exit(@truncate(u8, code));
- },
- error.ProcessTerminated => {
- if (src_step) |s| log.err("{s}...", .{s.name});
- log.err("The following command terminated unexpectedly:", .{});
- printCmd(null, argv);
- std.os.exit(@truncate(u8, code));
- },
- else => |e| return e,
- };
- }
-
- pub fn exec(self: *Builder, argv: []const []const u8) ![]u8 {
- return self.execFromStep(argv, null);
- }
-
- pub fn addSearchPrefix(self: *Builder, search_prefix: []const u8) void {
- self.search_prefixes.append(self.dupePath(search_prefix)) catch unreachable;
- }
-
- pub fn getInstallPath(self: *Builder, dir: InstallDir, dest_rel_path: []const u8) []const u8 {
- assert(!fs.path.isAbsolute(dest_rel_path)); // Install paths must be relative to the prefix
- const base_dir = switch (dir) {
- .prefix => self.install_path,
- .bin => self.exe_dir,
- .lib => self.lib_dir,
- .header => self.h_dir,
- .custom => |path| self.pathJoin(&.{ self.install_path, path }),
- };
- return fs.path.resolve(
- self.allocator,
- &[_][]const u8{ base_dir, dest_rel_path },
- ) catch unreachable;
- }
-
- pub const Dependency = struct {
- builder: *Builder,
-
- pub fn artifact(d: *Dependency, name: []const u8) *LibExeObjStep {
- var found: ?*LibExeObjStep = null;
- for (d.builder.install_tls.step.dependencies.items) |dep_step| {
- const inst = dep_step.cast(InstallArtifactStep) orelse continue;
- if (mem.eql(u8, inst.artifact.name, name)) {
- if (found != null) panic("artifact name '{s}' is ambiguous", .{name});
- found = inst.artifact;
- }
- }
- return found orelse {
- for (d.builder.install_tls.step.dependencies.items) |dep_step| {
- const inst = dep_step.cast(InstallArtifactStep) orelse continue;
- log.info("available artifact: '{s}'", .{inst.artifact.name});
- }
- panic("unable to find artifact '{s}'", .{name});
- };
- }
- };
-
- pub fn dependency(b: *Builder, name: []const u8, args: anytype) *Dependency {
- const build_runner = @import("root");
- const deps = build_runner.dependencies;
-
- inline for (@typeInfo(deps.imports).Struct.decls) |decl| {
- if (mem.startsWith(u8, decl.name, b.dep_prefix) and
- mem.endsWith(u8, decl.name, name) and
- decl.name.len == b.dep_prefix.len + name.len)
- {
- const build_zig = @field(deps.imports, decl.name);
- const build_root = @field(deps.build_root, decl.name);
- return dependencyInner(b, name, build_root, build_zig, args);
- }
- }
-
- const full_path = b.pathFromRoot("build.zig.ini");
- std.debug.print("no dependency named '{s}' in '{s}'\n", .{ name, full_path });
- std.process.exit(1);
- }
-
- fn dependencyInner(
- b: *Builder,
- name: []const u8,
- build_root: []const u8,
- comptime build_zig: type,
- args: anytype,
- ) *Dependency {
- const sub_builder = b.createChild(name, build_root, args) catch unreachable;
- sub_builder.runBuild(build_zig) catch unreachable;
- const dep = b.allocator.create(Dependency) catch unreachable;
- dep.* = .{ .builder = sub_builder };
- return dep;
- }
-
- pub fn runBuild(b: *Builder, build_zig: anytype) anyerror!void {
- switch (@typeInfo(@typeInfo(@TypeOf(build_zig.build)).Fn.return_type.?)) {
- .Void => build_zig.build(b),
- .ErrorUnion => try build_zig.build(b),
- else => @compileError("expected return type of build to be 'void' or '!void'"),
- }
- }
-};
-
-test "builder.findProgram compiles" {
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
- defer arena.deinit();
-
- const builder = try Builder.create(
- arena.allocator(),
- "zig",
- "zig-cache",
- "zig-cache",
- "zig-cache",
- );
- defer builder.destroy();
- _ = builder.findProgram(&[_][]const u8{}, &[_][]const u8{}) catch null;
-}
-
-pub const Pkg = struct {
- name: []const u8,
- source: FileSource,
- dependencies: ?[]const Pkg = null,
-};
-
-/// A file that is generated by a build step.
-/// This struct is an interface that is meant to be used with `@fieldParentPtr` to implement the actual path logic.
-pub const GeneratedFile = struct {
- /// The step that generates the file
- step: *Step,
-
- /// The path to the generated file. Must be either absolute or relative to the build root.
- /// This value must be set in the `fn make()` of the `step` and must not be `null` afterwards.
- path: ?[]const u8 = null,
-
- pub fn getPath(self: GeneratedFile) []const u8 {
- return self.path orelse std.debug.panic(
- "getPath() was called on a GeneratedFile that wasn't build yet. Is there a missing Step dependency on step '{s}'?",
- .{self.step.name},
- );
- }
-};
-
-/// A file source is a reference to an existing or future file.
-///
-pub const FileSource = union(enum) {
- /// A plain file path, relative to build root or absolute.
- path: []const u8,
-
- /// A file that is generated by an interface. Those files usually are
- /// not available until built by a build step.
- generated: *const GeneratedFile,
-
- /// Returns a new file source that will have a relative path to the build root guaranteed.
- /// This should be preferred over setting `.path` directly as it documents that the files are in the project directory.
- pub fn relative(path: []const u8) FileSource {
- std.debug.assert(!std.fs.path.isAbsolute(path));
- return FileSource{ .path = path };
- }
-
- /// Returns a string that can be shown to represent the file source.
- /// Either returns the path or `"generated"`.
- pub fn getDisplayName(self: FileSource) []const u8 {
- return switch (self) {
- .path => self.path,
- .generated => "generated",
- };
- }
-
- /// Adds dependencies this file source implies to the given step.
- pub fn addStepDependencies(self: FileSource, step: *Step) void {
- switch (self) {
- .path => {},
- .generated => |gen| step.dependOn(gen.step),
- }
- }
-
- /// Should only be called during make(), returns a path relative to the build root or absolute.
- pub fn getPath(self: FileSource, builder: *Builder) []const u8 {
- const path = switch (self) {
- .path => |p| builder.pathFromRoot(p),
- .generated => |gen| gen.getPath(),
- };
- return path;
- }
-
- /// Duplicates the file source for a given builder.
- pub fn dupe(self: FileSource, b: *Builder) FileSource {
- return switch (self) {
- .path => |p| .{ .path = b.dupePath(p) },
- .generated => |gen| .{ .generated = gen },
- };
- }
-};
-
-/// Allocates a new string for assigning a value to a named macro.
-/// If the value is omitted, it is set to 1.
-/// `name` and `value` need not live longer than the function call.
-pub fn constructCMacro(allocator: Allocator, name: []const u8, value: ?[]const u8) []const u8 {
- var macro = allocator.alloc(
- u8,
- name.len + if (value) |value_slice| value_slice.len + 1 else 0,
- ) catch |err| if (err == error.OutOfMemory) @panic("Out of memory") else unreachable;
- mem.copy(u8, macro, name);
- if (value) |value_slice| {
- macro[name.len] = '=';
- mem.copy(u8, macro[name.len + 1 ..], value_slice);
- }
- return macro;
-}
-
-/// deprecated: use `InstallDirStep.Options`
-pub const InstallDirectoryOptions = InstallDirStep.Options;
-
-pub const Step = struct {
- id: Id,
- name: []const u8,
- makeFn: MakeFn,
- dependencies: ArrayList(*Step),
- loop_flag: bool,
- done_flag: bool,
-
- const MakeFn = *const fn (self: *Step) anyerror!void;
-
- pub const Id = enum {
- top_level,
- lib_exe_obj,
- install_artifact,
- install_file,
- install_dir,
- log,
- remove_dir,
- fmt,
- translate_c,
- write_file,
- run,
- emulatable_run,
- check_file,
- check_object,
- config_header,
- install_raw,
- options,
- custom,
-
- pub fn Type(comptime id: Id) type {
- return switch (id) {
- .top_level => Builder.TopLevelStep,
- .lib_exe_obj => LibExeObjStep,
- .install_artifact => InstallArtifactStep,
- .install_file => InstallFileStep,
- .install_dir => InstallDirStep,
- .log => LogStep,
- .remove_dir => RemoveDirStep,
- .fmt => FmtStep,
- .translate_c => TranslateCStep,
- .write_file => WriteFileStep,
- .run => RunStep,
- .emulatable_run => EmulatableRunStep,
- .check_file => CheckFileStep,
- .check_object => CheckObjectStep,
- .config_header => ConfigHeaderStep,
- .install_raw => InstallRawStep,
- .options => OptionsStep,
- .custom => @compileError("no type available for custom step"),
- };
- }
- };
-
- pub fn init(id: Id, name: []const u8, allocator: Allocator, makeFn: MakeFn) Step {
- return Step{
- .id = id,
- .name = allocator.dupe(u8, name) catch unreachable,
- .makeFn = makeFn,
- .dependencies = ArrayList(*Step).init(allocator),
- .loop_flag = false,
- .done_flag = false,
- };
- }
- pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
- return init(id, name, allocator, makeNoOp);
- }
-
- pub fn make(self: *Step) !void {
- if (self.done_flag) return;
-
- try self.makeFn(self);
- self.done_flag = true;
- }
-
- pub fn dependOn(self: *Step, other: *Step) void {
- self.dependencies.append(other) catch unreachable;
- }
-
- fn makeNoOp(self: *Step) anyerror!void {
- _ = self;
- }
-
- pub fn cast(step: *Step, comptime T: type) ?*T {
- if (step.id == T.base_id) {
- return @fieldParentPtr(T, "step", step);
- }
- return null;
- }
-};
-
-pub const VcpkgRoot = union(VcpkgRootStatus) {
- unattempted: void,
- not_found: void,
- found: []const u8,
-};
-
-pub const VcpkgRootStatus = enum {
- unattempted,
- not_found,
- found,
-};
-
-pub const InstallDir = union(enum) {
- prefix: void,
- lib: void,
- bin: void,
- header: void,
- /// A path relative to the prefix
- custom: []const u8,
-
- /// Duplicates the install directory including the path if set to custom.
- pub fn dupe(self: InstallDir, builder: *Builder) InstallDir {
- if (self == .custom) {
- // Written with this temporary to avoid RLS problems
- const duped_path = builder.dupe(self.custom);
- return .{ .custom = duped_path };
- } else {
- return self;
- }
- }
-};
-
-pub const InstalledFile = struct {
- dir: InstallDir,
- path: []const u8,
-
- /// Duplicates the installed file path and directory.
- pub fn dupe(self: InstalledFile, builder: *Builder) InstalledFile {
- return .{
- .dir = self.dir.dupe(builder),
- .path = builder.dupe(self.path),
- };
- }
-};
-
-test "dupePkg()" {
- if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
- var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
- defer arena.deinit();
- var builder = try Builder.create(
- arena.allocator(),
- "test",
- "test",
- "test",
- "test",
- );
- defer builder.destroy();
-
- var pkg_dep = Pkg{
- .name = "pkg_dep",
- .source = .{ .path = "/not/a/pkg_dep.zig" },
- };
- var pkg_top = Pkg{
- .name = "pkg_top",
- .source = .{ .path = "/not/a/pkg_top.zig" },
- .dependencies = &[_]Pkg{pkg_dep},
- };
- const dupe = builder.dupePkg(pkg_top);
-
- const original_deps = pkg_top.dependencies.?;
- const dupe_deps = dupe.dependencies.?;
-
- // probably the same top level package details
- try std.testing.expectEqualStrings(pkg_top.name, dupe.name);
-
- // probably the same dependencies
- try std.testing.expectEqual(original_deps.len, dupe_deps.len);
- try std.testing.expectEqual(original_deps[0].name, pkg_dep.name);
-
- // could segfault otherwise if pointers in duplicated package's fields are
- // the same as those in stack allocated package's fields
- try std.testing.expect(dupe_deps.ptr != original_deps.ptr);
- try std.testing.expect(dupe.name.ptr != pkg_top.name.ptr);
- try std.testing.expect(dupe.source.path.ptr != pkg_top.source.path.ptr);
- try std.testing.expect(dupe_deps[0].name.ptr != pkg_dep.name.ptr);
- try std.testing.expect(dupe_deps[0].source.path.ptr != pkg_dep.source.path.ptr);
-}
-
-test {
- _ = CheckFileStep;
- _ = CheckObjectStep;
- _ = EmulatableRunStep;
- _ = FmtStep;
- _ = InstallArtifactStep;
- _ = InstallDirStep;
- _ = InstallFileStep;
- _ = InstallRawStep;
- _ = LibExeObjStep;
- _ = LogStep;
- _ = OptionsStep;
- _ = RemoveDirStep;
- _ = RunStep;
- _ = TranslateCStep;
- _ = WriteFileStep;
-}
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 4d949946d8..74c61d229b 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -131,13 +131,16 @@ pub const CodeModel = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
-pub const Mode = enum {
+pub const OptimizeMode = enum {
Debug,
ReleaseSafe,
ReleaseFast,
ReleaseSmall,
};
+/// Deprecated; use OptimizeMode.
+pub const Mode = OptimizeMode;
+
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallingConvention = enum {
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 4a816c8318..21d7b4fe3e 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -1164,7 +1164,7 @@ fn windowsCreateProcessPathExt(
var app_name_unicode_string = windows.UNICODE_STRING{
.Length = app_name_len_bytes,
.MaximumLength = app_name_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(app_name_wildcard.ptr)),
+ .Buffer = @qualCast([*:0]u16, app_name_wildcard.ptr),
};
const rc = windows.ntdll.NtQueryDirectoryFile(
dir.fd,
@@ -1261,7 +1261,7 @@ fn windowsCreateProcessPathExt(
var app_name_unicode_string = windows.UNICODE_STRING{
.Length = app_name_len_bytes,
.MaximumLength = app_name_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(app_name_appended.ptr)),
+ .Buffer = @qualCast([*:0]u16, app_name_appended.ptr),
};
// Re-use the directory handle but this time we call with the appended app name
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 94b25c79a1..f2ee24a9e3 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -1,11 +1,12 @@
const std = @import("std.zig");
+const builtin = @import("builtin");
+
const io = std.io;
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const unicode = std.unicode;
const meta = std.meta;
-const builtin = @import("builtin");
const errol = @import("fmt/errol.zig");
const lossyCast = std.math.lossyCast;
const expectFmt = std.testing.expectFmt;
@@ -190,7 +191,7 @@ pub fn format(
.precision = precision,
},
writer,
- default_max_depth,
+ std.options.fmt_max_depth,
);
}
@@ -2140,15 +2141,15 @@ test "buffer" {
{
var buf1: [32]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf1);
- try formatType(1234, "", FormatOptions{}, fbs.writer(), default_max_depth);
+ try formatType(1234, "", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1234"));
fbs.reset();
- try formatType('a', "c", FormatOptions{}, fbs.writer(), default_max_depth);
+ try formatType('a', "c", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "a"));
fbs.reset();
- try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), default_max_depth);
+ try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1100"));
}
}
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 244f3a38ce..2300ad044a 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -1763,7 +1763,7 @@ pub const Dir = struct {
var nt_name = w.UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
+ .Buffer = @qualCast([*:0]u16, sub_path_w),
};
var attr = w.OBJECT_ATTRIBUTES{
.Length = @sizeOf(w.OBJECT_ATTRIBUTES),
diff --git a/lib/std/fs/file.zig b/lib/std/fs/file.zig
index a6ecc37d92..1ba4bc18fd 100644
--- a/lib/std/fs/file.zig
+++ b/lib/std/fs/file.zig
@@ -179,7 +179,7 @@ pub const File = struct {
lock_nonblocking: bool = false,
/// For POSIX systems this is the file system mode the file will
- /// be created with.
+ /// be created with. On other systems this is always 0.
mode: Mode = default_mode,
/// Setting this to `.blocking` prevents `O.NONBLOCK` from being passed even
@@ -307,6 +307,7 @@ pub const File = struct {
/// is unique to each filesystem.
inode: INode,
size: u64,
+ /// This is available on POSIX systems and is always 0 otherwise.
mode: Mode,
kind: Kind,
diff --git a/lib/std/math/big.zig b/lib/std/math/big.zig
index c7fc0b17f5..c0d8e74eb2 100644
--- a/lib/std/math/big.zig
+++ b/lib/std/math/big.zig
@@ -13,7 +13,6 @@ pub const Log2Limb = std.math.Log2Int(Limb);
comptime {
assert(std.math.floorPowerOfTwo(usize, limb_info.bits) == limb_info.bits);
- assert(limb_info.bits <= 64); // u128 set is unsupported
assert(limb_info.signedness == .unsigned);
}
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index d222d6913b..21f5015c6c 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -30,7 +30,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
}
const w_value = std.math.absCast(scalar);
- return @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1;
+ return @intCast(usize, @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1);
}
pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize {
@@ -238,10 +238,7 @@ pub const Mutable = struct {
var i: usize = 0;
while (true) : (i += 1) {
self.limbs[i] = @truncate(Limb, w_value);
-
- // TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
- w_value >>= limb_bits / 2;
- w_value >>= limb_bits / 2;
+ w_value >>= limb_bits;
if (w_value == 0) break;
}
@@ -258,9 +255,7 @@ pub const Mutable = struct {
comptime var i = 0;
inline while (true) : (i += 1) {
self.limbs[i] = w_value & mask;
-
- w_value >>= limb_bits / 2;
- w_value >>= limb_bits / 2;
+ w_value >>= limb_bits;
if (w_value == 0) break;
}
diff --git a/lib/std/meta.zig b/lib/std/meta.zig
index c7ec4b1702..7ab4c9f25c 100644
--- a/lib/std/meta.zig
+++ b/lib/std/meta.zig
@@ -332,7 +332,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
@compileError("Unable to derive a sentinel pointer type from " ++ @typeName(T));
}
-const assumeSentinel = @compileError("This function has been removed, consider using std.mem.sliceTo() or if needed a @ptrCast()");
+pub const assumeSentinel = @compileError("This function has been removed, consider using std.mem.sliceTo() or if needed a @ptrCast()");
pub fn containerLayout(comptime T: type) Type.ContainerLayout {
return switch (@typeInfo(T)) {
diff --git a/lib/std/os.zig b/lib/std/os.zig
index 32463aa30e..3cee30c32d 100644
--- a/lib/std/os.zig
+++ b/lib/std/os.zig
@@ -4513,7 +4513,7 @@ pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32
var nt_name = windows.UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
+ .Buffer = @qualCast([*:0]u16, sub_path_w),
};
var attr = windows.OBJECT_ATTRIBUTES{
.Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index d9d5fb3204..5d6c9f5cc9 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -40,6 +40,7 @@ const arch_bits = switch (native_arch) {
.riscv64 => @import("linux/riscv64.zig"),
.sparc64 => @import("linux/sparc64.zig"),
.mips, .mipsel => @import("linux/mips.zig"),
+ .mips64, .mips64el => @import("linux/mips64.zig"),
.powerpc => @import("linux/powerpc.zig"),
.powerpc64, .powerpc64le => @import("linux/powerpc64.zig"),
else => struct {},
@@ -101,6 +102,7 @@ pub const SYS = switch (@import("builtin").cpu.arch) {
.riscv64 => syscalls.RiscV64,
.sparc64 => syscalls.Sparc64,
.mips, .mipsel => syscalls.Mips,
+ .mips64, .mips64el => syscalls.Mips64,
.powerpc => syscalls.PowerPC,
.powerpc64, .powerpc64le => syscalls.PowerPC64,
else => @compileError("The Zig Standard Library is missing syscall definitions for the target CPU architecture"),
diff --git a/lib/std/os/linux/mips64.zig b/lib/std/os/linux/mips64.zig
new file mode 100644
index 0000000000..dfc1c9b576
--- /dev/null
+++ b/lib/std/os/linux/mips64.zig
@@ -0,0 +1,413 @@
+const std = @import("../../std.zig");
+const maxInt = std.math.maxInt;
+const linux = std.os.linux;
+const SYS = linux.SYS;
+const socklen_t = linux.socklen_t;
+const iovec = std.os.iovec;
+const iovec_const = std.os.iovec_const;
+const uid_t = linux.uid_t;
+const gid_t = linux.gid_t;
+const pid_t = linux.pid_t;
+const sockaddr = linux.sockaddr;
+const timespec = linux.timespec;
+
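+// MIPS system calls return their result in $v0 ($2) and set $a3 ($7) to a
+// non-zero value on failure, in which case $v0 holds a positive errno. The
+// `blez $7` / `dsubu` sequence below converts that into the negative-errno
+// convention expected by the rest of std.os.linux.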
+pub fn syscall0(number: SYS) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ : "$1", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
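+// The MIPS pipe syscall returns the two file descriptors in $v0/$v1 ($2/$3)
+// rather than writing them to memory, so this wrapper stores them into the
+// caller-provided array on success.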
+pub fn syscall_pipe(fd: *[2]i32) usize {
+ return asm volatile (
+ \\ .set noat
+ \\ .set noreorder
+ \\ syscall
+ \\ blez $7, 1f
+ \\ nop
+ \\ b 2f
+ \\ subu $2, $0, $2
+ \\ 1:
+ \\ sw $2, 0($4)
+ \\ sw $3, 4($4)
+ \\ 2:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(SYS.pipe)),
+ [fd] "{$4}" (fd),
+ : "$1", "$3", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+pub fn syscall1(number: SYS, arg1: usize) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ [arg1] "{$4}" (arg1),
+ : "$1", "$3", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+pub fn syscall2(number: SYS, arg1: usize, arg2: usize) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ [arg1] "{$4}" (arg1),
+ [arg2] "{$5}" (arg2),
+ : "$1", "$3", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+pub fn syscall3(number: SYS, arg1: usize, arg2: usize, arg3: usize) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ [arg1] "{$4}" (arg1),
+ [arg2] "{$5}" (arg2),
+ [arg3] "{$6}" (arg3),
+ : "$1", "$3", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+pub fn syscall4(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ [arg1] "{$4}" (arg1),
+ [arg2] "{$5}" (arg2),
+ [arg3] "{$6}" (arg3),
+ [arg4] "{$7}" (arg4),
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+pub fn syscall5(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ [arg1] "{$4}" (arg1),
+ [arg2] "{$5}" (arg2),
+ [arg3] "{$6}" (arg3),
+ [arg4] "{$7}" (arg4),
+ [arg5] "{$8}" (arg5),
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+// NOTE: The n64 calling convention passes the first eight integer arguments
+// in registers $a0-$a7 ($4-$11), so unlike o32 no stack space is reserved for them here.
+
+pub fn syscall6(
+ number: SYS,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ [arg1] "{$4}" (arg1),
+ [arg2] "{$5}" (arg2),
+ [arg3] "{$6}" (arg3),
+ [arg4] "{$7}" (arg4),
+ [arg5] "{$8}" (arg5),
+ [arg6] "{$9}" (arg6),
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+pub fn syscall7(
+ number: SYS,
+ arg1: usize,
+ arg2: usize,
+ arg3: usize,
+ arg4: usize,
+ arg5: usize,
+ arg6: usize,
+ arg7: usize,
+) usize {
+ return asm volatile (
+ \\ syscall
+ \\ blez $7, 1f
+ \\ dsubu $2, $0, $2
+ \\ 1:
+ : [ret] "={$2}" (-> usize),
+ : [number] "{$2}" (@enumToInt(number)),
+ [arg1] "{$4}" (arg1),
+ [arg2] "{$5}" (arg2),
+ [arg3] "{$6}" (arg3),
+ [arg4] "{$7}" (arg4),
+ [arg5] "{$8}" (arg5),
+ [arg6] "{$9}" (arg6),
+ [arg7] "{$10}" (arg7),
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+const CloneFn = *const fn (arg: usize) callconv(.C) u8;
+
+/// This matches the libc clone function.
+pub extern fn clone(func: CloneFn, stack: usize, flags: u32, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
+
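+// The n64 syscall table has no legacy sigreturn, so both restore and
+// restore_rt issue rt_sigreturn.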
+pub fn restore() callconv(.Naked) void {
+ return asm volatile ("syscall"
+ :
+ : [number] "{$2}" (@enumToInt(SYS.rt_sigreturn)),
+ : "$1", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
+pub fn restore_rt() callconv(.Naked) void {
+ return asm volatile ("syscall"
+ :
+ : [number] "{$2}" (@enumToInt(SYS.rt_sigreturn)),
+ : "$1", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
+ );
+}
+
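+// MIPS uses its own historical O_* flag values; they do not match the generic
+// asm-generic/fcntl.h definitions used by most other architectures.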
+pub const O = struct {
+ pub const CREAT = 0o0400;
+ pub const EXCL = 0o02000;
+ pub const NOCTTY = 0o04000;
+ pub const TRUNC = 0o01000;
+ pub const APPEND = 0o0010;
+ pub const NONBLOCK = 0o0200;
+ pub const DSYNC = 0o0020;
+ pub const SYNC = 0o040020;
+ pub const RSYNC = 0o040020;
+ pub const DIRECTORY = 0o0200000;
+ pub const NOFOLLOW = 0o0400000;
+ pub const CLOEXEC = 0o02000000;
+
+ pub const ASYNC = 0o010000;
+ pub const DIRECT = 0o0100000;
+ pub const LARGEFILE = 0o020000;
+ pub const NOATIME = 0o01000000;
+ pub const PATH = 0o010000000;
+ pub const TMPFILE = 0o020200000;
+ pub const NDELAY = NONBLOCK;
+};
+
+pub const F = struct {
+ pub const DUPFD = 0;
+ pub const GETFD = 1;
+ pub const SETFD = 2;
+ pub const GETFL = 3;
+ pub const SETFL = 4;
+
+ pub const SETOWN = 24;
+ pub const GETOWN = 23;
+ pub const SETSIG = 10;
+ pub const GETSIG = 11;
+
+ pub const GETLK = 33;
+ pub const SETLK = 34;
+ pub const SETLKW = 35;
+
+ pub const RDLCK = 0;
+ pub const WRLCK = 1;
+ pub const UNLCK = 2;
+
+ pub const SETOWN_EX = 15;
+ pub const GETOWN_EX = 16;
+
+ pub const GETOWNER_UIDS = 17;
+};
+
+pub const LOCK = struct {
+ pub const SH = 1;
+ pub const EX = 2;
+ pub const UN = 8;
+ pub const NB = 4;
+};
+
+pub const MMAP2_UNIT = 4096;
+
+pub const MAP = struct {
+ pub const NORESERVE = 0x0400;
+ pub const GROWSDOWN = 0x1000;
+ pub const DENYWRITE = 0x2000;
+ pub const EXECUTABLE = 0x4000;
+ pub const LOCKED = 0x8000;
+ pub const @"32BIT" = 0x40;
+};
+
+pub const VDSO = struct {
+ pub const CGT_SYM = "__kernel_clock_gettime";
+ pub const CGT_VER = "LINUX_2.6.39";
+};
+
+pub const Flock = extern struct {
+ type: i16,
+ whence: i16,
+ __pad0: [4]u8,
+ start: off_t,
+ len: off_t,
+ pid: pid_t,
+ __unused: [4]u8,
+};
+
+pub const msghdr = extern struct {
+ name: ?*sockaddr,
+ namelen: socklen_t,
+ iov: [*]iovec,
+ iovlen: i32,
+ control: ?*anyopaque,
+ controllen: socklen_t,
+ flags: i32,
+};
+
+pub const msghdr_const = extern struct {
+ name: ?*const sockaddr,
+ namelen: socklen_t,
+ iov: [*]const iovec_const,
+ iovlen: i32,
+ control: ?*const anyopaque,
+ controllen: socklen_t,
+ flags: i32,
+};
+
+pub const blksize_t = i32;
+pub const nlink_t = u32;
+pub const time_t = i32;
+pub const mode_t = u32;
+pub const off_t = i64;
+pub const ino_t = u64;
+pub const dev_t = u64;
+pub const blkcnt_t = i64;
+
+// The `stat` definition used by the Linux kernel.
+pub const Stat = extern struct {
+ dev: u32,
+ __pad0: [3]u32, // Reserved for st_dev expansion
+ ino: ino_t,
+ mode: mode_t,
+ nlink: nlink_t,
+ uid: uid_t,
+ gid: gid_t,
+ rdev: u32,
+ __pad1: [3]u32,
+ size: off_t,
+ atim: timespec,
+ mtim: timespec,
+ ctim: timespec,
+ blksize: blksize_t,
+ __pad3: u32,
+ blocks: blkcnt_t,
+ __pad4: [14]usize,
+
+ pub fn atime(self: @This()) timespec {
+ return self.atim;
+ }
+
+ pub fn mtime(self: @This()) timespec {
+ return self.mtim;
+ }
+
+ pub fn ctime(self: @This()) timespec {
+ return self.ctim;
+ }
+};
+
+pub const timeval = extern struct {
+ tv_sec: isize,
+ tv_usec: isize,
+};
+
+pub const timezone = extern struct {
+ tz_minuteswest: i32,
+ tz_dsttime: i32,
+};
+
+pub const Elf_Symndx = u32;
+
+pub const rlimit_resource = enum(c_int) {
+ /// Per-process CPU limit, in seconds.
+ CPU,
+
+ /// Largest file that can be created, in bytes.
+ FSIZE,
+
+ /// Maximum size of data segment, in bytes.
+ DATA,
+
+ /// Maximum size of stack segment, in bytes.
+ STACK,
+
+ /// Largest core file that can be created, in bytes.
+ CORE,
+
+ /// Number of open files.
+ NOFILE,
+
+ /// Address space limit.
+ AS,
+
+ /// Largest resident set size, in bytes.
+ /// This affects swapping; processes that are exceeding their
+ /// resident set size will be more likely to have physical memory
+ /// taken from them.
+ RSS,
+
+ /// Number of processes.
+ NPROC,
+
+ /// Locked-in-memory address space.
+ MEMLOCK,
+
+ /// Maximum number of file locks.
+ LOCKS,
+
+ /// Maximum number of pending signals.
+ SIGPENDING,
+
+ /// Maximum bytes in POSIX message queues.
+ MSGQUEUE,
+
+ /// Maximum nice priority allowed to raise to.
+ /// Nice levels 19 .. -20 correspond to 0 .. 39
+ /// values of this resource limit.
+ NICE,
+
+ /// Maximum realtime priority allowed for non-privileged
+ /// processes.
+ RTPRIO,
+
+ /// Maximum CPU time in µs that a process scheduled under a real-time
+ /// scheduling policy may consume without making a blocking system
+ /// call before being forcibly descheduled.
+ RTTIME,
+
+ _,
+};
diff --git a/lib/std/os/linux/syscalls.zig b/lib/std/os/linux/syscalls.zig
index 6e8cee7b84..f176a434b4 100644
--- a/lib/std/os/linux/syscalls.zig
+++ b/lib/std/os/linux/syscalls.zig
@@ -2032,6 +2032,365 @@ pub const Mips = enum(usize) {
set_mempolicy_home_node = Linux + 450,
};
+pub const Mips64 = enum(usize) {
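+ // The MIPS n64 ABI numbers its syscalls starting at 5000 (o32 starts at 4000, n32 at 6000).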
+ pub const Linux = 5000;
+
+ read = Linux + 0,
+ write = Linux + 1,
+ open = Linux + 2,
+ close = Linux + 3,
+ stat = Linux + 4,
+ fstat = Linux + 5,
+ lstat = Linux + 6,
+ poll = Linux + 7,
+ lseek = Linux + 8,
+ mmap = Linux + 9,
+ mprotect = Linux + 10,
+ munmap = Linux + 11,
+ brk = Linux + 12,
+ rt_sigaction = Linux + 13,
+ rt_sigprocmask = Linux + 14,
+ ioctl = Linux + 15,
+ pread64 = Linux + 16,
+ pwrite64 = Linux + 17,
+ readv = Linux + 18,
+ writev = Linux + 19,
+ access = Linux + 20,
+ pipe = Linux + 21,
+ _newselect = Linux + 22,
+ sched_yield = Linux + 23,
+ mremap = Linux + 24,
+ msync = Linux + 25,
+ mincore = Linux + 26,
+ madvise = Linux + 27,
+ shmget = Linux + 28,
+ shmat = Linux + 29,
+ shmctl = Linux + 30,
+ dup = Linux + 31,
+ dup2 = Linux + 32,
+ pause = Linux + 33,
+ nanosleep = Linux + 34,
+ getitimer = Linux + 35,
+ setitimer = Linux + 36,
+ alarm = Linux + 37,
+ getpid = Linux + 38,
+ sendfile = Linux + 39,
+ socket = Linux + 40,
+ connect = Linux + 41,
+ accept = Linux + 42,
+ sendto = Linux + 43,
+ recvfrom = Linux + 44,
+ sendmsg = Linux + 45,
+ recvmsg = Linux + 46,
+ shutdown = Linux + 47,
+ bind = Linux + 48,
+ listen = Linux + 49,
+ getsockname = Linux + 50,
+ getpeername = Linux + 51,
+ socketpair = Linux + 52,
+ setsockopt = Linux + 53,
+ getsockopt = Linux + 54,
+ clone = Linux + 55,
+ fork = Linux + 56,
+ execve = Linux + 57,
+ exit = Linux + 58,
+ wait4 = Linux + 59,
+ kill = Linux + 60,
+ uname = Linux + 61,
+ semget = Linux + 62,
+ semop = Linux + 63,
+ semctl = Linux + 64,
+ shmdt = Linux + 65,
+ msgget = Linux + 66,
+ msgsnd = Linux + 67,
+ msgrcv = Linux + 68,
+ msgctl = Linux + 69,
+ fcntl = Linux + 70,
+ flock = Linux + 71,
+ fsync = Linux + 72,
+ fdatasync = Linux + 73,
+ truncate = Linux + 74,
+ ftruncate = Linux + 75,
+ getdents = Linux + 76,
+ getcwd = Linux + 77,
+ chdir = Linux + 78,
+ fchdir = Linux + 79,
+ rename = Linux + 80,
+ mkdir = Linux + 81,
+ rmdir = Linux + 82,
+ creat = Linux + 83,
+ link = Linux + 84,
+ unlink = Linux + 85,
+ symlink = Linux + 86,
+ readlink = Linux + 87,
+ chmod = Linux + 88,
+ fchmod = Linux + 89,
+ chown = Linux + 90,
+ fchown = Linux + 91,
+ lchown = Linux + 92,
+ umask = Linux + 93,
+ gettimeofday = Linux + 94,
+ getrlimit = Linux + 95,
+ getrusage = Linux + 96,
+ sysinfo = Linux + 97,
+ times = Linux + 98,
+ ptrace = Linux + 99,
+ getuid = Linux + 100,
+ syslog = Linux + 101,
+ getgid = Linux + 102,
+ setuid = Linux + 103,
+ setgid = Linux + 104,
+ geteuid = Linux + 105,
+ getegid = Linux + 106,
+ setpgid = Linux + 107,
+ getppid = Linux + 108,
+ getpgrp = Linux + 109,
+ setsid = Linux + 110,
+ setreuid = Linux + 111,
+ setregid = Linux + 112,
+ getgroups = Linux + 113,
+ setgroups = Linux + 114,
+ setresuid = Linux + 115,
+ getresuid = Linux + 116,
+ setresgid = Linux + 117,
+ getresgid = Linux + 118,
+ getpgid = Linux + 119,
+ setfsuid = Linux + 120,
+ setfsgid = Linux + 121,
+ getsid = Linux + 122,
+ capget = Linux + 123,
+ capset = Linux + 124,
+ rt_sigpending = Linux + 125,
+ rt_sigtimedwait = Linux + 126,
+ rt_sigqueueinfo = Linux + 127,
+ rt_sigsuspend = Linux + 128,
+ sigaltstack = Linux + 129,
+ utime = Linux + 130,
+ mknod = Linux + 131,
+ personality = Linux + 132,
+ ustat = Linux + 133,
+ statfs = Linux + 134,
+ fstatfs = Linux + 135,
+ sysfs = Linux + 136,
+ getpriority = Linux + 137,
+ setpriority = Linux + 138,
+ sched_setparam = Linux + 139,
+ sched_getparam = Linux + 140,
+ sched_setscheduler = Linux + 141,
+ sched_getscheduler = Linux + 142,
+ sched_get_priority_max = Linux + 143,
+ sched_get_priority_min = Linux + 144,
+ sched_rr_get_interval = Linux + 145,
+ mlock = Linux + 146,
+ munlock = Linux + 147,
+ mlockall = Linux + 148,
+ munlockall = Linux + 149,
+ vhangup = Linux + 150,
+ pivot_root = Linux + 151,
+ _sysctl = Linux + 152,
+ prctl = Linux + 153,
+ adjtimex = Linux + 154,
+ setrlimit = Linux + 155,
+ chroot = Linux + 156,
+ sync = Linux + 157,
+ acct = Linux + 158,
+ settimeofday = Linux + 159,
+ mount = Linux + 160,
+ umount2 = Linux + 161,
+ swapon = Linux + 162,
+ swapoff = Linux + 163,
+ reboot = Linux + 164,
+ sethostname = Linux + 165,
+ setdomainname = Linux + 166,
+ create_module = Linux + 167,
+ init_module = Linux + 168,
+ delete_module = Linux + 169,
+ get_kernel_syms = Linux + 170,
+ query_module = Linux + 171,
+ quotactl = Linux + 172,
+ nfsservctl = Linux + 173,
+ getpmsg = Linux + 174,
+ putpmsg = Linux + 175,
+ afs_syscall = Linux + 176,
+ reserved177 = Linux + 177,
+ gettid = Linux + 178,
+ readahead = Linux + 179,
+ setxattr = Linux + 180,
+ lsetxattr = Linux + 181,
+ fsetxattr = Linux + 182,
+ getxattr = Linux + 183,
+ lgetxattr = Linux + 184,
+ fgetxattr = Linux + 185,
+ listxattr = Linux + 186,
+ llistxattr = Linux + 187,
+ flistxattr = Linux + 188,
+ removexattr = Linux + 189,
+ lremovexattr = Linux + 190,
+ fremovexattr = Linux + 191,
+ tkill = Linux + 192,
+ reserved193 = Linux + 193,
+ futex = Linux + 194,
+ sched_setaffinity = Linux + 195,
+ sched_getaffinity = Linux + 196,
+ cacheflush = Linux + 197,
+ cachectl = Linux + 198,
+ sysmips = Linux + 199,
+ io_setup = Linux + 200,
+ io_destroy = Linux + 201,
+ io_getevents = Linux + 202,
+ io_submit = Linux + 203,
+ io_cancel = Linux + 204,
+ exit_group = Linux + 205,
+ lookup_dcookie = Linux + 206,
+ epoll_create = Linux + 207,
+ epoll_ctl = Linux + 208,
+ epoll_wait = Linux + 209,
+ remap_file_pages = Linux + 210,
+ rt_sigreturn = Linux + 211,
+ set_tid_address = Linux + 212,
+ restart_syscall = Linux + 213,
+ semtimedop = Linux + 214,
+ fadvise64 = Linux + 215,
+ timer_create = Linux + 216,
+ timer_settime = Linux + 217,
+ timer_gettime = Linux + 218,
+ timer_getoverrun = Linux + 219,
+ timer_delete = Linux + 220,
+ clock_settime = Linux + 221,
+ clock_gettime = Linux + 222,
+ clock_getres = Linux + 223,
+ clock_nanosleep = Linux + 224,
+ tgkill = Linux + 225,
+ utimes = Linux + 226,
+ mbind = Linux + 227,
+ get_mempolicy = Linux + 228,
+ set_mempolicy = Linux + 229,
+ mq_open = Linux + 230,
+ mq_unlink = Linux + 231,
+ mq_timedsend = Linux + 232,
+ mq_timedreceive = Linux + 233,
+ mq_notify = Linux + 234,
+ mq_getsetattr = Linux + 235,
+ vserver = Linux + 236,
+ waitid = Linux + 237,
+ add_key = Linux + 239,
+ request_key = Linux + 240,
+ keyctl = Linux + 241,
+ set_thread_area = Linux + 242,
+ inotify_init = Linux + 243,
+ inotify_add_watch = Linux + 244,
+ inotify_rm_watch = Linux + 245,
+ migrate_pages = Linux + 246,
+ openat = Linux + 247,
+ mkdirat = Linux + 248,
+ mknodat = Linux + 249,
+ fchownat = Linux + 250,
+ futimesat = Linux + 251,
+ fstatat64 = Linux + 252,
+ unlinkat = Linux + 253,
+ renameat = Linux + 254,
+ linkat = Linux + 255,
+ symlinkat = Linux + 256,
+ readlinkat = Linux + 257,
+ fchmodat = Linux + 258,
+ faccessat = Linux + 259,
+ pselect6 = Linux + 260,
+ ppoll = Linux + 261,
+ unshare = Linux + 262,
+ splice = Linux + 263,
+ sync_file_range = Linux + 264,
+ tee = Linux + 265,
+ vmsplice = Linux + 266,
+ move_pages = Linux + 267,
+ set_robust_list = Linux + 268,
+ get_robust_list = Linux + 269,
+ kexec_load = Linux + 270,
+ getcpu = Linux + 271,
+ epoll_pwait = Linux + 272,
+ ioprio_set = Linux + 273,
+ ioprio_get = Linux + 274,
+ utimensat = Linux + 275,
+ signalfd = Linux + 276,
+ timerfd = Linux + 277,
+ eventfd = Linux + 278,
+ fallocate = Linux + 279,
+ timerfd_create = Linux + 280,
+ timerfd_gettime = Linux + 281,
+ timerfd_settime = Linux + 282,
+ signalfd4 = Linux + 283,
+ eventfd2 = Linux + 284,
+ epoll_create1 = Linux + 285,
+ dup3 = Linux + 286,
+ pipe2 = Linux + 287,
+ inotify_init1 = Linux + 288,
+ preadv = Linux + 289,
+ pwritev = Linux + 290,
+ rt_tgsigqueueinfo = Linux + 291,
+ perf_event_open = Linux + 292,
+ accept4 = Linux + 293,
+ recvmmsg = Linux + 294,
+ fanotify_init = Linux + 295,
+ fanotify_mark = Linux + 296,
+ prlimit64 = Linux + 297,
+ name_to_handle_at = Linux + 298,
+ open_by_handle_at = Linux + 299,
+ clock_adjtime = Linux + 300,
+ syncfs = Linux + 301,
+ sendmmsg = Linux + 302,
+ setns = Linux + 303,
+ process_vm_readv = Linux + 304,
+ process_vm_writev = Linux + 305,
+ kcmp = Linux + 306,
+ finit_module = Linux + 307,
+ getdents64 = Linux + 308,
+ sched_setattr = Linux + 309,
+ sched_getattr = Linux + 310,
+ renameat2 = Linux + 311,
+ seccomp = Linux + 312,
+ getrandom = Linux + 313,
+ memfd_create = Linux + 314,
+ bpf = Linux + 315,
+ execveat = Linux + 316,
+ userfaultfd = Linux + 317,
+ membarrier = Linux + 318,
+ mlock2 = Linux + 319,
+ copy_file_range = Linux + 320,
+ preadv2 = Linux + 321,
+ pwritev2 = Linux + 322,
+ pkey_mprotect = Linux + 323,
+ pkey_alloc = Linux + 324,
+ pkey_free = Linux + 325,
+ statx = Linux + 326,
+ rseq = Linux + 327,
+ io_pgetevents = Linux + 328,
+ pidfd_send_signal = Linux + 424,
+ io_uring_setup = Linux + 425,
+ io_uring_enter = Linux + 426,
+ io_uring_register = Linux + 427,
+ open_tree = Linux + 428,
+ move_mount = Linux + 429,
+ fsopen = Linux + 430,
+ fsconfig = Linux + 431,
+ fsmount = Linux + 432,
+ fspick = Linux + 433,
+ pidfd_open = Linux + 434,
+ clone3 = Linux + 435,
+ close_range = Linux + 436,
+ openat2 = Linux + 437,
+ pidfd_getfd = Linux + 438,
+ faccessat2 = Linux + 439,
+ process_madvise = Linux + 440,
+ epoll_pwait2 = Linux + 441,
+ mount_setattr = Linux + 442,
+ quotactl_fd = Linux + 443,
+ landlock_create_ruleset = Linux + 444,
+ landlock_add_rule = Linux + 445,
+ landlock_restrict_self = Linux + 446,
+ process_mrelease = Linux + 448,
+ futex_waitv = Linux + 449,
+ set_mempolicy_home_node = Linux + 450,
+};
+
pub const PowerPC = enum(usize) {
restart_syscall = 0,
exit = 1,
diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig
index d487530f55..cffdec0424 100644
--- a/lib/std/os/linux/tls.zig
+++ b/lib/std/os/linux/tls.zig
@@ -48,7 +48,7 @@ const TLSVariant = enum {
};
const tls_variant = switch (native_arch) {
- .arm, .armeb, .thumb, .aarch64, .aarch64_be, .riscv32, .riscv64, .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => TLSVariant.VariantI,
+ .arm, .armeb, .thumb, .aarch64, .aarch64_be, .riscv32, .riscv64, .mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => TLSVariant.VariantI,
.x86_64, .x86, .sparc64 => TLSVariant.VariantII,
else => @compileError("undefined tls_variant for this architecture"),
};
@@ -64,7 +64,7 @@ const tls_tcb_size = switch (native_arch) {
// Controls if the TP points to the end of the TCB instead of its beginning
const tls_tp_points_past_tcb = switch (native_arch) {
- .riscv32, .riscv64, .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => true,
+ .riscv32, .riscv64, .mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => true,
else => false,
};
@@ -72,12 +72,12 @@ const tls_tp_points_past_tcb = switch (native_arch) {
// make the generated code more efficient
const tls_tp_offset = switch (native_arch) {
- .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => 0x7000,
+ .mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => 0x7000,
else => 0,
};
const tls_dtv_offset = switch (native_arch) {
- .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => 0x8000,
+ .mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => 0x8000,
.riscv32, .riscv64 => 0x800,
else => 0,
};
@@ -156,7 +156,7 @@ pub fn setThreadPointer(addr: usize) void {
: [addr] "r" (addr),
);
},
- .mips, .mipsel => {
+ .mips, .mipsel, .mips64, .mips64el => {
const rc = std.os.linux.syscall1(.set_thread_area, addr);
assert(rc == 0);
},
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index e53387b27c..93e762827b 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -85,7 +85,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
+ .Buffer = @qualCast([*]u16, sub_path_w.ptr),
};
var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES),
@@ -634,7 +634,7 @@ pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(path_name.ptr)),
+ .Buffer = @qualCast([*]u16, path_name.ptr),
};
const rc = ntdll.RtlSetCurrentDirectory_U(&nt_name);
@@ -766,7 +766,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
+ .Buffer = @qualCast([*]u16, sub_path_w.ptr),
};
var attr = OBJECT_ATTRIBUTES{
.Length = @sizeOf(OBJECT_ATTRIBUTES),
@@ -876,7 +876,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
// The Windows API makes this mutable, but it will not mutate here.
- .Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
+ .Buffer = @qualCast([*]u16, sub_path_w.ptr),
};
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
@@ -1414,7 +1414,7 @@ pub fn sendmsg(
}
pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 {
- var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @intToPtr([*]u8, @ptrToInt(buf)) };
+ var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @qualCast([*]u8, buf) };
var bytes_send: DWORD = undefined;
if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) {
return ws2_32.SOCKET_ERROR;
@@ -1876,13 +1876,13 @@ pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
const a_string = UNICODE_STRING{
.Length = a_bytes,
.MaximumLength = a_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(a.ptr)),
+ .Buffer = @qualCast([*]u16, a.ptr),
};
const b_bytes = @intCast(u16, b.len * 2);
const b_string = UNICODE_STRING{
.Length = b_bytes,
.MaximumLength = b_bytes,
- .Buffer = @intToPtr([*]u16, @ptrToInt(b.ptr)),
+ .Buffer = @qualCast([*]u16, b.ptr),
};
return ntdll.RtlEqualUnicodeString(&a_string, &b_string, TRUE) == TRUE;
}
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 8aef63332d..ea221d1539 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -327,7 +327,7 @@ fn _start() callconv(.Naked) noreturn {
: [argc] "={sp}" (-> [*]usize),
);
},
- .mips, .mipsel => {
+ .mips, .mipsel, .mips64, .mips64el => {
// The lr is already zeroed on entry, as specified by the ABI.
argc_argv_ptr = asm volatile (
\\ move $fp, $0
diff --git a/lib/std/std.zig b/lib/std/std.zig
index ba52784b45..e02be2ebaf 100644
--- a/lib/std/std.zig
+++ b/lib/std/std.zig
@@ -9,6 +9,7 @@ pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap;
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BoundedArray = @import("bounded_array.zig").BoundedArray;
+pub const Build = @import("Build.zig");
pub const BufMap = @import("buf_map.zig").BufMap;
pub const BufSet = @import("buf_set.zig").BufSet;
pub const ChildProcess = @import("child_process.zig").ChildProcess;
@@ -49,7 +50,6 @@ pub const array_hash_map = @import("array_hash_map.zig");
pub const atomic = @import("atomic.zig");
pub const base64 = @import("base64.zig");
pub const bit_set = @import("bit_set.zig");
-pub const build = @import("build.zig");
pub const builtin = @import("builtin.zig");
pub const c = @import("c.zig");
pub const coff = @import("coff.zig");
@@ -96,6 +96,9 @@ pub const wasm = @import("wasm.zig");
pub const zig = @import("zig.zig");
pub const start = @import("start.zig");
+/// Deprecated; use `Build`.
+pub const build = Build;
+
const root = @import("root");
const options_override = if (@hasDecl(root, "std_options")) root.std_options else struct {};
@@ -150,6 +153,11 @@ pub const options = struct {
else
log.defaultLog;
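+ /// Maximum nesting depth that std.fmt will descend into when formatting values.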
+ pub const fmt_max_depth = if (@hasDecl(options_override, "fmt_max_depth"))
+ options_override.fmt_max_depth
+ else
+ fmt.default_max_depth;
+
pub const cryptoRandomSeed: fn (buffer: []u8) void = if (@hasDecl(options_override, "cryptoRandomSeed"))
options_override.cryptoRandomSeed
else
diff --git a/lib/std/tar.zig b/lib/std/tar.zig
index 4f6a77c6ba..91772d7319 100644
--- a/lib/std/tar.zig
+++ b/lib/std/tar.zig
@@ -1,6 +1,18 @@
pub const Options = struct {
/// Number of directory levels to skip when extracting files.
strip_components: u32 = 0,
+ /// How to handle the "mode" property of files from within the tar file.
+ mode_mode: ModeMode = .executable_bit_only,
+
+ const ModeMode = enum {
+ /// The mode from the tar file is completely ignored; files are created
+ /// with the default mode.
+ ignore,
+ /// The mode from the tar file is inspected for the owner executable bit
+ /// only. This bit is copied to the group and other executable bits.
+ /// Other bits of the mode are left as the default when creating files.
+ executable_bit_only,
+ };
};
pub const Header = struct {
@@ -72,6 +84,17 @@ pub const Header = struct {
};
pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !void {
+ switch (options.mode_mode) {
+ .ignore => {},
+ .executable_bit_only => {
+ // This code does not look at the mode bits yet. To implement this feature,
+ // the implementation must be adjusted to look at the mode, and check the
+ // user executable bit, then call fchmod on newly created files when
+ // the executable bit is supposed to be set.
+ // It also needs to properly deal with ACLs on Windows.
+ @panic("TODO: unimplemented: tar ModeMode.executable_bit_only");
+ },
+ }
var file_name_buffer: [255]u8 = undefined;
var buffer: [512 * 8]u8 = undefined;
var start: usize = 0;
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 8ae175aac8..4429f8be2d 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -1880,6 +1880,559 @@ pub const Target = struct {
=> 16,
};
}
+
+ pub const CType = enum {
+ short,
+ ushort,
+ int,
+ uint,
+ long,
+ ulong,
+ longlong,
+ ulonglong,
+ float,
+ double,
+ longdouble,
+ };
+
+ pub fn c_type_byte_size(t: Target, c_type: CType) u16 {
+ return switch (c_type) {
+ .short,
+ .ushort,
+ .int,
+ .uint,
+ .long,
+ .ulong,
+ .longlong,
+ .ulonglong,
+ => @divExact(c_type_bit_size(t, c_type), 8),
+
+ .float => 4,
+ .double => 8,
+
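+ // An 80-bit long double stores 10 bytes of data, padded out to its ABI alignment.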
+ .longdouble => switch (c_type_bit_size(t, c_type)) {
+ 16 => 2,
+ 32 => 4,
+ 64 => 8,
+ 80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))),
+ 128 => 16,
+ else => unreachable,
+ },
+ };
+ }
+
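+ // These values mirror the C ABI of each OS/architecture/ABI combination,
+ // matching what C compilers use for the same target.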
+ pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
+ switch (target.os.tag) {
+ .freestanding, .other => switch (target.cpu.arch) {
+ .msp430 => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .float, .long, .ulong => return 32,
+ .longlong, .ulonglong, .double, .longdouble => return 64,
+ },
+ .avr => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .float, .double, .longdouble => return 32,
+ .longlong, .ulonglong => return 64,
+ },
+ .tce, .tcele => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
+ .float, .double, .longdouble => return 32,
+ },
+ .mips64, .mips64el => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 128,
+ },
+ .x86_64 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 80,
+ },
+ else => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .x86 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => return 128,
+ },
+
+ .riscv32,
+ .riscv64,
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .s390x,
+ .sparc,
+ .sparc64,
+ .sparcel,
+ .wasm32,
+ .wasm64,
+ => return 128,
+
+ else => return 64,
+ },
+ },
+ },
+
+ .linux,
+ .freebsd,
+ .netbsd,
+ .dragonfly,
+ .openbsd,
+ .wasi,
+ .emscripten,
+ .plan9,
+ .solaris,
+ .haiku,
+ .ananas,
+ .fuchsia,
+ .minix,
+ => switch (target.cpu.arch) {
+ .msp430 => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .float => return 32,
+ .longlong, .ulonglong, .double, .longdouble => return 64,
+ },
+ .avr => switch (c_type) {
+ .short, .ushort, .int, .uint => return 16,
+ .long, .ulong, .float, .double, .longdouble => return 32,
+ .longlong, .ulonglong => return 64,
+ },
+ .tce, .tcele => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
+ .float, .double, .longdouble => return 32,
+ },
+ .mips64, .mips64el => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
+ },
+ .x86_64 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 80,
+ },
+ else => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return target.cpu.arch.ptrBitWidth(),
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .x86 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+
+ .powerpc,
+ .powerpcle,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => switch (target.os.tag) {
+ .freebsd, .netbsd, .openbsd => return 64,
+ else => return 128,
+ },
+ },
+
+ .powerpc64,
+ .powerpc64le,
+ => switch (target.abi) {
+ .musl,
+ .musleabi,
+ .musleabihf,
+ .muslx32,
+ => return 64,
+ else => switch (target.os.tag) {
+ .freebsd, .openbsd => return 64,
+ else => return 128,
+ },
+ },
+
+ .riscv32,
+ .riscv64,
+ .aarch64,
+ .aarch64_be,
+ .aarch64_32,
+ .s390x,
+ .mips64,
+ .mips64el,
+ .sparc,
+ .sparc64,
+ .sparcel,
+ .wasm32,
+ .wasm64,
+ => return 128,
+
+ else => return 64,
+ },
+ },
+ },
+
+ .windows, .uefi => switch (target.cpu.arch) {
+ .x86 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 80,
+ else => return 64,
+ },
+ },
+ .x86_64 => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.abi) {
+ .cygnus => return 64,
+ else => return 32,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 80,
+ else => return 64,
+ },
+ },
+ else => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => return 32,
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 64,
+ },
+ },
+
+ .macos, .ios, .tvos, .watchos => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.cpu.arch) {
+ .x86, .arm, .aarch64_32 => return 32,
+ .x86_64 => switch (target.abi) {
+ .gnux32, .muslx32 => return 32,
+ else => return 64,
+ },
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => switch (target.cpu.arch) {
+ .x86 => switch (target.abi) {
+ .android => return 64,
+ else => return 80,
+ },
+ .x86_64 => return 80,
+ else => return 64,
+ },
+ },
+
+ .nvcl, .cuda => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong => switch (target.cpu.arch) {
+ .nvptx => return 32,
+ .nvptx64 => return 64,
+ else => return 64,
+ },
+ .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 64,
+ },
+
+ .amdhsa, .amdpal => switch (c_type) {
+ .short, .ushort => return 16,
+ .int, .uint, .float => return 32,
+ .long, .ulong, .longlong, .ulonglong, .double => return 64,
+ .longdouble => return 128,
+ },
+
+ .cloudabi,
+ .kfreebsd,
+ .lv2,
+ .zos,
+ .rtems,
+ .nacl,
+ .aix,
+ .ps4,
+ .ps5,
+ .elfiamcu,
+ .mesa3d,
+ .contiki,
+ .hermit,
+ .hurd,
+ .opencl,
+ .glsl450,
+ .vulkan,
+ .driverkit,
+ .shadermodel,
+ => @panic("TODO specify the C integer and float type sizes for this OS"),
+ }
+ }
+
+ pub fn c_type_alignment(target: Target, c_type: CType) u16 {
+ // Overrides for unusual alignments
+ switch (target.cpu.arch) {
+ .avr => switch (c_type) {
+ .short, .ushort => return 2,
+ else => return 1,
+ },
+ .x86 => switch (target.os.tag) {
+ .windows, .uefi => switch (c_type) {
+ .longlong, .ulonglong, .double => return 8,
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 4,
+ else => return 8,
+ },
+ else => {},
+ },
+ else => {},
+ },
+ else => {},
+ }
+
+ // Next-power-of-two-aligned, up to a maximum.
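+ // For example, an 80-bit long double occupies 10 bytes, which rounds up to
+ // 16 and is then capped by the per-architecture maximum below.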
+ return @min(
+ std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
+ switch (target.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
+ .netbsd => switch (target.abi) {
+ .gnueabi,
+ .gnueabihf,
+ .eabi,
+ .eabihf,
+ .android,
+ .musleabi,
+ .musleabihf,
+ => 8,
+
+ else => @as(u16, 4),
+ },
+ .ios, .tvos, .watchos => 4,
+ else => 8,
+ },
+
+ .msp430,
+ .avr,
+ => 2,
+
+ .arc,
+ .csky,
+ .x86,
+ .xcore,
+ .dxil,
+ .loongarch32,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .spirv32,
+ .kalimba,
+ .shave,
+ .renderscript32,
+ .ve,
+ .spu_2,
+ => 4,
+
+ .aarch64_32,
+ .amdgcn,
+ .amdil64,
+ .bpfel,
+ .bpfeb,
+ .hexagon,
+ .hsail64,
+ .loongarch64,
+ .m68k,
+ .mips,
+ .mipsel,
+ .sparc,
+ .sparcel,
+ .sparc64,
+ .lanai,
+ .le64,
+ .nvptx,
+ .nvptx64,
+ .r600,
+ .s390x,
+ .spir64,
+ .spirv64,
+ .renderscript64,
+ => 8,
+
+ .aarch64,
+ .aarch64_be,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .riscv32,
+ .riscv64,
+ .x86_64,
+ .wasm32,
+ .wasm64,
+ => 16,
+ },
+ );
+ }
+
+ pub fn c_type_preferred_alignment(target: Target, c_type: CType) u16 {
+ // Overrides for unusual alignments
+ switch (target.cpu.arch) {
+ .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
+ .netbsd => switch (target.abi) {
+ .gnueabi,
+ .gnueabihf,
+ .eabi,
+ .eabihf,
+ .android,
+ .musleabi,
+ .musleabihf,
+ => {},
+
+ else => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ .ios, .tvos, .watchos => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ else => {},
+ },
+ .arc => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ .avr => switch (c_type) {
+ .int, .uint, .long, .ulong, .float, .longdouble => return 1,
+ .short, .ushort => return 2,
+ .double => return 4,
+ .longlong, .ulonglong => return 8,
+ },
+ .x86 => switch (target.os.tag) {
+ .windows, .uefi => switch (c_type) {
+ .longdouble => switch (target.abi) {
+ .gnu, .gnuilp32, .cygnus => return 4,
+ else => return 8,
+ },
+ else => {},
+ },
+ else => switch (c_type) {
+ .longdouble => return 4,
+ else => {},
+ },
+ },
+ else => {},
+ }
+
+ // Next-power-of-two-aligned, up to a maximum.
+ return @min(
+ std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
+ switch (target.cpu.arch) {
+ .msp430 => @as(u16, 2),
+
+ .csky,
+ .xcore,
+ .dxil,
+ .loongarch32,
+ .tce,
+ .tcele,
+ .le32,
+ .amdil,
+ .hsail,
+ .spir,
+ .spirv32,
+ .kalimba,
+ .shave,
+ .renderscript32,
+ .ve,
+ .spu_2,
+ => 4,
+
+ .arc,
+ .arm,
+ .armeb,
+ .avr,
+ .thumb,
+ .thumbeb,
+ .aarch64_32,
+ .amdgcn,
+ .amdil64,
+ .bpfel,
+ .bpfeb,
+ .hexagon,
+ .hsail64,
+ .x86,
+ .loongarch64,
+ .m68k,
+ .mips,
+ .mipsel,
+ .sparc,
+ .sparcel,
+ .sparc64,
+ .lanai,
+ .le64,
+ .nvptx,
+ .nvptx64,
+ .r600,
+ .s390x,
+ .spir64,
+ .spirv64,
+ .renderscript64,
+ => 8,
+
+ .aarch64,
+ .aarch64_be,
+ .mips64,
+ .mips64el,
+ .powerpc,
+ .powerpcle,
+ .powerpc64,
+ .powerpc64le,
+ .riscv32,
+ .riscv64,
+ .x86_64,
+ .wasm32,
+ .wasm64,
+ => 16,
+ },
+ );
+ }
};
test {
diff --git a/lib/std/zig.zig b/lib/std/zig.zig
index bce8f6ce3c..f85cf75e60 100644
--- a/lib/std/zig.zig
+++ b/lib/std/zig.zig
@@ -8,7 +8,6 @@ pub const Tokenizer = tokenizer.Tokenizer;
pub const fmtId = fmt.fmtId;
pub const fmtEscapes = fmt.fmtEscapes;
pub const isValidId = fmt.isValidId;
-pub const parse = @import("zig/parse.zig").parse;
pub const string_literal = @import("zig/string_literal.zig");
pub const number_literal = @import("zig/number_literal.zig");
pub const primitives = @import("zig/primitives.zig");
diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig
index f312093aa3..80dda052ab 100644
--- a/lib/std/zig/Ast.zig
+++ b/lib/std/zig/Ast.zig
@@ -1,4 +1,8 @@
//! Abstract Syntax Tree for Zig source code.
+//! For Zig syntax, the root node is at nodes[0] and contains the list of
+//! sub-nodes.
+//! For Zon syntax, the root node is at nodes[0] and contains lhs as the node
+//! index of the main expression.
/// Reference to externally-owned data.
source: [:0]const u8,
@@ -11,13 +15,6 @@ extra_data: []Node.Index,
errors: []const Error,
-const std = @import("../std.zig");
-const assert = std.debug.assert;
-const testing = std.testing;
-const mem = std.mem;
-const Token = std.zig.Token;
-const Ast = @This();
-
pub const TokenIndex = u32;
pub const ByteOffset = u32;
@@ -34,7 +31,7 @@ pub const Location = struct {
line_end: usize,
};
-pub fn deinit(tree: *Ast, gpa: mem.Allocator) void {
+pub fn deinit(tree: *Ast, gpa: Allocator) void {
tree.tokens.deinit(gpa);
tree.nodes.deinit(gpa);
gpa.free(tree.extra_data);
@@ -48,11 +45,69 @@ pub const RenderError = error{
OutOfMemory,
};
+pub const Mode = enum { zig, zon };
+
+/// Result should be freed with tree.deinit() when there are
+/// no more references to any of the tokens or nodes.
+pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!Ast {
+ var tokens = Ast.TokenList{};
+ defer tokens.deinit(gpa);
+
+ // Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
+ const estimated_token_count = source.len / 8;
+ try tokens.ensureTotalCapacity(gpa, estimated_token_count);
+
+ var tokenizer = std.zig.Tokenizer.init(source);
+ while (true) {
+ const token = tokenizer.next();
+ try tokens.append(gpa, .{
+ .tag = token.tag,
+ .start = @intCast(u32, token.loc.start),
+ });
+ if (token.tag == .eof) break;
+ }
+
+ var parser: Parse = .{
+ .source = source,
+ .gpa = gpa,
+ .token_tags = tokens.items(.tag),
+ .token_starts = tokens.items(.start),
+ .errors = .{},
+ .nodes = .{},
+ .extra_data = .{},
+ .scratch = .{},
+ .tok_i = 0,
+ };
+ defer parser.errors.deinit(gpa);
+ defer parser.nodes.deinit(gpa);
+ defer parser.extra_data.deinit(gpa);
+ defer parser.scratch.deinit(gpa);
+
+ // Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
+ // Make sure the estimate is at least 1 so we can use appendAssumeCapacity on the root node below.
+ const estimated_node_count = (tokens.len + 2) / 2;
+ try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
+
+ switch (mode) {
+ .zig => try parser.parseRoot(),
+ .zon => try parser.parseZon(),
+ }
+
+ // TODO experiment with compacting the MultiArrayList slices here
+ return Ast{
+ .source = source,
+ .tokens = tokens.toOwnedSlice(),
+ .nodes = parser.nodes.toOwnedSlice(),
+ .extra_data = try parser.extra_data.toOwnedSlice(gpa),
+ .errors = try parser.errors.toOwnedSlice(gpa),
+ };
+}
+
/// `gpa` is used for allocating the resulting formatted source code, as well as
/// for allocating extra stack memory if needed, because this function utilizes recursion.
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
-pub fn render(tree: Ast, gpa: mem.Allocator) RenderError![]u8 {
+pub fn render(tree: Ast, gpa: Allocator) RenderError![]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
@@ -3347,3 +3402,12 @@ pub const Node = struct {
rparen: TokenIndex,
};
};
+
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const testing = std.testing;
+const mem = std.mem;
+const Token = std.zig.Token;
+const Ast = @This();
+const Allocator = std.mem.Allocator;
+const Parse = @import("Parse.zig");
diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig
new file mode 100644
index 0000000000..d498366b34
--- /dev/null
+++ b/lib/std/zig/Parse.zig
@@ -0,0 +1,3825 @@
+//! Represents in-progress parsing; it will be converted to an Ast after completion.
+
+pub const Error = error{ParseError} || Allocator.Error;
+
+gpa: Allocator,
+source: []const u8,
+token_tags: []const Token.Tag,
+token_starts: []const Ast.ByteOffset,
+tok_i: TokenIndex,
+errors: std.ArrayListUnmanaged(AstError),
+nodes: Ast.NodeList,
+extra_data: std.ArrayListUnmanaged(Node.Index),
+scratch: std.ArrayListUnmanaged(Node.Index),
+
+const SmallSpan = union(enum) {
+ zero_or_one: Node.Index,
+ multi: Node.SubRange,
+};
+
+const Members = struct {
+ len: usize,
+ lhs: Node.Index,
+ rhs: Node.Index,
+ trailing: bool,
+
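+ /// Converts the members to a `Node.SubRange`. For `len <= 2` the member
+ /// nodes are stored directly in `lhs`/`rhs` and are copied into `extra_data`
+ /// here; for larger counts `lhs`/`rhs` already describe a range in `extra_data`.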
+ fn toSpan(self: Members, p: *Parse) !Node.SubRange {
+ if (self.len <= 2) {
+ const nodes = [2]Node.Index{ self.lhs, self.rhs };
+ return p.listToSpan(nodes[0..self.len]);
+ } else {
+ return Node.SubRange{ .start = self.lhs, .end = self.rhs };
+ }
+ }
+};
+
+fn listToSpan(p: *Parse, list: []const Node.Index) !Node.SubRange {
+ try p.extra_data.appendSlice(p.gpa, list);
+ return Node.SubRange{
+ .start = @intCast(Node.Index, p.extra_data.items.len - list.len),
+ .end = @intCast(Node.Index, p.extra_data.items.len),
+ };
+}
+
+fn addNode(p: *Parse, elem: Ast.NodeList.Elem) Allocator.Error!Node.Index {
+ const result = @intCast(Node.Index, p.nodes.len);
+ try p.nodes.append(p.gpa, elem);
+ return result;
+}
+
+fn setNode(p: *Parse, i: usize, elem: Ast.NodeList.Elem) Node.Index {
+ p.nodes.set(i, elem);
+ return @intCast(Node.Index, i);
+}
+
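+ /// Reserves a slot for a node so that it ends up before its children in the
+ /// array; the slot is later filled in with `setNode` or released with
+ /// `unreserveNode`.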
+fn reserveNode(p: *Parse, tag: Ast.Node.Tag) !usize {
+ try p.nodes.resize(p.gpa, p.nodes.len + 1);
+ p.nodes.items(.tag)[p.nodes.len - 1] = tag;
+ return p.nodes.len - 1;
+}
+
+fn unreserveNode(p: *Parse, node_index: usize) void {
+ if (p.nodes.len == node_index) {
+ p.nodes.resize(p.gpa, p.nodes.len - 1) catch unreachable;
+ } else {
+ // There is a zombie node left in the tree; make it as inoffensive as possible
+ // (sadly, there is no no-op node).
+ p.nodes.items(.tag)[node_index] = .unreachable_literal;
+ p.nodes.items(.main_token)[node_index] = p.tok_i;
+ }
+}
+
+fn addExtra(p: *Parse, extra: anytype) Allocator.Error!Node.Index {
+ const fields = std.meta.fields(@TypeOf(extra));
+ try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
+ const result = @intCast(u32, p.extra_data.items.len);
+ inline for (fields) |field| {
+ comptime assert(field.type == Node.Index);
+ p.extra_data.appendAssumeCapacity(@field(extra, field.name));
+ }
+ return result;
+}
+
+fn warnExpected(p: *Parse, expected_token: Token.Tag) error{OutOfMemory}!void {
+ @setCold(true);
+ try p.warnMsg(.{
+ .tag = .expected_token,
+ .token = p.tok_i,
+ .extra = .{ .expected_tag = expected_token },
+ });
+}
+
+fn warn(p: *Parse, error_tag: AstError.Tag) error{OutOfMemory}!void {
+ @setCold(true);
+ try p.warnMsg(.{ .tag = error_tag, .token = p.tok_i });
+}
+
+fn warnMsg(p: *Parse, msg: Ast.Error) error{OutOfMemory}!void {
+ @setCold(true);
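+ // For "expected ..." errors, if the offending token starts a new line,
+ // attribute the error to the token at the end of the previous line instead,
+ // since that is usually where the missing token belongs.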
+ switch (msg.tag) {
+ .expected_semi_after_decl,
+ .expected_semi_after_stmt,
+ .expected_comma_after_field,
+ .expected_comma_after_arg,
+ .expected_comma_after_param,
+ .expected_comma_after_initializer,
+ .expected_comma_after_switch_prong,
+ .expected_semi_or_else,
+ .expected_semi_or_lbrace,
+ .expected_token,
+ .expected_block,
+ .expected_block_or_assignment,
+ .expected_block_or_expr,
+ .expected_block_or_field,
+ .expected_expr,
+ .expected_expr_or_assignment,
+ .expected_fn,
+ .expected_inlinable,
+ .expected_labelable,
+ .expected_param_list,
+ .expected_prefix_expr,
+ .expected_primary_type_expr,
+ .expected_pub_item,
+ .expected_return_type,
+ .expected_suffix_op,
+ .expected_type_expr,
+ .expected_var_decl,
+ .expected_var_decl_or_fn,
+ .expected_loop_payload,
+ .expected_container,
+ => if (msg.token != 0 and !p.tokensOnSameLine(msg.token - 1, msg.token)) {
+ var copy = msg;
+ copy.token_is_prev = true;
+ copy.token -= 1;
+ return p.errors.append(p.gpa, copy);
+ },
+ else => {},
+ }
+ try p.errors.append(p.gpa, msg);
+}
+
+fn fail(p: *Parse, tag: Ast.Error.Tag) error{ ParseError, OutOfMemory } {
+ @setCold(true);
+ return p.failMsg(.{ .tag = tag, .token = p.tok_i });
+}
+
+fn failExpected(p: *Parse, expected_token: Token.Tag) error{ ParseError, OutOfMemory } {
+ @setCold(true);
+ return p.failMsg(.{
+ .tag = .expected_token,
+ .token = p.tok_i,
+ .extra = .{ .expected_tag = expected_token },
+ });
+}
+
+fn failMsg(p: *Parse, msg: Ast.Error) error{ ParseError, OutOfMemory } {
+ @setCold(true);
+ try p.warnMsg(msg);
+ return error.ParseError;
+}
+
+/// Root <- skip container_doc_comment? ContainerMembers eof
+pub fn parseRoot(p: *Parse) !void {
+ // Root node must be index 0.
+ p.nodes.appendAssumeCapacity(.{
+ .tag = .root,
+ .main_token = 0,
+ .data = undefined,
+ });
+ const root_members = try p.parseContainerMembers();
+ const root_decls = try root_members.toSpan(p);
+ if (p.token_tags[p.tok_i] != .eof) {
+ try p.warnExpected(.eof);
+ }
+ p.nodes.items(.data)[0] = .{
+ .lhs = root_decls.start,
+ .rhs = root_decls.end,
+ };
+}
+
+/// Parse in ZON mode: a subset of the language.
+/// TODO: set a flag in the Parse struct and honor it by emitting
+/// compilation errors when non-ZON nodes are encountered.
+pub fn parseZon(p: *Parse) !void {
+ // We must use index 0 so that 0 can be used as null elsewhere.
+ p.nodes.appendAssumeCapacity(.{
+ .tag = .root,
+ .main_token = 0,
+ .data = undefined,
+ });
+ const node_index = p.expectExpr() catch |err| switch (err) {
+ error.ParseError => {
+ assert(p.errors.items.len > 0);
+ return;
+ },
+ else => |e| return e,
+ };
+ if (p.token_tags[p.tok_i] != .eof) {
+ try p.warnExpected(.eof);
+ }
+ p.nodes.items(.data)[0] = .{
+ .lhs = node_index,
+ .rhs = undefined,
+ };
+}
+
+/// ContainerMembers <- ContainerDeclarations (ContainerField COMMA)* (ContainerField / ContainerDeclarations)
+///
+/// ContainerDeclarations
+/// <- TestDecl ContainerDeclarations
+/// / ComptimeDecl ContainerDeclarations
+/// / doc_comment? KEYWORD_pub? Decl ContainerDeclarations
+/// /
+///
+/// ComptimeDecl <- KEYWORD_comptime Block
+fn parseContainerMembers(p: *Parse) !Members {
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+ var field_state: union(enum) {
+ /// No fields have been seen.
+ none,
+ /// Currently parsing fields.
+ seen,
+ /// Saw fields and then a declaration after them.
+ /// Payload is first token of previous declaration.
+ end: Node.Index,
+ /// There was a declaration between fields, don't report more errors.
+ err,
+ } = .none;
+
+ var last_field: TokenIndex = undefined;
+
+ // Skip container doc comments.
+ while (p.eatToken(.container_doc_comment)) |_| {}
+
+ var trailing = false;
+ while (true) {
+ const doc_comment = try p.eatDocComments();
+
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_test => {
+ if (doc_comment) |some| {
+ try p.warnMsg(.{ .tag = .test_doc_comment, .token = some });
+ }
+ const test_decl_node = try p.expectTestDeclRecoverable();
+ if (test_decl_node != 0) {
+ if (field_state == .seen) {
+ field_state = .{ .end = test_decl_node };
+ }
+ try p.scratch.append(p.gpa, test_decl_node);
+ }
+ trailing = false;
+ },
+ .keyword_comptime => switch (p.token_tags[p.tok_i + 1]) {
+ .l_brace => {
+ if (doc_comment) |some| {
+ try p.warnMsg(.{ .tag = .comptime_doc_comment, .token = some });
+ }
+ const comptime_token = p.nextToken();
+ const block = p.parseBlock() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => blk: {
+ p.findNextContainerMember();
+ break :blk null_node;
+ },
+ };
+ if (block != 0) {
+ const comptime_node = try p.addNode(.{
+ .tag = .@"comptime",
+ .main_token = comptime_token,
+ .data = .{
+ .lhs = block,
+ .rhs = undefined,
+ },
+ });
+ if (field_state == .seen) {
+ field_state = .{ .end = comptime_node };
+ }
+ try p.scratch.append(p.gpa, comptime_node);
+ }
+ trailing = false;
+ },
+ else => {
+ const identifier = p.tok_i;
+ defer last_field = identifier;
+ const container_field = p.expectContainerField() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => {
+ p.findNextContainerMember();
+ continue;
+ },
+ };
+ switch (field_state) {
+ .none => field_state = .seen,
+ .err, .seen => {},
+ .end => |node| {
+ try p.warnMsg(.{
+ .tag = .decl_between_fields,
+ .token = p.nodes.items(.main_token)[node],
+ });
+ try p.warnMsg(.{
+ .tag = .previous_field,
+ .is_note = true,
+ .token = last_field,
+ });
+ try p.warnMsg(.{
+ .tag = .next_field,
+ .is_note = true,
+ .token = identifier,
+ });
+ // Continue parsing; error will be reported later.
+ field_state = .err;
+ },
+ }
+ try p.scratch.append(p.gpa, container_field);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => {
+ p.tok_i += 1;
+ trailing = true;
+ continue;
+ },
+ .r_brace, .eof => {
+ trailing = false;
+ break;
+ },
+ else => {},
+ }
+ // A declaration is not allowed to follow a field without a comma.
+ // Report the error but recover the parser.
+ try p.warn(.expected_comma_after_field);
+ p.findNextContainerMember();
+ },
+ },
+ .keyword_pub => {
+ p.tok_i += 1;
+ const top_level_decl = try p.expectTopLevelDeclRecoverable();
+ if (top_level_decl != 0) {
+ if (field_state == .seen) {
+ field_state = .{ .end = top_level_decl };
+ }
+ try p.scratch.append(p.gpa, top_level_decl);
+ }
+ trailing = p.token_tags[p.tok_i - 1] == .semicolon;
+ },
+ .keyword_usingnamespace => {
+ const node = try p.expectUsingNamespaceRecoverable();
+ if (node != 0) {
+ if (field_state == .seen) {
+ field_state = .{ .end = node };
+ }
+ try p.scratch.append(p.gpa, node);
+ }
+ trailing = p.token_tags[p.tok_i - 1] == .semicolon;
+ },
+ .keyword_const,
+ .keyword_var,
+ .keyword_threadlocal,
+ .keyword_export,
+ .keyword_extern,
+ .keyword_inline,
+ .keyword_noinline,
+ .keyword_fn,
+ => {
+ const top_level_decl = try p.expectTopLevelDeclRecoverable();
+ if (top_level_decl != 0) {
+ if (field_state == .seen) {
+ field_state = .{ .end = top_level_decl };
+ }
+ try p.scratch.append(p.gpa, top_level_decl);
+ }
+ trailing = p.token_tags[p.tok_i - 1] == .semicolon;
+ },
+ .eof, .r_brace => {
+ if (doc_comment) |tok| {
+ try p.warnMsg(.{
+ .tag = .unattached_doc_comment,
+ .token = tok,
+ });
+ }
+ break;
+ },
+ else => {
+ const c_container = p.parseCStyleContainer() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => false,
+ };
+ if (c_container) continue;
+
+ const identifier = p.tok_i;
+ defer last_field = identifier;
+ const container_field = p.expectContainerField() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => {
+ p.findNextContainerMember();
+ continue;
+ },
+ };
+ switch (field_state) {
+ .none => field_state = .seen,
+ .err, .seen => {},
+ .end => |node| {
+ try p.warnMsg(.{
+ .tag = .decl_between_fields,
+ .token = p.nodes.items(.main_token)[node],
+ });
+ try p.warnMsg(.{
+ .tag = .previous_field,
+ .is_note = true,
+ .token = last_field,
+ });
+ try p.warnMsg(.{
+ .tag = .next_field,
+ .is_note = true,
+ .token = identifier,
+ });
+ // Continue parsing; error will be reported later.
+ field_state = .err;
+ },
+ }
+ try p.scratch.append(p.gpa, container_field);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => {
+ p.tok_i += 1;
+ trailing = true;
+ continue;
+ },
+ .r_brace, .eof => {
+ trailing = false;
+ break;
+ },
+ else => {},
+ }
+ // A declaration is not allowed to follow a field without a comma.
+ // Report the error but recover the parser.
+ try p.warn(.expected_comma_after_field);
+ if (p.token_tags[p.tok_i] == .semicolon and p.token_tags[identifier] == .identifier) {
+ try p.warnMsg(.{
+ .tag = .var_const_decl,
+ .is_note = true,
+ .token = identifier,
+ });
+ }
+ p.findNextContainerMember();
+ continue;
+ },
+ }
+ }
+
+ const items = p.scratch.items[scratch_top..];
+ switch (items.len) {
+ 0 => return Members{
+ .len = 0,
+ .lhs = 0,
+ .rhs = 0,
+ .trailing = trailing,
+ },
+ 1 => return Members{
+ .len = 1,
+ .lhs = items[0],
+ .rhs = 0,
+ .trailing = trailing,
+ },
+ 2 => return Members{
+ .len = 2,
+ .lhs = items[0],
+ .rhs = items[1],
+ .trailing = trailing,
+ },
+ else => {
+ const span = try p.listToSpan(items);
+ return Members{
+ .len = items.len,
+ .lhs = span.start,
+ .rhs = span.end,
+ .trailing = trailing,
+ };
+ },
+ }
+}
+
+/// Attempts to find the next container member by searching for certain tokens.
+fn findNextContainerMember(p: *Parse) void {
+ var level: u32 = 0;
+ while (true) {
+ const tok = p.nextToken();
+ switch (p.token_tags[tok]) {
+ // Any of these can start a new top level declaration.
+ .keyword_test,
+ .keyword_comptime,
+ .keyword_pub,
+ .keyword_export,
+ .keyword_extern,
+ .keyword_inline,
+ .keyword_noinline,
+ .keyword_usingnamespace,
+ .keyword_threadlocal,
+ .keyword_const,
+ .keyword_var,
+ .keyword_fn,
+ => {
+ if (level == 0) {
+ p.tok_i -= 1;
+ return;
+ }
+ },
+ .identifier => {
+ if (p.token_tags[tok + 1] == .comma and level == 0) {
+ p.tok_i -= 1;
+ return;
+ }
+ },
+ .comma, .semicolon => {
+ // this decl was likely meant to end here
+ if (level == 0) {
+ return;
+ }
+ },
+ .l_paren, .l_bracket, .l_brace => level += 1,
+ .r_paren, .r_bracket => {
+ if (level != 0) level -= 1;
+ },
+ .r_brace => {
+ if (level == 0) {
+ // end of container, exit
+ p.tok_i -= 1;
+ return;
+ }
+ level -= 1;
+ },
+ .eof => {
+ p.tok_i -= 1;
+ return;
+ },
+ else => {},
+ }
+ }
+}
+
+/// Attempts to find the next statement by searching for a semicolon
+fn findNextStmt(p: *Parse) void {
+ var level: u32 = 0;
+ while (true) {
+ const tok = p.nextToken();
+ switch (p.token_tags[tok]) {
+ .l_brace => level += 1,
+ .r_brace => {
+ if (level == 0) {
+ p.tok_i -= 1;
+ return;
+ }
+ level -= 1;
+ },
+ .semicolon => {
+ if (level == 0) {
+ return;
+ }
+ },
+ .eof => {
+ p.tok_i -= 1;
+ return;
+ },
+ else => {},
+ }
+ }
+}
+
+/// TestDecl <- KEYWORD_test (STRINGLITERALSINGLE / IDENTIFIER)? Block
+fn expectTestDecl(p: *Parse) !Node.Index {
+ const test_token = p.assertToken(.keyword_test);
+ const name_token = switch (p.token_tags[p.nextToken()]) {
+ .string_literal, .identifier => p.tok_i - 1,
+ else => blk: {
+ p.tok_i -= 1;
+ break :blk null;
+ },
+ };
+ const block_node = try p.parseBlock();
+ if (block_node == 0) return p.fail(.expected_block);
+ return p.addNode(.{
+ .tag = .test_decl,
+ .main_token = test_token,
+ .data = .{
+ .lhs = name_token orelse 0,
+ .rhs = block_node,
+ },
+ });
+}
+
+fn expectTestDeclRecoverable(p: *Parse) error{OutOfMemory}!Node.Index {
+ return p.expectTestDecl() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => {
+ p.findNextContainerMember();
+ return null_node;
+ },
+ };
+}
+
+/// Decl
+/// <- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block)
+/// / (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? VarDecl
+/// / KEYWORD_usingnamespace Expr SEMICOLON
+fn expectTopLevelDecl(p: *Parse) !Node.Index {
+ const extern_export_inline_token = p.nextToken();
+ var is_extern: bool = false;
+ var expect_fn: bool = false;
+ var expect_var_or_fn: bool = false;
+ switch (p.token_tags[extern_export_inline_token]) {
+ .keyword_extern => {
+ _ = p.eatToken(.string_literal);
+ is_extern = true;
+ expect_var_or_fn = true;
+ },
+ .keyword_export => expect_var_or_fn = true,
+ .keyword_inline, .keyword_noinline => expect_fn = true,
+ else => p.tok_i -= 1,
+ }
+ const fn_proto = try p.parseFnProto();
+ if (fn_proto != 0) {
+ switch (p.token_tags[p.tok_i]) {
+ .semicolon => {
+ p.tok_i += 1;
+ return fn_proto;
+ },
+ .l_brace => {
+ if (is_extern) {
+ try p.warnMsg(.{ .tag = .extern_fn_body, .token = extern_export_inline_token });
+ return null_node;
+ }
+ const fn_decl_index = try p.reserveNode(.fn_decl);
+ errdefer p.unreserveNode(fn_decl_index);
+
+ const body_block = try p.parseBlock();
+ assert(body_block != 0);
+ return p.setNode(fn_decl_index, .{
+ .tag = .fn_decl,
+ .main_token = p.nodes.items(.main_token)[fn_proto],
+ .data = .{
+ .lhs = fn_proto,
+ .rhs = body_block,
+ },
+ });
+ },
+ else => {
+ // Since parseBlock only returns error.ParseError on
+ // a missing '}', we can assume this function was
+ // supposed to end here.
+ try p.warn(.expected_semi_or_lbrace);
+ return null_node;
+ },
+ }
+ }
+ if (expect_fn) {
+ try p.warn(.expected_fn);
+ return error.ParseError;
+ }
+
+ const thread_local_token = p.eatToken(.keyword_threadlocal);
+ const var_decl = try p.parseVarDecl();
+ if (var_decl != 0) {
+ try p.expectSemicolon(.expected_semi_after_decl, false);
+ return var_decl;
+ }
+ if (thread_local_token != null) {
+ return p.fail(.expected_var_decl);
+ }
+ if (expect_var_or_fn) {
+ return p.fail(.expected_var_decl_or_fn);
+ }
+ if (p.token_tags[p.tok_i] != .keyword_usingnamespace) {
+ return p.fail(.expected_pub_item);
+ }
+ return p.expectUsingNamespace();
+}
+
+fn expectTopLevelDeclRecoverable(p: *Parse) error{OutOfMemory}!Node.Index {
+ return p.expectTopLevelDecl() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => {
+ p.findNextContainerMember();
+ return null_node;
+ },
+ };
+}
+
+fn expectUsingNamespace(p: *Parse) !Node.Index {
+ const usingnamespace_token = p.assertToken(.keyword_usingnamespace);
+ const expr = try p.expectExpr();
+ try p.expectSemicolon(.expected_semi_after_decl, false);
+ return p.addNode(.{
+ .tag = .@"usingnamespace",
+ .main_token = usingnamespace_token,
+ .data = .{
+ .lhs = expr,
+ .rhs = undefined,
+ },
+ });
+}
+
+fn expectUsingNamespaceRecoverable(p: *Parse) error{OutOfMemory}!Node.Index {
+ return p.expectUsingNamespace() catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => {
+ p.findNextContainerMember();
+ return null_node;
+ },
+ };
+}
+
+/// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
+fn parseFnProto(p: *Parse) !Node.Index {
+ const fn_token = p.eatToken(.keyword_fn) orelse return null_node;
+
+ // We want the fn proto node to be before its children in the array.
+ const fn_proto_index = try p.reserveNode(.fn_proto);
+ errdefer p.unreserveNode(fn_proto_index);
+
+ _ = p.eatToken(.identifier);
+ const params = try p.parseParamDeclList();
+ const align_expr = try p.parseByteAlign();
+ const addrspace_expr = try p.parseAddrSpace();
+ const section_expr = try p.parseLinkSection();
+ const callconv_expr = try p.parseCallconv();
+ _ = p.eatToken(.bang);
+
+ const return_type_expr = try p.parseTypeExpr();
+ if (return_type_expr == 0) {
+ // Most likely the user forgot to specify the return type;
+ // mark the return type as invalid and try to continue.
+ try p.warn(.expected_return_type);
+ }
+
+ if (align_expr == 0 and section_expr == 0 and callconv_expr == 0 and addrspace_expr == 0) {
+ switch (params) {
+ .zero_or_one => |param| return p.setNode(fn_proto_index, .{
+ .tag = .fn_proto_simple,
+ .main_token = fn_token,
+ .data = .{
+ .lhs = param,
+ .rhs = return_type_expr,
+ },
+ }),
+ .multi => |span| {
+ return p.setNode(fn_proto_index, .{
+ .tag = .fn_proto_multi,
+ .main_token = fn_token,
+ .data = .{
+ .lhs = try p.addExtra(Node.SubRange{
+ .start = span.start,
+ .end = span.end,
+ }),
+ .rhs = return_type_expr,
+ },
+ });
+ },
+ }
+ }
+ switch (params) {
+ .zero_or_one => |param| return p.setNode(fn_proto_index, .{
+ .tag = .fn_proto_one,
+ .main_token = fn_token,
+ .data = .{
+ .lhs = try p.addExtra(Node.FnProtoOne{
+ .param = param,
+ .align_expr = align_expr,
+ .addrspace_expr = addrspace_expr,
+ .section_expr = section_expr,
+ .callconv_expr = callconv_expr,
+ }),
+ .rhs = return_type_expr,
+ },
+ }),
+ .multi => |span| {
+ return p.setNode(fn_proto_index, .{
+ .tag = .fn_proto,
+ .main_token = fn_token,
+ .data = .{
+ .lhs = try p.addExtra(Node.FnProto{
+ .params_start = span.start,
+ .params_end = span.end,
+ .align_expr = align_expr,
+ .addrspace_expr = addrspace_expr,
+ .section_expr = section_expr,
+ .callconv_expr = callconv_expr,
+ }),
+ .rhs = return_type_expr,
+ },
+ });
+ },
+ }
+}
+
+/// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? AddrSpace? LinkSection? (EQUAL Expr)? SEMICOLON
+fn parseVarDecl(p: *Parse) !Node.Index {
+ const mut_token = p.eatToken(.keyword_const) orelse
+ p.eatToken(.keyword_var) orelse
+ return null_node;
+
+ _ = try p.expectToken(.identifier);
+ const type_node: Node.Index = if (p.eatToken(.colon) == null) 0 else try p.expectTypeExpr();
+ const align_node = try p.parseByteAlign();
+ const addrspace_node = try p.parseAddrSpace();
+ const section_node = try p.parseLinkSection();
+ const init_node: Node.Index = switch (p.token_tags[p.tok_i]) {
+ .equal_equal => blk: {
+ try p.warn(.wrong_equal_var_decl);
+ p.tok_i += 1;
+ break :blk try p.expectExpr();
+ },
+ .equal => blk: {
+ p.tok_i += 1;
+ break :blk try p.expectExpr();
+ },
+ else => 0,
+ };
+ if (section_node == 0 and addrspace_node == 0) {
+ if (align_node == 0) {
+ return p.addNode(.{
+ .tag = .simple_var_decl,
+ .main_token = mut_token,
+ .data = .{
+ .lhs = type_node,
+ .rhs = init_node,
+ },
+ });
+ } else if (type_node == 0) {
+ return p.addNode(.{
+ .tag = .aligned_var_decl,
+ .main_token = mut_token,
+ .data = .{
+ .lhs = align_node,
+ .rhs = init_node,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .local_var_decl,
+ .main_token = mut_token,
+ .data = .{
+ .lhs = try p.addExtra(Node.LocalVarDecl{
+ .type_node = type_node,
+ .align_node = align_node,
+ }),
+ .rhs = init_node,
+ },
+ });
+ }
+ } else {
+ return p.addNode(.{
+ .tag = .global_var_decl,
+ .main_token = mut_token,
+ .data = .{
+ .lhs = try p.addExtra(Node.GlobalVarDecl{
+ .type_node = type_node,
+ .align_node = align_node,
+ .addrspace_node = addrspace_node,
+ .section_node = section_node,
+ }),
+ .rhs = init_node,
+ },
+ });
+ }
+}
+
+/// ContainerField
+/// <- doc_comment? KEYWORD_comptime? IDENTIFIER (COLON TypeExpr)? ByteAlign? (EQUAL Expr)?
+/// / doc_comment? KEYWORD_comptime? (IDENTIFIER COLON)? !KEYWORD_fn TypeExpr ByteAlign? (EQUAL Expr)?
+fn expectContainerField(p: *Parse) !Node.Index {
+ var main_token = p.tok_i;
+ _ = p.eatToken(.keyword_comptime);
+ const tuple_like = p.token_tags[p.tok_i] != .identifier or p.token_tags[p.tok_i + 1] != .colon;
+ if (!tuple_like) {
+ main_token = p.assertToken(.identifier);
+ }
+
+ var align_expr: Node.Index = 0;
+ var type_expr: Node.Index = 0;
+ if (p.eatToken(.colon) != null or tuple_like) {
+ type_expr = try p.expectTypeExpr();
+ align_expr = try p.parseByteAlign();
+ }
+
+ const value_expr: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
+
+ if (align_expr == 0) {
+ return p.addNode(.{
+ .tag = .container_field_init,
+ .main_token = main_token,
+ .data = .{
+ .lhs = type_expr,
+ .rhs = value_expr,
+ },
+ });
+ } else if (value_expr == 0) {
+ return p.addNode(.{
+ .tag = .container_field_align,
+ .main_token = main_token,
+ .data = .{
+ .lhs = type_expr,
+ .rhs = align_expr,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .container_field,
+ .main_token = main_token,
+ .data = .{
+ .lhs = type_expr,
+ .rhs = try p.addExtra(Node.ContainerField{
+ .value_expr = value_expr,
+ .align_expr = align_expr,
+ }),
+ },
+ });
+ }
+}
+
+/// Statement
+/// <- KEYWORD_comptime? VarDecl
+/// / KEYWORD_comptime BlockExprStatement
+/// / KEYWORD_nosuspend BlockExprStatement
+/// / KEYWORD_suspend BlockExprStatement
+/// / KEYWORD_defer BlockExprStatement
+/// / KEYWORD_errdefer Payload? BlockExprStatement
+/// / IfStatement
+/// / LabeledStatement
+/// / SwitchExpr
+/// / AssignExpr SEMICOLON
+fn parseStatement(p: *Parse, allow_defer_var: bool) Error!Node.Index {
+ const comptime_token = p.eatToken(.keyword_comptime);
+
+ if (allow_defer_var) {
+ const var_decl = try p.parseVarDecl();
+ if (var_decl != 0) {
+ try p.expectSemicolon(.expected_semi_after_decl, true);
+ return var_decl;
+ }
+ }
+
+ if (comptime_token) |token| {
+ return p.addNode(.{
+ .tag = .@"comptime",
+ .main_token = token,
+ .data = .{
+ .lhs = try p.expectBlockExprStatement(),
+ .rhs = undefined,
+ },
+ });
+ }
+
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_nosuspend => {
+ return p.addNode(.{
+ .tag = .@"nosuspend",
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = try p.expectBlockExprStatement(),
+ .rhs = undefined,
+ },
+ });
+ },
+ .keyword_suspend => {
+ const token = p.nextToken();
+ const block_expr = try p.expectBlockExprStatement();
+ return p.addNode(.{
+ .tag = .@"suspend",
+ .main_token = token,
+ .data = .{
+ .lhs = block_expr,
+ .rhs = undefined,
+ },
+ });
+ },
+ .keyword_defer => if (allow_defer_var) return p.addNode(.{
+ .tag = .@"defer",
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = undefined,
+ .rhs = try p.expectBlockExprStatement(),
+ },
+ }),
+ .keyword_errdefer => if (allow_defer_var) return p.addNode(.{
+ .tag = .@"errdefer",
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = try p.parsePayload(),
+ .rhs = try p.expectBlockExprStatement(),
+ },
+ }),
+ .keyword_switch => return p.expectSwitchExpr(),
+ .keyword_if => return p.expectIfStatement(),
+ .keyword_enum, .keyword_struct, .keyword_union => {
+ const identifier = p.tok_i + 1;
+ if (try p.parseCStyleContainer()) {
+ // Return something so that `expectStatement` is happy.
+ return p.addNode(.{
+ .tag = .identifier,
+ .main_token = identifier,
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ });
+ }
+ },
+ else => {},
+ }
+
+ const labeled_statement = try p.parseLabeledStatement();
+ if (labeled_statement != 0) return labeled_statement;
+
+ const assign_expr = try p.parseAssignExpr();
+ if (assign_expr != 0) {
+ try p.expectSemicolon(.expected_semi_after_stmt, true);
+ return assign_expr;
+ }
+
+ return null_node;
+}
+
+fn expectStatement(p: *Parse, allow_defer_var: bool) !Node.Index {
+ const statement = try p.parseStatement(allow_defer_var);
+ if (statement == 0) {
+ return p.fail(.expected_statement);
+ }
+ return statement;
+}
+
+/// If a parse error occurs, reports an error, but then finds the next statement
+/// and returns that one instead. If a parse error occurs but there is no following
+/// statement, returns 0.
+fn expectStatementRecoverable(p: *Parse) Error!Node.Index {
+ while (true) {
+ return p.expectStatement(true) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.ParseError => {
+ p.findNextStmt(); // Try to skip to the next statement.
+ switch (p.token_tags[p.tok_i]) {
+ .r_brace => return null_node,
+ .eof => return error.ParseError,
+ else => continue,
+ }
+ },
+ };
+ }
+}
+
+/// IfStatement
+/// <- IfPrefix BlockExpr ( KEYWORD_else Payload? Statement )?
+/// / IfPrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
+fn expectIfStatement(p: *Parse) !Node.Index {
+ const if_token = p.assertToken(.keyword_if);
+ _ = try p.expectToken(.l_paren);
+ const condition = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ _ = try p.parsePtrPayload();
+
+ // TODO propose to change the syntax so that semicolons are always required
+ // inside if statements, even if there is an `else`.
+ var else_required = false;
+ const then_expr = blk: {
+ const block_expr = try p.parseBlockExpr();
+ if (block_expr != 0) break :blk block_expr;
+ const assign_expr = try p.parseAssignExpr();
+ if (assign_expr == 0) {
+ return p.fail(.expected_block_or_assignment);
+ }
+ if (p.eatToken(.semicolon)) |_| {
+ return p.addNode(.{
+ .tag = .if_simple,
+ .main_token = if_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = assign_expr,
+ },
+ });
+ }
+ else_required = true;
+ break :blk assign_expr;
+ };
+ _ = p.eatToken(.keyword_else) orelse {
+ if (else_required) {
+ try p.warn(.expected_semi_or_else);
+ }
+ return p.addNode(.{
+ .tag = .if_simple,
+ .main_token = if_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = then_expr,
+ },
+ });
+ };
+ _ = try p.parsePayload();
+ const else_expr = try p.expectStatement(false);
+ return p.addNode(.{
+ .tag = .@"if",
+ .main_token = if_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.If{
+ .then_expr = then_expr,
+ .else_expr = else_expr,
+ }),
+ },
+ });
+}
+
+/// LabeledStatement <- BlockLabel? (Block / LoopStatement)
+fn parseLabeledStatement(p: *Parse) !Node.Index {
+ const label_token = p.parseBlockLabel();
+ const block = try p.parseBlock();
+ if (block != 0) return block;
+
+ const loop_stmt = try p.parseLoopStatement();
+ if (loop_stmt != 0) return loop_stmt;
+
+ if (label_token != 0) {
+ const after_colon = p.tok_i;
+ const node = try p.parseTypeExpr();
+ if (node != 0) {
+ const a = try p.parseByteAlign();
+ const b = try p.parseAddrSpace();
+ const c = try p.parseLinkSection();
+ const d = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
+ if (a != 0 or b != 0 or c != 0 or d != 0) {
+ return p.failMsg(.{ .tag = .expected_var_const, .token = label_token });
+ }
+ }
+ return p.failMsg(.{ .tag = .expected_labelable, .token = after_colon });
+ }
+
+ return null_node;
+}
+
+/// LoopStatement <- KEYWORD_inline? (ForStatement / WhileStatement)
+fn parseLoopStatement(p: *Parse) !Node.Index {
+ const inline_token = p.eatToken(.keyword_inline);
+
+ const for_statement = try p.parseForStatement();
+ if (for_statement != 0) return for_statement;
+
+ const while_statement = try p.parseWhileStatement();
+ if (while_statement != 0) return while_statement;
+
+ if (inline_token == null) return null_node;
+
+ // If we've seen "inline", there should have been a "for" or "while"
+ return p.fail(.expected_inlinable);
+}
+
+/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+///
+/// ForStatement
+/// <- ForPrefix BlockExpr ( KEYWORD_else Statement )?
+/// / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement )
+fn parseForStatement(p: *Parse) !Node.Index {
+ const for_token = p.eatToken(.keyword_for) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const array_expr = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ const found_payload = try p.parsePtrIndexPayload();
+ if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+ // TODO propose to change the syntax so that semicolons are always required
+ // inside for statements, even if there is an `else`.
+ var else_required = false;
+ const then_expr = blk: {
+ const block_expr = try p.parseBlockExpr();
+ if (block_expr != 0) break :blk block_expr;
+ const assign_expr = try p.parseAssignExpr();
+ if (assign_expr == 0) {
+ return p.fail(.expected_block_or_assignment);
+ }
+ if (p.eatToken(.semicolon)) |_| {
+ return p.addNode(.{
+ .tag = .for_simple,
+ .main_token = for_token,
+ .data = .{
+ .lhs = array_expr,
+ .rhs = assign_expr,
+ },
+ });
+ }
+ else_required = true;
+ break :blk assign_expr;
+ };
+ _ = p.eatToken(.keyword_else) orelse {
+ if (else_required) {
+ try p.warn(.expected_semi_or_else);
+ }
+ return p.addNode(.{
+ .tag = .for_simple,
+ .main_token = for_token,
+ .data = .{
+ .lhs = array_expr,
+ .rhs = then_expr,
+ },
+ });
+ };
+ return p.addNode(.{
+ .tag = .@"for",
+ .main_token = for_token,
+ .data = .{
+ .lhs = array_expr,
+ .rhs = try p.addExtra(Node.If{
+ .then_expr = then_expr,
+ .else_expr = try p.expectStatement(false),
+ }),
+ },
+ });
+}
+
+/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+///
+/// WhileStatement
+/// <- WhilePrefix BlockExpr ( KEYWORD_else Payload? Statement )?
+/// / WhilePrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
+fn parseWhileStatement(p: *Parse) !Node.Index {
+ const while_token = p.eatToken(.keyword_while) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const condition = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ _ = try p.parsePtrPayload();
+ const cont_expr = try p.parseWhileContinueExpr();
+
+ // TODO propose to change the syntax so that semicolons are always required
+ // inside while statements, even if there is an `else`.
+ var else_required = false;
+ const then_expr = blk: {
+ const block_expr = try p.parseBlockExpr();
+ if (block_expr != 0) break :blk block_expr;
+ const assign_expr = try p.parseAssignExpr();
+ if (assign_expr == 0) {
+ return p.fail(.expected_block_or_assignment);
+ }
+ if (p.eatToken(.semicolon)) |_| {
+ if (cont_expr == 0) {
+ return p.addNode(.{
+ .tag = .while_simple,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = assign_expr,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .while_cont,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.WhileCont{
+ .cont_expr = cont_expr,
+ .then_expr = assign_expr,
+ }),
+ },
+ });
+ }
+ }
+ else_required = true;
+ break :blk assign_expr;
+ };
+ _ = p.eatToken(.keyword_else) orelse {
+ if (else_required) {
+ try p.warn(.expected_semi_or_else);
+ }
+ if (cont_expr == 0) {
+ return p.addNode(.{
+ .tag = .while_simple,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = then_expr,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .while_cont,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.WhileCont{
+ .cont_expr = cont_expr,
+ .then_expr = then_expr,
+ }),
+ },
+ });
+ }
+ };
+ _ = try p.parsePayload();
+ const else_expr = try p.expectStatement(false);
+ return p.addNode(.{
+ .tag = .@"while",
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.While{
+ .cont_expr = cont_expr,
+ .then_expr = then_expr,
+ .else_expr = else_expr,
+ }),
+ },
+ });
+}
+
+/// BlockExprStatement
+/// <- BlockExpr
+/// / AssignExpr SEMICOLON
+fn parseBlockExprStatement(p: *Parse) !Node.Index {
+ const block_expr = try p.parseBlockExpr();
+ if (block_expr != 0) {
+ return block_expr;
+ }
+ const assign_expr = try p.parseAssignExpr();
+ if (assign_expr != 0) {
+ try p.expectSemicolon(.expected_semi_after_stmt, true);
+ return assign_expr;
+ }
+ return null_node;
+}
+
+fn expectBlockExprStatement(p: *Parse) !Node.Index {
+ const node = try p.parseBlockExprStatement();
+ if (node == 0) {
+ return p.fail(.expected_block_or_expr);
+ }
+ return node;
+}
+
+/// BlockExpr <- BlockLabel? Block
+fn parseBlockExpr(p: *Parse) Error!Node.Index {
+ switch (p.token_tags[p.tok_i]) {
+ .identifier => {
+ if (p.token_tags[p.tok_i + 1] == .colon and
+ p.token_tags[p.tok_i + 2] == .l_brace)
+ {
+ p.tok_i += 2;
+ return p.parseBlock();
+ } else {
+ return null_node;
+ }
+ },
+ .l_brace => return p.parseBlock(),
+ else => return null_node,
+ }
+}
+
+/// AssignExpr <- Expr (AssignOp Expr)?
+///
+/// AssignOp
+/// <- ASTERISKEQUAL
+/// / ASTERISKPIPEEQUAL
+/// / SLASHEQUAL
+/// / PERCENTEQUAL
+/// / PLUSEQUAL
+/// / PLUSPIPEEQUAL
+/// / MINUSEQUAL
+/// / MINUSPIPEEQUAL
+/// / LARROW2EQUAL
+/// / LARROW2PIPEEQUAL
+/// / RARROW2EQUAL
+/// / AMPERSANDEQUAL
+/// / CARETEQUAL
+/// / PIPEEQUAL
+/// / ASTERISKPERCENTEQUAL
+/// / PLUSPERCENTEQUAL
+/// / MINUSPERCENTEQUAL
+/// / EQUAL
+fn parseAssignExpr(p: *Parse) !Node.Index {
+ const expr = try p.parseExpr();
+ if (expr == 0) return null_node;
+
+ const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
+ .asterisk_equal => .assign_mul,
+ .slash_equal => .assign_div,
+ .percent_equal => .assign_mod,
+ .plus_equal => .assign_add,
+ .minus_equal => .assign_sub,
+ .angle_bracket_angle_bracket_left_equal => .assign_shl,
+ .angle_bracket_angle_bracket_left_pipe_equal => .assign_shl_sat,
+ .angle_bracket_angle_bracket_right_equal => .assign_shr,
+ .ampersand_equal => .assign_bit_and,
+ .caret_equal => .assign_bit_xor,
+ .pipe_equal => .assign_bit_or,
+ .asterisk_percent_equal => .assign_mul_wrap,
+ .plus_percent_equal => .assign_add_wrap,
+ .minus_percent_equal => .assign_sub_wrap,
+ .asterisk_pipe_equal => .assign_mul_sat,
+ .plus_pipe_equal => .assign_add_sat,
+ .minus_pipe_equal => .assign_sub_sat,
+ .equal => .assign,
+ else => return expr,
+ };
+ return p.addNode(.{
+ .tag = tag,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = expr,
+ .rhs = try p.expectExpr(),
+ },
+ });
+}
+
+fn expectAssignExpr(p: *Parse) !Node.Index {
+ const expr = try p.parseAssignExpr();
+ if (expr == 0) {
+ return p.fail(.expected_expr_or_assignment);
+ }
+ return expr;
+}
+
+fn parseExpr(p: *Parse) Error!Node.Index {
+ return p.parseExprPrecedence(0);
+}
+
+fn expectExpr(p: *Parse) Error!Node.Index {
+ const node = try p.parseExpr();
+ if (node == 0) {
+ return p.fail(.expected_expr);
+ } else {
+ return node;
+ }
+}
+
+const Assoc = enum {
+ left,
+ none,
+};
+
+const OperInfo = struct {
+ prec: i8,
+ tag: Node.Tag,
+ assoc: Assoc = Assoc.left,
+};
+
+// A table of binary operator information. Higher precedence numbers are
+// stickier. All operators at the same precedence level should have the same
+// associativity.
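+// For example, in `a + b * c` the `*` entry (prec 70) is stickier than the
+// `+` entry (prec 60), so `b * c` becomes the right-hand operand of `+`.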
+const operTable = std.enums.directEnumArrayDefault(Token.Tag, OperInfo, .{ .prec = -1, .tag = Node.Tag.root }, 0, .{
+ .keyword_or = .{ .prec = 10, .tag = .bool_or },
+
+ .keyword_and = .{ .prec = 20, .tag = .bool_and },
+
+ .equal_equal = .{ .prec = 30, .tag = .equal_equal, .assoc = Assoc.none },
+ .bang_equal = .{ .prec = 30, .tag = .bang_equal, .assoc = Assoc.none },
+ .angle_bracket_left = .{ .prec = 30, .tag = .less_than, .assoc = Assoc.none },
+ .angle_bracket_right = .{ .prec = 30, .tag = .greater_than, .assoc = Assoc.none },
+ .angle_bracket_left_equal = .{ .prec = 30, .tag = .less_or_equal, .assoc = Assoc.none },
+ .angle_bracket_right_equal = .{ .prec = 30, .tag = .greater_or_equal, .assoc = Assoc.none },
+
+ .ampersand = .{ .prec = 40, .tag = .bit_and },
+ .caret = .{ .prec = 40, .tag = .bit_xor },
+ .pipe = .{ .prec = 40, .tag = .bit_or },
+ .keyword_orelse = .{ .prec = 40, .tag = .@"orelse" },
+ .keyword_catch = .{ .prec = 40, .tag = .@"catch" },
+
+ .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .shl },
+ .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .shl_sat },
+ .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .shr },
+
+ .plus = .{ .prec = 60, .tag = .add },
+ .minus = .{ .prec = 60, .tag = .sub },
+ .plus_plus = .{ .prec = 60, .tag = .array_cat },
+ .plus_percent = .{ .prec = 60, .tag = .add_wrap },
+ .minus_percent = .{ .prec = 60, .tag = .sub_wrap },
+ .plus_pipe = .{ .prec = 60, .tag = .add_sat },
+ .minus_pipe = .{ .prec = 60, .tag = .sub_sat },
+
+ .pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets },
+ .asterisk = .{ .prec = 70, .tag = .mul },
+ .slash = .{ .prec = 70, .tag = .div },
+ .percent = .{ .prec = 70, .tag = .mod },
+ .asterisk_asterisk = .{ .prec = 70, .tag = .array_mult },
+ .asterisk_percent = .{ .prec = 70, .tag = .mul_wrap },
+ .asterisk_pipe = .{ .prec = 70, .tag = .mul_sat },
+});
+
+fn parseExprPrecedence(p: *Parse, min_prec: i32) Error!Node.Index {
+ assert(min_prec >= 0);
+ var node = try p.parsePrefixExpr();
+ if (node == 0) {
+ return null_node;
+ }
+
+ var banned_prec: i8 = -1;
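+ // Tracks the precedence of a non-associative operator that was just parsed;
+ // seeing another operator at the same precedence (e.g. `a == b == c`) is an error.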
+
+ while (true) {
+ const tok_tag = p.token_tags[p.tok_i];
+ const info = operTable[@intCast(usize, @enumToInt(tok_tag))];
+ if (info.prec < min_prec) {
+ break;
+ }
+ if (info.prec == banned_prec) {
+ return p.fail(.chained_comparison_operators);
+ }
+
+ const oper_token = p.nextToken();
+ // Special-case handling for "catch"
+ if (tok_tag == .keyword_catch) {
+ _ = try p.parsePayload();
+ }
+ const rhs = try p.parseExprPrecedence(info.prec + 1);
+ if (rhs == 0) {
+ try p.warn(.expected_expr);
+ return node;
+ }
+
+ {
+ const tok_len = tok_tag.lexeme().?.len;
+ const char_before = p.source[p.token_starts[oper_token] - 1];
+ const char_after = p.source[p.token_starts[oper_token] + tok_len];
+ if (tok_tag == .ampersand and char_after == '&') {
+ // Without type information we don't know whether '&&' was intended as 'bitwise_and address_of' or as a C-style logical_and.
+ // The best the parser can do is recommend changing it to 'and' or ' & &'.
+ try p.warnMsg(.{ .tag = .invalid_ampersand_ampersand, .token = oper_token });
+ } else if (std.ascii.isWhitespace(char_before) != std.ascii.isWhitespace(char_after)) {
+ try p.warnMsg(.{ .tag = .mismatched_binary_op_whitespace, .token = oper_token });
+ }
+ }
+
+ node = try p.addNode(.{
+ .tag = info.tag,
+ .main_token = oper_token,
+ .data = .{
+ .lhs = node,
+ .rhs = rhs,
+ },
+ });
+
+ if (info.assoc == Assoc.none) {
+ banned_prec = info.prec;
+ }
+ }
+
+ return node;
+}
+
+/// PrefixExpr <- PrefixOp* PrimaryExpr
+///
+/// PrefixOp
+/// <- EXCLAMATIONMARK
+/// / MINUS
+/// / TILDE
+/// / MINUSPERCENT
+/// / AMPERSAND
+/// / KEYWORD_try
+/// / KEYWORD_await
+fn parsePrefixExpr(p: *Parse) Error!Node.Index {
+ const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
+ .bang => .bool_not,
+ .minus => .negation,
+ .tilde => .bit_not,
+ .minus_percent => .negation_wrap,
+ .ampersand => .address_of,
+ .keyword_try => .@"try",
+ .keyword_await => .@"await",
+ else => return p.parsePrimaryExpr(),
+ };
+ return p.addNode(.{
+ .tag = tag,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = try p.expectPrefixExpr(),
+ .rhs = undefined,
+ },
+ });
+}
+
+fn expectPrefixExpr(p: *Parse) Error!Node.Index {
+ const node = try p.parsePrefixExpr();
+ if (node == 0) {
+ return p.fail(.expected_prefix_expr);
+ }
+ return node;
+}
+
+/// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
+///
+/// PrefixTypeOp
+/// <- QUESTIONMARK
+/// / KEYWORD_anyframe MINUSRARROW
+/// / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+/// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON Expr COLON Expr)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
+/// / ArrayTypeStart
+///
+/// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
+///
+/// PtrTypeStart
+/// <- ASTERISK
+/// / ASTERISK2
+/// / LBRACKET ASTERISK (LETTERC / COLON Expr)? RBRACKET
+///
+/// ArrayTypeStart <- LBRACKET Expr (COLON Expr)? RBRACKET
+fn parseTypeExpr(p: *Parse) Error!Node.Index {
+ switch (p.token_tags[p.tok_i]) {
+ .question_mark => return p.addNode(.{
+ .tag = .optional_type,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = try p.expectTypeExpr(),
+ .rhs = undefined,
+ },
+ }),
+ .keyword_anyframe => switch (p.token_tags[p.tok_i + 1]) {
+ .arrow => return p.addNode(.{
+ .tag = .anyframe_type,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = p.nextToken(),
+ .rhs = try p.expectTypeExpr(),
+ },
+ }),
+ else => return p.parseErrorUnionExpr(),
+ },
+ .asterisk => {
+ const asterisk = p.nextToken();
+ const mods = try p.parsePtrModifiers();
+ const elem_type = try p.expectTypeExpr();
+ if (mods.bit_range_start != 0) {
+ return p.addNode(.{
+ .tag = .ptr_type_bit_range,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrTypeBitRange{
+ .sentinel = 0,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ .bit_range_start = mods.bit_range_start,
+ .bit_range_end = mods.bit_range_end,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ } else if (mods.addrspace_node != 0) {
+ return p.addNode(.{
+ .tag = .ptr_type,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrType{
+ .sentinel = 0,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .ptr_type_aligned,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = mods.align_node,
+ .rhs = elem_type,
+ },
+ });
+ }
+ },
+ .asterisk_asterisk => {
+ const asterisk = p.nextToken();
+ const mods = try p.parsePtrModifiers();
+ const elem_type = try p.expectTypeExpr();
+ const inner: Node.Index = inner: {
+ if (mods.bit_range_start != 0) {
+ break :inner try p.addNode(.{
+ .tag = .ptr_type_bit_range,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrTypeBitRange{
+ .sentinel = 0,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ .bit_range_start = mods.bit_range_start,
+ .bit_range_end = mods.bit_range_end,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ } else if (mods.addrspace_node != 0) {
+ break :inner try p.addNode(.{
+ .tag = .ptr_type,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrType{
+ .sentinel = 0,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ } else {
+ break :inner try p.addNode(.{
+ .tag = .ptr_type_aligned,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = mods.align_node,
+ .rhs = elem_type,
+ },
+ });
+ }
+ };
+ return p.addNode(.{
+ .tag = .ptr_type_aligned,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = 0,
+ .rhs = inner,
+ },
+ });
+ },
+ .l_bracket => switch (p.token_tags[p.tok_i + 1]) {
+ .asterisk => {
+ _ = p.nextToken();
+ const asterisk = p.nextToken();
+ var sentinel: Node.Index = 0;
+ if (p.eatToken(.identifier)) |ident| {
+ const ident_slice = p.source[p.token_starts[ident]..p.token_starts[ident + 1]];
+ if (!std.mem.eql(u8, std.mem.trimRight(u8, ident_slice, &std.ascii.whitespace), "c")) {
+ p.tok_i -= 1;
+ }
+ } else if (p.eatToken(.colon)) |_| {
+ sentinel = try p.expectExpr();
+ }
+ _ = try p.expectToken(.r_bracket);
+ const mods = try p.parsePtrModifiers();
+ const elem_type = try p.expectTypeExpr();
+ if (mods.bit_range_start == 0) {
+ if (sentinel == 0 and mods.addrspace_node == 0) {
+ return p.addNode(.{
+ .tag = .ptr_type_aligned,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = mods.align_node,
+ .rhs = elem_type,
+ },
+ });
+ } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
+ return p.addNode(.{
+ .tag = .ptr_type_sentinel,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = sentinel,
+ .rhs = elem_type,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .ptr_type,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrType{
+ .sentinel = sentinel,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ }
+ } else {
+ return p.addNode(.{
+ .tag = .ptr_type_bit_range,
+ .main_token = asterisk,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrTypeBitRange{
+ .sentinel = sentinel,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ .bit_range_start = mods.bit_range_start,
+ .bit_range_end = mods.bit_range_end,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ }
+ },
+ else => {
+ const lbracket = p.nextToken();
+ const len_expr = try p.parseExpr();
+ const sentinel: Node.Index = if (p.eatToken(.colon)) |_|
+ try p.expectExpr()
+ else
+ 0;
+ _ = try p.expectToken(.r_bracket);
+ if (len_expr == 0) {
+ const mods = try p.parsePtrModifiers();
+ const elem_type = try p.expectTypeExpr();
+ if (mods.bit_range_start != 0) {
+ try p.warnMsg(.{
+ .tag = .invalid_bit_range,
+ .token = p.nodes.items(.main_token)[mods.bit_range_start],
+ });
+ }
+ if (sentinel == 0 and mods.addrspace_node == 0) {
+ return p.addNode(.{
+ .tag = .ptr_type_aligned,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = mods.align_node,
+ .rhs = elem_type,
+ },
+ });
+ } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
+ return p.addNode(.{
+ .tag = .ptr_type_sentinel,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = sentinel,
+ .rhs = elem_type,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .ptr_type,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = try p.addExtra(Node.PtrType{
+ .sentinel = sentinel,
+ .align_node = mods.align_node,
+ .addrspace_node = mods.addrspace_node,
+ }),
+ .rhs = elem_type,
+ },
+ });
+ }
+ } else {
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_align,
+ .keyword_const,
+ .keyword_volatile,
+ .keyword_allowzero,
+ .keyword_addrspace,
+ => return p.fail(.ptr_mod_on_array_child_type),
+ else => {},
+ }
+ const elem_type = try p.expectTypeExpr();
+ if (sentinel == 0) {
+ return p.addNode(.{
+ .tag = .array_type,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = len_expr,
+ .rhs = elem_type,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .array_type_sentinel,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = len_expr,
+ .rhs = try p.addExtra(.{
+ .elem_type = elem_type,
+ .sentinel = sentinel,
+ }),
+ },
+ });
+ }
+ }
+ },
+ },
+ else => return p.parseErrorUnionExpr(),
+ }
+}
+
+fn expectTypeExpr(p: *Parse) Error!Node.Index {
+ const node = try p.parseTypeExpr();
+ if (node == 0) {
+ return p.fail(.expected_type_expr);
+ }
+ return node;
+}
+
+/// PrimaryExpr
+/// <- AsmExpr
+/// / IfExpr
+/// / KEYWORD_break BreakLabel? Expr?
+/// / KEYWORD_comptime Expr
+/// / KEYWORD_nosuspend Expr
+/// / KEYWORD_continue BreakLabel?
+/// / KEYWORD_resume Expr
+/// / KEYWORD_return Expr?
+/// / BlockLabel? LoopExpr
+/// / Block
+/// / CurlySuffixExpr
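+///
+/// For example: `break :blk 1`, `continue`, `return x`, `comptime compute()`, `nosuspend wait()`.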
+fn parsePrimaryExpr(p: *Parse) !Node.Index {
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_asm => return p.expectAsmExpr(),
+ .keyword_if => return p.parseIfExpr(),
+ .keyword_break => {
+ p.tok_i += 1;
+ return p.addNode(.{
+ .tag = .@"break",
+ .main_token = p.tok_i - 1,
+ .data = .{
+ .lhs = try p.parseBreakLabel(),
+ .rhs = try p.parseExpr(),
+ },
+ });
+ },
+ .keyword_continue => {
+ p.tok_i += 1;
+ return p.addNode(.{
+ .tag = .@"continue",
+ .main_token = p.tok_i - 1,
+ .data = .{
+ .lhs = try p.parseBreakLabel(),
+ .rhs = undefined,
+ },
+ });
+ },
+ .keyword_comptime => {
+ p.tok_i += 1;
+ return p.addNode(.{
+ .tag = .@"comptime",
+ .main_token = p.tok_i - 1,
+ .data = .{
+ .lhs = try p.expectExpr(),
+ .rhs = undefined,
+ },
+ });
+ },
+ .keyword_nosuspend => {
+ p.tok_i += 1;
+ return p.addNode(.{
+ .tag = .@"nosuspend",
+ .main_token = p.tok_i - 1,
+ .data = .{
+ .lhs = try p.expectExpr(),
+ .rhs = undefined,
+ },
+ });
+ },
+ .keyword_resume => {
+ p.tok_i += 1;
+ return p.addNode(.{
+ .tag = .@"resume",
+ .main_token = p.tok_i - 1,
+ .data = .{
+ .lhs = try p.expectExpr(),
+ .rhs = undefined,
+ },
+ });
+ },
+ .keyword_return => {
+ p.tok_i += 1;
+ return p.addNode(.{
+ .tag = .@"return",
+ .main_token = p.tok_i - 1,
+ .data = .{
+ .lhs = try p.parseExpr(),
+ .rhs = undefined,
+ },
+ });
+ },
+ .identifier => {
+ if (p.token_tags[p.tok_i + 1] == .colon) {
+ switch (p.token_tags[p.tok_i + 2]) {
+ .keyword_inline => {
+ p.tok_i += 3;
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_for => return p.parseForExpr(),
+ .keyword_while => return p.parseWhileExpr(),
+ else => return p.fail(.expected_inlinable),
+ }
+ },
+ .keyword_for => {
+ p.tok_i += 2;
+ return p.parseForExpr();
+ },
+ .keyword_while => {
+ p.tok_i += 2;
+ return p.parseWhileExpr();
+ },
+ .l_brace => {
+ p.tok_i += 2;
+ return p.parseBlock();
+ },
+ else => return p.parseCurlySuffixExpr(),
+ }
+ } else {
+ return p.parseCurlySuffixExpr();
+ }
+ },
+ .keyword_inline => {
+ p.tok_i += 1;
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_for => return p.parseForExpr(),
+ .keyword_while => return p.parseWhileExpr(),
+ else => return p.fail(.expected_inlinable),
+ }
+ },
+ .keyword_for => return p.parseForExpr(),
+ .keyword_while => return p.parseWhileExpr(),
+ .l_brace => return p.parseBlock(),
+ else => return p.parseCurlySuffixExpr(),
+ }
+}
+
+/// IfExpr <- IfPrefix Expr (KEYWORD_else Payload? Expr)?
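+///
+/// For example: `if (opt) |x| x + 1 else 0`.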
+fn parseIfExpr(p: *Parse) !Node.Index {
+ return p.parseIf(expectExpr);
+}
+
+/// Block <- LBRACE Statement* RBRACE
+fn parseBlock(p: *Parse) !Node.Index {
+ const lbrace = p.eatToken(.l_brace) orelse return null_node;
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ while (true) {
+ if (p.token_tags[p.tok_i] == .r_brace) break;
+ const statement = try p.expectStatementRecoverable();
+ if (statement == 0) break;
+ try p.scratch.append(p.gpa, statement);
+ }
+ _ = try p.expectToken(.r_brace);
+ const semicolon = (p.token_tags[p.tok_i - 2] == .semicolon);
+ const statements = p.scratch.items[scratch_top..];
+ switch (statements.len) {
+ 0 => return p.addNode(.{
+ .tag = .block_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = 0,
+ .rhs = 0,
+ },
+ }),
+ 1 => return p.addNode(.{
+ .tag = if (semicolon) .block_two_semicolon else .block_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = statements[0],
+ .rhs = 0,
+ },
+ }),
+ 2 => return p.addNode(.{
+ .tag = if (semicolon) .block_two_semicolon else .block_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = statements[0],
+ .rhs = statements[1],
+ },
+ }),
+ else => {
+ const span = try p.listToSpan(statements);
+ return p.addNode(.{
+ .tag = if (semicolon) .block_semicolon else .block,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = span.start,
+ .rhs = span.end,
+ },
+ });
+ },
+ }
+}
+
+/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+///
+/// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
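+///
+/// For example: `for (items) |item| try handle(item)`.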
+fn parseForExpr(p: *Parse) !Node.Index {
+ const for_token = p.eatToken(.keyword_for) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const array_expr = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ const found_payload = try p.parsePtrIndexPayload();
+ if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+ const then_expr = try p.expectExpr();
+ _ = p.eatToken(.keyword_else) orelse {
+ return p.addNode(.{
+ .tag = .for_simple,
+ .main_token = for_token,
+ .data = .{
+ .lhs = array_expr,
+ .rhs = then_expr,
+ },
+ });
+ };
+ const else_expr = try p.expectExpr();
+ return p.addNode(.{
+ .tag = .@"for",
+ .main_token = for_token,
+ .data = .{
+ .lhs = array_expr,
+ .rhs = try p.addExtra(Node.If{
+ .then_expr = then_expr,
+ .else_expr = else_expr,
+ }),
+ },
+ });
+}
+
+/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+///
+/// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
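+///
+/// For example: `while (it.next()) |item| : (i += 1) try handle(item)`.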
+fn parseWhileExpr(p: *Parse) !Node.Index {
+ const while_token = p.eatToken(.keyword_while) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const condition = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ _ = try p.parsePtrPayload();
+ const cont_expr = try p.parseWhileContinueExpr();
+
+ const then_expr = try p.expectExpr();
+ _ = p.eatToken(.keyword_else) orelse {
+ if (cont_expr == 0) {
+ return p.addNode(.{
+ .tag = .while_simple,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = then_expr,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .while_cont,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.WhileCont{
+ .cont_expr = cont_expr,
+ .then_expr = then_expr,
+ }),
+ },
+ });
+ }
+ };
+ _ = try p.parsePayload();
+ const else_expr = try p.expectExpr();
+ return p.addNode(.{
+ .tag = .@"while",
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.While{
+ .cont_expr = cont_expr,
+ .then_expr = then_expr,
+ .else_expr = else_expr,
+ }),
+ },
+ });
+}
+
+/// CurlySuffixExpr <- TypeExpr InitList?
+///
+/// InitList
+/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
+/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
+/// / LBRACE RBRACE
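+///
+/// For example: `Point{ .x = 1, .y = 2 }` or `[2]u8{ 1, 2 }`.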
+fn parseCurlySuffixExpr(p: *Parse) !Node.Index {
+ const lhs = try p.parseTypeExpr();
+ if (lhs == 0) return null_node;
+ const lbrace = p.eatToken(.l_brace) orelse return lhs;
+
+ // If there are 0 or 1 items, we can use ArrayInitOne/StructInitOne;
+ // otherwise we use the full ArrayInit/StructInit.
+
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ const field_init = try p.parseFieldInit();
+ if (field_init != 0) {
+ try p.scratch.append(p.gpa, field_init);
+ while (true) {
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_brace => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_initializer),
+ }
+ if (p.eatToken(.r_brace)) |_| break;
+ const next = try p.expectFieldInit();
+ try p.scratch.append(p.gpa, next);
+ }
+ const comma = (p.token_tags[p.tok_i - 2] == .comma);
+ const inits = p.scratch.items[scratch_top..];
+ switch (inits.len) {
+ 0 => unreachable,
+ 1 => return p.addNode(.{
+ .tag = if (comma) .struct_init_one_comma else .struct_init_one,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = lhs,
+ .rhs = inits[0],
+ },
+ }),
+ else => return p.addNode(.{
+ .tag = if (comma) .struct_init_comma else .struct_init,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = lhs,
+ .rhs = try p.addExtra(try p.listToSpan(inits)),
+ },
+ }),
+ }
+ }
+
+ while (true) {
+ if (p.eatToken(.r_brace)) |_| break;
+ const elem_init = try p.expectExpr();
+ try p.scratch.append(p.gpa, elem_init);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_brace => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_initializer),
+ }
+ }
+ const comma = (p.token_tags[p.tok_i - 2] == .comma);
+ const inits = p.scratch.items[scratch_top..];
+ switch (inits.len) {
+ 0 => return p.addNode(.{
+ .tag = .struct_init_one,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = lhs,
+ .rhs = 0,
+ },
+ }),
+ 1 => return p.addNode(.{
+ .tag = if (comma) .array_init_one_comma else .array_init_one,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = lhs,
+ .rhs = inits[0],
+ },
+ }),
+ else => return p.addNode(.{
+ .tag = if (comma) .array_init_comma else .array_init,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = lhs,
+ .rhs = try p.addExtra(try p.listToSpan(inits)),
+ },
+ }),
+ }
+}
+
+/// ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
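+///
+/// For example: `anyerror!u32`.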
+fn parseErrorUnionExpr(p: *Parse) !Node.Index {
+ const suffix_expr = try p.parseSuffixExpr();
+ if (suffix_expr == 0) return null_node;
+ const bang = p.eatToken(.bang) orelse return suffix_expr;
+ return p.addNode(.{
+ .tag = .error_union,
+ .main_token = bang,
+ .data = .{
+ .lhs = suffix_expr,
+ .rhs = try p.expectTypeExpr(),
+ },
+ });
+}
+
+/// SuffixExpr
+/// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
+/// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
+///
+/// FnCallArguments <- LPAREN ExprList RPAREN
+///
+/// ExprList <- (Expr COMMA)* Expr?
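+///
+/// For example: `foo.bar(1, 2)`, `list.items[i].x`, `async fetch(url)`.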
+fn parseSuffixExpr(p: *Parse) !Node.Index {
+ if (p.eatToken(.keyword_async)) |_| {
+ var res = try p.expectPrimaryTypeExpr();
+ while (true) {
+ const node = try p.parseSuffixOp(res);
+ if (node == 0) break;
+ res = node;
+ }
+ const lparen = p.eatToken(.l_paren) orelse {
+ try p.warn(.expected_param_list);
+ return res;
+ };
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ while (true) {
+ if (p.eatToken(.r_paren)) |_| break;
+ const param = try p.expectExpr();
+ try p.scratch.append(p.gpa, param);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_paren => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_arg),
+ }
+ }
+ const comma = (p.token_tags[p.tok_i - 2] == .comma);
+ const params = p.scratch.items[scratch_top..];
+ switch (params.len) {
+ 0 => return p.addNode(.{
+ .tag = if (comma) .async_call_one_comma else .async_call_one,
+ .main_token = lparen,
+ .data = .{
+ .lhs = res,
+ .rhs = 0,
+ },
+ }),
+ 1 => return p.addNode(.{
+ .tag = if (comma) .async_call_one_comma else .async_call_one,
+ .main_token = lparen,
+ .data = .{
+ .lhs = res,
+ .rhs = params[0],
+ },
+ }),
+ else => return p.addNode(.{
+ .tag = if (comma) .async_call_comma else .async_call,
+ .main_token = lparen,
+ .data = .{
+ .lhs = res,
+ .rhs = try p.addExtra(try p.listToSpan(params)),
+ },
+ }),
+ }
+ }
+
+ var res = try p.parsePrimaryTypeExpr();
+ if (res == 0) return res;
+ while (true) {
+ const suffix_op = try p.parseSuffixOp(res);
+ if (suffix_op != 0) {
+ res = suffix_op;
+ continue;
+ }
+ const lparen = p.eatToken(.l_paren) orelse return res;
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ while (true) {
+ if (p.eatToken(.r_paren)) |_| break;
+ const param = try p.expectExpr();
+ try p.scratch.append(p.gpa, param);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_paren => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_arg),
+ }
+ }
+ const comma = (p.token_tags[p.tok_i - 2] == .comma);
+ const params = p.scratch.items[scratch_top..];
+ res = switch (params.len) {
+ 0 => try p.addNode(.{
+ .tag = if (comma) .call_one_comma else .call_one,
+ .main_token = lparen,
+ .data = .{
+ .lhs = res,
+ .rhs = 0,
+ },
+ }),
+ 1 => try p.addNode(.{
+ .tag = if (comma) .call_one_comma else .call_one,
+ .main_token = lparen,
+ .data = .{
+ .lhs = res,
+ .rhs = params[0],
+ },
+ }),
+ else => try p.addNode(.{
+ .tag = if (comma) .call_comma else .call,
+ .main_token = lparen,
+ .data = .{
+ .lhs = res,
+ .rhs = try p.addExtra(try p.listToSpan(params)),
+ },
+ }),
+ };
+ }
+}
+
+/// PrimaryTypeExpr
+/// <- BUILTINIDENTIFIER FnCallArguments
+/// / CHAR_LITERAL
+/// / ContainerDecl
+/// / DOT IDENTIFIER
+/// / DOT InitList
+/// / ErrorSetDecl
+/// / FLOAT
+/// / FnProto
+/// / GroupedExpr
+/// / LabeledTypeExpr
+/// / IDENTIFIER
+/// / IfTypeExpr
+/// / INTEGER
+/// / KEYWORD_comptime TypeExpr
+/// / KEYWORD_error DOT IDENTIFIER
+/// / KEYWORD_anyframe
+/// / KEYWORD_unreachable
+/// / STRINGLITERAL
+/// / SwitchExpr
+///
+/// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)? ContainerDeclAuto
+///
+/// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
+///
+/// InitList
+/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
+/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
+/// / LBRACE RBRACE
+///
+/// ErrorSetDecl <- KEYWORD_error LBRACE IdentifierList RBRACE
+///
+/// GroupedExpr <- LPAREN Expr RPAREN
+///
+/// IfTypeExpr <- IfPrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
+///
+/// LabeledTypeExpr
+/// <- BlockLabel Block
+/// / BlockLabel? LoopTypeExpr
+///
+/// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr)
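+///
+/// For example: `@TypeOf(x)`, `'a'`, `.my_tag`, `.{ .x = 1 }`, `error{OutOfMemory}`,
+/// `error.NotFound`, `struct { a: u8 }`, `(a + b)`.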
+fn parsePrimaryTypeExpr(p: *Parse) !Node.Index {
+ switch (p.token_tags[p.tok_i]) {
+ .char_literal => return p.addNode(.{
+ .tag = .char_literal,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ }),
+ .number_literal => return p.addNode(.{
+ .tag = .number_literal,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ }),
+ .keyword_unreachable => return p.addNode(.{
+ .tag = .unreachable_literal,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ }),
+ .keyword_anyframe => return p.addNode(.{
+ .tag = .anyframe_literal,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ }),
+ .string_literal => {
+ const main_token = p.nextToken();
+ return p.addNode(.{
+ .tag = .string_literal,
+ .main_token = main_token,
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ });
+ },
+
+ .builtin => return p.parseBuiltinCall(),
+ .keyword_fn => return p.parseFnProto(),
+ .keyword_if => return p.parseIf(expectTypeExpr),
+ .keyword_switch => return p.expectSwitchExpr(),
+
+ .keyword_extern,
+ .keyword_packed,
+ => {
+ p.tok_i += 1;
+ return p.parseContainerDeclAuto();
+ },
+
+ .keyword_struct,
+ .keyword_opaque,
+ .keyword_enum,
+ .keyword_union,
+ => return p.parseContainerDeclAuto(),
+
+ .keyword_comptime => return p.addNode(.{
+ .tag = .@"comptime",
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = try p.expectTypeExpr(),
+ .rhs = undefined,
+ },
+ }),
+ .multiline_string_literal_line => {
+ const first_line = p.nextToken();
+ while (p.token_tags[p.tok_i] == .multiline_string_literal_line) {
+ p.tok_i += 1;
+ }
+ return p.addNode(.{
+ .tag = .multiline_string_literal,
+ .main_token = first_line,
+ .data = .{
+ .lhs = first_line,
+ .rhs = p.tok_i - 1,
+ },
+ });
+ },
+ .identifier => switch (p.token_tags[p.tok_i + 1]) {
+ .colon => switch (p.token_tags[p.tok_i + 2]) {
+ .keyword_inline => {
+ p.tok_i += 3;
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_for => return p.parseForTypeExpr(),
+ .keyword_while => return p.parseWhileTypeExpr(),
+ else => return p.fail(.expected_inlinable),
+ }
+ },
+ .keyword_for => {
+ p.tok_i += 2;
+ return p.parseForTypeExpr();
+ },
+ .keyword_while => {
+ p.tok_i += 2;
+ return p.parseWhileTypeExpr();
+ },
+ .l_brace => {
+ p.tok_i += 2;
+ return p.parseBlock();
+ },
+ else => return p.addNode(.{
+ .tag = .identifier,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ }),
+ },
+ else => return p.addNode(.{
+ .tag = .identifier,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ }),
+ },
+ .keyword_inline => {
+ p.tok_i += 1;
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_for => return p.parseForTypeExpr(),
+ .keyword_while => return p.parseWhileTypeExpr(),
+ else => return p.fail(.expected_inlinable),
+ }
+ },
+ .keyword_for => return p.parseForTypeExpr(),
+ .keyword_while => return p.parseWhileTypeExpr(),
+ .period => switch (p.token_tags[p.tok_i + 1]) {
+ .identifier => return p.addNode(.{
+ .tag = .enum_literal,
+ .data = .{
+ .lhs = p.nextToken(), // dot
+ .rhs = undefined,
+ },
+ .main_token = p.nextToken(), // identifier
+ }),
+ .l_brace => {
+ const lbrace = p.tok_i + 1;
+ p.tok_i = lbrace + 1;
+
+ // If there are 0, 1, or 2 items, we can use ArrayInitDotTwo/StructInitDotTwo;
+ // otherwise we use the full ArrayInitDot/StructInitDot.
+
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ const field_init = try p.parseFieldInit();
+ if (field_init != 0) {
+ try p.scratch.append(p.gpa, field_init);
+ while (true) {
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_brace => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_initializer),
+ }
+ if (p.eatToken(.r_brace)) |_| break;
+ const next = try p.expectFieldInit();
+ try p.scratch.append(p.gpa, next);
+ }
+ const comma = (p.token_tags[p.tok_i - 2] == .comma);
+ const inits = p.scratch.items[scratch_top..];
+ switch (inits.len) {
+ 0 => unreachable,
+ 1 => return p.addNode(.{
+ .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = inits[0],
+ .rhs = 0,
+ },
+ }),
+ 2 => return p.addNode(.{
+ .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = inits[0],
+ .rhs = inits[1],
+ },
+ }),
+ else => {
+ const span = try p.listToSpan(inits);
+ return p.addNode(.{
+ .tag = if (comma) .struct_init_dot_comma else .struct_init_dot,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = span.start,
+ .rhs = span.end,
+ },
+ });
+ },
+ }
+ }
+
+ while (true) {
+ if (p.eatToken(.r_brace)) |_| break;
+ const elem_init = try p.expectExpr();
+ try p.scratch.append(p.gpa, elem_init);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_brace => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_initializer),
+ }
+ }
+ const comma = (p.token_tags[p.tok_i - 2] == .comma);
+ const inits = p.scratch.items[scratch_top..];
+ switch (inits.len) {
+ 0 => return p.addNode(.{
+ .tag = .struct_init_dot_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = 0,
+ .rhs = 0,
+ },
+ }),
+ 1 => return p.addNode(.{
+ .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = inits[0],
+ .rhs = 0,
+ },
+ }),
+ 2 => return p.addNode(.{
+ .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = inits[0],
+ .rhs = inits[1],
+ },
+ }),
+ else => {
+ const span = try p.listToSpan(inits);
+ return p.addNode(.{
+ .tag = if (comma) .array_init_dot_comma else .array_init_dot,
+ .main_token = lbrace,
+ .data = .{
+ .lhs = span.start,
+ .rhs = span.end,
+ },
+ });
+ },
+ }
+ },
+ else => return null_node,
+ },
+ .keyword_error => switch (p.token_tags[p.tok_i + 1]) {
+ .l_brace => {
+ const error_token = p.tok_i;
+ p.tok_i += 2;
+ while (true) {
+ if (p.eatToken(.r_brace)) |_| break;
+ _ = try p.eatDocComments();
+ _ = try p.expectToken(.identifier);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_brace => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_field),
+ }
+ }
+ return p.addNode(.{
+ .tag = .error_set_decl,
+ .main_token = error_token,
+ .data = .{
+ .lhs = undefined,
+ .rhs = p.tok_i - 1, // rbrace
+ },
+ });
+ },
+ else => {
+ const main_token = p.nextToken();
+ const period = p.eatToken(.period);
+ if (period == null) try p.warnExpected(.period);
+ const identifier = p.eatToken(.identifier);
+ if (identifier == null) try p.warnExpected(.identifier);
+ return p.addNode(.{
+ .tag = .error_value,
+ .main_token = main_token,
+ .data = .{
+ .lhs = period orelse 0,
+ .rhs = identifier orelse 0,
+ },
+ });
+ },
+ },
+ .l_paren => return p.addNode(.{
+ .tag = .grouped_expression,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = try p.expectExpr(),
+ .rhs = try p.expectToken(.r_paren),
+ },
+ }),
+ else => return null_node,
+ }
+}
+
+fn expectPrimaryTypeExpr(p: *Parse) !Node.Index {
+ const node = try p.parsePrimaryTypeExpr();
+ if (node == 0) {
+ return p.fail(.expected_primary_type_expr);
+ }
+ return node;
+}
+
+/// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
+///
+/// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
+fn parseForTypeExpr(p: *Parse) !Node.Index {
+ const for_token = p.eatToken(.keyword_for) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const array_expr = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ const found_payload = try p.parsePtrIndexPayload();
+ if (found_payload == 0) try p.warn(.expected_loop_payload);
+
+ const then_expr = try p.expectTypeExpr();
+ _ = p.eatToken(.keyword_else) orelse {
+ return p.addNode(.{
+ .tag = .for_simple,
+ .main_token = for_token,
+ .data = .{
+ .lhs = array_expr,
+ .rhs = then_expr,
+ },
+ });
+ };
+ const else_expr = try p.expectTypeExpr();
+ return p.addNode(.{
+ .tag = .@"for",
+ .main_token = for_token,
+ .data = .{
+ .lhs = array_expr,
+ .rhs = try p.addExtra(Node.If{
+ .then_expr = then_expr,
+ .else_expr = else_expr,
+ }),
+ },
+ });
+}
+
+/// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
+///
+/// WhileTypeExpr <- WhilePrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
+fn parseWhileTypeExpr(p: *Parse) !Node.Index {
+ const while_token = p.eatToken(.keyword_while) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const condition = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ _ = try p.parsePtrPayload();
+ const cont_expr = try p.parseWhileContinueExpr();
+
+ const then_expr = try p.expectTypeExpr();
+ _ = p.eatToken(.keyword_else) orelse {
+ if (cont_expr == 0) {
+ return p.addNode(.{
+ .tag = .while_simple,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = then_expr,
+ },
+ });
+ } else {
+ return p.addNode(.{
+ .tag = .while_cont,
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.WhileCont{
+ .cont_expr = cont_expr,
+ .then_expr = then_expr,
+ }),
+ },
+ });
+ }
+ };
+ _ = try p.parsePayload();
+ const else_expr = try p.expectTypeExpr();
+ return p.addNode(.{
+ .tag = .@"while",
+ .main_token = while_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.While{
+ .cont_expr = cont_expr,
+ .then_expr = then_expr,
+ .else_expr = else_expr,
+ }),
+ },
+ });
+}
+
+/// SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE
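+///
+/// For example: `switch (x) { 0 => "zero", 1, 2 => "few", else => "many" }`.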
+fn expectSwitchExpr(p: *Parse) !Node.Index {
+ const switch_token = p.assertToken(.keyword_switch);
+ _ = try p.expectToken(.l_paren);
+ const expr_node = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ _ = try p.expectToken(.l_brace);
+ const cases = try p.parseSwitchProngList();
+ const trailing_comma = p.token_tags[p.tok_i - 1] == .comma;
+ _ = try p.expectToken(.r_brace);
+
+ return p.addNode(.{
+ .tag = if (trailing_comma) .switch_comma else .@"switch",
+ .main_token = switch_token,
+ .data = .{
+ .lhs = expr_node,
+ .rhs = try p.addExtra(Node.SubRange{
+ .start = cases.start,
+ .end = cases.end,
+ }),
+ },
+ });
+}
+
+/// AsmExpr <- KEYWORD_asm KEYWORD_volatile? LPAREN Expr AsmOutput? RPAREN
+///
+/// AsmOutput <- COLON AsmOutputList AsmInput?
+///
+/// AsmInput <- COLON AsmInputList AsmClobbers?
+///
+/// AsmClobbers <- COLON StringList
+///
+/// StringList <- (STRINGLITERAL COMMA)* STRINGLITERAL?
+///
+/// AsmOutputList <- (AsmOutputItem COMMA)* AsmOutputItem?
+///
+/// AsmInputList <- (AsmInputItem COMMA)* AsmInputItem?
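+///
+/// For example:
+/// `asm volatile ("syscall" : [ret] "={rax}" (-> usize) : [num] "{rax}" (n) : "rcx", "r11")`.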
+fn expectAsmExpr(p: *Parse) !Node.Index {
+ const asm_token = p.assertToken(.keyword_asm);
+ _ = p.eatToken(.keyword_volatile);
+ _ = try p.expectToken(.l_paren);
+ const template = try p.expectExpr();
+
+ if (p.eatToken(.r_paren)) |rparen| {
+ return p.addNode(.{
+ .tag = .asm_simple,
+ .main_token = asm_token,
+ .data = .{
+ .lhs = template,
+ .rhs = rparen,
+ },
+ });
+ }
+
+ _ = try p.expectToken(.colon);
+
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+ while (true) {
+ const output_item = try p.parseAsmOutputItem();
+ if (output_item == 0) break;
+ try p.scratch.append(p.gpa, output_item);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ // All possible delimiters.
+ .colon, .r_paren, .r_brace, .r_bracket => break,
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warnExpected(.comma),
+ }
+ }
+ if (p.eatToken(.colon)) |_| {
+ while (true) {
+ const input_item = try p.parseAsmInputItem();
+ if (input_item == 0) break;
+ try p.scratch.append(p.gpa, input_item);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ // All possible delimiters.
+ .colon, .r_paren, .r_brace, .r_bracket => break,
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warnExpected(.comma),
+ }
+ }
+ if (p.eatToken(.colon)) |_| {
+ while (p.eatToken(.string_literal)) |_| {
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .colon, .r_paren, .r_brace, .r_bracket => break,
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warnExpected(.comma),
+ }
+ }
+ }
+ }
+ const rparen = try p.expectToken(.r_paren);
+ const span = try p.listToSpan(p.scratch.items[scratch_top..]);
+ return p.addNode(.{
+ .tag = .@"asm",
+ .main_token = asm_token,
+ .data = .{
+ .lhs = template,
+ .rhs = try p.addExtra(Node.Asm{
+ .items_start = span.start,
+ .items_end = span.end,
+ .rparen = rparen,
+ }),
+ },
+ });
+}
+
+/// AsmOutputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN (MINUSRARROW TypeExpr / IDENTIFIER) RPAREN
+fn parseAsmOutputItem(p: *Parse) !Node.Index {
+ _ = p.eatToken(.l_bracket) orelse return null_node;
+ const identifier = try p.expectToken(.identifier);
+ _ = try p.expectToken(.r_bracket);
+ _ = try p.expectToken(.string_literal);
+ _ = try p.expectToken(.l_paren);
+ const type_expr: Node.Index = blk: {
+ if (p.eatToken(.arrow)) |_| {
+ break :blk try p.expectTypeExpr();
+ } else {
+ _ = try p.expectToken(.identifier);
+ break :blk null_node;
+ }
+ };
+ const rparen = try p.expectToken(.r_paren);
+ return p.addNode(.{
+ .tag = .asm_output,
+ .main_token = identifier,
+ .data = .{
+ .lhs = type_expr,
+ .rhs = rparen,
+ },
+ });
+}
+
+/// AsmInputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN Expr RPAREN
+fn parseAsmInputItem(p: *Parse) !Node.Index {
+ _ = p.eatToken(.l_bracket) orelse return null_node;
+ const identifier = try p.expectToken(.identifier);
+ _ = try p.expectToken(.r_bracket);
+ _ = try p.expectToken(.string_literal);
+ _ = try p.expectToken(.l_paren);
+ const expr = try p.expectExpr();
+ const rparen = try p.expectToken(.r_paren);
+ return p.addNode(.{
+ .tag = .asm_input,
+ .main_token = identifier,
+ .data = .{
+ .lhs = expr,
+ .rhs = rparen,
+ },
+ });
+}
+
+/// BreakLabel <- COLON IDENTIFIER
+fn parseBreakLabel(p: *Parse) !TokenIndex {
+ _ = p.eatToken(.colon) orelse return @as(TokenIndex, 0);
+ return p.expectToken(.identifier);
+}
+
+/// BlockLabel <- IDENTIFIER COLON
+fn parseBlockLabel(p: *Parse) TokenIndex {
+ if (p.token_tags[p.tok_i] == .identifier and
+ p.token_tags[p.tok_i + 1] == .colon)
+ {
+ const identifier = p.tok_i;
+ p.tok_i += 2;
+ return identifier;
+ }
+ return null_node;
+}
+
+/// FieldInit <- DOT IDENTIFIER EQUAL Expr
+fn parseFieldInit(p: *Parse) !Node.Index {
+ if (p.token_tags[p.tok_i + 0] == .period and
+ p.token_tags[p.tok_i + 1] == .identifier and
+ p.token_tags[p.tok_i + 2] == .equal)
+ {
+ p.tok_i += 3;
+ return p.expectExpr();
+ } else {
+ return null_node;
+ }
+}
+
+fn expectFieldInit(p: *Parse) !Node.Index {
+ if (p.token_tags[p.tok_i] != .period or
+ p.token_tags[p.tok_i + 1] != .identifier or
+ p.token_tags[p.tok_i + 2] != .equal)
+ return p.fail(.expected_initializer);
+
+ p.tok_i += 3;
+ return p.expectExpr();
+}
+
+/// WhileContinueExpr <- COLON LPAREN AssignExpr RPAREN
+fn parseWhileContinueExpr(p: *Parse) !Node.Index {
+ _ = p.eatToken(.colon) orelse {
+ if (p.token_tags[p.tok_i] == .l_paren and
+ p.tokensOnSameLine(p.tok_i - 1, p.tok_i))
+ return p.fail(.expected_continue_expr);
+ return null_node;
+ };
+ _ = try p.expectToken(.l_paren);
+ const node = try p.parseAssignExpr();
+ if (node == 0) return p.fail(.expected_expr_or_assignment);
+ _ = try p.expectToken(.r_paren);
+ return node;
+}
+
+/// LinkSection <- KEYWORD_linksection LPAREN Expr RPAREN
+fn parseLinkSection(p: *Parse) !Node.Index {
+ _ = p.eatToken(.keyword_linksection) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const expr_node = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ return expr_node;
+}
+
+/// CallConv <- KEYWORD_callconv LPAREN Expr RPAREN
+fn parseCallconv(p: *Parse) !Node.Index {
+ _ = p.eatToken(.keyword_callconv) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const expr_node = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ return expr_node;
+}
+
+/// AddrSpace <- KEYWORD_addrspace LPAREN Expr RPAREN
+fn parseAddrSpace(p: *Parse) !Node.Index {
+ _ = p.eatToken(.keyword_addrspace) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const expr_node = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ return expr_node;
+}
+
+/// This function can return null nodes and then still return nodes afterwards,
+/// such as in the case of anytype and `...`. Caller must look for rparen to find
+/// out when there are no more param decls left.
+///
+/// ParamDecl
+/// <- doc_comment? (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
+/// / DOT3
+///
+/// ParamType
+/// <- KEYWORD_anytype
+/// / TypeExpr
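+///
+/// For example: `comptime T: type`, `noalias dst: []u8`, `ctx: anytype`, and `...`.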
+fn expectParamDecl(p: *Parse) !Node.Index {
+ _ = try p.eatDocComments();
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_noalias, .keyword_comptime => p.tok_i += 1,
+ .ellipsis3 => {
+ p.tok_i += 1;
+ return null_node;
+ },
+ else => {},
+ }
+ if (p.token_tags[p.tok_i] == .identifier and
+ p.token_tags[p.tok_i + 1] == .colon)
+ {
+ p.tok_i += 2;
+ }
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_anytype => {
+ p.tok_i += 1;
+ return null_node;
+ },
+ else => return p.expectTypeExpr(),
+ }
+}
+
+/// Payload <- PIPE IDENTIFIER PIPE
+fn parsePayload(p: *Parse) !TokenIndex {
+ _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
+ const identifier = try p.expectToken(.identifier);
+ _ = try p.expectToken(.pipe);
+ return identifier;
+}
+
+/// PtrPayload <- PIPE ASTERISK? IDENTIFIER PIPE
+fn parsePtrPayload(p: *Parse) !TokenIndex {
+ _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
+ _ = p.eatToken(.asterisk);
+ const identifier = try p.expectToken(.identifier);
+ _ = try p.expectToken(.pipe);
+ return identifier;
+}
+
+/// Returns the first identifier token, if any.
+///
+/// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
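+///
+/// For example, `|*item, i|` captures the element by pointer along with its index.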
+fn parsePtrIndexPayload(p: *Parse) !TokenIndex {
+ _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
+ _ = p.eatToken(.asterisk);
+ const identifier = try p.expectToken(.identifier);
+ if (p.eatToken(.comma) != null) {
+ _ = try p.expectToken(.identifier);
+ }
+ _ = try p.expectToken(.pipe);
+ return identifier;
+}
+
+/// SwitchProng <- KEYWORD_inline? SwitchCase EQUALRARROW PtrIndexPayload? AssignExpr
+///
+/// SwitchCase
+/// <- SwitchItem (COMMA SwitchItem)* COMMA?
+/// / KEYWORD_else
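+///
+/// For example: `1, 2 => foo()`, `3...9 => |n| bar(n)`, `inline else => |tag| baz(tag)`.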
+fn parseSwitchProng(p: *Parse) !Node.Index {
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+ const is_inline = p.eatToken(.keyword_inline) != null;
+
+ if (p.eatToken(.keyword_else) == null) {
+ while (true) {
+ const item = try p.parseSwitchItem();
+ if (item == 0) break;
+ try p.scratch.append(p.gpa, item);
+ if (p.eatToken(.comma) == null) break;
+ }
+ if (scratch_top == p.scratch.items.len) {
+ if (is_inline) p.tok_i -= 1;
+ return null_node;
+ }
+ }
+ const arrow_token = try p.expectToken(.equal_angle_bracket_right);
+ _ = try p.parsePtrIndexPayload();
+
+ const items = p.scratch.items[scratch_top..];
+ switch (items.len) {
+ 0 => return p.addNode(.{
+ .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
+ .main_token = arrow_token,
+ .data = .{
+ .lhs = 0,
+ .rhs = try p.expectAssignExpr(),
+ },
+ }),
+ 1 => return p.addNode(.{
+ .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
+ .main_token = arrow_token,
+ .data = .{
+ .lhs = items[0],
+ .rhs = try p.expectAssignExpr(),
+ },
+ }),
+ else => return p.addNode(.{
+ .tag = if (is_inline) .switch_case_inline else .switch_case,
+ .main_token = arrow_token,
+ .data = .{
+ .lhs = try p.addExtra(try p.listToSpan(items)),
+ .rhs = try p.expectAssignExpr(),
+ },
+ }),
+ }
+}
+
+/// SwitchItem <- Expr (DOT3 Expr)?
+fn parseSwitchItem(p: *Parse) !Node.Index {
+ const expr = try p.parseExpr();
+ if (expr == 0) return null_node;
+
+ if (p.eatToken(.ellipsis3)) |token| {
+ return p.addNode(.{
+ .tag = .switch_range,
+ .main_token = token,
+ .data = .{
+ .lhs = expr,
+ .rhs = try p.expectExpr(),
+ },
+ });
+ }
+ return expr;
+}
+
+const PtrModifiers = struct {
+ align_node: Node.Index,
+ addrspace_node: Node.Index,
+ bit_range_start: Node.Index,
+ bit_range_end: Node.Index,
+};
+
+fn parsePtrModifiers(p: *Parse) !PtrModifiers {
+ var result: PtrModifiers = .{
+ .align_node = 0,
+ .addrspace_node = 0,
+ .bit_range_start = 0,
+ .bit_range_end = 0,
+ };
+ var saw_const = false;
+ var saw_volatile = false;
+ var saw_allowzero = false;
+ var saw_addrspace = false;
+ while (true) {
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_align => {
+ if (result.align_node != 0) {
+ try p.warn(.extra_align_qualifier);
+ }
+ p.tok_i += 1;
+ _ = try p.expectToken(.l_paren);
+ result.align_node = try p.expectExpr();
+
+ if (p.eatToken(.colon)) |_| {
+ result.bit_range_start = try p.expectExpr();
+ _ = try p.expectToken(.colon);
+ result.bit_range_end = try p.expectExpr();
+ }
+
+ _ = try p.expectToken(.r_paren);
+ },
+ .keyword_const => {
+ if (saw_const) {
+ try p.warn(.extra_const_qualifier);
+ }
+ p.tok_i += 1;
+ saw_const = true;
+ },
+ .keyword_volatile => {
+ if (saw_volatile) {
+ try p.warn(.extra_volatile_qualifier);
+ }
+ p.tok_i += 1;
+ saw_volatile = true;
+ },
+ .keyword_allowzero => {
+ if (saw_allowzero) {
+ try p.warn(.extra_allowzero_qualifier);
+ }
+ p.tok_i += 1;
+ saw_allowzero = true;
+ },
+ .keyword_addrspace => {
+ if (saw_addrspace) {
+ try p.warn(.extra_addrspace_qualifier);
+ }
+ result.addrspace_node = try p.parseAddrSpace();
+ },
+ else => return result,
+ }
+ }
+}
+
+/// SuffixOp
+/// <- LBRACKET Expr (DOT2 (Expr? (COLON Expr)?)?)? RBRACKET
+/// / DOT IDENTIFIER
+/// / DOTASTERISK
+/// / DOTQUESTIONMARK
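+///
+/// For example: `arr[i]`, `arr[1..2]`, `arr[0..len :0]`, `foo.bar`, `ptr.*`, `opt.?`.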
+fn parseSuffixOp(p: *Parse, lhs: Node.Index) !Node.Index {
+ switch (p.token_tags[p.tok_i]) {
+ .l_bracket => {
+ const lbracket = p.nextToken();
+ const index_expr = try p.expectExpr();
+
+ if (p.eatToken(.ellipsis2)) |_| {
+ const end_expr = try p.parseExpr();
+ if (p.eatToken(.colon)) |_| {
+ const sentinel = try p.expectExpr();
+ _ = try p.expectToken(.r_bracket);
+ return p.addNode(.{
+ .tag = .slice_sentinel,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = lhs,
+ .rhs = try p.addExtra(Node.SliceSentinel{
+ .start = index_expr,
+ .end = end_expr,
+ .sentinel = sentinel,
+ }),
+ },
+ });
+ }
+ _ = try p.expectToken(.r_bracket);
+ if (end_expr == 0) {
+ return p.addNode(.{
+ .tag = .slice_open,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = lhs,
+ .rhs = index_expr,
+ },
+ });
+ }
+ return p.addNode(.{
+ .tag = .slice,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = lhs,
+ .rhs = try p.addExtra(Node.Slice{
+ .start = index_expr,
+ .end = end_expr,
+ }),
+ },
+ });
+ }
+ _ = try p.expectToken(.r_bracket);
+ return p.addNode(.{
+ .tag = .array_access,
+ .main_token = lbracket,
+ .data = .{
+ .lhs = lhs,
+ .rhs = index_expr,
+ },
+ });
+ },
+ .period_asterisk => return p.addNode(.{
+ .tag = .deref,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = lhs,
+ .rhs = undefined,
+ },
+ }),
+ .invalid_periodasterisks => {
+ try p.warn(.asterisk_after_ptr_deref);
+ return p.addNode(.{
+ .tag = .deref,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = lhs,
+ .rhs = undefined,
+ },
+ });
+ },
+ .period => switch (p.token_tags[p.tok_i + 1]) {
+ .identifier => return p.addNode(.{
+ .tag = .field_access,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = lhs,
+ .rhs = p.nextToken(),
+ },
+ }),
+ .question_mark => return p.addNode(.{
+ .tag = .unwrap_optional,
+ .main_token = p.nextToken(),
+ .data = .{
+ .lhs = lhs,
+ .rhs = p.nextToken(),
+ },
+ }),
+ .l_brace => {
+                // This is a misplaced `.{`; handle the error elsewhere.
+ return null_node;
+ },
+ else => {
+ p.tok_i += 1;
+ try p.warn(.expected_suffix_op);
+ return null_node;
+ },
+ },
+ else => return null_node,
+ }
+}
+
+/// Caller must have already verified the first token.
+///
+/// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
+///
+/// ContainerDeclType
+/// <- KEYWORD_struct (LPAREN Expr RPAREN)?
+/// / KEYWORD_opaque
+/// / KEYWORD_enum (LPAREN Expr RPAREN)?
+/// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
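+///
+/// For example: `struct { x: u32 }`, `enum(u8) { a, b }`, `union(enum) { int: i32, float: f64 }`, `opaque {}`.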
+fn parseContainerDeclAuto(p: *Parse) !Node.Index {
+ const main_token = p.nextToken();
+ const arg_expr = switch (p.token_tags[main_token]) {
+ .keyword_opaque => null_node,
+ .keyword_struct, .keyword_enum => blk: {
+ if (p.eatToken(.l_paren)) |_| {
+ const expr = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ break :blk expr;
+ } else {
+ break :blk null_node;
+ }
+ },
+ .keyword_union => blk: {
+ if (p.eatToken(.l_paren)) |_| {
+ if (p.eatToken(.keyword_enum)) |_| {
+ if (p.eatToken(.l_paren)) |_| {
+ const enum_tag_expr = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ _ = try p.expectToken(.r_paren);
+
+ _ = try p.expectToken(.l_brace);
+ const members = try p.parseContainerMembers();
+ const members_span = try members.toSpan(p);
+ _ = try p.expectToken(.r_brace);
+ return p.addNode(.{
+ .tag = switch (members.trailing) {
+ true => .tagged_union_enum_tag_trailing,
+ false => .tagged_union_enum_tag,
+ },
+ .main_token = main_token,
+ .data = .{
+ .lhs = enum_tag_expr,
+ .rhs = try p.addExtra(members_span),
+ },
+ });
+ } else {
+ _ = try p.expectToken(.r_paren);
+
+ _ = try p.expectToken(.l_brace);
+ const members = try p.parseContainerMembers();
+ _ = try p.expectToken(.r_brace);
+ if (members.len <= 2) {
+ return p.addNode(.{
+ .tag = switch (members.trailing) {
+ true => .tagged_union_two_trailing,
+ false => .tagged_union_two,
+ },
+ .main_token = main_token,
+ .data = .{
+ .lhs = members.lhs,
+ .rhs = members.rhs,
+ },
+ });
+ } else {
+ const span = try members.toSpan(p);
+ return p.addNode(.{
+ .tag = switch (members.trailing) {
+ true => .tagged_union_trailing,
+ false => .tagged_union,
+ },
+ .main_token = main_token,
+ .data = .{
+ .lhs = span.start,
+ .rhs = span.end,
+ },
+ });
+ }
+ }
+ } else {
+ const expr = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ break :blk expr;
+ }
+ } else {
+ break :blk null_node;
+ }
+ },
+ else => {
+ p.tok_i -= 1;
+ return p.fail(.expected_container);
+ },
+ };
+ _ = try p.expectToken(.l_brace);
+ const members = try p.parseContainerMembers();
+ _ = try p.expectToken(.r_brace);
+ if (arg_expr == 0) {
+ if (members.len <= 2) {
+ return p.addNode(.{
+ .tag = switch (members.trailing) {
+ true => .container_decl_two_trailing,
+ false => .container_decl_two,
+ },
+ .main_token = main_token,
+ .data = .{
+ .lhs = members.lhs,
+ .rhs = members.rhs,
+ },
+ });
+ } else {
+ const span = try members.toSpan(p);
+ return p.addNode(.{
+ .tag = switch (members.trailing) {
+ true => .container_decl_trailing,
+ false => .container_decl,
+ },
+ .main_token = main_token,
+ .data = .{
+ .lhs = span.start,
+ .rhs = span.end,
+ },
+ });
+ }
+ } else {
+ const span = try members.toSpan(p);
+ return p.addNode(.{
+ .tag = switch (members.trailing) {
+ true => .container_decl_arg_trailing,
+ false => .container_decl_arg,
+ },
+ .main_token = main_token,
+ .data = .{
+ .lhs = arg_expr,
+ .rhs = try p.addExtra(Node.SubRange{
+ .start = span.start,
+ .end = span.end,
+ }),
+ },
+ });
+ }
+}
+
+/// Give a helpful error message for those transitioning from
+/// C's 'struct Foo {};' to Zig's 'const Foo = struct {};'.
+fn parseCStyleContainer(p: *Parse) Error!bool {
+ const main_token = p.tok_i;
+ switch (p.token_tags[p.tok_i]) {
+ .keyword_enum, .keyword_union, .keyword_struct => {},
+ else => return false,
+ }
+ const identifier = p.tok_i + 1;
+ if (p.token_tags[identifier] != .identifier) return false;
+ p.tok_i += 2;
+
+ try p.warnMsg(.{
+ .tag = .c_style_container,
+ .token = identifier,
+ .extra = .{ .expected_tag = p.token_tags[main_token] },
+ });
+ try p.warnMsg(.{
+ .tag = .zig_style_container,
+ .is_note = true,
+ .token = identifier,
+ .extra = .{ .expected_tag = p.token_tags[main_token] },
+ });
+
+ _ = try p.expectToken(.l_brace);
+ _ = try p.parseContainerMembers();
+ _ = try p.expectToken(.r_brace);
+ try p.expectSemicolon(.expected_semi_after_decl, true);
+ return true;
+}
+
+/// ByteAlign <- KEYWORD_align LPAREN Expr RPAREN
+fn parseByteAlign(p: *Parse) !Node.Index {
+ _ = p.eatToken(.keyword_align) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const expr = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ return expr;
+}
+
+/// SwitchProngList <- (SwitchProng COMMA)* SwitchProng?
+fn parseSwitchProngList(p: *Parse) !Node.SubRange {
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+
+ while (true) {
+ const item = try parseSwitchProng(p);
+ if (item == 0) break;
+
+ try p.scratch.append(p.gpa, item);
+
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ // All possible delimiters.
+ .colon, .r_paren, .r_brace, .r_bracket => break,
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_switch_prong),
+ }
+ }
+ return p.listToSpan(p.scratch.items[scratch_top..]);
+}
+
+/// ParamDeclList <- (ParamDecl COMMA)* ParamDecl?
+fn parseParamDeclList(p: *Parse) !SmallSpan {
+ _ = try p.expectToken(.l_paren);
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ var varargs: union(enum) { none, seen, nonfinal: TokenIndex } = .none;
+ while (true) {
+ if (p.eatToken(.r_paren)) |_| break;
+ if (varargs == .seen) varargs = .{ .nonfinal = p.tok_i };
+ const param = try p.expectParamDecl();
+ if (param != 0) {
+ try p.scratch.append(p.gpa, param);
+ } else if (p.token_tags[p.tok_i - 1] == .ellipsis3) {
+ if (varargs == .none) varargs = .seen;
+ }
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_paren => {
+ p.tok_i += 1;
+ break;
+ },
+ .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_param),
+ }
+ }
+ if (varargs == .nonfinal) {
+ try p.warnMsg(.{ .tag = .varargs_nonfinal, .token = varargs.nonfinal });
+ }
+ const params = p.scratch.items[scratch_top..];
+ return switch (params.len) {
+ 0 => SmallSpan{ .zero_or_one = 0 },
+ 1 => SmallSpan{ .zero_or_one = params[0] },
+ else => SmallSpan{ .multi = try p.listToSpan(params) },
+ };
+}
+
+/// FnCallArguments <- LPAREN ExprList RPAREN
+///
+/// ExprList <- (Expr COMMA)* Expr?
+fn parseBuiltinCall(p: *Parse) !Node.Index {
+ const builtin_token = p.assertToken(.builtin);
+ if (p.token_tags[p.nextToken()] != .l_paren) {
+ p.tok_i -= 1;
+ try p.warn(.expected_param_list);
+ // Pretend this was an identifier so we can continue parsing.
+ return p.addNode(.{
+ .tag = .identifier,
+ .main_token = builtin_token,
+ .data = .{
+ .lhs = undefined,
+ .rhs = undefined,
+ },
+ });
+ }
+ const scratch_top = p.scratch.items.len;
+ defer p.scratch.shrinkRetainingCapacity(scratch_top);
+ while (true) {
+ if (p.eatToken(.r_paren)) |_| break;
+ const param = try p.expectExpr();
+ try p.scratch.append(p.gpa, param);
+ switch (p.token_tags[p.tok_i]) {
+ .comma => p.tok_i += 1,
+ .r_paren => {
+ p.tok_i += 1;
+ break;
+ },
+ // Likely just a missing comma; give error but continue parsing.
+ else => try p.warn(.expected_comma_after_arg),
+ }
+ }
+ const comma = (p.token_tags[p.tok_i - 2] == .comma);
+ const params = p.scratch.items[scratch_top..];
+ switch (params.len) {
+ 0 => return p.addNode(.{
+ .tag = .builtin_call_two,
+ .main_token = builtin_token,
+ .data = .{
+ .lhs = 0,
+ .rhs = 0,
+ },
+ }),
+ 1 => return p.addNode(.{
+ .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
+ .main_token = builtin_token,
+ .data = .{
+ .lhs = params[0],
+ .rhs = 0,
+ },
+ }),
+ 2 => return p.addNode(.{
+ .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
+ .main_token = builtin_token,
+ .data = .{
+ .lhs = params[0],
+ .rhs = params[1],
+ },
+ }),
+ else => {
+ const span = try p.listToSpan(params);
+ return p.addNode(.{
+ .tag = if (comma) .builtin_call_comma else .builtin_call,
+ .main_token = builtin_token,
+ .data = .{
+ .lhs = span.start,
+ .rhs = span.end,
+ },
+ });
+ },
+ }
+}
+
+/// IfPrefix <- KEYWORD_if LPAREN Expr RPAREN PtrPayload?
+fn parseIf(p: *Parse, comptime bodyParseFn: fn (p: *Parse) Error!Node.Index) !Node.Index {
+ const if_token = p.eatToken(.keyword_if) orelse return null_node;
+ _ = try p.expectToken(.l_paren);
+ const condition = try p.expectExpr();
+ _ = try p.expectToken(.r_paren);
+ _ = try p.parsePtrPayload();
+
+ const then_expr = try bodyParseFn(p);
+ assert(then_expr != 0);
+
+ _ = p.eatToken(.keyword_else) orelse return p.addNode(.{
+ .tag = .if_simple,
+ .main_token = if_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = then_expr,
+ },
+ });
+ _ = try p.parsePayload();
+ const else_expr = try bodyParseFn(p);
+    assert(else_expr != 0);
+
+ return p.addNode(.{
+ .tag = .@"if",
+ .main_token = if_token,
+ .data = .{
+ .lhs = condition,
+ .rhs = try p.addExtra(Node.If{
+ .then_expr = then_expr,
+ .else_expr = else_expr,
+ }),
+ },
+ });
+}
+
+/// Skips over doc comment tokens. Returns the first one, if any.
+fn eatDocComments(p: *Parse) !?TokenIndex {
+ if (p.eatToken(.doc_comment)) |tok| {
+ var first_line = tok;
+ if (tok > 0 and tokensOnSameLine(p, tok - 1, tok)) {
+ try p.warnMsg(.{
+ .tag = .same_line_doc_comment,
+ .token = tok,
+ });
+ first_line = p.eatToken(.doc_comment) orelse return null;
+ }
+ while (p.eatToken(.doc_comment)) |_| {}
+ return first_line;
+ }
+ return null;
+}
+
+fn tokensOnSameLine(p: *Parse, token1: TokenIndex, token2: TokenIndex) bool {
+ return std.mem.indexOfScalar(u8, p.source[p.token_starts[token1]..p.token_starts[token2]], '\n') == null;
+}
+
+fn eatToken(p: *Parse, tag: Token.Tag) ?TokenIndex {
+ return if (p.token_tags[p.tok_i] == tag) p.nextToken() else null;
+}
+
+fn assertToken(p: *Parse, tag: Token.Tag) TokenIndex {
+ const token = p.nextToken();
+ assert(p.token_tags[token] == tag);
+ return token;
+}
+
+fn expectToken(p: *Parse, tag: Token.Tag) Error!TokenIndex {
+ if (p.token_tags[p.tok_i] != tag) {
+ return p.failMsg(.{
+ .tag = .expected_token,
+ .token = p.tok_i,
+ .extra = .{ .expected_tag = tag },
+ });
+ }
+ return p.nextToken();
+}
+
+fn expectSemicolon(p: *Parse, error_tag: AstError.Tag, recoverable: bool) Error!void {
+ if (p.token_tags[p.tok_i] == .semicolon) {
+ _ = p.nextToken();
+ return;
+ }
+ try p.warn(error_tag);
+ if (!recoverable) return error.ParseError;
+}
+
+fn nextToken(p: *Parse) TokenIndex {
+ const result = p.tok_i;
+ p.tok_i += 1;
+ return result;
+}
+
+const null_node: Node.Index = 0;
+
+const Parse = @This();
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+const Ast = std.zig.Ast;
+const Node = Ast.Node;
+const AstError = Ast.Error;
+const TokenIndex = Ast.TokenIndex;
+const Token = std.zig.Token;
+
+test {
+ _ = @import("parser_test.zig");
+}
diff --git a/lib/std/zig/c_translation.zig b/lib/std/zig/c_translation.zig
index a050e592a2..d33c74d777 100644
--- a/lib/std/zig/c_translation.zig
+++ b/lib/std/zig/c_translation.zig
@@ -75,7 +75,7 @@ fn castPtr(comptime DestType: type, target: anytype) DestType {
const source = ptrInfo(@TypeOf(target));
if (source.is_const and !dest.is_const or source.is_volatile and !dest.is_volatile)
- return @intToPtr(DestType, @ptrToInt(target))
+ return @qualCast(DestType, target)
else if (@typeInfo(dest.child) == .Opaque)
// dest.alignment would error out
return @ptrCast(DestType, target)
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig
deleted file mode 100644
index fdb122b19d..0000000000
--- a/lib/std/zig/parse.zig
+++ /dev/null
@@ -1,3852 +0,0 @@
-const std = @import("../std.zig");
-const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const Ast = std.zig.Ast;
-const Node = Ast.Node;
-const AstError = Ast.Error;
-const TokenIndex = Ast.TokenIndex;
-const Token = std.zig.Token;
-
-pub const Error = error{ParseError} || Allocator.Error;
-
-/// Result should be freed with tree.deinit() when there are
-/// no more references to any of the tokens or nodes.
-pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
- var tokens = Ast.TokenList{};
- defer tokens.deinit(gpa);
-
- // Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
- const estimated_token_count = source.len / 8;
- try tokens.ensureTotalCapacity(gpa, estimated_token_count);
-
- var tokenizer = std.zig.Tokenizer.init(source);
- while (true) {
- const token = tokenizer.next();
- try tokens.append(gpa, .{
- .tag = token.tag,
- .start = @intCast(u32, token.loc.start),
- });
- if (token.tag == .eof) break;
- }
-
- var parser: Parser = .{
- .source = source,
- .gpa = gpa,
- .token_tags = tokens.items(.tag),
- .token_starts = tokens.items(.start),
- .errors = .{},
- .nodes = .{},
- .extra_data = .{},
- .scratch = .{},
- .tok_i = 0,
- };
- defer parser.errors.deinit(gpa);
- defer parser.nodes.deinit(gpa);
- defer parser.extra_data.deinit(gpa);
- defer parser.scratch.deinit(gpa);
-
- // Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
- // Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
- const estimated_node_count = (tokens.len + 2) / 2;
- try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);
-
- try parser.parseRoot();
-
- // TODO experiment with compacting the MultiArrayList slices here
- return Ast{
- .source = source,
- .tokens = tokens.toOwnedSlice(),
- .nodes = parser.nodes.toOwnedSlice(),
- .extra_data = try parser.extra_data.toOwnedSlice(gpa),
- .errors = try parser.errors.toOwnedSlice(gpa),
- };
-}
-
-const null_node: Node.Index = 0;
-
-/// Represents in-progress parsing, will be converted to an Ast after completion.
-const Parser = struct {
- gpa: Allocator,
- source: []const u8,
- token_tags: []const Token.Tag,
- token_starts: []const Ast.ByteOffset,
- tok_i: TokenIndex,
- errors: std.ArrayListUnmanaged(AstError),
- nodes: Ast.NodeList,
- extra_data: std.ArrayListUnmanaged(Node.Index),
- scratch: std.ArrayListUnmanaged(Node.Index),
-
- const SmallSpan = union(enum) {
- zero_or_one: Node.Index,
- multi: Node.SubRange,
- };
-
- const Members = struct {
- len: usize,
- lhs: Node.Index,
- rhs: Node.Index,
- trailing: bool,
-
- fn toSpan(self: Members, p: *Parser) !Node.SubRange {
- if (self.len <= 2) {
- const nodes = [2]Node.Index{ self.lhs, self.rhs };
- return p.listToSpan(nodes[0..self.len]);
- } else {
- return Node.SubRange{ .start = self.lhs, .end = self.rhs };
- }
- }
- };
-
- fn listToSpan(p: *Parser, list: []const Node.Index) !Node.SubRange {
- try p.extra_data.appendSlice(p.gpa, list);
- return Node.SubRange{
- .start = @intCast(Node.Index, p.extra_data.items.len - list.len),
- .end = @intCast(Node.Index, p.extra_data.items.len),
- };
- }
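
Members.toSpan and listToSpan above implement the compact child-list encoding used throughout this parser: up to two child nodes are stored inline in a node's lhs/rhs fields, and longer lists are appended to extra_data and referenced by a (start, end) range. A standalone sketch of the spill-to-extra_data step; Index and SubRange here are stand-ins for Node.Index and Node.SubRange, not part of the patch:

const std = @import("std");

const Index = u32;
const SubRange = struct { start: Index, end: Index };

// Same shape as Parser.listToSpan above: copy the scratch list into the flat
// extra_data array and hand back the index range it now occupies.
fn listToSpan(extra: *std.ArrayList(Index), list: []const Index) !SubRange {
    try extra.appendSlice(list);
    return .{
        .start = @intCast(Index, extra.items.len - list.len),
        .end = @intCast(Index, extra.items.len),
    };
}

test "three or more children spill into extra_data" {
    var extra = std.ArrayList(Index).init(std.testing.allocator);
    defer extra.deinit();

    const span = try listToSpan(&extra, &[_]Index{ 7, 8, 9 });
    try std.testing.expectEqual(@as(Index, 0), span.start);
    try std.testing.expectEqual(@as(Index, 3), span.end);
    try std.testing.expectEqualSlices(Index, &[_]Index{ 7, 8, 9 }, extra.items[span.start..span.end]);
}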
-
- fn addNode(p: *Parser, elem: Ast.NodeList.Elem) Allocator.Error!Node.Index {
- const result = @intCast(Node.Index, p.nodes.len);
- try p.nodes.append(p.gpa, elem);
- return result;
- }
-
- fn setNode(p: *Parser, i: usize, elem: Ast.NodeList.Elem) Node.Index {
- p.nodes.set(i, elem);
- return @intCast(Node.Index, i);
- }
-
- fn reserveNode(p: *Parser, tag: Ast.Node.Tag) !usize {
- try p.nodes.resize(p.gpa, p.nodes.len + 1);
- p.nodes.items(.tag)[p.nodes.len - 1] = tag;
- return p.nodes.len - 1;
- }
-
- fn unreserveNode(p: *Parser, node_index: usize) void {
- if (p.nodes.len == node_index) {
- p.nodes.resize(p.gpa, p.nodes.len - 1) catch unreachable;
- } else {
- // There is a zombie node left in the tree; let's make it as inoffensive as possible
- // (sadly there's no no-op node)
- p.nodes.items(.tag)[node_index] = .unreachable_literal;
- p.nodes.items(.main_token)[node_index] = p.tok_i;
- }
- }
-
- fn addExtra(p: *Parser, extra: anytype) Allocator.Error!Node.Index {
- const fields = std.meta.fields(@TypeOf(extra));
- try p.extra_data.ensureUnusedCapacity(p.gpa, fields.len);
- const result = @intCast(u32, p.extra_data.items.len);
- inline for (fields) |field| {
- comptime assert(field.type == Node.Index);
- p.extra_data.appendAssumeCapacity(@field(extra, field.name));
- }
- return result;
- }
-
- fn warnExpected(p: *Parser, expected_token: Token.Tag) error{OutOfMemory}!void {
- @setCold(true);
- try p.warnMsg(.{
- .tag = .expected_token,
- .token = p.tok_i,
- .extra = .{ .expected_tag = expected_token },
- });
- }
-
- fn warn(p: *Parser, error_tag: AstError.Tag) error{OutOfMemory}!void {
- @setCold(true);
- try p.warnMsg(.{ .tag = error_tag, .token = p.tok_i });
- }
-
- fn warnMsg(p: *Parser, msg: Ast.Error) error{OutOfMemory}!void {
- @setCold(true);
- switch (msg.tag) {
- .expected_semi_after_decl,
- .expected_semi_after_stmt,
- .expected_comma_after_field,
- .expected_comma_after_arg,
- .expected_comma_after_param,
- .expected_comma_after_initializer,
- .expected_comma_after_switch_prong,
- .expected_semi_or_else,
- .expected_semi_or_lbrace,
- .expected_token,
- .expected_block,
- .expected_block_or_assignment,
- .expected_block_or_expr,
- .expected_block_or_field,
- .expected_expr,
- .expected_expr_or_assignment,
- .expected_fn,
- .expected_inlinable,
- .expected_labelable,
- .expected_param_list,
- .expected_prefix_expr,
- .expected_primary_type_expr,
- .expected_pub_item,
- .expected_return_type,
- .expected_suffix_op,
- .expected_type_expr,
- .expected_var_decl,
- .expected_var_decl_or_fn,
- .expected_loop_payload,
- .expected_container,
- => if (msg.token != 0 and !p.tokensOnSameLine(msg.token - 1, msg.token)) {
- var copy = msg;
- copy.token_is_prev = true;
- copy.token -= 1;
- return p.errors.append(p.gpa, copy);
- },
- else => {},
- }
- try p.errors.append(p.gpa, msg);
- }
-
- fn fail(p: *Parser, tag: Ast.Error.Tag) error{ ParseError, OutOfMemory } {
- @setCold(true);
- return p.failMsg(.{ .tag = tag, .token = p.tok_i });
- }
-
- fn failExpected(p: *Parser, expected_token: Token.Tag) error{ ParseError, OutOfMemory } {
- @setCold(true);
- return p.failMsg(.{
- .tag = .expected_token,
- .token = p.tok_i,
- .extra = .{ .expected_tag = expected_token },
- });
- }
-
- fn failMsg(p: *Parser, msg: Ast.Error) error{ ParseError, OutOfMemory } {
- @setCold(true);
- try p.warnMsg(msg);
- return error.ParseError;
- }
-
- /// Root <- skip container_doc_comment? ContainerMembers eof
- fn parseRoot(p: *Parser) !void {
- // Root node must be index 0.
- p.nodes.appendAssumeCapacity(.{
- .tag = .root,
- .main_token = 0,
- .data = undefined,
- });
- const root_members = try p.parseContainerMembers();
- const root_decls = try root_members.toSpan(p);
- if (p.token_tags[p.tok_i] != .eof) {
- try p.warnExpected(.eof);
- }
- p.nodes.items(.data)[0] = .{
- .lhs = root_decls.start,
- .rhs = root_decls.end,
- };
- }
-
- /// ContainerMembers <- ContainerDeclarations (ContainerField COMMA)* (ContainerField / ContainerDeclarations)
- ///
- /// ContainerDeclarations
- /// <- TestDecl ContainerDeclarations
- /// / ComptimeDecl ContainerDeclarations
- /// / doc_comment? KEYWORD_pub? Decl ContainerDeclarations
- /// /
- ///
- /// ComptimeDecl <- KEYWORD_comptime Block
- fn parseContainerMembers(p: *Parser) !Members {
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- var field_state: union(enum) {
- /// No fields have been seen.
- none,
- /// Currently parsing fields.
- seen,
- /// Saw fields and then a declaration after them.
- /// Payload is first token of previous declaration.
- end: Node.Index,
- /// There was a declaration between fields, don't report more errors.
- err,
- } = .none;
-
- var last_field: TokenIndex = undefined;
-
- // Skip container doc comments.
- while (p.eatToken(.container_doc_comment)) |_| {}
-
- var trailing = false;
- while (true) {
- const doc_comment = try p.eatDocComments();
-
- switch (p.token_tags[p.tok_i]) {
- .keyword_test => {
- if (doc_comment) |some| {
- try p.warnMsg(.{ .tag = .test_doc_comment, .token = some });
- }
- const test_decl_node = try p.expectTestDeclRecoverable();
- if (test_decl_node != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = test_decl_node };
- }
- try p.scratch.append(p.gpa, test_decl_node);
- }
- trailing = false;
- },
- .keyword_comptime => switch (p.token_tags[p.tok_i + 1]) {
- .l_brace => {
- if (doc_comment) |some| {
- try p.warnMsg(.{ .tag = .comptime_doc_comment, .token = some });
- }
- const comptime_token = p.nextToken();
- const block = p.parseBlock() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => blk: {
- p.findNextContainerMember();
- break :blk null_node;
- },
- };
- if (block != 0) {
- const comptime_node = try p.addNode(.{
- .tag = .@"comptime",
- .main_token = comptime_token,
- .data = .{
- .lhs = block,
- .rhs = undefined,
- },
- });
- if (field_state == .seen) {
- field_state = .{ .end = comptime_node };
- }
- try p.scratch.append(p.gpa, comptime_node);
- }
- trailing = false;
- },
- else => {
- const identifier = p.tok_i;
- defer last_field = identifier;
- const container_field = p.expectContainerField() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- continue;
- },
- };
- switch (field_state) {
- .none => field_state = .seen,
- .err, .seen => {},
- .end => |node| {
- try p.warnMsg(.{
- .tag = .decl_between_fields,
- .token = p.nodes.items(.main_token)[node],
- });
- try p.warnMsg(.{
- .tag = .previous_field,
- .is_note = true,
- .token = last_field,
- });
- try p.warnMsg(.{
- .tag = .next_field,
- .is_note = true,
- .token = identifier,
- });
- // Continue parsing; error will be reported later.
- field_state = .err;
- },
- }
- try p.scratch.append(p.gpa, container_field);
- switch (p.token_tags[p.tok_i]) {
- .comma => {
- p.tok_i += 1;
- trailing = true;
- continue;
- },
- .r_brace, .eof => {
- trailing = false;
- break;
- },
- else => {},
- }
- // A decl is not allowed to follow a field with no comma.
- // Report error but recover parser.
- try p.warn(.expected_comma_after_field);
- p.findNextContainerMember();
- },
- },
- .keyword_pub => {
- p.tok_i += 1;
- const top_level_decl = try p.expectTopLevelDeclRecoverable();
- if (top_level_decl != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = top_level_decl };
- }
- try p.scratch.append(p.gpa, top_level_decl);
- }
- trailing = p.token_tags[p.tok_i - 1] == .semicolon;
- },
- .keyword_usingnamespace => {
- const node = try p.expectUsingNamespaceRecoverable();
- if (node != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = node };
- }
- try p.scratch.append(p.gpa, node);
- }
- trailing = p.token_tags[p.tok_i - 1] == .semicolon;
- },
- .keyword_const,
- .keyword_var,
- .keyword_threadlocal,
- .keyword_export,
- .keyword_extern,
- .keyword_inline,
- .keyword_noinline,
- .keyword_fn,
- => {
- const top_level_decl = try p.expectTopLevelDeclRecoverable();
- if (top_level_decl != 0) {
- if (field_state == .seen) {
- field_state = .{ .end = top_level_decl };
- }
- try p.scratch.append(p.gpa, top_level_decl);
- }
- trailing = p.token_tags[p.tok_i - 1] == .semicolon;
- },
- .eof, .r_brace => {
- if (doc_comment) |tok| {
- try p.warnMsg(.{
- .tag = .unattached_doc_comment,
- .token = tok,
- });
- }
- break;
- },
- else => {
- const c_container = p.parseCStyleContainer() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => false,
- };
- if (c_container) continue;
-
- const identifier = p.tok_i;
- defer last_field = identifier;
- const container_field = p.expectContainerField() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- continue;
- },
- };
- switch (field_state) {
- .none => field_state = .seen,
- .err, .seen => {},
- .end => |node| {
- try p.warnMsg(.{
- .tag = .decl_between_fields,
- .token = p.nodes.items(.main_token)[node],
- });
- try p.warnMsg(.{
- .tag = .previous_field,
- .is_note = true,
- .token = last_field,
- });
- try p.warnMsg(.{
- .tag = .next_field,
- .is_note = true,
- .token = identifier,
- });
- // Continue parsing; error will be reported later.
- field_state = .err;
- },
- }
- try p.scratch.append(p.gpa, container_field);
- switch (p.token_tags[p.tok_i]) {
- .comma => {
- p.tok_i += 1;
- trailing = true;
- continue;
- },
- .r_brace, .eof => {
- trailing = false;
- break;
- },
- else => {},
- }
- // A decl is not allowed to follow a field with no comma.
- // Report error but recover parser.
- try p.warn(.expected_comma_after_field);
- if (p.token_tags[p.tok_i] == .semicolon and p.token_tags[identifier] == .identifier) {
- try p.warnMsg(.{
- .tag = .var_const_decl,
- .is_note = true,
- .token = identifier,
- });
- }
- p.findNextContainerMember();
- continue;
- },
- }
- }
-
- const items = p.scratch.items[scratch_top..];
- switch (items.len) {
- 0 => return Members{
- .len = 0,
- .lhs = 0,
- .rhs = 0,
- .trailing = trailing,
- },
- 1 => return Members{
- .len = 1,
- .lhs = items[0],
- .rhs = 0,
- .trailing = trailing,
- },
- 2 => return Members{
- .len = 2,
- .lhs = items[0],
- .rhs = items[1],
- .trailing = trailing,
- },
- else => {
- const span = try p.listToSpan(items);
- return Members{
- .len = items.len,
- .lhs = span.start,
- .rhs = span.end,
- .trailing = trailing,
- };
- },
- }
- }
-
- /// Attempts to find next container member by searching for certain tokens
- fn findNextContainerMember(p: *Parser) void {
- var level: u32 = 0;
- while (true) {
- const tok = p.nextToken();
- switch (p.token_tags[tok]) {
- // Any of these can start a new top level declaration.
- .keyword_test,
- .keyword_comptime,
- .keyword_pub,
- .keyword_export,
- .keyword_extern,
- .keyword_inline,
- .keyword_noinline,
- .keyword_usingnamespace,
- .keyword_threadlocal,
- .keyword_const,
- .keyword_var,
- .keyword_fn,
- => {
- if (level == 0) {
- p.tok_i -= 1;
- return;
- }
- },
- .identifier => {
- if (p.token_tags[tok + 1] == .comma and level == 0) {
- p.tok_i -= 1;
- return;
- }
- },
- .comma, .semicolon => {
- // this decl was likely meant to end here
- if (level == 0) {
- return;
- }
- },
- .l_paren, .l_bracket, .l_brace => level += 1,
- .r_paren, .r_bracket => {
- if (level != 0) level -= 1;
- },
- .r_brace => {
- if (level == 0) {
- // end of container, exit
- p.tok_i -= 1;
- return;
- }
- level -= 1;
- },
- .eof => {
- p.tok_i -= 1;
- return;
- },
- else => {},
- }
- }
- }
-
- /// Attempts to find the next statement by searching for a semicolon
- fn findNextStmt(p: *Parser) void {
- var level: u32 = 0;
- while (true) {
- const tok = p.nextToken();
- switch (p.token_tags[tok]) {
- .l_brace => level += 1,
- .r_brace => {
- if (level == 0) {
- p.tok_i -= 1;
- return;
- }
- level -= 1;
- },
- .semicolon => {
- if (level == 0) {
- return;
- }
- },
- .eof => {
- p.tok_i -= 1;
- return;
- },
- else => {},
- }
- }
- }
-
- /// TestDecl <- KEYWORD_test (STRINGLITERALSINGLE / IDENTIFIER)? Block
- fn expectTestDecl(p: *Parser) !Node.Index {
- const test_token = p.assertToken(.keyword_test);
- const name_token = switch (p.token_tags[p.nextToken()]) {
- .string_literal, .identifier => p.tok_i - 1,
- else => blk: {
- p.tok_i -= 1;
- break :blk null;
- },
- };
- const block_node = try p.parseBlock();
- if (block_node == 0) return p.fail(.expected_block);
- return p.addNode(.{
- .tag = .test_decl,
- .main_token = test_token,
- .data = .{
- .lhs = name_token orelse 0,
- .rhs = block_node,
- },
- });
- }
-
- fn expectTestDeclRecoverable(p: *Parser) error{OutOfMemory}!Node.Index {
- return p.expectTestDecl() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- return null_node;
- },
- };
- }
-
- /// Decl
- /// <- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / (KEYWORD_inline / KEYWORD_noinline))? FnProto (SEMICOLON / Block)
- /// / (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? VarDecl
- /// / KEYWORD_usingnamespace Expr SEMICOLON
- fn expectTopLevelDecl(p: *Parser) !Node.Index {
- const extern_export_inline_token = p.nextToken();
- var is_extern: bool = false;
- var expect_fn: bool = false;
- var expect_var_or_fn: bool = false;
- switch (p.token_tags[extern_export_inline_token]) {
- .keyword_extern => {
- _ = p.eatToken(.string_literal);
- is_extern = true;
- expect_var_or_fn = true;
- },
- .keyword_export => expect_var_or_fn = true,
- .keyword_inline, .keyword_noinline => expect_fn = true,
- else => p.tok_i -= 1,
- }
- const fn_proto = try p.parseFnProto();
- if (fn_proto != 0) {
- switch (p.token_tags[p.tok_i]) {
- .semicolon => {
- p.tok_i += 1;
- return fn_proto;
- },
- .l_brace => {
- if (is_extern) {
- try p.warnMsg(.{ .tag = .extern_fn_body, .token = extern_export_inline_token });
- return null_node;
- }
- const fn_decl_index = try p.reserveNode(.fn_decl);
- errdefer p.unreserveNode(fn_decl_index);
-
- const body_block = try p.parseBlock();
- assert(body_block != 0);
- return p.setNode(fn_decl_index, .{
- .tag = .fn_decl,
- .main_token = p.nodes.items(.main_token)[fn_proto],
- .data = .{
- .lhs = fn_proto,
- .rhs = body_block,
- },
- });
- },
- else => {
- // Since parseBlock only returns error.ParseError on
- // a missing '}', we can assume this function was
- // supposed to end here.
- try p.warn(.expected_semi_or_lbrace);
- return null_node;
- },
- }
- }
- if (expect_fn) {
- try p.warn(.expected_fn);
- return error.ParseError;
- }
-
- const thread_local_token = p.eatToken(.keyword_threadlocal);
- const var_decl = try p.parseVarDecl();
- if (var_decl != 0) {
- try p.expectSemicolon(.expected_semi_after_decl, false);
- return var_decl;
- }
- if (thread_local_token != null) {
- return p.fail(.expected_var_decl);
- }
- if (expect_var_or_fn) {
- return p.fail(.expected_var_decl_or_fn);
- }
- if (p.token_tags[p.tok_i] != .keyword_usingnamespace) {
- return p.fail(.expected_pub_item);
- }
- return p.expectUsingNamespace();
- }
-
- fn expectTopLevelDeclRecoverable(p: *Parser) error{OutOfMemory}!Node.Index {
- return p.expectTopLevelDecl() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- return null_node;
- },
- };
- }
-
- fn expectUsingNamespace(p: *Parser) !Node.Index {
- const usingnamespace_token = p.assertToken(.keyword_usingnamespace);
- const expr = try p.expectExpr();
- try p.expectSemicolon(.expected_semi_after_decl, false);
- return p.addNode(.{
- .tag = .@"usingnamespace",
- .main_token = usingnamespace_token,
- .data = .{
- .lhs = expr,
- .rhs = undefined,
- },
- });
- }
-
- fn expectUsingNamespaceRecoverable(p: *Parser) error{OutOfMemory}!Node.Index {
- return p.expectUsingNamespace() catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextContainerMember();
- return null_node;
- },
- };
- }
-
- /// FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
- fn parseFnProto(p: *Parser) !Node.Index {
- const fn_token = p.eatToken(.keyword_fn) orelse return null_node;
-
- // We want the fn proto node to be before its children in the array.
- const fn_proto_index = try p.reserveNode(.fn_proto);
- errdefer p.unreserveNode(fn_proto_index);
-
- _ = p.eatToken(.identifier);
- const params = try p.parseParamDeclList();
- const align_expr = try p.parseByteAlign();
- const addrspace_expr = try p.parseAddrSpace();
- const section_expr = try p.parseLinkSection();
- const callconv_expr = try p.parseCallconv();
- _ = p.eatToken(.bang);
-
- const return_type_expr = try p.parseTypeExpr();
- if (return_type_expr == 0) {
- // most likely the user forgot to specify the return type.
- // Mark return type as invalid and try to continue.
- try p.warn(.expected_return_type);
- }
-
- if (align_expr == 0 and section_expr == 0 and callconv_expr == 0 and addrspace_expr == 0) {
- switch (params) {
- .zero_or_one => |param| return p.setNode(fn_proto_index, .{
- .tag = .fn_proto_simple,
- .main_token = fn_token,
- .data = .{
- .lhs = param,
- .rhs = return_type_expr,
- },
- }),
- .multi => |span| {
- return p.setNode(fn_proto_index, .{
- .tag = .fn_proto_multi,
- .main_token = fn_token,
- .data = .{
- .lhs = try p.addExtra(Node.SubRange{
- .start = span.start,
- .end = span.end,
- }),
- .rhs = return_type_expr,
- },
- });
- },
- }
- }
- switch (params) {
- .zero_or_one => |param| return p.setNode(fn_proto_index, .{
- .tag = .fn_proto_one,
- .main_token = fn_token,
- .data = .{
- .lhs = try p.addExtra(Node.FnProtoOne{
- .param = param,
- .align_expr = align_expr,
- .addrspace_expr = addrspace_expr,
- .section_expr = section_expr,
- .callconv_expr = callconv_expr,
- }),
- .rhs = return_type_expr,
- },
- }),
- .multi => |span| {
- return p.setNode(fn_proto_index, .{
- .tag = .fn_proto,
- .main_token = fn_token,
- .data = .{
- .lhs = try p.addExtra(Node.FnProto{
- .params_start = span.start,
- .params_end = span.end,
- .align_expr = align_expr,
- .addrspace_expr = addrspace_expr,
- .section_expr = section_expr,
- .callconv_expr = callconv_expr,
- }),
- .rhs = return_type_expr,
- },
- });
- },
- }
- }
-
- /// VarDecl <- (KEYWORD_const / KEYWORD_var) IDENTIFIER (COLON TypeExpr)? ByteAlign? AddrSpace? LinkSection? (EQUAL Expr)? SEMICOLON
- fn parseVarDecl(p: *Parser) !Node.Index {
- const mut_token = p.eatToken(.keyword_const) orelse
- p.eatToken(.keyword_var) orelse
- return null_node;
-
- _ = try p.expectToken(.identifier);
- const type_node: Node.Index = if (p.eatToken(.colon) == null) 0 else try p.expectTypeExpr();
- const align_node = try p.parseByteAlign();
- const addrspace_node = try p.parseAddrSpace();
- const section_node = try p.parseLinkSection();
- const init_node: Node.Index = switch (p.token_tags[p.tok_i]) {
- .equal_equal => blk: {
- try p.warn(.wrong_equal_var_decl);
- p.tok_i += 1;
- break :blk try p.expectExpr();
- },
- .equal => blk: {
- p.tok_i += 1;
- break :blk try p.expectExpr();
- },
- else => 0,
- };
- if (section_node == 0 and addrspace_node == 0) {
- if (align_node == 0) {
- return p.addNode(.{
- .tag = .simple_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = type_node,
- .rhs = init_node,
- },
- });
- } else if (type_node == 0) {
- return p.addNode(.{
- .tag = .aligned_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = align_node,
- .rhs = init_node,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .local_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = try p.addExtra(Node.LocalVarDecl{
- .type_node = type_node,
- .align_node = align_node,
- }),
- .rhs = init_node,
- },
- });
- }
- } else {
- return p.addNode(.{
- .tag = .global_var_decl,
- .main_token = mut_token,
- .data = .{
- .lhs = try p.addExtra(Node.GlobalVarDecl{
- .type_node = type_node,
- .align_node = align_node,
- .addrspace_node = addrspace_node,
- .section_node = section_node,
- }),
- .rhs = init_node,
- },
- });
- }
- }
-
- /// ContainerField
- /// <- doc_comment? KEYWORD_comptime? IDENTIFIER (COLON TypeExpr)? ByteAlign? (EQUAL Expr)?
- /// / doc_comment? KEYWORD_comptime? (IDENTIFIER COLON)? !KEYWORD_fn TypeExpr ByteAlign? (EQUAL Expr)?
- fn expectContainerField(p: *Parser) !Node.Index {
- var main_token = p.tok_i;
- _ = p.eatToken(.keyword_comptime);
- const tuple_like = p.token_tags[p.tok_i] != .identifier or p.token_tags[p.tok_i + 1] != .colon;
- if (!tuple_like) {
- main_token = p.assertToken(.identifier);
- }
-
- var align_expr: Node.Index = 0;
- var type_expr: Node.Index = 0;
- if (p.eatToken(.colon) != null or tuple_like) {
- type_expr = try p.expectTypeExpr();
- align_expr = try p.parseByteAlign();
- }
-
- const value_expr: Node.Index = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
-
- if (align_expr == 0) {
- return p.addNode(.{
- .tag = .container_field_init,
- .main_token = main_token,
- .data = .{
- .lhs = type_expr,
- .rhs = value_expr,
- },
- });
- } else if (value_expr == 0) {
- return p.addNode(.{
- .tag = .container_field_align,
- .main_token = main_token,
- .data = .{
- .lhs = type_expr,
- .rhs = align_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .container_field,
- .main_token = main_token,
- .data = .{
- .lhs = type_expr,
- .rhs = try p.addExtra(Node.ContainerField{
- .value_expr = value_expr,
- .align_expr = align_expr,
- }),
- },
- });
- }
- }
-
- /// Statement
- /// <- KEYWORD_comptime? VarDecl
- /// / KEYWORD_comptime BlockExprStatement
- /// / KEYWORD_nosuspend BlockExprStatement
- /// / KEYWORD_suspend BlockExprStatement
- /// / KEYWORD_defer BlockExprStatement
- /// / KEYWORD_errdefer Payload? BlockExprStatement
- /// / IfStatement
- /// / LabeledStatement
- /// / SwitchExpr
- /// / AssignExpr SEMICOLON
- fn parseStatement(p: *Parser, allow_defer_var: bool) Error!Node.Index {
- const comptime_token = p.eatToken(.keyword_comptime);
-
- if (allow_defer_var) {
- const var_decl = try p.parseVarDecl();
- if (var_decl != 0) {
- try p.expectSemicolon(.expected_semi_after_decl, true);
- return var_decl;
- }
- }
-
- if (comptime_token) |token| {
- return p.addNode(.{
- .tag = .@"comptime",
- .main_token = token,
- .data = .{
- .lhs = try p.expectBlockExprStatement(),
- .rhs = undefined,
- },
- });
- }
-
- switch (p.token_tags[p.tok_i]) {
- .keyword_nosuspend => {
- return p.addNode(.{
- .tag = .@"nosuspend",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectBlockExprStatement(),
- .rhs = undefined,
- },
- });
- },
- .keyword_suspend => {
- const token = p.nextToken();
- const block_expr = try p.expectBlockExprStatement();
- return p.addNode(.{
- .tag = .@"suspend",
- .main_token = token,
- .data = .{
- .lhs = block_expr,
- .rhs = undefined,
- },
- });
- },
- .keyword_defer => if (allow_defer_var) return p.addNode(.{
- .tag = .@"defer",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = try p.expectBlockExprStatement(),
- },
- }),
- .keyword_errdefer => if (allow_defer_var) return p.addNode(.{
- .tag = .@"errdefer",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.parsePayload(),
- .rhs = try p.expectBlockExprStatement(),
- },
- }),
- .keyword_switch => return p.expectSwitchExpr(),
- .keyword_if => return p.expectIfStatement(),
- .keyword_enum, .keyword_struct, .keyword_union => {
- const identifier = p.tok_i + 1;
- if (try p.parseCStyleContainer()) {
- // Return something so that `expectStatement` is happy.
- return p.addNode(.{
- .tag = .identifier,
- .main_token = identifier,
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- });
- }
- },
- else => {},
- }
-
- const labeled_statement = try p.parseLabeledStatement();
- if (labeled_statement != 0) return labeled_statement;
-
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr != 0) {
- try p.expectSemicolon(.expected_semi_after_stmt, true);
- return assign_expr;
- }
-
- return null_node;
- }
-
- fn expectStatement(p: *Parser, allow_defer_var: bool) !Node.Index {
- const statement = try p.parseStatement(allow_defer_var);
- if (statement == 0) {
- return p.fail(.expected_statement);
- }
- return statement;
- }
-
- /// If a parse error occurs, reports an error, but then finds the next statement
- /// and returns that one instead. If a parse error occurs but there is no following
- /// statement, returns 0.
- fn expectStatementRecoverable(p: *Parser) Error!Node.Index {
- while (true) {
- return p.expectStatement(true) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.ParseError => {
- p.findNextStmt(); // Try to skip to the next statement.
- switch (p.token_tags[p.tok_i]) {
- .r_brace => return null_node,
- .eof => return error.ParseError,
- else => continue,
- }
- },
- };
- }
- }
-
- /// IfStatement
- /// <- IfPrefix BlockExpr ( KEYWORD_else Payload? Statement )?
- /// / IfPrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
- fn expectIfStatement(p: *Parser) !Node.Index {
- const if_token = p.assertToken(.keyword_if);
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
-
- // TODO propose to change the syntax so that semicolons are always required
- // inside if statements, even if there is an `else`.
- var else_required = false;
- const then_expr = blk: {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) break :blk block_expr;
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr == 0) {
- return p.fail(.expected_block_or_assignment);
- }
- if (p.eatToken(.semicolon)) |_| {
- return p.addNode(.{
- .tag = .if_simple,
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = assign_expr,
- },
- });
- }
- else_required = true;
- break :blk assign_expr;
- };
- _ = p.eatToken(.keyword_else) orelse {
- if (else_required) {
- try p.warn(.expected_semi_or_else);
- }
- return p.addNode(.{
- .tag = .if_simple,
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectStatement(false);
- return p.addNode(.{
- .tag = .@"if",
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// LabeledStatement <- BlockLabel? (Block / LoopStatement)
- fn parseLabeledStatement(p: *Parser) !Node.Index {
- const label_token = p.parseBlockLabel();
- const block = try p.parseBlock();
- if (block != 0) return block;
-
- const loop_stmt = try p.parseLoopStatement();
- if (loop_stmt != 0) return loop_stmt;
-
- if (label_token != 0) {
- const after_colon = p.tok_i;
- const node = try p.parseTypeExpr();
- if (node != 0) {
- const a = try p.parseByteAlign();
- const b = try p.parseAddrSpace();
- const c = try p.parseLinkSection();
- const d = if (p.eatToken(.equal) == null) 0 else try p.expectExpr();
- if (a != 0 or b != 0 or c != 0 or d != 0) {
- return p.failMsg(.{ .tag = .expected_var_const, .token = label_token });
- }
- }
- return p.failMsg(.{ .tag = .expected_labelable, .token = after_colon });
- }
-
- return null_node;
- }
-
- /// LoopStatement <- KEYWORD_inline? (ForStatement / WhileStatement)
- fn parseLoopStatement(p: *Parser) !Node.Index {
- const inline_token = p.eatToken(.keyword_inline);
-
- const for_statement = try p.parseForStatement();
- if (for_statement != 0) return for_statement;
-
- const while_statement = try p.parseWhileStatement();
- if (while_statement != 0) return while_statement;
-
- if (inline_token == null) return null_node;
-
- // If we've seen "inline", there should have been a "for" or "while"
- return p.fail(.expected_inlinable);
- }
-
- /// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
- ///
- /// ForStatement
- /// <- ForPrefix BlockExpr ( KEYWORD_else Statement )?
- /// / ForPrefix AssignExpr ( SEMICOLON / KEYWORD_else Statement )
- fn parseForStatement(p: *Parser) !Node.Index {
- const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
-
- // TODO propose to change the syntax so that semicolons are always required
- // inside while statements, even if there is an `else`.
- var else_required = false;
- const then_expr = blk: {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) break :blk block_expr;
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr == 0) {
- return p.fail(.expected_block_or_assignment);
- }
- if (p.eatToken(.semicolon)) |_| {
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = assign_expr,
- },
- });
- }
- else_required = true;
- break :blk assign_expr;
- };
- _ = p.eatToken(.keyword_else) orelse {
- if (else_required) {
- try p.warn(.expected_semi_or_else);
- }
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = then_expr,
- },
- });
- };
- return p.addNode(.{
- .tag = .@"for",
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = try p.expectStatement(false),
- }),
- },
- });
- }
-
- /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
- ///
- /// WhileStatement
- /// <- WhilePrefix BlockExpr ( KEYWORD_else Payload? Statement )?
- /// / WhilePrefix AssignExpr ( SEMICOLON / KEYWORD_else Payload? Statement )
- fn parseWhileStatement(p: *Parser) !Node.Index {
- const while_token = p.eatToken(.keyword_while) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
- const cont_expr = try p.parseWhileContinueExpr();
-
- // TODO propose to change the syntax so that semicolons are always required
- // inside while statements, even if there is an `else`.
- var else_required = false;
- const then_expr = blk: {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) break :blk block_expr;
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr == 0) {
- return p.fail(.expected_block_or_assignment);
- }
- if (p.eatToken(.semicolon)) |_| {
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = assign_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = assign_expr,
- }),
- },
- });
- }
- }
- else_required = true;
- break :blk assign_expr;
- };
- _ = p.eatToken(.keyword_else) orelse {
- if (else_required) {
- try p.warn(.expected_semi_or_else);
- }
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- }),
- },
- });
- }
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectStatement(false);
- return p.addNode(.{
- .tag = .@"while",
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.While{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// BlockExprStatement
- /// <- BlockExpr
- /// / AssignExpr SEMICOLON
- fn parseBlockExprStatement(p: *Parser) !Node.Index {
- const block_expr = try p.parseBlockExpr();
- if (block_expr != 0) {
- return block_expr;
- }
- const assign_expr = try p.parseAssignExpr();
- if (assign_expr != 0) {
- try p.expectSemicolon(.expected_semi_after_stmt, true);
- return assign_expr;
- }
- return null_node;
- }
-
- fn expectBlockExprStatement(p: *Parser) !Node.Index {
- const node = try p.parseBlockExprStatement();
- if (node == 0) {
- return p.fail(.expected_block_or_expr);
- }
- return node;
- }
-
- /// BlockExpr <- BlockLabel? Block
- fn parseBlockExpr(p: *Parser) Error!Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .identifier => {
- if (p.token_tags[p.tok_i + 1] == .colon and
- p.token_tags[p.tok_i + 2] == .l_brace)
- {
- p.tok_i += 2;
- return p.parseBlock();
- } else {
- return null_node;
- }
- },
- .l_brace => return p.parseBlock(),
- else => return null_node,
- }
- }
-
- /// AssignExpr <- Expr (AssignOp Expr)?
- ///
- /// AssignOp
- /// <- ASTERISKEQUAL
- /// / ASTERISKPIPEEQUAL
- /// / SLASHEQUAL
- /// / PERCENTEQUAL
- /// / PLUSEQUAL
- /// / PLUSPIPEEQUAL
- /// / MINUSEQUAL
- /// / MINUSPIPEEQUAL
- /// / LARROW2EQUAL
- /// / LARROW2PIPEEQUAL
- /// / RARROW2EQUAL
- /// / AMPERSANDEQUAL
- /// / CARETEQUAL
- /// / PIPEEQUAL
- /// / ASTERISKPERCENTEQUAL
- /// / PLUSPERCENTEQUAL
- /// / MINUSPERCENTEQUAL
- /// / EQUAL
- fn parseAssignExpr(p: *Parser) !Node.Index {
- const expr = try p.parseExpr();
- if (expr == 0) return null_node;
-
- const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
- .asterisk_equal => .assign_mul,
- .slash_equal => .assign_div,
- .percent_equal => .assign_mod,
- .plus_equal => .assign_add,
- .minus_equal => .assign_sub,
- .angle_bracket_angle_bracket_left_equal => .assign_shl,
- .angle_bracket_angle_bracket_left_pipe_equal => .assign_shl_sat,
- .angle_bracket_angle_bracket_right_equal => .assign_shr,
- .ampersand_equal => .assign_bit_and,
- .caret_equal => .assign_bit_xor,
- .pipe_equal => .assign_bit_or,
- .asterisk_percent_equal => .assign_mul_wrap,
- .plus_percent_equal => .assign_add_wrap,
- .minus_percent_equal => .assign_sub_wrap,
- .asterisk_pipe_equal => .assign_mul_sat,
- .plus_pipe_equal => .assign_add_sat,
- .minus_pipe_equal => .assign_sub_sat,
- .equal => .assign,
- else => return expr,
- };
- return p.addNode(.{
- .tag = tag,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = expr,
- .rhs = try p.expectExpr(),
- },
- });
- }
-
- fn expectAssignExpr(p: *Parser) !Node.Index {
- const expr = try p.parseAssignExpr();
- if (expr == 0) {
- return p.fail(.expected_expr_or_assignment);
- }
- return expr;
- }
-
- fn parseExpr(p: *Parser) Error!Node.Index {
- return p.parseExprPrecedence(0);
- }
-
- fn expectExpr(p: *Parser) Error!Node.Index {
- const node = try p.parseExpr();
- if (node == 0) {
- return p.fail(.expected_expr);
- } else {
- return node;
- }
- }
-
- const Assoc = enum {
- left,
- none,
- };
-
- const OperInfo = struct {
- prec: i8,
- tag: Node.Tag,
- assoc: Assoc = Assoc.left,
- };
-
- // A table of binary operator information. Higher precedence numbers are
- // stickier. All operators at the same precedence level should have the same
- // associativity.
- const operTable = std.enums.directEnumArrayDefault(Token.Tag, OperInfo, .{ .prec = -1, .tag = Node.Tag.root }, 0, .{
- .keyword_or = .{ .prec = 10, .tag = .bool_or },
-
- .keyword_and = .{ .prec = 20, .tag = .bool_and },
-
- .equal_equal = .{ .prec = 30, .tag = .equal_equal, .assoc = Assoc.none },
- .bang_equal = .{ .prec = 30, .tag = .bang_equal, .assoc = Assoc.none },
- .angle_bracket_left = .{ .prec = 30, .tag = .less_than, .assoc = Assoc.none },
- .angle_bracket_right = .{ .prec = 30, .tag = .greater_than, .assoc = Assoc.none },
- .angle_bracket_left_equal = .{ .prec = 30, .tag = .less_or_equal, .assoc = Assoc.none },
- .angle_bracket_right_equal = .{ .prec = 30, .tag = .greater_or_equal, .assoc = Assoc.none },
-
- .ampersand = .{ .prec = 40, .tag = .bit_and },
- .caret = .{ .prec = 40, .tag = .bit_xor },
- .pipe = .{ .prec = 40, .tag = .bit_or },
- .keyword_orelse = .{ .prec = 40, .tag = .@"orelse" },
- .keyword_catch = .{ .prec = 40, .tag = .@"catch" },
-
- .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .shl },
- .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .shl_sat },
- .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .shr },
-
- .plus = .{ .prec = 60, .tag = .add },
- .minus = .{ .prec = 60, .tag = .sub },
- .plus_plus = .{ .prec = 60, .tag = .array_cat },
- .plus_percent = .{ .prec = 60, .tag = .add_wrap },
- .minus_percent = .{ .prec = 60, .tag = .sub_wrap },
- .plus_pipe = .{ .prec = 60, .tag = .add_sat },
- .minus_pipe = .{ .prec = 60, .tag = .sub_sat },
-
- .pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets },
- .asterisk = .{ .prec = 70, .tag = .mul },
- .slash = .{ .prec = 70, .tag = .div },
- .percent = .{ .prec = 70, .tag = .mod },
- .asterisk_asterisk = .{ .prec = 70, .tag = .array_mult },
- .asterisk_percent = .{ .prec = 70, .tag = .mul_wrap },
- .asterisk_pipe = .{ .prec = 70, .tag = .mul_sat },
- });
-
- fn parseExprPrecedence(p: *Parser, min_prec: i32) Error!Node.Index {
- assert(min_prec >= 0);
- var node = try p.parsePrefixExpr();
- if (node == 0) {
- return null_node;
- }
-
- var banned_prec: i8 = -1;
-
- while (true) {
- const tok_tag = p.token_tags[p.tok_i];
- const info = operTable[@intCast(usize, @enumToInt(tok_tag))];
- if (info.prec < min_prec) {
- break;
- }
- if (info.prec == banned_prec) {
- return p.fail(.chained_comparison_operators);
- }
-
- const oper_token = p.nextToken();
- // Special-case handling for "catch"
- if (tok_tag == .keyword_catch) {
- _ = try p.parsePayload();
- }
- const rhs = try p.parseExprPrecedence(info.prec + 1);
- if (rhs == 0) {
- try p.warn(.expected_expr);
- return node;
- }
-
- {
- const tok_len = tok_tag.lexeme().?.len;
- const char_before = p.source[p.token_starts[oper_token] - 1];
- const char_after = p.source[p.token_starts[oper_token] + tok_len];
- if (tok_tag == .ampersand and char_after == '&') {
- // without types we don't know if '&&' was intended as 'bitwise_and address_of', or a c-style logical_and
- // The best the parser can do is recommend changing it to 'and' or ' & &'
- try p.warnMsg(.{ .tag = .invalid_ampersand_ampersand, .token = oper_token });
- } else if (std.ascii.isWhitespace(char_before) != std.ascii.isWhitespace(char_after)) {
- try p.warnMsg(.{ .tag = .mismatched_binary_op_whitespace, .token = oper_token });
- }
- }
-
- node = try p.addNode(.{
- .tag = info.tag,
- .main_token = oper_token,
- .data = .{
- .lhs = node,
- .rhs = rhs,
- },
- });
-
- if (info.assoc == Assoc.none) {
- banned_prec = info.prec;
- }
- }
-
- return node;
- }
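
operTable and parseExprPrecedence above form a standard precedence-climbing loop: parse a prefix expression, then keep folding in binary operators whose precedence is at least min_prec, recursing on the right-hand side with prec + 1 so operators of equal precedence associate to the left, while banned_prec rejects a second non-associative operator at the same level (the chained-comparison error). A self-contained sketch of the same loop over integers; the token and operator names are illustrative, not part of the patch:

const std = @import("std");

const Tok = union(enum) { num: i64, plus, star, eof };

fn prec(t: Tok) i8 {
    return switch (t) {
        .plus => 60,
        .star => 70,
        else => -1,
    };
}

fn parseExpr(toks: []const Tok, i: *usize, min_prec: i8) i64 {
    var lhs = switch (toks[i.*]) {
        .num => |n| blk: {
            i.* += 1;
            break :blk n;
        },
        else => unreachable,
    };
    while (prec(toks[i.*]) >= min_prec) {
        const op = toks[i.*];
        i.* += 1;
        // Same trick as parseExprPrecedence: recurse with prec + 1 so that
        // operators on the same level associate to the left.
        const rhs = parseExpr(toks, i, prec(op) + 1);
        lhs = switch (op) {
            .plus => lhs + rhs,
            .star => lhs * rhs,
            else => unreachable,
        };
    }
    return lhs;
}

test "1 + 2 * 3 groups as 1 + (2 * 3)" {
    const toks = [_]Tok{ .{ .num = 1 }, .plus, .{ .num = 2 }, .star, .{ .num = 3 }, .eof };
    var i: usize = 0;
    try std.testing.expectEqual(@as(i64, 7), parseExpr(&toks, &i, 0));
}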
-
- /// PrefixExpr <- PrefixOp* PrimaryExpr
- ///
- /// PrefixOp
- /// <- EXCLAMATIONMARK
- /// / MINUS
- /// / TILDE
- /// / MINUSPERCENT
- /// / AMPERSAND
- /// / KEYWORD_try
- /// / KEYWORD_await
- fn parsePrefixExpr(p: *Parser) Error!Node.Index {
- const tag: Node.Tag = switch (p.token_tags[p.tok_i]) {
- .bang => .bool_not,
- .minus => .negation,
- .tilde => .bit_not,
- .minus_percent => .negation_wrap,
- .ampersand => .address_of,
- .keyword_try => .@"try",
- .keyword_await => .@"await",
- else => return p.parsePrimaryExpr(),
- };
- return p.addNode(.{
- .tag = tag,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectPrefixExpr(),
- .rhs = undefined,
- },
- });
- }
-
- fn expectPrefixExpr(p: *Parser) Error!Node.Index {
- const node = try p.parsePrefixExpr();
- if (node == 0) {
- return p.fail(.expected_prefix_expr);
- }
- return node;
- }
-
- /// TypeExpr <- PrefixTypeOp* ErrorUnionExpr
- ///
- /// PrefixTypeOp
- /// <- QUESTIONMARK
- /// / KEYWORD_anyframe MINUSRARROW
- /// / SliceTypeStart (ByteAlign / AddrSpace / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
- /// / PtrTypeStart (AddrSpace / KEYWORD_align LPAREN Expr (COLON Expr COLON Expr)? RPAREN / KEYWORD_const / KEYWORD_volatile / KEYWORD_allowzero)*
- /// / ArrayTypeStart
- ///
- /// SliceTypeStart <- LBRACKET (COLON Expr)? RBRACKET
- ///
- /// PtrTypeStart
- /// <- ASTERISK
- /// / ASTERISK2
- /// / LBRACKET ASTERISK (LETTERC / COLON Expr)? RBRACKET
- ///
- /// ArrayTypeStart <- LBRACKET Expr (COLON Expr)? RBRACKET
- fn parseTypeExpr(p: *Parser) Error!Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .question_mark => return p.addNode(.{
- .tag = .optional_type,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectTypeExpr(),
- .rhs = undefined,
- },
- }),
- .keyword_anyframe => switch (p.token_tags[p.tok_i + 1]) {
- .arrow => return p.addNode(.{
- .tag = .anyframe_type,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = p.nextToken(),
- .rhs = try p.expectTypeExpr(),
- },
- }),
- else => return p.parseErrorUnionExpr(),
- },
- .asterisk => {
- const asterisk = p.nextToken();
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- if (mods.bit_range_start != 0) {
- return p.addNode(.{
- .tag = .ptr_type_bit_range,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrTypeBitRange{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- .bit_range_start = mods.bit_range_start,
- .bit_range_end = mods.bit_range_end,
- }),
- .rhs = elem_type,
- },
- });
- } else if (mods.addrspace_node != 0) {
- return p.addNode(.{
- .tag = .ptr_type,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- }
- },
- .asterisk_asterisk => {
- const asterisk = p.nextToken();
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- const inner: Node.Index = inner: {
- if (mods.bit_range_start != 0) {
- break :inner try p.addNode(.{
- .tag = .ptr_type_bit_range,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrTypeBitRange{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- .bit_range_start = mods.bit_range_start,
- .bit_range_end = mods.bit_range_end,
- }),
- .rhs = elem_type,
- },
- });
- } else if (mods.addrspace_node != 0) {
- break :inner try p.addNode(.{
- .tag = .ptr_type,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = 0,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- } else {
- break :inner try p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- }
- };
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = 0,
- .rhs = inner,
- },
- });
- },
- .l_bracket => switch (p.token_tags[p.tok_i + 1]) {
- .asterisk => {
- _ = p.nextToken();
- const asterisk = p.nextToken();
- var sentinel: Node.Index = 0;
- if (p.eatToken(.identifier)) |ident| {
- const ident_slice = p.source[p.token_starts[ident]..p.token_starts[ident + 1]];
- if (!std.mem.eql(u8, std.mem.trimRight(u8, ident_slice, &std.ascii.whitespace), "c")) {
- p.tok_i -= 1;
- }
- } else if (p.eatToken(.colon)) |_| {
- sentinel = try p.expectExpr();
- }
- _ = try p.expectToken(.r_bracket);
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- if (mods.bit_range_start == 0) {
- if (sentinel == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = asterisk,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_sentinel,
- .main_token = asterisk,
- .data = .{
- .lhs = sentinel,
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .ptr_type,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = sentinel,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- }
- } else {
- return p.addNode(.{
- .tag = .ptr_type_bit_range,
- .main_token = asterisk,
- .data = .{
- .lhs = try p.addExtra(Node.PtrTypeBitRange{
- .sentinel = sentinel,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- .bit_range_start = mods.bit_range_start,
- .bit_range_end = mods.bit_range_end,
- }),
- .rhs = elem_type,
- },
- });
- }
- },
- else => {
- const lbracket = p.nextToken();
- const len_expr = try p.parseExpr();
- const sentinel: Node.Index = if (p.eatToken(.colon)) |_|
- try p.expectExpr()
- else
- 0;
- _ = try p.expectToken(.r_bracket);
- if (len_expr == 0) {
- const mods = try p.parsePtrModifiers();
- const elem_type = try p.expectTypeExpr();
- if (mods.bit_range_start != 0) {
- try p.warnMsg(.{
- .tag = .invalid_bit_range,
- .token = p.nodes.items(.main_token)[mods.bit_range_start],
- });
- }
- if (sentinel == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_aligned,
- .main_token = lbracket,
- .data = .{
- .lhs = mods.align_node,
- .rhs = elem_type,
- },
- });
- } else if (mods.align_node == 0 and mods.addrspace_node == 0) {
- return p.addNode(.{
- .tag = .ptr_type_sentinel,
- .main_token = lbracket,
- .data = .{
- .lhs = sentinel,
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .ptr_type,
- .main_token = lbracket,
- .data = .{
- .lhs = try p.addExtra(Node.PtrType{
- .sentinel = sentinel,
- .align_node = mods.align_node,
- .addrspace_node = mods.addrspace_node,
- }),
- .rhs = elem_type,
- },
- });
- }
- } else {
- switch (p.token_tags[p.tok_i]) {
- .keyword_align,
- .keyword_const,
- .keyword_volatile,
- .keyword_allowzero,
- .keyword_addrspace,
- => return p.fail(.ptr_mod_on_array_child_type),
- else => {},
- }
- const elem_type = try p.expectTypeExpr();
- if (sentinel == 0) {
- return p.addNode(.{
- .tag = .array_type,
- .main_token = lbracket,
- .data = .{
- .lhs = len_expr,
- .rhs = elem_type,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .array_type_sentinel,
- .main_token = lbracket,
- .data = .{
- .lhs = len_expr,
- .rhs = try p.addExtra(.{
- .elem_type = elem_type,
- .sentinel = sentinel,
- }),
- },
- });
- }
- }
- },
- },
- else => return p.parseErrorUnionExpr(),
- }
- }
-
- fn expectTypeExpr(p: *Parser) Error!Node.Index {
- const node = try p.parseTypeExpr();
- if (node == 0) {
- return p.fail(.expected_type_expr);
- }
- return node;
- }
-
- /// PrimaryExpr
- /// <- AsmExpr
- /// / IfExpr
- /// / KEYWORD_break BreakLabel? Expr?
- /// / KEYWORD_comptime Expr
- /// / KEYWORD_nosuspend Expr
- /// / KEYWORD_continue BreakLabel?
- /// / KEYWORD_resume Expr
- /// / KEYWORD_return Expr?
- /// / BlockLabel? LoopExpr
- /// / Block
- /// / CurlySuffixExpr
- fn parsePrimaryExpr(p: *Parser) !Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .keyword_asm => return p.expectAsmExpr(),
- .keyword_if => return p.parseIfExpr(),
- .keyword_break => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"break",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.parseBreakLabel(),
- .rhs = try p.parseExpr(),
- },
- });
- },
- .keyword_continue => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"continue",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.parseBreakLabel(),
- .rhs = undefined,
- },
- });
- },
- .keyword_comptime => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"comptime",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = undefined,
- },
- });
- },
- .keyword_nosuspend => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"nosuspend",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = undefined,
- },
- });
- },
- .keyword_resume => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"resume",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = undefined,
- },
- });
- },
- .keyword_return => {
- p.tok_i += 1;
- return p.addNode(.{
- .tag = .@"return",
- .main_token = p.tok_i - 1,
- .data = .{
- .lhs = try p.parseExpr(),
- .rhs = undefined,
- },
- });
- },
- .identifier => {
- if (p.token_tags[p.tok_i + 1] == .colon) {
- switch (p.token_tags[p.tok_i + 2]) {
- .keyword_inline => {
- p.tok_i += 3;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForExpr(),
- .keyword_while => return p.parseWhileExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => {
- p.tok_i += 2;
- return p.parseForExpr();
- },
- .keyword_while => {
- p.tok_i += 2;
- return p.parseWhileExpr();
- },
- .l_brace => {
- p.tok_i += 2;
- return p.parseBlock();
- },
- else => return p.parseCurlySuffixExpr(),
- }
- } else {
- return p.parseCurlySuffixExpr();
- }
- },
- .keyword_inline => {
- p.tok_i += 1;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForExpr(),
- .keyword_while => return p.parseWhileExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => return p.parseForExpr(),
- .keyword_while => return p.parseWhileExpr(),
- .l_brace => return p.parseBlock(),
- else => return p.parseCurlySuffixExpr(),
- }
- }
-
- /// IfExpr <- IfPrefix Expr (KEYWORD_else Payload? Expr)?
- fn parseIfExpr(p: *Parser) !Node.Index {
- return p.parseIf(expectExpr);
- }
-
- /// Block <- LBRACE Statement* RBRACE
- fn parseBlock(p: *Parser) !Node.Index {
- const lbrace = p.eatToken(.l_brace) orelse return null_node;
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.token_tags[p.tok_i] == .r_brace) break;
- const statement = try p.expectStatementRecoverable();
- if (statement == 0) break;
- try p.scratch.append(p.gpa, statement);
- }
- _ = try p.expectToken(.r_brace);
- const semicolon = (p.token_tags[p.tok_i - 2] == .semicolon);
- const statements = p.scratch.items[scratch_top..];
- switch (statements.len) {
- 0 => return p.addNode(.{
- .tag = .block_two,
- .main_token = lbrace,
- .data = .{
- .lhs = 0,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (semicolon) .block_two_semicolon else .block_two,
- .main_token = lbrace,
- .data = .{
- .lhs = statements[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (semicolon) .block_two_semicolon else .block_two,
- .main_token = lbrace,
- .data = .{
- .lhs = statements[0],
- .rhs = statements[1],
- },
- }),
- else => {
- const span = try p.listToSpan(statements);
- return p.addNode(.{
- .tag = if (semicolon) .block_semicolon else .block,
- .main_token = lbrace,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- }
-
- /// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
- ///
- /// ForExpr <- ForPrefix Expr (KEYWORD_else Expr)?
- fn parseForExpr(p: *Parser) !Node.Index {
- const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
-
- const then_expr = try p.expectExpr();
- _ = p.eatToken(.keyword_else) orelse {
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = then_expr,
- },
- });
- };
- const else_expr = try p.expectExpr();
- return p.addNode(.{
- .tag = .@"for",
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
- ///
- /// WhileExpr <- WhilePrefix Expr (KEYWORD_else Payload? Expr)?
- fn parseWhileExpr(p: *Parser) !Node.Index {
- const while_token = p.eatToken(.keyword_while) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
- const cont_expr = try p.parseWhileContinueExpr();
-
- const then_expr = try p.expectExpr();
- _ = p.eatToken(.keyword_else) orelse {
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- }),
- },
- });
- }
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectExpr();
- return p.addNode(.{
- .tag = .@"while",
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.While{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// CurlySuffixExpr <- TypeExpr InitList?
- ///
- /// InitList
- /// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
- /// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
- /// / LBRACE RBRACE
- fn parseCurlySuffixExpr(p: *Parser) !Node.Index {
- const lhs = try p.parseTypeExpr();
- if (lhs == 0) return null_node;
- const lbrace = p.eatToken(.l_brace) orelse return lhs;
-
- // If there are 0 or 1 items, we can use ArrayInitOne/StructInitOne;
- // otherwise we use the full ArrayInit/StructInit.
-
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- const field_init = try p.parseFieldInit();
- if (field_init != 0) {
- try p.scratch.append(p.gpa, field_init);
- while (true) {
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- if (p.eatToken(.r_brace)) |_| break;
- const next = try p.expectFieldInit();
- try p.scratch.append(p.gpa, next);
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => unreachable,
- 1 => return p.addNode(.{
- .tag = if (comma) .struct_init_one_comma else .struct_init_one,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = inits[0],
- },
- }),
- else => return p.addNode(.{
- .tag = if (comma) .struct_init_comma else .struct_init,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(try p.listToSpan(inits)),
- },
- }),
- }
- }
-
- while (true) {
- if (p.eatToken(.r_brace)) |_| break;
- const elem_init = try p.expectExpr();
- try p.scratch.append(p.gpa, elem_init);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => return p.addNode(.{
- .tag = .struct_init_one,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .array_init_one_comma else .array_init_one,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = inits[0],
- },
- }),
- else => return p.addNode(.{
- .tag = if (comma) .array_init_comma else .array_init,
- .main_token = lbrace,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(try p.listToSpan(inits)),
- },
- }),
- }
- }
-
- /// ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
- fn parseErrorUnionExpr(p: *Parser) !Node.Index {
- const suffix_expr = try p.parseSuffixExpr();
- if (suffix_expr == 0) return null_node;
- const bang = p.eatToken(.bang) orelse return suffix_expr;
- return p.addNode(.{
- .tag = .error_union,
- .main_token = bang,
- .data = .{
- .lhs = suffix_expr,
- .rhs = try p.expectTypeExpr(),
- },
- });
- }
-
- /// SuffixExpr
- /// <- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
- /// / PrimaryTypeExpr (SuffixOp / FnCallArguments)*
- ///
- /// FnCallArguments <- LPAREN ExprList RPAREN
- ///
- /// ExprList <- (Expr COMMA)* Expr?
- fn parseSuffixExpr(p: *Parser) !Node.Index {
- if (p.eatToken(.keyword_async)) |_| {
- var res = try p.expectPrimaryTypeExpr();
- while (true) {
- const node = try p.parseSuffixOp(res);
- if (node == 0) break;
- res = node;
- }
- const lparen = p.eatToken(.l_paren) orelse {
- try p.warn(.expected_param_list);
- return res;
- };
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- const param = try p.expectExpr();
- try p.scratch.append(p.gpa, param);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_arg),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const params = p.scratch.items[scratch_top..];
- switch (params.len) {
- 0 => return p.addNode(.{
- .tag = if (comma) .async_call_one_comma else .async_call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .async_call_one_comma else .async_call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = params[0],
- },
- }),
- else => return p.addNode(.{
- .tag = if (comma) .async_call_comma else .async_call,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = try p.addExtra(try p.listToSpan(params)),
- },
- }),
- }
- }
-
- var res = try p.parsePrimaryTypeExpr();
- if (res == 0) return res;
- while (true) {
- const suffix_op = try p.parseSuffixOp(res);
- if (suffix_op != 0) {
- res = suffix_op;
- continue;
- }
- const lparen = p.eatToken(.l_paren) orelse return res;
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- const param = try p.expectExpr();
- try p.scratch.append(p.gpa, param);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_arg),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const params = p.scratch.items[scratch_top..];
- res = switch (params.len) {
- 0 => try p.addNode(.{
- .tag = if (comma) .call_one_comma else .call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = 0,
- },
- }),
- 1 => try p.addNode(.{
- .tag = if (comma) .call_one_comma else .call_one,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = params[0],
- },
- }),
- else => try p.addNode(.{
- .tag = if (comma) .call_comma else .call,
- .main_token = lparen,
- .data = .{
- .lhs = res,
- .rhs = try p.addExtra(try p.listToSpan(params)),
- },
- }),
- };
- }
- }
-
- /// PrimaryTypeExpr
- /// <- BUILTINIDENTIFIER FnCallArguments
- /// / CHAR_LITERAL
- /// / ContainerDecl
- /// / DOT IDENTIFIER
- /// / DOT InitList
- /// / ErrorSetDecl
- /// / FLOAT
- /// / FnProto
- /// / GroupedExpr
- /// / LabeledTypeExpr
- /// / IDENTIFIER
- /// / IfTypeExpr
- /// / INTEGER
- /// / KEYWORD_comptime TypeExpr
- /// / KEYWORD_error DOT IDENTIFIER
- /// / KEYWORD_anyframe
- /// / KEYWORD_unreachable
- /// / STRINGLITERAL
- /// / SwitchExpr
- ///
- /// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)? ContainerDeclAuto
- ///
- /// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
- ///
- /// InitList
- /// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE
- /// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE
- /// / LBRACE RBRACE
- ///
- /// ErrorSetDecl <- KEYWORD_error LBRACE IdentifierList RBRACE
- ///
- /// GroupedExpr <- LPAREN Expr RPAREN
- ///
- /// IfTypeExpr <- IfPrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
- ///
- /// LabeledTypeExpr
- /// <- BlockLabel Block
- /// / BlockLabel? LoopTypeExpr
- ///
- /// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr)
- fn parsePrimaryTypeExpr(p: *Parser) !Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .char_literal => return p.addNode(.{
- .tag = .char_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .number_literal => return p.addNode(.{
- .tag = .number_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .keyword_unreachable => return p.addNode(.{
- .tag = .unreachable_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .keyword_anyframe => return p.addNode(.{
- .tag = .anyframe_literal,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- .string_literal => {
- const main_token = p.nextToken();
- return p.addNode(.{
- .tag = .string_literal,
- .main_token = main_token,
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- });
- },
-
- .builtin => return p.parseBuiltinCall(),
- .keyword_fn => return p.parseFnProto(),
- .keyword_if => return p.parseIf(expectTypeExpr),
- .keyword_switch => return p.expectSwitchExpr(),
-
- .keyword_extern,
- .keyword_packed,
- => {
- p.tok_i += 1;
- return p.parseContainerDeclAuto();
- },
-
- .keyword_struct,
- .keyword_opaque,
- .keyword_enum,
- .keyword_union,
- => return p.parseContainerDeclAuto(),
-
- .keyword_comptime => return p.addNode(.{
- .tag = .@"comptime",
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectTypeExpr(),
- .rhs = undefined,
- },
- }),
- .multiline_string_literal_line => {
- const first_line = p.nextToken();
- while (p.token_tags[p.tok_i] == .multiline_string_literal_line) {
- p.tok_i += 1;
- }
- return p.addNode(.{
- .tag = .multiline_string_literal,
- .main_token = first_line,
- .data = .{
- .lhs = first_line,
- .rhs = p.tok_i - 1,
- },
- });
- },
- .identifier => switch (p.token_tags[p.tok_i + 1]) {
- .colon => switch (p.token_tags[p.tok_i + 2]) {
- .keyword_inline => {
- p.tok_i += 3;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForTypeExpr(),
- .keyword_while => return p.parseWhileTypeExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => {
- p.tok_i += 2;
- return p.parseForTypeExpr();
- },
- .keyword_while => {
- p.tok_i += 2;
- return p.parseWhileTypeExpr();
- },
- .l_brace => {
- p.tok_i += 2;
- return p.parseBlock();
- },
- else => return p.addNode(.{
- .tag = .identifier,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- },
- else => return p.addNode(.{
- .tag = .identifier,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- }),
- },
- .keyword_inline => {
- p.tok_i += 1;
- switch (p.token_tags[p.tok_i]) {
- .keyword_for => return p.parseForTypeExpr(),
- .keyword_while => return p.parseWhileTypeExpr(),
- else => return p.fail(.expected_inlinable),
- }
- },
- .keyword_for => return p.parseForTypeExpr(),
- .keyword_while => return p.parseWhileTypeExpr(),
- .period => switch (p.token_tags[p.tok_i + 1]) {
- .identifier => return p.addNode(.{
- .tag = .enum_literal,
- .data = .{
- .lhs = p.nextToken(), // dot
- .rhs = undefined,
- },
- .main_token = p.nextToken(), // identifier
- }),
- .l_brace => {
- const lbrace = p.tok_i + 1;
- p.tok_i = lbrace + 1;
-
- // If there are 0, 1, or 2 items, we can use ArrayInitDotTwo/StructInitDotTwo;
- // otherwise we use the full ArrayInitDot/StructInitDot.
-
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- const field_init = try p.parseFieldInit();
- if (field_init != 0) {
- try p.scratch.append(p.gpa, field_init);
- while (true) {
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- if (p.eatToken(.r_brace)) |_| break;
- const next = try p.expectFieldInit();
- try p.scratch.append(p.gpa, next);
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => unreachable,
- 1 => return p.addNode(.{
- .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = inits[1],
- },
- }),
- else => {
- const span = try p.listToSpan(inits);
- return p.addNode(.{
- .tag = if (comma) .struct_init_dot_comma else .struct_init_dot,
- .main_token = lbrace,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- }
-
- while (true) {
- if (p.eatToken(.r_brace)) |_| break;
- const elem_init = try p.expectExpr();
- try p.scratch.append(p.gpa, elem_init);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_initializer),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const inits = p.scratch.items[scratch_top..];
- switch (inits.len) {
- 0 => return p.addNode(.{
- .tag = .struct_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = 0,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two,
- .main_token = lbrace,
- .data = .{
- .lhs = inits[0],
- .rhs = inits[1],
- },
- }),
- else => {
- const span = try p.listToSpan(inits);
- return p.addNode(.{
- .tag = if (comma) .array_init_dot_comma else .array_init_dot,
- .main_token = lbrace,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- },
- else => return null_node,
- },
- .keyword_error => switch (p.token_tags[p.tok_i + 1]) {
- .l_brace => {
- const error_token = p.tok_i;
- p.tok_i += 2;
- while (true) {
- if (p.eatToken(.r_brace)) |_| break;
- _ = try p.eatDocComments();
- _ = try p.expectToken(.identifier);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_brace => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_field),
- }
- }
- return p.addNode(.{
- .tag = .error_set_decl,
- .main_token = error_token,
- .data = .{
- .lhs = undefined,
- .rhs = p.tok_i - 1, // rbrace
- },
- });
- },
- else => {
- const main_token = p.nextToken();
- const period = p.eatToken(.period);
- if (period == null) try p.warnExpected(.period);
- const identifier = p.eatToken(.identifier);
- if (identifier == null) try p.warnExpected(.identifier);
- return p.addNode(.{
- .tag = .error_value,
- .main_token = main_token,
- .data = .{
- .lhs = period orelse 0,
- .rhs = identifier orelse 0,
- },
- });
- },
- },
- .l_paren => return p.addNode(.{
- .tag = .grouped_expression,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = try p.expectExpr(),
- .rhs = try p.expectToken(.r_paren),
- },
- }),
- else => return null_node,
- }
- }
-
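// Editor's aside (illustrative sketch, not part of this patch): the comments in
// parseCurlySuffixExpr and parsePrimaryTypeExpr above explain that init lists with
// only a few elements get compact node encodings instead of the span-based form.
// Assuming the std.zig.Ast API used elsewhere in this diff, that is observable like so:

const std = @import("std");

test "two-field anonymous init gets the compact dot_two encoding" {
    const gpa = std.testing.allocator;
    var tree = try std.zig.Ast.parse(gpa, "const v = .{ .a = 1, .b = 2 };", .zig);
    defer tree.deinit(gpa);
    var found = false;
    for (tree.nodes.items(.tag)) |tag| {
        // Two field inits and no trailing comma map to .struct_init_dot_two.
        if (tag == .struct_init_dot_two) found = true;
    }
    try std.testing.expect(found);
}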
- fn expectPrimaryTypeExpr(p: *Parser) !Node.Index {
- const node = try p.parsePrimaryTypeExpr();
- if (node == 0) {
- return p.fail(.expected_primary_type_expr);
- }
- return node;
- }
-
- /// ForPrefix <- KEYWORD_for LPAREN Expr RPAREN PtrIndexPayload
- ///
- /// ForTypeExpr <- ForPrefix TypeExpr (KEYWORD_else TypeExpr)?
- fn parseForTypeExpr(p: *Parser) !Node.Index {
- const for_token = p.eatToken(.keyword_for) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const array_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- const found_payload = try p.parsePtrIndexPayload();
- if (found_payload == 0) try p.warn(.expected_loop_payload);
-
- const then_expr = try p.expectTypeExpr();
- _ = p.eatToken(.keyword_else) orelse {
- return p.addNode(.{
- .tag = .for_simple,
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = then_expr,
- },
- });
- };
- const else_expr = try p.expectTypeExpr();
- return p.addNode(.{
- .tag = .@"for",
- .main_token = for_token,
- .data = .{
- .lhs = array_expr,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// WhilePrefix <- KEYWORD_while LPAREN Expr RPAREN PtrPayload? WhileContinueExpr?
- ///
- /// WhileTypeExpr <- WhilePrefix TypeExpr (KEYWORD_else Payload? TypeExpr)?
- fn parseWhileTypeExpr(p: *Parser) !Node.Index {
- const while_token = p.eatToken(.keyword_while) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
- const cont_expr = try p.parseWhileContinueExpr();
-
- const then_expr = try p.expectTypeExpr();
- _ = p.eatToken(.keyword_else) orelse {
- if (cont_expr == 0) {
- return p.addNode(.{
- .tag = .while_simple,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- } else {
- return p.addNode(.{
- .tag = .while_cont,
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.WhileCont{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- }),
- },
- });
- }
- };
- _ = try p.parsePayload();
- const else_expr = try p.expectTypeExpr();
- return p.addNode(.{
- .tag = .@"while",
- .main_token = while_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.While{
- .cont_expr = cont_expr,
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE
- fn expectSwitchExpr(p: *Parser) !Node.Index {
- const switch_token = p.assertToken(.keyword_switch);
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.expectToken(.l_brace);
- const cases = try p.parseSwitchProngList();
- const trailing_comma = p.token_tags[p.tok_i - 1] == .comma;
- _ = try p.expectToken(.r_brace);
-
- return p.addNode(.{
- .tag = if (trailing_comma) .switch_comma else .@"switch",
- .main_token = switch_token,
- .data = .{
- .lhs = expr_node,
- .rhs = try p.addExtra(Node.SubRange{
- .start = cases.start,
- .end = cases.end,
- }),
- },
- });
- }
-
- /// AsmExpr <- KEYWORD_asm KEYWORD_volatile? LPAREN Expr AsmOutput? RPAREN
- ///
- /// AsmOutput <- COLON AsmOutputList AsmInput?
- ///
- /// AsmInput <- COLON AsmInputList AsmClobbers?
- ///
- /// AsmClobbers <- COLON StringList
- ///
- /// StringList <- (STRINGLITERAL COMMA)* STRINGLITERAL?
- ///
- /// AsmOutputList <- (AsmOutputItem COMMA)* AsmOutputItem?
- ///
- /// AsmInputList <- (AsmInputItem COMMA)* AsmInputItem?
- fn expectAsmExpr(p: *Parser) !Node.Index {
- const asm_token = p.assertToken(.keyword_asm);
- _ = p.eatToken(.keyword_volatile);
- _ = try p.expectToken(.l_paren);
- const template = try p.expectExpr();
-
- if (p.eatToken(.r_paren)) |rparen| {
- return p.addNode(.{
- .tag = .asm_simple,
- .main_token = asm_token,
- .data = .{
- .lhs = template,
- .rhs = rparen,
- },
- });
- }
-
- _ = try p.expectToken(.colon);
-
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- while (true) {
- const output_item = try p.parseAsmOutputItem();
- if (output_item == 0) break;
- try p.scratch.append(p.gpa, output_item);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- // All possible delimiters.
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
- }
- }
- if (p.eatToken(.colon)) |_| {
- while (true) {
- const input_item = try p.parseAsmInputItem();
- if (input_item == 0) break;
- try p.scratch.append(p.gpa, input_item);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- // All possible delimiters.
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
- }
- }
- if (p.eatToken(.colon)) |_| {
- while (p.eatToken(.string_literal)) |_| {
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warnExpected(.comma),
- }
- }
- }
- }
- const rparen = try p.expectToken(.r_paren);
- const span = try p.listToSpan(p.scratch.items[scratch_top..]);
- return p.addNode(.{
- .tag = .@"asm",
- .main_token = asm_token,
- .data = .{
- .lhs = template,
- .rhs = try p.addExtra(Node.Asm{
- .items_start = span.start,
- .items_end = span.end,
- .rparen = rparen,
- }),
- },
- });
- }
-
- /// AsmOutputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN (MINUSRARROW TypeExpr / IDENTIFIER) RPAREN
- fn parseAsmOutputItem(p: *Parser) !Node.Index {
- _ = p.eatToken(.l_bracket) orelse return null_node;
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.r_bracket);
- _ = try p.expectToken(.string_literal);
- _ = try p.expectToken(.l_paren);
- const type_expr: Node.Index = blk: {
- if (p.eatToken(.arrow)) |_| {
- break :blk try p.expectTypeExpr();
- } else {
- _ = try p.expectToken(.identifier);
- break :blk null_node;
- }
- };
- const rparen = try p.expectToken(.r_paren);
- return p.addNode(.{
- .tag = .asm_output,
- .main_token = identifier,
- .data = .{
- .lhs = type_expr,
- .rhs = rparen,
- },
- });
- }
-
- /// AsmInputItem <- LBRACKET IDENTIFIER RBRACKET STRINGLITERAL LPAREN Expr RPAREN
- fn parseAsmInputItem(p: *Parser) !Node.Index {
- _ = p.eatToken(.l_bracket) orelse return null_node;
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.r_bracket);
- _ = try p.expectToken(.string_literal);
- _ = try p.expectToken(.l_paren);
- const expr = try p.expectExpr();
- const rparen = try p.expectToken(.r_paren);
- return p.addNode(.{
- .tag = .asm_input,
- .main_token = identifier,
- .data = .{
- .lhs = expr,
- .rhs = rparen,
- },
- });
- }
-
- /// BreakLabel <- COLON IDENTIFIER
- fn parseBreakLabel(p: *Parser) !TokenIndex {
- _ = p.eatToken(.colon) orelse return @as(TokenIndex, 0);
- return p.expectToken(.identifier);
- }
-
- /// BlockLabel <- IDENTIFIER COLON
- fn parseBlockLabel(p: *Parser) TokenIndex {
- if (p.token_tags[p.tok_i] == .identifier and
- p.token_tags[p.tok_i + 1] == .colon)
- {
- const identifier = p.tok_i;
- p.tok_i += 2;
- return identifier;
- }
- return null_node;
- }
-
- /// FieldInit <- DOT IDENTIFIER EQUAL Expr
- fn parseFieldInit(p: *Parser) !Node.Index {
- if (p.token_tags[p.tok_i + 0] == .period and
- p.token_tags[p.tok_i + 1] == .identifier and
- p.token_tags[p.tok_i + 2] == .equal)
- {
- p.tok_i += 3;
- return p.expectExpr();
- } else {
- return null_node;
- }
- }
-
- fn expectFieldInit(p: *Parser) !Node.Index {
- if (p.token_tags[p.tok_i] != .period or
- p.token_tags[p.tok_i + 1] != .identifier or
- p.token_tags[p.tok_i + 2] != .equal)
- return p.fail(.expected_initializer);
-
- p.tok_i += 3;
- return p.expectExpr();
- }
-
- /// WhileContinueExpr <- COLON LPAREN AssignExpr RPAREN
- fn parseWhileContinueExpr(p: *Parser) !Node.Index {
- _ = p.eatToken(.colon) orelse {
- if (p.token_tags[p.tok_i] == .l_paren and
- p.tokensOnSameLine(p.tok_i - 1, p.tok_i))
- return p.fail(.expected_continue_expr);
- return null_node;
- };
- _ = try p.expectToken(.l_paren);
- const node = try p.parseAssignExpr();
- if (node == 0) return p.fail(.expected_expr_or_assignment);
- _ = try p.expectToken(.r_paren);
- return node;
- }
-
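// Editor's aside (illustrative, not part of this patch): parseWhileContinueExpr handles
// the optional ": (...)" clause of a while loop. The hypothetical helper below shows the
// surface syntax it accepts:

fn sumBelow(n: usize) usize {
    var i: usize = 0;
    var sum: usize = 0;
    while (i < n) : (i += 1) { // "(i += 1)" is the WhileContinueExpr parsed above
        sum += i;
    }
    return sum;
}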
- /// LinkSection <- KEYWORD_linksection LPAREN Expr RPAREN
- fn parseLinkSection(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_linksection) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr_node;
- }
-
- /// CallConv <- KEYWORD_callconv LPAREN Expr RPAREN
- fn parseCallconv(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_callconv) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr_node;
- }
-
- /// AddrSpace <- KEYWORD_addrspace LPAREN Expr RPAREN
- fn parseAddrSpace(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_addrspace) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr_node = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr_node;
- }
-
- /// This function can return null nodes and then still return nodes afterwards,
- /// such as in the case of anytype and `...`. Caller must look for rparen to find
- /// out when there are no more param decls left.
- ///
- /// ParamDecl
- /// <- doc_comment? (KEYWORD_noalias / KEYWORD_comptime)? (IDENTIFIER COLON)? ParamType
- /// / DOT3
- ///
- /// ParamType
- /// <- KEYWORD_anytype
- /// / TypeExpr
- fn expectParamDecl(p: *Parser) !Node.Index {
- _ = try p.eatDocComments();
- switch (p.token_tags[p.tok_i]) {
- .keyword_noalias, .keyword_comptime => p.tok_i += 1,
- .ellipsis3 => {
- p.tok_i += 1;
- return null_node;
- },
- else => {},
- }
- if (p.token_tags[p.tok_i] == .identifier and
- p.token_tags[p.tok_i + 1] == .colon)
- {
- p.tok_i += 2;
- }
- switch (p.token_tags[p.tok_i]) {
- .keyword_anytype => {
- p.tok_i += 1;
- return null_node;
- },
- else => return p.expectTypeExpr(),
- }
- }
-
- /// Payload <- PIPE IDENTIFIER PIPE
- fn parsePayload(p: *Parser) !TokenIndex {
- _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.pipe);
- return identifier;
- }
-
- /// PtrPayload <- PIPE ASTERISK? IDENTIFIER PIPE
- fn parsePtrPayload(p: *Parser) !TokenIndex {
- _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
- _ = p.eatToken(.asterisk);
- const identifier = try p.expectToken(.identifier);
- _ = try p.expectToken(.pipe);
- return identifier;
- }
-
- /// Returns the first identifier token, if any.
- ///
- /// PtrIndexPayload <- PIPE ASTERISK? IDENTIFIER (COMMA IDENTIFIER)? PIPE
- fn parsePtrIndexPayload(p: *Parser) !TokenIndex {
- _ = p.eatToken(.pipe) orelse return @as(TokenIndex, 0);
- _ = p.eatToken(.asterisk);
- const identifier = try p.expectToken(.identifier);
- if (p.eatToken(.comma) != null) {
- _ = try p.expectToken(.identifier);
- }
- _ = try p.expectToken(.pipe);
- return identifier;
- }
-
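// Editor's aside (illustrative, not part of this patch): PtrIndexPayload is the
// "|*item, i|" capture used by for loops in the single-operand syntax current at the
// time of this change. A hypothetical helper showing both captures:

fn fillWithIndices(items: []usize) void {
    for (items) |*item, i| {
        item.* = i; // the pointer capture writes through the slice; i is the index
    }
}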
- /// SwitchProng <- KEYWORD_inline? SwitchCase EQUALRARROW PtrIndexPayload? AssignExpr
- ///
- /// SwitchCase
- /// <- SwitchItem (COMMA SwitchItem)* COMMA?
- /// / KEYWORD_else
- fn parseSwitchProng(p: *Parser) !Node.Index {
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- const is_inline = p.eatToken(.keyword_inline) != null;
-
- if (p.eatToken(.keyword_else) == null) {
- while (true) {
- const item = try p.parseSwitchItem();
- if (item == 0) break;
- try p.scratch.append(p.gpa, item);
- if (p.eatToken(.comma) == null) break;
- }
- if (scratch_top == p.scratch.items.len) {
- if (is_inline) p.tok_i -= 1;
- return null_node;
- }
- }
- const arrow_token = try p.expectToken(.equal_angle_bracket_right);
- _ = try p.parsePtrIndexPayload();
-
- const items = p.scratch.items[scratch_top..];
- switch (items.len) {
- 0 => return p.addNode(.{
- .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
- .main_token = arrow_token,
- .data = .{
- .lhs = 0,
- .rhs = try p.expectAssignExpr(),
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (is_inline) .switch_case_inline_one else .switch_case_one,
- .main_token = arrow_token,
- .data = .{
- .lhs = items[0],
- .rhs = try p.expectAssignExpr(),
- },
- }),
- else => return p.addNode(.{
- .tag = if (is_inline) .switch_case_inline else .switch_case,
- .main_token = arrow_token,
- .data = .{
- .lhs = try p.addExtra(try p.listToSpan(items)),
- .rhs = try p.expectAssignExpr(),
- },
- }),
- }
- }
-
- /// SwitchItem <- Expr (DOT3 Expr)?
- fn parseSwitchItem(p: *Parser) !Node.Index {
- const expr = try p.parseExpr();
- if (expr == 0) return null_node;
-
- if (p.eatToken(.ellipsis3)) |token| {
- return p.addNode(.{
- .tag = .switch_range,
- .main_token = token,
- .data = .{
- .lhs = expr,
- .rhs = try p.expectExpr(),
- },
- });
- }
- return expr;
- }
-
- const PtrModifiers = struct {
- align_node: Node.Index,
- addrspace_node: Node.Index,
- bit_range_start: Node.Index,
- bit_range_end: Node.Index,
- };
-
- fn parsePtrModifiers(p: *Parser) !PtrModifiers {
- var result: PtrModifiers = .{
- .align_node = 0,
- .addrspace_node = 0,
- .bit_range_start = 0,
- .bit_range_end = 0,
- };
- var saw_const = false;
- var saw_volatile = false;
- var saw_allowzero = false;
- var saw_addrspace = false;
- while (true) {
- switch (p.token_tags[p.tok_i]) {
- .keyword_align => {
- if (result.align_node != 0) {
- try p.warn(.extra_align_qualifier);
- }
- p.tok_i += 1;
- _ = try p.expectToken(.l_paren);
- result.align_node = try p.expectExpr();
-
- if (p.eatToken(.colon)) |_| {
- result.bit_range_start = try p.expectExpr();
- _ = try p.expectToken(.colon);
- result.bit_range_end = try p.expectExpr();
- }
-
- _ = try p.expectToken(.r_paren);
- },
- .keyword_const => {
- if (saw_const) {
- try p.warn(.extra_const_qualifier);
- }
- p.tok_i += 1;
- saw_const = true;
- },
- .keyword_volatile => {
- if (saw_volatile) {
- try p.warn(.extra_volatile_qualifier);
- }
- p.tok_i += 1;
- saw_volatile = true;
- },
- .keyword_allowzero => {
- if (saw_allowzero) {
- try p.warn(.extra_allowzero_qualifier);
- }
- p.tok_i += 1;
- saw_allowzero = true;
- },
- .keyword_addrspace => {
- if (saw_addrspace) {
- try p.warn(.extra_addrspace_qualifier);
- }
- result.addrspace_node = try p.parseAddrSpace();
- },
- else => return result,
- }
- }
- }
-
- /// SuffixOp
- /// <- LBRACKET Expr (DOT2 (Expr? (COLON Expr)?)?)? RBRACKET
- /// / DOT IDENTIFIER
- /// / DOTASTERISK
- /// / DOTQUESTIONMARK
- fn parseSuffixOp(p: *Parser, lhs: Node.Index) !Node.Index {
- switch (p.token_tags[p.tok_i]) {
- .l_bracket => {
- const lbracket = p.nextToken();
- const index_expr = try p.expectExpr();
-
- if (p.eatToken(.ellipsis2)) |_| {
- const end_expr = try p.parseExpr();
- if (p.eatToken(.colon)) |_| {
- const sentinel = try p.expectExpr();
- _ = try p.expectToken(.r_bracket);
- return p.addNode(.{
- .tag = .slice_sentinel,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(Node.SliceSentinel{
- .start = index_expr,
- .end = end_expr,
- .sentinel = sentinel,
- }),
- },
- });
- }
- _ = try p.expectToken(.r_bracket);
- if (end_expr == 0) {
- return p.addNode(.{
- .tag = .slice_open,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = index_expr,
- },
- });
- }
- return p.addNode(.{
- .tag = .slice,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = try p.addExtra(Node.Slice{
- .start = index_expr,
- .end = end_expr,
- }),
- },
- });
- }
- _ = try p.expectToken(.r_bracket);
- return p.addNode(.{
- .tag = .array_access,
- .main_token = lbracket,
- .data = .{
- .lhs = lhs,
- .rhs = index_expr,
- },
- });
- },
- .period_asterisk => return p.addNode(.{
- .tag = .deref,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = undefined,
- },
- }),
- .invalid_periodasterisks => {
- try p.warn(.asterisk_after_ptr_deref);
- return p.addNode(.{
- .tag = .deref,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = undefined,
- },
- });
- },
- .period => switch (p.token_tags[p.tok_i + 1]) {
- .identifier => return p.addNode(.{
- .tag = .field_access,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = p.nextToken(),
- },
- }),
- .question_mark => return p.addNode(.{
- .tag = .unwrap_optional,
- .main_token = p.nextToken(),
- .data = .{
- .lhs = lhs,
- .rhs = p.nextToken(),
- },
- }),
- .l_brace => {
- // this is a misplaced `.{`, handle the error somewhere else
- return null_node;
- },
- else => {
- p.tok_i += 1;
- try p.warn(.expected_suffix_op);
- return null_node;
- },
- },
- else => return null_node,
- }
- }
-
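// Editor's aside (illustrative, not part of this patch): the suffix operators handled
// above correspond to these surface forms, collected in a hypothetical helper:

fn suffixForms(s: []const u8, opt: ?*const u8) void {
    _ = s[0]; // array_access
    _ = s[1..]; // slice_open
    _ = s[1..2]; // slice (adding ":sentinel" inside the brackets gives slice_sentinel)
    _ = opt.?; // unwrap_optional
    _ = opt.?.*; // deref after unwrapping
    _ = s.len; // field_access
}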
- /// Caller must have already verified the first token.
- ///
- /// ContainerDeclAuto <- ContainerDeclType LBRACE container_doc_comment? ContainerMembers RBRACE
- ///
- /// ContainerDeclType
- /// <- KEYWORD_struct (LPAREN Expr RPAREN)?
- /// / KEYWORD_opaque
- /// / KEYWORD_enum (LPAREN Expr RPAREN)?
- /// / KEYWORD_union (LPAREN (KEYWORD_enum (LPAREN Expr RPAREN)? / Expr) RPAREN)?
- fn parseContainerDeclAuto(p: *Parser) !Node.Index {
- const main_token = p.nextToken();
- const arg_expr = switch (p.token_tags[main_token]) {
- .keyword_opaque => null_node,
- .keyword_struct, .keyword_enum => blk: {
- if (p.eatToken(.l_paren)) |_| {
- const expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- break :blk expr;
- } else {
- break :blk null_node;
- }
- },
- .keyword_union => blk: {
- if (p.eatToken(.l_paren)) |_| {
- if (p.eatToken(.keyword_enum)) |_| {
- if (p.eatToken(.l_paren)) |_| {
- const enum_tag_expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.expectToken(.r_paren);
-
- _ = try p.expectToken(.l_brace);
- const members = try p.parseContainerMembers();
- const members_span = try members.toSpan(p);
- _ = try p.expectToken(.r_brace);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .tagged_union_enum_tag_trailing,
- false => .tagged_union_enum_tag,
- },
- .main_token = main_token,
- .data = .{
- .lhs = enum_tag_expr,
- .rhs = try p.addExtra(members_span),
- },
- });
- } else {
- _ = try p.expectToken(.r_paren);
-
- _ = try p.expectToken(.l_brace);
- const members = try p.parseContainerMembers();
- _ = try p.expectToken(.r_brace);
- if (members.len <= 2) {
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .tagged_union_two_trailing,
- false => .tagged_union_two,
- },
- .main_token = main_token,
- .data = .{
- .lhs = members.lhs,
- .rhs = members.rhs,
- },
- });
- } else {
- const span = try members.toSpan(p);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .tagged_union_trailing,
- false => .tagged_union,
- },
- .main_token = main_token,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- }
- }
- } else {
- const expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- break :blk expr;
- }
- } else {
- break :blk null_node;
- }
- },
- else => {
- p.tok_i -= 1;
- return p.fail(.expected_container);
- },
- };
- _ = try p.expectToken(.l_brace);
- const members = try p.parseContainerMembers();
- _ = try p.expectToken(.r_brace);
- if (arg_expr == 0) {
- if (members.len <= 2) {
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .container_decl_two_trailing,
- false => .container_decl_two,
- },
- .main_token = main_token,
- .data = .{
- .lhs = members.lhs,
- .rhs = members.rhs,
- },
- });
- } else {
- const span = try members.toSpan(p);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .container_decl_trailing,
- false => .container_decl,
- },
- .main_token = main_token,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- }
- } else {
- const span = try members.toSpan(p);
- return p.addNode(.{
- .tag = switch (members.trailing) {
- true => .container_decl_arg_trailing,
- false => .container_decl_arg,
- },
- .main_token = main_token,
- .data = .{
- .lhs = arg_expr,
- .rhs = try p.addExtra(Node.SubRange{
- .start = span.start,
- .end = span.end,
- }),
- },
- });
- }
- }
-
- /// Give a helpful error message for those transitioning from
- /// C's 'struct Foo {};' to Zig's 'const Foo = struct {};'.
- fn parseCStyleContainer(p: *Parser) Error!bool {
- const main_token = p.tok_i;
- switch (p.token_tags[p.tok_i]) {
- .keyword_enum, .keyword_union, .keyword_struct => {},
- else => return false,
- }
- const identifier = p.tok_i + 1;
- if (p.token_tags[identifier] != .identifier) return false;
- p.tok_i += 2;
-
- try p.warnMsg(.{
- .tag = .c_style_container,
- .token = identifier,
- .extra = .{ .expected_tag = p.token_tags[main_token] },
- });
- try p.warnMsg(.{
- .tag = .zig_style_container,
- .is_note = true,
- .token = identifier,
- .extra = .{ .expected_tag = p.token_tags[main_token] },
- });
-
- _ = try p.expectToken(.l_brace);
- _ = try p.parseContainerMembers();
- _ = try p.expectToken(.r_brace);
- try p.expectSemicolon(.expected_semi_after_decl, true);
- return true;
- }
-
- /// Holds temporary data until we are ready to construct the full ContainerDecl AST node.
- ///
- /// ByteAlign <- KEYWORD_align LPAREN Expr RPAREN
- fn parseByteAlign(p: *Parser) !Node.Index {
- _ = p.eatToken(.keyword_align) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const expr = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- return expr;
- }
-
- /// SwitchProngList <- (SwitchProng COMMA)* SwitchProng?
- fn parseSwitchProngList(p: *Parser) !Node.SubRange {
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
-
- while (true) {
- const item = try parseSwitchProng(p);
- if (item == 0) break;
-
- try p.scratch.append(p.gpa, item);
-
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- // All possible delimiters.
- .colon, .r_paren, .r_brace, .r_bracket => break,
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_switch_prong),
- }
- }
- return p.listToSpan(p.scratch.items[scratch_top..]);
- }
-
- /// ParamDeclList <- (ParamDecl COMMA)* ParamDecl?
- fn parseParamDeclList(p: *Parser) !SmallSpan {
- _ = try p.expectToken(.l_paren);
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- var varargs: union(enum) { none, seen, nonfinal: TokenIndex } = .none;
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- if (varargs == .seen) varargs = .{ .nonfinal = p.tok_i };
- const param = try p.expectParamDecl();
- if (param != 0) {
- try p.scratch.append(p.gpa, param);
- } else if (p.token_tags[p.tok_i - 1] == .ellipsis3) {
- if (varargs == .none) varargs = .seen;
- }
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- .colon, .r_brace, .r_bracket => return p.failExpected(.r_paren),
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_param),
- }
- }
- if (varargs == .nonfinal) {
- try p.warnMsg(.{ .tag = .varargs_nonfinal, .token = varargs.nonfinal });
- }
- const params = p.scratch.items[scratch_top..];
- return switch (params.len) {
- 0 => SmallSpan{ .zero_or_one = 0 },
- 1 => SmallSpan{ .zero_or_one = params[0] },
- else => SmallSpan{ .multi = try p.listToSpan(params) },
- };
- }
-
- /// FnCallArguments <- LPAREN ExprList RPAREN
- ///
- /// ExprList <- (Expr COMMA)* Expr?
- fn parseBuiltinCall(p: *Parser) !Node.Index {
- const builtin_token = p.assertToken(.builtin);
- if (p.token_tags[p.nextToken()] != .l_paren) {
- p.tok_i -= 1;
- try p.warn(.expected_param_list);
- // Pretend this was an identifier so we can continue parsing.
- return p.addNode(.{
- .tag = .identifier,
- .main_token = builtin_token,
- .data = .{
- .lhs = undefined,
- .rhs = undefined,
- },
- });
- }
- const scratch_top = p.scratch.items.len;
- defer p.scratch.shrinkRetainingCapacity(scratch_top);
- while (true) {
- if (p.eatToken(.r_paren)) |_| break;
- const param = try p.expectExpr();
- try p.scratch.append(p.gpa, param);
- switch (p.token_tags[p.tok_i]) {
- .comma => p.tok_i += 1,
- .r_paren => {
- p.tok_i += 1;
- break;
- },
- // Likely just a missing comma; give error but continue parsing.
- else => try p.warn(.expected_comma_after_arg),
- }
- }
- const comma = (p.token_tags[p.tok_i - 2] == .comma);
- const params = p.scratch.items[scratch_top..];
- switch (params.len) {
- 0 => return p.addNode(.{
- .tag = .builtin_call_two,
- .main_token = builtin_token,
- .data = .{
- .lhs = 0,
- .rhs = 0,
- },
- }),
- 1 => return p.addNode(.{
- .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
- .main_token = builtin_token,
- .data = .{
- .lhs = params[0],
- .rhs = 0,
- },
- }),
- 2 => return p.addNode(.{
- .tag = if (comma) .builtin_call_two_comma else .builtin_call_two,
- .main_token = builtin_token,
- .data = .{
- .lhs = params[0],
- .rhs = params[1],
- },
- }),
- else => {
- const span = try p.listToSpan(params);
- return p.addNode(.{
- .tag = if (comma) .builtin_call_comma else .builtin_call,
- .main_token = builtin_token,
- .data = .{
- .lhs = span.start,
- .rhs = span.end,
- },
- });
- },
- }
- }
-
- /// IfPrefix <- KEYWORD_if LPAREN Expr RPAREN PtrPayload?
- fn parseIf(p: *Parser, comptime bodyParseFn: fn (p: *Parser) Error!Node.Index) !Node.Index {
- const if_token = p.eatToken(.keyword_if) orelse return null_node;
- _ = try p.expectToken(.l_paren);
- const condition = try p.expectExpr();
- _ = try p.expectToken(.r_paren);
- _ = try p.parsePtrPayload();
-
- const then_expr = try bodyParseFn(p);
- assert(then_expr != 0);
-
- _ = p.eatToken(.keyword_else) orelse return p.addNode(.{
- .tag = .if_simple,
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = then_expr,
- },
- });
- _ = try p.parsePayload();
- const else_expr = try bodyParseFn(p);
- assert(else_expr != 0);
-
- return p.addNode(.{
- .tag = .@"if",
- .main_token = if_token,
- .data = .{
- .lhs = condition,
- .rhs = try p.addExtra(Node.If{
- .then_expr = then_expr,
- .else_expr = else_expr,
- }),
- },
- });
- }
-
- /// Skips over doc comment tokens. Returns the first one, if any.
- fn eatDocComments(p: *Parser) !?TokenIndex {
- if (p.eatToken(.doc_comment)) |tok| {
- var first_line = tok;
- if (tok > 0 and tokensOnSameLine(p, tok - 1, tok)) {
- try p.warnMsg(.{
- .tag = .same_line_doc_comment,
- .token = tok,
- });
- first_line = p.eatToken(.doc_comment) orelse return null;
- }
- while (p.eatToken(.doc_comment)) |_| {}
- return first_line;
- }
- return null;
- }
-
- fn tokensOnSameLine(p: *Parser, token1: TokenIndex, token2: TokenIndex) bool {
- return std.mem.indexOfScalar(u8, p.source[p.token_starts[token1]..p.token_starts[token2]], '\n') == null;
- }
-
- fn eatToken(p: *Parser, tag: Token.Tag) ?TokenIndex {
- return if (p.token_tags[p.tok_i] == tag) p.nextToken() else null;
- }
-
- fn assertToken(p: *Parser, tag: Token.Tag) TokenIndex {
- const token = p.nextToken();
- assert(p.token_tags[token] == tag);
- return token;
- }
-
- fn expectToken(p: *Parser, tag: Token.Tag) Error!TokenIndex {
- if (p.token_tags[p.tok_i] != tag) {
- return p.failMsg(.{
- .tag = .expected_token,
- .token = p.tok_i,
- .extra = .{ .expected_tag = tag },
- });
- }
- return p.nextToken();
- }
-
- fn expectSemicolon(p: *Parser, error_tag: AstError.Tag, recoverable: bool) Error!void {
- if (p.token_tags[p.tok_i] == .semicolon) {
- _ = p.nextToken();
- return;
- }
- try p.warn(error_tag);
- if (!recoverable) return error.ParseError;
- }
-
- fn nextToken(p: *Parser) TokenIndex {
- const result = p.tok_i;
- p.tok_i += 1;
- return result;
- }
-};
-
-test {
- _ = @import("parser_test.zig");
-}
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index 49b0715695..3c44322ccc 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -6073,7 +6073,7 @@ var fixed_buffer_mem: [100 * 1024]u8 = undefined;
fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
const stderr = io.getStdErr().writer();
- var tree = try std.zig.parse(allocator, source);
+ var tree = try std.zig.Ast.parse(allocator, source, .zig);
defer tree.deinit(allocator);
for (tree.errors) |parse_error| {
@@ -6124,7 +6124,7 @@ fn testCanonical(source: [:0]const u8) !void {
const Error = std.zig.Ast.Error.Tag;
fn testError(source: [:0]const u8, expected_errors: []const Error) !void {
- var tree = try std.zig.parse(std.testing.allocator, source);
+ var tree = try std.zig.Ast.parse(std.testing.allocator, source, .zig);
defer tree.deinit(std.testing.allocator);
std.testing.expectEqual(expected_errors.len, tree.errors.len) catch |err| {
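Editor's aside: the hunks above and below migrate callers from std.zig.parse(allocator, source)
to std.zig.Ast.parse(allocator, source, mode), which takes an explicit mode (.zig, or .zon as
used by the new src/Manifest.zig further down). A minimal sketch of the new call shape, with a
made-up source string:

const std = @import("std");

test "parse with an explicit mode" {
    const gpa = std.testing.allocator;
    var tree = try std.zig.Ast.parse(gpa, "const x: u32 = 42;", .zig);
    defer tree.deinit(gpa);
    try std.testing.expect(tree.errors.len == 0);
}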
diff --git a/lib/std/zig/perf_test.zig b/lib/std/zig/perf_test.zig
index d3fc90eaea..58f7a67694 100644
--- a/lib/std/zig/perf_test.zig
+++ b/lib/std/zig/perf_test.zig
@@ -1,7 +1,6 @@
const std = @import("std");
const mem = std.mem;
const Tokenizer = std.zig.Tokenizer;
-const Parser = std.zig.Parser;
const io = std.io;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
@@ -34,6 +33,6 @@ pub fn main() !void {
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var allocator = fixed_buf_alloc.allocator();
- _ = std.zig.parse(allocator, source) catch @panic("parse failure");
+ _ = std.zig.Ast.parse(allocator, source, .zig) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 15b3611a1e..10673a2b37 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2530,6 +2530,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.bit_size_of,
.typeof_log2_int_type,
.ptr_to_int,
+ .qual_cast,
.align_of,
.bool_to_int,
.embed_file,
@@ -4278,7 +4279,34 @@ fn testDecl(
var num_namespaces_out: u32 = 0;
var capturing_namespace: ?*Scope.Namespace = null;
while (true) switch (s.tag) {
- .local_val, .local_ptr => unreachable, // a test cannot be in a local scope
+ .local_val => {
+ const local_val = s.cast(Scope.LocalVal).?;
+ if (local_val.name == name_str_index) {
+ local_val.used = test_name_token;
+ return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
+ @tagName(local_val.id_cat),
+ }, &[_]u32{
+ try astgen.errNoteTok(local_val.token_src, "{s} declared here", .{
+ @tagName(local_val.id_cat),
+ }),
+ });
+ }
+ s = local_val.parent;
+ },
+ .local_ptr => {
+ const local_ptr = s.cast(Scope.LocalPtr).?;
+ if (local_ptr.name == name_str_index) {
+ local_ptr.used = test_name_token;
+ return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
+ @tagName(local_ptr.id_cat),
+ }, &[_]u32{
+ try astgen.errNoteTok(local_ptr.token_src, "{s} declared here", .{
+ @tagName(local_ptr.id_cat),
+ }),
+ });
+ }
+ s = local_ptr.parent;
+ },
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
@@ -8010,6 +8038,7 @@ fn builtinCall(
.float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast),
.int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast),
.ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast),
+ .qual_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .qual_cast),
.truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate),
// zig fmt: on
@@ -8692,6 +8721,7 @@ fn callExpr(
defer arg_block.unstack();
// `call_inst` is reused to provide the param type.
+ arg_block.rl_ty_inst = call_inst;
const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node);
_ = try arg_block.addBreak(.break_inline, call_index, arg_ref);
@@ -10840,7 +10870,12 @@ const GenZir = struct {
// we emit ZIR for the block break instructions to have the result values,
// and then rvalue() on that to pass the value to the result location.
switch (parent_ri.rl) {
- .ty, .coerced_ty => |ty_inst| {
+ .coerced_ty => |ty_inst| {
+ // Type coercion needs to happen before breaks.
+ gz.rl_ty_inst = ty_inst;
+ gz.break_result_info = .{ .rl = .{ .ty = ty_inst } };
+ },
+ .ty => |ty_inst| {
gz.rl_ty_inst = ty_inst;
gz.break_result_info = parent_ri;
},
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 8afc9c859b..0c2c39bbcc 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1400,6 +1400,7 @@ fn walkInstruction(
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_decl,
@@ -2200,17 +2201,10 @@ fn walkInstruction(
false,
);
- _ = operand;
-
- // WIP
-
- printWithContext(
- file,
- inst_index,
- "TODO: implement `{s}` for walkInstruction\n\n",
- .{@tagName(tags[inst_index])},
- );
- return self.cteTodo(@tagName(tags[inst_index]));
+ return DocData.WalkResult{
+ .typeRef = operand.expr,
+ .expr = .{ .@"struct" = &.{} },
+ };
},
.struct_init_anon => {
const pl_node = data[inst_index].pl_node;
@@ -2537,6 +2531,7 @@ fn walkInstruction(
const var_init_ref = @intToEnum(Ref, file.zir.extra[extra_index]);
const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type);
value.expr = var_init.expr;
+ value.typeRef = var_init.typeRef;
}
return value;
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index b71d96c3dd..80eb739185 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -75,6 +75,7 @@ pub const Tag = enum {
prefetch,
ptr_cast,
ptr_to_int,
+ qual_cast,
rem,
return_address,
select,
@@ -674,6 +675,13 @@ pub const list = list: {
.param_count = 1,
},
},
+ .{
+ "@qualCast",
+ .{
+ .tag = .qual_cast,
+ .param_count = 2,
+ },
+ },
.{
"@rem",
.{
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 09c6e1c686..e09b8f18ab 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -385,7 +385,7 @@ pub const AllErrors = struct {
count: u32 = 1,
/// Does not include the trailing newline.
source_line: ?[]const u8,
- notes: []Message = &.{},
+ notes: []const Message = &.{},
reference_trace: []Message = &.{},
/// Splits the error message up into lines to properly indent them
@@ -3299,7 +3299,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
- comp.bin_file.updateDeclLineNumber(module, decl) catch |err| {
+ comp.bin_file.updateDeclLineNumber(module, decl_index) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
diff --git a/src/Manifest.zig b/src/Manifest.zig
new file mode 100644
index 0000000000..c3f77aec98
--- /dev/null
+++ b/src/Manifest.zig
@@ -0,0 +1,499 @@
+pub const basename = "build.zig.zon";
+pub const Hash = std.crypto.hash.sha2.Sha256;
+
+pub const Dependency = struct {
+ url: []const u8,
+ url_tok: Ast.TokenIndex,
+ hash: ?[]const u8,
+ hash_tok: Ast.TokenIndex,
+};
+
+pub const ErrorMessage = struct {
+ msg: []const u8,
+ tok: Ast.TokenIndex,
+ off: u32,
+};
+
+pub const MultihashFunction = enum(u16) {
+ identity = 0x00,
+ sha1 = 0x11,
+ @"sha2-256" = 0x12,
+ @"sha2-512" = 0x13,
+ @"sha3-512" = 0x14,
+ @"sha3-384" = 0x15,
+ @"sha3-256" = 0x16,
+ @"sha3-224" = 0x17,
+ @"sha2-384" = 0x20,
+ @"sha2-256-trunc254-padded" = 0x1012,
+ @"sha2-224" = 0x1013,
+ @"sha2-512-224" = 0x1014,
+ @"sha2-512-256" = 0x1015,
+ @"blake2b-256" = 0xb220,
+ _,
+};
+
+pub const multihash_function: MultihashFunction = switch (Hash) {
+ std.crypto.hash.sha2.Sha256 => .@"sha2-256",
+ else => @compileError("unreachable"),
+};
+comptime {
+ // We avoid unnecessary uleb128 code in hexDigest by asserting here that the
+ // values are small enough to fit in the one-byte encoding.
+ assert(@enumToInt(multihash_function) < 127);
+ assert(Hash.digest_length < 127);
+}
+pub const multihash_len = 1 + 1 + Hash.digest_length;
+
+name: []const u8,
+version: std.SemanticVersion,
+dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+
+errors: []ErrorMessage,
+arena_state: std.heap.ArenaAllocator.State,
+
+pub const Error = Allocator.Error;
+
+pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
+ const node_tags = ast.nodes.items(.tag);
+ const node_datas = ast.nodes.items(.data);
+ assert(node_tags[0] == .root);
+ const main_node_index = node_datas[0].lhs;
+
+ var arena_instance = std.heap.ArenaAllocator.init(gpa);
+ errdefer arena_instance.deinit();
+
+ var p: Parse = .{
+ .gpa = gpa,
+ .ast = ast,
+ .arena = arena_instance.allocator(),
+ .errors = .{},
+
+ .name = undefined,
+ .version = undefined,
+ .dependencies = .{},
+ .buf = .{},
+ };
+ defer p.buf.deinit(gpa);
+ defer p.errors.deinit(gpa);
+ defer p.dependencies.deinit(gpa);
+
+ p.parseRoot(main_node_index) catch |err| switch (err) {
+ error.ParseFailure => assert(p.errors.items.len > 0),
+ else => |e| return e,
+ };
+
+ return .{
+ .name = p.name,
+ .version = p.version,
+ .dependencies = try p.dependencies.clone(p.arena),
+ .errors = try p.arena.dupe(ErrorMessage, p.errors.items),
+ .arena_state = arena_instance.state,
+ };
+}
+
+pub fn deinit(man: *Manifest, gpa: Allocator) void {
+ man.arena_state.promote(gpa).deinit();
+ man.* = undefined;
+}
+
+const hex_charset = "0123456789abcdef";
+
+pub fn hex64(x: u64) [16]u8 {
+ var result: [16]u8 = undefined;
+ var i: usize = 0;
+ while (i < 8) : (i += 1) {
+ const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
+ result[i * 2 + 0] = hex_charset[byte >> 4];
+ result[i * 2 + 1] = hex_charset[byte & 15];
+ }
+ return result;
+}
+
+test hex64 {
+ const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
+ try std.testing.expectEqualStrings("[00efcdab78563412]", s);
+}
+
+pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
+ var result: [multihash_len * 2]u8 = undefined;
+
+ result[0] = hex_charset[@enumToInt(multihash_function) >> 4];
+ result[1] = hex_charset[@enumToInt(multihash_function) & 15];
+
+ result[2] = hex_charset[Hash.digest_length >> 4];
+ result[3] = hex_charset[Hash.digest_length & 15];
+
+ for (digest) |byte, i| {
+ result[4 + i * 2] = hex_charset[byte >> 4];
+ result[5 + i * 2] = hex_charset[byte & 15];
+ }
+ return result;
+}
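// Editor's aside (illustrative sketch, not part of this patch): as the comptime block above
// asserts, both the multihash function id (0x12 for sha2-256) and the digest length (32)
// fit in a single byte, so hexDigest can emit a fixed two-byte prefix instead of a uleb128
// header. That is why every package hash is 2 * multihash_len = 68 hex digits and starts
// with "1220", as in the test at the end of this file:

test "hexDigest emits the sha2-256 multihash prefix" {
    const zeroes = [_]u8{0} ** Hash.digest_length;
    const hex = hexDigest(zeroes);
    // 0x12 -> "12" (sha2-256), 32 -> "20" (digest length in bytes).
    try std.testing.expectEqualStrings("1220", hex[0..4]);
}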
+
+const Parse = struct {
+ gpa: Allocator,
+ ast: std.zig.Ast,
+ arena: Allocator,
+ buf: std.ArrayListUnmanaged(u8),
+ errors: std.ArrayListUnmanaged(ErrorMessage),
+
+ name: []const u8,
+ version: std.SemanticVersion,
+ dependencies: std.StringArrayHashMapUnmanaged(Dependency),
+
+ const InnerError = error{ ParseFailure, OutOfMemory };
+
+ fn parseRoot(p: *Parse, node: Ast.Node.Index) !void {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+ const main_token = main_tokens[node];
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ return fail(p, main_token, "expected top level expression to be a struct", .{});
+ };
+
+ var have_name = false;
+ var have_version = false;
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const field_name = try identifierTokenString(p, name_token);
+ // We could get fancy with reflection and comptime logic here but doing
+ // things manually provides an opportunity to do any additional verification
+ // that is desirable on a per-field basis.
+ if (mem.eql(u8, field_name, "dependencies")) {
+ try parseDependencies(p, field_init);
+ } else if (mem.eql(u8, field_name, "name")) {
+ p.name = try parseString(p, field_init);
+ have_name = true;
+ } else if (mem.eql(u8, field_name, "version")) {
+ const version_text = try parseString(p, field_init);
+ p.version = std.SemanticVersion.parse(version_text) catch |err| v: {
+ try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)});
+ break :v undefined;
+ };
+ have_version = true;
+ } else {
+ // Ignore unknown fields so that we can add fields in future zig
+ // versions without breaking older zig versions.
+ }
+ }
+
+ if (!have_name) {
+ try appendError(p, main_token, "missing top-level 'name' field", .{});
+ }
+
+ if (!have_version) {
+ try appendError(p, main_token, "missing top-level 'version' field", .{});
+ }
+ }
+
+ fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ const tok = main_tokens[node];
+ return fail(p, tok, "expected dependencies expression to be a struct", .{});
+ };
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const dep_name = try identifierTokenString(p, name_token);
+ const dep = try parseDependency(p, field_init);
+ try p.dependencies.put(p.gpa, dep_name, dep);
+ }
+ }
+
+ fn parseDependency(p: *Parse, node: Ast.Node.Index) !Dependency {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+
+ var buf: [2]Ast.Node.Index = undefined;
+ const struct_init = ast.fullStructInit(&buf, node) orelse {
+ const tok = main_tokens[node];
+ return fail(p, tok, "expected dependency expression to be a struct", .{});
+ };
+
+ var dep: Dependency = .{
+ .url = undefined,
+ .url_tok = undefined,
+ .hash = null,
+ .hash_tok = undefined,
+ };
+ var have_url = false;
+
+ for (struct_init.ast.fields) |field_init| {
+ const name_token = ast.firstToken(field_init) - 2;
+ const field_name = try identifierTokenString(p, name_token);
+ // We could get fancy with reflection and comptime logic here but doing
+ // things manually provides an opportunity to do any additional verification
+ // that is desirable on a per-field basis.
+ if (mem.eql(u8, field_name, "url")) {
+ dep.url = parseString(p, field_init) catch |err| switch (err) {
+ error.ParseFailure => continue,
+ else => |e| return e,
+ };
+ dep.url_tok = main_tokens[field_init];
+ have_url = true;
+ } else if (mem.eql(u8, field_name, "hash")) {
+ dep.hash = parseHash(p, field_init) catch |err| switch (err) {
+ error.ParseFailure => continue,
+ else => |e| return e,
+ };
+ dep.hash_tok = main_tokens[field_init];
+ } else {
+ // Ignore unknown fields so that we can add fields in future zig
+ // versions without breaking older zig versions.
+ }
+ }
+
+ if (!have_url) {
+ try appendError(p, main_tokens[node], "dependency is missing 'url' field", .{});
+ }
+
+ return dep;
+ }
+
+ fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 {
+ const ast = p.ast;
+ const node_tags = ast.nodes.items(.tag);
+ const main_tokens = ast.nodes.items(.main_token);
+ if (node_tags[node] != .string_literal) {
+ return fail(p, main_tokens[node], "expected string literal", .{});
+ }
+ const str_lit_token = main_tokens[node];
+ const token_bytes = ast.tokenSlice(str_lit_token);
+ p.buf.clearRetainingCapacity();
+ try parseStrLit(p, str_lit_token, &p.buf, token_bytes, 0);
+ const duped = try p.arena.dupe(u8, p.buf.items);
+ return duped;
+ }
+
+ fn parseHash(p: *Parse, node: Ast.Node.Index) ![]const u8 {
+ const ast = p.ast;
+ const main_tokens = ast.nodes.items(.main_token);
+ const tok = main_tokens[node];
+ const h = try parseString(p, node);
+
+ if (h.len >= 2) {
+ const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
+ return fail(p, tok, "invalid multihash value: unable to parse hash function: {s}", .{
+ @errorName(err),
+ });
+ };
+ if (@intToEnum(MultihashFunction, their_multihash_func) != multihash_function) {
+ return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{});
+ }
+ }
+
+ const hex_multihash_len = 2 * Manifest.multihash_len;
+ if (h.len != hex_multihash_len) {
+ return fail(p, tok, "wrong hash size. expected: {d}, found: {d}", .{
+ hex_multihash_len, h.len,
+ });
+ }
+
+ return h;
+ }
+
+ /// TODO: try to DRY this with AstGen.identifierTokenString
+ fn identifierTokenString(p: *Parse, token: Ast.TokenIndex) InnerError![]const u8 {
+ const ast = p.ast;
+ const token_tags = ast.tokens.items(.tag);
+ assert(token_tags[token] == .identifier);
+ const ident_name = ast.tokenSlice(token);
+ if (!mem.startsWith(u8, ident_name, "@")) {
+ return ident_name;
+ }
+ p.buf.clearRetainingCapacity();
+ try parseStrLit(p, token, &p.buf, ident_name, 1);
+ const duped = try p.arena.dupe(u8, p.buf.items);
+ return duped;
+ }
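// Editor's aside (hedged sketch reusing this file's declarations, not part of this patch;
// the dependency name and URL below are made up): identifierTokenString is what lets a
// manifest use @"..." quoted field names, for example a dependency whose name is not a
// valid bare identifier:

test "quoted dependency name" {
    const gpa = testing.allocator;
    const example =
        \\.{
        \\    .name = "foo",
        \\    .version = "0.0.1",
        \\    .dependencies = .{
        \\        .@"my-dep" = .{ .url = "https://example.com/a.tar.gz" },
        \\    },
        \\}
    ;
    var ast = try std.zig.Ast.parse(gpa, example, .zon);
    defer ast.deinit(gpa);

    var manifest = try Manifest.parse(gpa, ast);
    defer manifest.deinit(gpa);

    // The @"..." wrapper is stripped, so the map key is the unescaped name.
    try testing.expect(manifest.dependencies.contains("my-dep"));
}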
+
+ /// TODO: try to DRY this with AstGen.parseStrLit
+ fn parseStrLit(
+ p: *Parse,
+ token: Ast.TokenIndex,
+ buf: *std.ArrayListUnmanaged(u8),
+ bytes: []const u8,
+ offset: u32,
+ ) InnerError!void {
+ const raw_string = bytes[offset..];
+ var buf_managed = buf.toManaged(p.gpa);
+ const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string);
+ buf.* = buf_managed.moveToUnmanaged();
+ switch (try result) {
+ .success => {},
+ .failure => |err| try p.appendStrLitError(err, token, bytes, offset),
+ }
+ }
+
+ /// TODO: try to DRY this with AstGen.failWithStrLitError
+ fn appendStrLitError(
+ p: *Parse,
+ err: std.zig.string_literal.Error,
+ token: Ast.TokenIndex,
+ bytes: []const u8,
+ offset: u32,
+ ) Allocator.Error!void {
+ const raw_string = bytes[offset..];
+ switch (err) {
+ .invalid_escape_character => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "invalid escape character: '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_hex_digit => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected hex digit, found '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .empty_unicode_escape_sequence => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "empty unicode escape sequence",
+ .{},
+ );
+ },
+ .expected_hex_digit_or_rbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected hex digit or '}}', found '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ .invalid_unicode_codepoint => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "unicode escape does not correspond to a valid codepoint",
+ .{},
+ );
+ },
+ .expected_lbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected '{{', found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_rbrace => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected '}}', found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .expected_single_quote => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "expected single quote ('), found '{c}",
+ .{raw_string[bad_index]},
+ );
+ },
+ .invalid_character => |bad_index| {
+ try p.appendErrorOff(
+ token,
+ offset + @intCast(u32, bad_index),
+ "invalid byte in string or character literal: '{c}'",
+ .{raw_string[bad_index]},
+ );
+ },
+ }
+ }
+
+ fn fail(
+ p: *Parse,
+ tok: Ast.TokenIndex,
+ comptime fmt: []const u8,
+ args: anytype,
+ ) InnerError {
+ try appendError(p, tok, fmt, args);
+ return error.ParseFailure;
+ }
+
+ fn appendError(p: *Parse, tok: Ast.TokenIndex, comptime fmt: []const u8, args: anytype) !void {
+ return appendErrorOff(p, tok, 0, fmt, args);
+ }
+
+ fn appendErrorOff(
+ p: *Parse,
+ tok: Ast.TokenIndex,
+ byte_offset: u32,
+ comptime fmt: []const u8,
+ args: anytype,
+ ) Allocator.Error!void {
+ try p.errors.append(p.gpa, .{
+ .msg = try std.fmt.allocPrint(p.arena, fmt, args),
+ .tok = tok,
+ .off = byte_offset,
+ });
+ }
+};
+
+const Manifest = @This();
+const std = @import("std");
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const Ast = std.zig.Ast;
+const testing = std.testing;
+
+test "basic" {
+ const gpa = testing.allocator;
+
+ const example =
+ \\.{
+ \\ .name = "foo",
+ \\ .version = "3.2.1",
+ \\ .dependencies = .{
+ \\ .bar = .{
+ \\ .url = "https://example.com/baz.tar.gz",
+ \\ .hash = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
+ \\ },
+ \\ },
+ \\}
+ ;
+
+ var ast = try std.zig.Ast.parse(gpa, example, .zon);
+ defer ast.deinit(gpa);
+
+ try testing.expect(ast.errors.len == 0);
+
+ var manifest = try Manifest.parse(gpa, ast);
+ defer manifest.deinit(gpa);
+
+ try testing.expectEqualStrings("foo", manifest.name);
+
+ try testing.expectEqual(@as(std.SemanticVersion, .{
+ .major = 3,
+ .minor = 2,
+ .patch = 1,
+ }), manifest.version);
+
+ try testing.expect(manifest.dependencies.count() == 1);
+ try testing.expectEqualStrings("bar", manifest.dependencies.keys()[0]);
+ try testing.expectEqualStrings(
+ "https://example.com/baz.tar.gz",
+ manifest.dependencies.values()[0].url,
+ );
+ try testing.expectEqualStrings(
+ "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
+ manifest.dependencies.values()[0].hash orelse return error.TestFailed,
+ );
+}
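
A minimal sketch of the multihash layout that parseHash expects, using the hash value from the test above and assuming the standard multihash registry where 0x12 identifies sha2-256 and the following byte gives the digest length:

    const std = @import("std");

    test "multihash header layout" {
        // Hash value taken from the "basic" test above.
        const h = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f";
        // First byte: hash function id (0x12 is sha2-256 in the multihash registry).
        const func_id = try std.fmt.parseInt(u8, h[0..2], 16);
        // Second byte: digest length in bytes (0x20 == 32 for sha2-256).
        const digest_len = try std.fmt.parseInt(u8, h[2..4], 16);
        try std.testing.expectEqual(@as(u8, 0x12), func_id);
        try std.testing.expectEqual(@as(u8, 32), digest_len);
        // 2 + 2 + 64 hex digits, i.e. the hex_multihash_len check enforced by parseHash.
        try std.testing.expectEqual(@as(usize, 68), h.len);
    }
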
diff --git a/src/Module.zig b/src/Module.zig
index dcdbeec322..e4cf0189cc 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -328,8 +328,6 @@ pub const ErrorInt = u32;
pub const Export = struct {
options: std.builtin.ExportOptions,
src: LazySrcLoc,
- /// Represents the position of the export, if any, in the output file.
- link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: Decl.Index,
/// The Decl containing the export statement. Inline function calls
@@ -533,17 +531,6 @@ pub const Decl = struct {
/// What kind of a declaration is this.
kind: Kind,
- /// Represents the position of the code in the output file.
- /// This is populated regardless of semantic analysis and code generation.
- link: link.File.LinkBlock,
-
- /// Represents the function in the linked output file, if the `Decl` is a function.
- /// This is stored here and not in `Fn` because `Decl` survives across updates but
- /// `Fn` does not.
- /// TODO Look into making `Fn` a longer lived structure and moving this field there
- /// to save on memory usage.
- fn_link: link.File.LinkFn,
-
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
dependants: DepsTable = .{},
@@ -2067,7 +2054,7 @@ pub const File = struct {
if (file.tree_loaded) return &file.tree;
const source = try file.getSource(gpa);
- file.tree = try std.zig.parse(gpa, source.bytes);
+ file.tree = try Ast.parse(gpa, source.bytes, .zig);
file.tree_loaded = true;
return &file.tree;
}
@@ -3672,7 +3659,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
file.source = source;
file.source_loaded = true;
- file.tree = try std.zig.parse(gpa, source);
+ file.tree = try Ast.parse(gpa, source, .zig);
defer if (!file.tree_loaded) file.tree.deinit(gpa);
if (file.tree.errors.len != 0) {
@@ -3987,7 +3974,7 @@ pub fn populateBuiltinFile(mod: *Module) !void {
else => |e| return e,
}
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
assert(file.tree.errors.len == 0); // builtin.zig must parse
@@ -4098,7 +4085,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
// Similarly, `@setAlignStack` invocations will be re-discovered.
if (decl.getFunction()) |func| {
@@ -4878,14 +4865,31 @@ pub fn importFile(
};
}
-pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*EmbedFile {
+pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*EmbedFile {
const gpa = mod.gpa;
- // The resolved path is used as the key in the table, to detect if
- // a file refers to the same as another, despite different relative paths.
+ if (cur_file.pkg.table.get(import_string)) |pkg| {
+ const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
+ pkg.root_src_directory.path orelse ".", pkg.root_src_path,
+ });
+ var keep_resolved_path = false;
+ defer if (!keep_resolved_path) gpa.free(resolved_path);
+
+ const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
+ errdefer assert(mod.embed_table.remove(resolved_path));
+ if (gop.found_existing) return gop.value_ptr.*;
+
+ const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
+ errdefer gpa.free(sub_file_path);
+
+ return newEmbedFile(mod, pkg, sub_file_path, resolved_path, &keep_resolved_path, gop);
+ }
+
+ // The resolved path is used as the key in the table, to detect if a file
+ // refers to the same file as another, despite different relative paths.
const cur_pkg_dir_path = cur_file.pkg.root_src_directory.path orelse ".";
const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
- cur_pkg_dir_path, cur_file.sub_file_path, "..", rel_file_path,
+ cur_pkg_dir_path, cur_file.sub_file_path, "..", import_string,
});
var keep_resolved_path = false;
defer if (!keep_resolved_path) gpa.free(resolved_path);
@@ -4894,9 +4898,6 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
errdefer assert(mod.embed_table.remove(resolved_path));
if (gop.found_existing) return gop.value_ptr.*;
- const new_file = try gpa.create(EmbedFile);
- errdefer gpa.destroy(new_file);
-
const resolved_root_path = try std.fs.path.resolve(gpa, &[_][]const u8{cur_pkg_dir_path});
defer gpa.free(resolved_root_path);
@@ -4915,7 +4916,23 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
};
errdefer gpa.free(sub_file_path);
- var file = try cur_file.pkg.root_src_directory.handle.openFile(sub_file_path, .{});
+ return newEmbedFile(mod, cur_file.pkg, sub_file_path, resolved_path, &keep_resolved_path, gop);
+}
+
+fn newEmbedFile(
+ mod: *Module,
+ pkg: *Package,
+ sub_file_path: []const u8,
+ resolved_path: []const u8,
+ keep_resolved_path: *bool,
+ gop: std.StringHashMapUnmanaged(*EmbedFile).GetOrPutResult,
+) !*EmbedFile {
+ const gpa = mod.gpa;
+
+ const new_file = try gpa.create(EmbedFile);
+ errdefer gpa.destroy(new_file);
+
+ var file = try pkg.root_src_directory.handle.openFile(sub_file_path, .{});
defer file.close();
const actual_stat = try file.stat();
@@ -4928,10 +4945,6 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
errdefer gpa.free(bytes);
- log.debug("new embedFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, rel_file_path={s}", .{
- resolved_root_path, resolved_path, sub_file_path, rel_file_path,
- });
-
if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
const copied_resolved_path = try gpa.dupe(u8, resolved_path);
errdefer gpa.free(copied_resolved_path);
@@ -4940,13 +4953,13 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
try whole_cache_manifest.addFilePostContents(copied_resolved_path, bytes, stat);
}
- keep_resolved_path = true; // It's now owned by embed_table.
+ keep_resolved_path.* = true; // It's now owned by embed_table.
gop.value_ptr.* = new_file;
new_file.* = .{
.sub_file_path = sub_file_path,
.bytes = bytes,
.stat = stat,
- .pkg = cur_file.pkg,
+ .pkg = pkg,
.owner_decl = undefined, // Set by Sema immediately after this function returns.
};
return new_file;
@@ -5183,20 +5196,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |_| {
switch (comp.bin_file.tag) {
- .coff => {
- // TODO Implement for COFF
- },
- .elf => if (decl.fn_link.elf.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .macho => if (decl.fn_link.macho.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
- },
- .plan9 => {
+ .coff, .elf, .macho, .plan9 => {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
@@ -5265,34 +5265,11 @@ pub fn clearDecl(
assert(emit_h.decl_table.swapRemove(decl_index));
}
_ = mod.compile_log_decls.swapRemove(decl_index);
- mod.deleteDeclExports(decl_index);
+ try mod.deleteDeclExports(decl_index);
if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index);
-
- // TODO instead of a union, put this memory trailing Decl objects,
- // and allow it to be variably sized.
- decl.link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- };
- decl.fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
- .nvptx => .{ .nvptx = {} },
- };
}
if (decl.getInnerNamespace()) |namespace| {
try namespace.deleteAllDecls(mod, outdated_decls);
@@ -5358,7 +5335,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
-fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
+fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
for (export_owners.items) |exp| {
@@ -5381,16 +5358,16 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
}
}
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
- elf.deleteExport(exp.link.elf);
+ elf.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
- macho.deleteExport(exp.link.macho);
+ try macho.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
- wasm.deleteExport(exp.link.wasm);
+ wasm.deleteDeclExport(decl_index);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
- coff.deleteExport(exp.link.coff);
+ coff.deleteDeclExport(decl_index, exp.options.name);
}
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
@@ -5693,26 +5670,6 @@ pub fn allocateNewDecl(
.deletion_flag = false,
.zir_decl_index = 0,
.src_scope = src_scope,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.Atom.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.Atom.empty },
- .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
- .fn_link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
- .macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
- .plan9 => .{ .plan9 = {} },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = link.File.Wasm.FnData.empty },
- .spirv => .{ .spirv = .{} },
- .nvptx => .{ .nvptx = {} },
- },
.generation = 0,
.is_pub = false,
.is_exported = false,
diff --git a/src/Package.zig b/src/Package.zig
index ebe84b8444..401eef2121 100644
--- a/src/Package.zig
+++ b/src/Package.zig
@@ -1,12 +1,13 @@
const Package = @This();
+const builtin = @import("builtin");
const std = @import("std");
const fs = std.fs;
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
-const Hash = std.crypto.hash.sha2.Sha256;
const log = std.log.scoped(.package);
+const main = @import("main.zig");
const Compilation = @import("Compilation.zig");
const Module = @import("Module.zig");
@@ -14,6 +15,7 @@ const ThreadPool = @import("ThreadPool.zig");
const WaitGroup = @import("WaitGroup.zig");
const Cache = @import("Cache.zig");
const build_options = @import("build_options");
+const Manifest = @import("Manifest.zig");
pub const Table = std.StringHashMapUnmanaged(*Package);
@@ -140,10 +142,10 @@ pub fn addAndAdopt(parent: *Package, gpa: Allocator, child: *Package) !void {
}
pub const build_zig_basename = "build.zig";
-pub const ini_basename = build_zig_basename ++ ".ini";
pub fn fetchAndAddDependencies(
pkg: *Package,
+ arena: Allocator,
thread_pool: *ThreadPool,
http_client: *std.http.Client,
directory: Compilation.Directory,
@@ -152,89 +154,77 @@ pub fn fetchAndAddDependencies(
dependencies_source: *std.ArrayList(u8),
build_roots_source: *std.ArrayList(u8),
name_prefix: []const u8,
+ color: main.Color,
) !void {
const max_bytes = 10 * 1024 * 1024;
const gpa = thread_pool.allocator;
- const build_zig_ini = directory.handle.readFileAlloc(gpa, ini_basename, max_bytes) catch |err| switch (err) {
+ const build_zig_zon_bytes = directory.handle.readFileAllocOptions(
+ arena,
+ Manifest.basename,
+ max_bytes,
+ null,
+ 1,
+ 0,
+ ) catch |err| switch (err) {
error.FileNotFound => {
// Handle the same as no dependencies.
return;
},
else => |e| return e,
};
- defer gpa.free(build_zig_ini);
- const ini: std.Ini = .{ .bytes = build_zig_ini };
- var any_error = false;
- var it = ini.iterateSection("\n[dependency]\n");
- while (it.next()) |dep| {
- var line_it = mem.split(u8, dep, "\n");
- var opt_name: ?[]const u8 = null;
- var opt_url: ?[]const u8 = null;
- var expected_hash: ?[]const u8 = null;
- while (line_it.next()) |kv| {
- const eq_pos = mem.indexOfScalar(u8, kv, '=') orelse continue;
- const key = kv[0..eq_pos];
- const value = kv[eq_pos + 1 ..];
- if (mem.eql(u8, key, "name")) {
- opt_name = value;
- } else if (mem.eql(u8, key, "url")) {
- opt_url = value;
- } else if (mem.eql(u8, key, "hash")) {
- expected_hash = value;
- } else {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(key.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.warn("{s}/{s}:{d}:{d} unrecognized key: '{s}'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- key,
- });
- }
+ var ast = try std.zig.Ast.parse(gpa, build_zig_zon_bytes, .zon);
+ defer ast.deinit(gpa);
+
+ if (ast.errors.len > 0) {
+ const file_path = try directory.join(arena, &.{Manifest.basename});
+ try main.printErrsMsgToStdErr(gpa, arena, ast, file_path, color);
+ return error.PackageFetchFailed;
+ }
+
+ var manifest = try Manifest.parse(gpa, ast);
+ defer manifest.deinit(gpa);
+
+ if (manifest.errors.len > 0) {
+ const ttyconf: std.debug.TTY.Config = switch (color) {
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+ const file_path = try directory.join(arena, &.{Manifest.basename});
+ for (manifest.errors) |msg| {
+ Report.renderErrorMessage(ast, file_path, ttyconf, msg, &.{});
}
+ return error.PackageFetchFailed;
+ }
- const name = opt_name orelse {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- });
- any_error = true;
- continue;
- };
+ const report: Report = .{
+ .ast = &ast,
+ .directory = directory,
+ .color = color,
+ .arena = arena,
+ };
- const url = opt_url orelse {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
- std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
- directory.path orelse ".",
- "build.zig.ini",
- loc.line,
- loc.column,
- });
- any_error = true;
- continue;
- };
+ var any_error = false;
+ const deps_list = manifest.dependencies.values();
+ for (manifest.dependencies.keys()) |name, i| {
+ const dep = deps_list[i];
- const sub_prefix = try std.fmt.allocPrint(gpa, "{s}{s}.", .{ name_prefix, name });
- defer gpa.free(sub_prefix);
+ const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
const fqn = sub_prefix[0 .. sub_prefix.len - 1];
const sub_pkg = try fetchAndUnpack(
thread_pool,
http_client,
global_cache_directory,
- url,
- expected_hash,
- ini,
- directory,
+ dep,
+ report,
build_roots_source,
fqn,
);
try pkg.fetchAndAddDependencies(
+ arena,
thread_pool,
http_client,
sub_pkg.root_src_directory,
@@ -243,6 +233,7 @@ pub fn fetchAndAddDependencies(
dependencies_source,
build_roots_source,
sub_prefix,
+ color,
);
try addAndAdopt(pkg, gpa, sub_pkg);
@@ -252,7 +243,7 @@ pub fn fetchAndAddDependencies(
});
}
- if (any_error) return error.InvalidBuildZigIniFile;
+ if (any_error) return error.InvalidBuildManifestFile;
}
pub fn createFilePkg(
@@ -263,7 +254,7 @@ pub fn createFilePkg(
contents: []const u8,
) !*Package {
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ hex64(rand_int);
+ const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ Manifest.hex64(rand_int);
{
var tmp_dir = try cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
defer tmp_dir.close();
@@ -281,14 +272,73 @@ pub fn createFilePkg(
return createWithDir(gpa, name, cache_directory, o_dir_sub_path, basename);
}
+const Report = struct {
+ ast: *const std.zig.Ast,
+ directory: Compilation.Directory,
+ color: main.Color,
+ arena: Allocator,
+
+ fn fail(
+ report: Report,
+ tok: std.zig.Ast.TokenIndex,
+ comptime fmt_string: []const u8,
+ fmt_args: anytype,
+ ) error{ PackageFetchFailed, OutOfMemory } {
+ return failWithNotes(report, &.{}, tok, fmt_string, fmt_args);
+ }
+
+ fn failWithNotes(
+ report: Report,
+ notes: []const Compilation.AllErrors.Message,
+ tok: std.zig.Ast.TokenIndex,
+ comptime fmt_string: []const u8,
+ fmt_args: anytype,
+ ) error{ PackageFetchFailed, OutOfMemory } {
+ const ttyconf: std.debug.TTY.Config = switch (report.color) {
+ .auto => std.debug.detectTTYConfig(std.io.getStdErr()),
+ .on => .escape_codes,
+ .off => .no_color,
+ };
+ const file_path = try report.directory.join(report.arena, &.{Manifest.basename});
+ renderErrorMessage(report.ast.*, file_path, ttyconf, .{
+ .tok = tok,
+ .off = 0,
+ .msg = try std.fmt.allocPrint(report.arena, fmt_string, fmt_args),
+ }, notes);
+ return error.PackageFetchFailed;
+ }
+
+ fn renderErrorMessage(
+ ast: std.zig.Ast,
+ file_path: []const u8,
+ ttyconf: std.debug.TTY.Config,
+ msg: Manifest.ErrorMessage,
+ notes: []const Compilation.AllErrors.Message,
+ ) void {
+ const token_starts = ast.tokens.items(.start);
+ const start_loc = ast.tokenLocation(0, msg.tok);
+ Compilation.AllErrors.Message.renderToStdErr(.{ .src = .{
+ .msg = msg.msg,
+ .src_path = file_path,
+ .line = @intCast(u32, start_loc.line),
+ .column = @intCast(u32, start_loc.column),
+ .span = .{
+ .start = token_starts[msg.tok],
+ .end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
+ .main = token_starts[msg.tok] + msg.off,
+ },
+ .source_line = ast.source[start_loc.line_start..start_loc.line_end],
+ .notes = notes,
+ } }, ttyconf);
+ }
+};
+
fn fetchAndUnpack(
thread_pool: *ThreadPool,
http_client: *std.http.Client,
global_cache_directory: Compilation.Directory,
- url: []const u8,
- expected_hash: ?[]const u8,
- ini: std.Ini,
- comp_directory: Compilation.Directory,
+ dep: Manifest.Dependency,
+ report: Report,
build_roots_source: *std.ArrayList(u8),
fqn: []const u8,
) !*Package {
@@ -297,17 +347,9 @@ fn fetchAndUnpack(
// Check if the expected_hash is already present in the global package
// cache, and thereby avoid both fetching and unpacking.
- if (expected_hash) |h| cached: {
- if (h.len != 2 * Hash.digest_length) {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "wrong hash size. expected: {d}, found: {d}",
- .{ Hash.digest_length, h.len },
- );
- }
- const hex_digest = h[0 .. 2 * Hash.digest_length];
+ if (dep.hash) |h| cached: {
+ const hex_multihash_len = 2 * Manifest.multihash_len;
+ const hex_digest = h[0..hex_multihash_len];
const pkg_dir_sub_path = "p" ++ s ++ hex_digest;
var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
error.FileNotFound => break :cached,
@@ -344,10 +386,10 @@ fn fetchAndUnpack(
return ptr;
}
- const uri = try std.Uri.parse(url);
+ const uri = try std.Uri.parse(dep.url);
const rand_int = std.crypto.random.int(u64);
- const tmp_dir_sub_path = "tmp" ++ s ++ hex64(rand_int);
+ const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
const actual_hash = a: {
var tmp_directory: Compilation.Directory = d: {
@@ -376,13 +418,9 @@ fn fetchAndUnpack(
// by default, so the same logic applies for buffering the reader as for gzip.
try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz);
} else {
- return reportError(
- ini,
- comp_directory,
- uri.path.ptr,
- "unknown file extension for path '{s}'",
- .{uri.path},
- );
+ return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{
+ uri.path,
+ });
}
// TODO: delete files not included in the package prior to computing the package hash.
@@ -393,28 +431,21 @@ fn fetchAndUnpack(
break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle });
};
- const pkg_dir_sub_path = "p" ++ s ++ hexDigest(actual_hash);
+ const pkg_dir_sub_path = "p" ++ s ++ Manifest.hexDigest(actual_hash);
try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path);
- if (expected_hash) |h| {
- const actual_hex = hexDigest(actual_hash);
+ const actual_hex = Manifest.hexDigest(actual_hash);
+ if (dep.hash) |h| {
if (!mem.eql(u8, h, &actual_hex)) {
- return reportError(
- ini,
- comp_directory,
- h.ptr,
- "hash mismatch: expected: {s}, found: {s}",
- .{ h, actual_hex },
- );
+ return report.fail(dep.hash_tok, "hash mismatch: expected: {s}, found: {s}", .{
+ h, actual_hex,
+ });
}
} else {
- return reportError(
- ini,
- comp_directory,
- url.ptr,
- "url field is missing corresponding hash field: hash={s}",
- .{std.fmt.fmtSliceHexLower(&actual_hash)},
- );
+ const notes: [1]Compilation.AllErrors.Message = .{.{ .plain = .{
+ .msg = try std.fmt.allocPrint(report.arena, "expected .hash = \"{s}\",", .{&actual_hex}),
+ } }};
+ return report.failWithNotes(¬es, dep.url_tok, "url field is missing corresponding hash field", .{});
}
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
@@ -440,35 +471,21 @@ fn unpackTarball(
try std.tar.pipeToFileSystem(out_dir, decompress.reader(), .{
.strip_components = 1,
+ // TODO: we would like to set this to executable_bit_only, but two
+ // things need to happen before that:
+ // 1. the tar implementation needs to support it
+ // 2. the hashing algorithm here needs to support detecting the is_executable
+ // bit on Windows from the ACLs (see the isExecutable function).
+ .mode_mode = .ignore,
});
}
-fn reportError(
- ini: std.Ini,
- comp_directory: Compilation.Directory,
- src_ptr: [*]const u8,
- comptime fmt_string: []const u8,
- fmt_args: anytype,
-) error{PackageFetchFailed} {
- const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(src_ptr) - @ptrToInt(ini.bytes.ptr));
- if (comp_directory.path) |p| {
- std.debug.print("{s}{c}{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
- p, fs.path.sep, ini_basename, loc.line + 1, loc.column + 1,
- } ++ fmt_args);
- } else {
- std.debug.print("{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
- ini_basename, loc.line + 1, loc.column + 1,
- } ++ fmt_args);
- }
- return error.PackageFetchFailed;
-}
-
const HashedFile = struct {
path: []const u8,
- hash: [Hash.digest_length]u8,
+ hash: [Manifest.Hash.digest_length]u8,
failure: Error!void,
- const Error = fs.File.OpenError || fs.File.ReadError;
+ const Error = fs.File.OpenError || fs.File.ReadError || fs.File.StatError;
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
_ = context;
@@ -479,7 +496,7 @@ const HashedFile = struct {
fn computePackageHash(
thread_pool: *ThreadPool,
pkg_dir: fs.IterableDir,
-) ![Hash.digest_length]u8 {
+) ![Manifest.Hash.digest_length]u8 {
const gpa = thread_pool.allocator;
// We'll use an arena allocator for the path name strings since they all
@@ -522,7 +539,7 @@ fn computePackageHash(
std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
- var hasher = Hash.init(.{});
+ var hasher = Manifest.Hash.init(.{});
var any_failures = false;
for (all_files.items) |hashed_file| {
hashed_file.failure catch |err| {
@@ -543,7 +560,9 @@ fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined;
var file = try dir.openFile(hashed_file.path, .{});
- var hasher = Hash.init(.{});
+ var hasher = Manifest.Hash.init(.{});
+ hasher.update(hashed_file.path);
+ hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
while (true) {
const bytes_read = try file.read(&buf);
if (bytes_read == 0) break;
@@ -552,31 +571,17 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
hasher.final(&hashed_file.hash);
}
-const hex_charset = "0123456789abcdef";
-
-fn hex64(x: u64) [16]u8 {
- var result: [16]u8 = undefined;
- var i: usize = 0;
- while (i < 8) : (i += 1) {
- const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
- result[i * 2 + 0] = hex_charset[byte >> 4];
- result[i * 2 + 1] = hex_charset[byte & 15];
+fn isExecutable(file: fs.File) !bool {
+ if (builtin.os.tag == .windows) {
+ // TODO check the ACL on Windows.
+ // Until this is implemented, this could be a false negative on
+ // Windows, which is why we do not yet set executable_bit_only above
+ // when unpacking the tarball.
+ return false;
+ } else {
+ const stat = try file.stat();
+ return (stat.mode & std.os.S.IXUSR) != 0;
}
- return result;
-}
-
-test hex64 {
- const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
- try std.testing.expectEqualStrings("[00efcdab78563412]", s);
-}
-
-fn hexDigest(digest: [Hash.digest_length]u8) [Hash.digest_length * 2]u8 {
- var result: [Hash.digest_length * 2]u8 = undefined;
- for (digest) |byte, i| {
- result[i * 2 + 0] = hex_charset[byte >> 4];
- result[i * 2 + 1] = hex_charset[byte & 15];
- }
- return result;
}
fn renameTmpIntoCache(
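
A minimal sketch of the per-file hash input layout used by hashFileFallible above, with a hypothetical path and file contents: each file feeds its path, a zero separator plus the executable bit, and then its bytes into a sha2-256 hasher, and computePackageHash then combines the sorted per-file digests into the package hash.

    const std = @import("std");

    test "per-file hash input layout" {
        const Hash = std.crypto.hash.sha2.Sha256;
        var hasher = Hash.init(.{});
        const path = "src/main.zig"; // hypothetical path inside the unpacked package
        const contents = "const std = @import(\"std\");\n"; // hypothetical file contents
        const is_executable = false; // currently always false on Windows, see isExecutable
        hasher.update(path);
        hasher.update(&.{ 0, @boolToInt(is_executable) });
        hasher.update(contents);
        var digest: [Hash.digest_length]u8 = undefined;
        hasher.final(&digest);
        try std.testing.expectEqual(@as(usize, 32), digest.len);
    }
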
diff --git a/src/Sema.zig b/src/Sema.zig
index 7448fd149c..b7b3a55063 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1015,6 +1015,7 @@ fn analyzeBodyInner(
.float_cast => try sema.zirFloatCast(block, inst),
.int_cast => try sema.zirIntCast(block, inst),
.ptr_cast => try sema.zirPtrCast(block, inst),
+ .qual_cast => try sema.zirQualCast(block, inst),
.truncate => try sema.zirTruncate(block, inst),
.align_cast => try sema.zirAlignCast(block, inst),
.has_decl => try sema.zirHasDecl(block, inst),
@@ -3294,7 +3295,7 @@ fn ensureResultUsed(
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is ignored", .{});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{});
+ try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -3325,7 +3326,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is discarded", .{});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{});
+ try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -5564,16 +5565,6 @@ pub fn analyzeExport(
.visibility = borrowed_options.visibility,
},
.src = src,
- .link = switch (mod.comp.bin_file.tag) {
- .coff => .{ .coff = .{} },
- .elf => .{ .elf = .{} },
- .macho => .{ .macho = .{} },
- .plan9 => .{ .plan9 = null },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = .{} },
- .spirv => .{ .spirv = {} },
- .nvptx => .{ .nvptx = {} },
- },
.owner_decl = sema.owner_decl_index,
.src_decl = block.src_decl,
.exported_decl = exported_decl_index,
@@ -6884,6 +6875,8 @@ fn analyzeInlineCallArg(
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
return err;
};
+ } else if (!is_comptime_call and zir_tags[inst] == .param_comptime) {
+ _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
}
const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{
.func_inst = func_inst,
@@ -6957,6 +6950,9 @@ fn analyzeInlineCallArg(
.val = arg_val,
};
} else {
+ if (zir_tags[inst] == .param_anytype_comptime) {
+ _ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
+ }
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
}
@@ -8477,7 +8473,7 @@ fn handleExternLibName(
return sema.fail(
block,
src_loc,
- "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.",
+ "dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by '-l{s}' or '-fPIC'.",
.{ lib_name, lib_name },
);
}
@@ -9014,7 +9010,18 @@ fn zirParam(
if (is_comptime and sema.preallocated_new_func != null) {
// We have a comptime value for this parameter so it should be elided from the
// function type of the function instruction in this block.
- const coerced_arg = try sema.coerce(block, param_ty, arg, src);
+ const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
+ error.NeededSourceLocation => {
+ // We are instantiating a generic function and a comptime arg
+ // cannot be coerced to the param type, but since we don't
+ // have the callee source location return `GenericPoison`
+ // so that the instantiation is failed and the coercion
+ // is handled by comptime call logic instead.
+ assert(sema.is_generic_instantiation);
+ return error.GenericPoison;
+ },
+ else => return err,
+ };
sema.inst_map.putAssumeCapacity(inst, coerced_arg);
return;
}
@@ -19529,13 +19536,34 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const operand_info = operand_ty.ptrInfo().data;
const dest_info = dest_ty.ptrInfo().data;
if (!operand_info.mutable and dest_info.mutable) {
- return sema.fail(block, src, "cast discards const qualifier", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"volatile" and !dest_info.@"volatile") {
- return sema.fail(block, src, "cast discards volatile qualifier", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
if (operand_info.@"addrspace" != dest_info.@"addrspace") {
- return sema.fail(block, src, "cast changes pointer address space", .{});
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ try sema.errNote(block, src, msg, "consider using '@addrSpaceCast'", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
const dest_is_slice = dest_ty.isSlice();
@@ -19590,6 +19618,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{
dest_ty.fmt(sema.mod), dest_align,
});
+
+ try sema.errNote(block, src, msg, "consider using '@alignCast'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -19625,6 +19655,49 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return block.addBitCast(aligned_dest_ty, ptr);
}
+fn zirQualCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const src = inst_data.src();
+ const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
+ const operand = try sema.resolveInst(extra.rhs);
+ const operand_ty = sema.typeOf(operand);
+
+ try sema.checkPtrType(block, dest_ty_src, dest_ty);
+ try sema.checkPtrOperand(block, operand_src, operand_ty);
+
+ var operand_payload = operand_ty.ptrInfo();
+ var dest_info = dest_ty.ptrInfo();
+
+ operand_payload.data.mutable = dest_info.data.mutable;
+ operand_payload.data.@"volatile" = dest_info.data.@"volatile";
+
+ const altered_operand_ty = Type.initPayload(&operand_payload.base);
+ if (!altered_operand_ty.eql(dest_ty, sema.mod)) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "'@qualCast' can only modify 'const' and 'volatile' qualifiers", .{});
+ errdefer msg.destroy(sema.gpa);
+
+ dest_info.data.mutable = !operand_ty.isConstPtr();
+ dest_info.data.@"volatile" = operand_ty.isVolatilePtr();
+ const altered_dest_ty = Type.initPayload(&dest_info.base);
+ try sema.errNote(block, src, msg, "expected type '{}'", .{altered_dest_ty.fmt(sema.mod)});
+ try sema.errNote(block, src, msg, "got type '{}'", .{operand_ty.fmt(sema.mod)});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
+ return sema.addConstant(dest_ty, operand_val);
+ }
+
+ try sema.requireRuntimeBlock(block, src, null);
+ return block.addBitCast(dest_ty, operand);
+}
+
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -25141,7 +25214,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
- try sema.errNote(block, inst_src, msg, "consider using `try`, `catch`, or `if`", .{});
+ try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
// ?T to T
@@ -25150,7 +25223,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
- try sema.errNote(block, inst_src, msg, "consider using `.?`, `orelse`, or `if`", .{});
+ try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
}
try in_memory_result.report(sema, block, inst_src, msg);
@@ -26076,7 +26149,7 @@ fn coerceVarArgParam(
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
.Float => float: {
const target = sema.mod.getTarget();
- const double_bits = @import("type.zig").CType.sizeInBits(.double, target);
+ const double_bits = target.c_type_bit_size(.double);
const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {
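
A minimal sketch of how the new '@qualCast' builtin handled by zirQualCast above might be written at the source level, assuming it takes a destination pointer type and an operand in the same two-argument form as '@ptrCast' (see the Zir doc comment later in this diff); only the 'const' and 'volatile' qualifiers are allowed to differ:

    test "@qualCast may only change const and volatile" {
        const x: u32 = 123;
        const p: *const u32 = &x;
        // Element type, alignment, and address space must match the destination;
        // otherwise Sema reports "'@qualCast' can only modify 'const' and 'volatile' qualifiers".
        const q: *u32 = @qualCast(*u32, p);
        _ = q;
    }
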
diff --git a/src/TypedValue.zig b/src/TypedValue.zig
index 6e096ee90a..cb28274f10 100644
--- a/src/TypedValue.zig
+++ b/src/TypedValue.zig
@@ -176,7 +176,9 @@ pub fn print(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
- buf[i] = std.math.cast(u8, val.fieldValue(ty, i).toUnsignedInt(target)) orelse break :str;
+ const elem = val.fieldValue(ty, i);
+ if (elem.isUndef()) break :str;
+ buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str;
}
const truncated = if (len > max_string_len) " (truncated)" else "";
@@ -390,6 +392,7 @@ pub fn print(
while (i < max_len) : (i += 1) {
var elem_buf: Value.ElemValueBuffer = undefined;
const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
+ if (elem_val.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
}
diff --git a/src/Zir.zig b/src/Zir.zig
index 94e6a9a11a..b93422177e 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -857,6 +857,9 @@ pub const Inst = struct {
/// Implements the `@ptrCast` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
ptr_cast,
+ /// Implements the `@qualCast` builtin.
+ /// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
+ qual_cast,
/// Implements the `@truncate` builtin.
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
truncate,
@@ -1195,6 +1198,7 @@ pub const Inst = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_field,
@@ -1484,6 +1488,7 @@ pub const Inst = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.has_field,
@@ -1755,6 +1760,7 @@ pub const Inst = struct {
.float_cast = .pl_node,
.int_cast = .pl_node,
.ptr_cast = .pl_node,
+ .qual_cast = .pl_node,
.truncate = .pl_node,
.align_cast = .pl_node,
.typeof_builtin = .pl_node,
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 0efd34937a..473a62fd83 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -203,13 +203,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -255,14 +249,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -4019,11 +4006,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -4301,34 +4294,37 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try fn_owner_decl.link.macho.ensureInitialized(macho_file);
+ const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try fn_owner_decl.link.coff.ensureInitialized(coff_file);
+ const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
- .sym_index = fn_owner_decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
},
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(func.owner_decl);
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
- const got_index = fn_owner_decl.link.plan9.got_index.?;
+ const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
} else unreachable;
@@ -4349,11 +4345,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.data = .{
.relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
+ .atom_index = atom_index,
.sym_index = sym_index,
},
},
@@ -5488,11 +5486,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5602,11 +5606,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.direct => .load_memory_direct,
.import => .load_memory_import,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5796,11 +5806,17 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
.direct => .load_memory_ptr_direct,
.import => unreachable,
};
- const mod = self.bin_file.options.module.?;
- const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
- .macho => owner_decl.link.macho.getSymbolIndex().?,
- .coff => owner_decl.link.coff.getSymbolIndex().?,
+ .macho => blk: {
+ const macho_file = self.bin_file.cast(link.File.MachO).?;
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ },
+ .coff => blk: {
+ const coff_file = self.bin_file.cast(link.File.Coff).?;
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ },
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -6119,23 +6135,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try decl.link.macho.ensureInitialized(macho_file);
+ const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try decl.link.coff.ensureInitialized(coff_file);
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6148,8 +6168,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 3812597789..3c2a81d5d1 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -670,9 +670,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target,
.offset = offset,
@@ -883,10 +883,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset,
.addend = 0,
@@ -902,7 +902,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4,
.addend = 0,
@@ -919,7 +919,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
},
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
const target = switch (tag) {
.load_memory_got,
.load_memory_ptr_got,
@@ -929,7 +929,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
.load_memory_import => coff_file.getGlobalByIndex(data.sym_index),
else => unreachable,
};
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset,
.addend = 0,
@@ -946,7 +946,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.target = target,
.offset = offset + 4,
.addend = 0,
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 49f979624d..57a8aed699 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -282,13 +282,7 @@ const DbgInfoReloc = struct {
else => unreachable, // not a possible argument
};
- try dw.genArgDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- loc,
- );
+ try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -331,14 +325,7 @@ const DbgInfoReloc = struct {
break :blk .nop;
},
};
- try dw.genVarDbgInfo(
- reloc.name,
- reloc.ty,
- function.bin_file.tag,
- function.mod_fn.owner_decl,
- is_ptr,
- loc,
- );
+ try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -4256,12 +4243,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
@@ -6084,15 +6070,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6106,8 +6094,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index d50a614206..8b8fca4859 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1615,13 +1615,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
.stack_offset => {},
else => {},
},
@@ -1721,12 +1717,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
-
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
-
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@@ -2553,17 +2546,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
- // TODO I'm hacking my way through here by repurposing .memory for storing
- // index to the GOT target symbol index.
- return MCValue{ .memory = decl.link.macho.sym_index };
+ unreachable;
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 5e9326d23b..418c67c580 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -1216,11 +1216,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- break :blk @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@@ -3413,13 +3412,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
switch (self.debug_output) {
.dwarf => |dw| switch (mcv) {
- .register => |reg| try dw.genArgDbgInfo(
- name,
- ty,
- self.bin_file.tag,
- self.mod_fn.owner_decl,
- .{ .register = reg.dwarfLocOp() },
- ),
+ .register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
+ .register = reg.dwarfLocOp(),
+ }),
else => {},
},
else => {},
@@ -4205,8 +4200,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index c0d0c11b56..7ce6a0482b 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1194,7 +1194,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
const fn_info = func.decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ _ = try func.bin_file.storeDeclType(func.decl_index, func_type);
var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
defer cc_result.deinit(func.gpa);
@@ -1269,10 +1269,10 @@ fn genFunc(func: *CodeGen) InnerError!void {
var emit: Emit = .{
.mir = mir,
- .bin_file = &func.bin_file.base,
+ .bin_file = func.bin_file,
.code = func.code,
.locals = func.locals.items,
- .decl = func.decl,
+ .decl_index = func.decl_index,
.dbg_output = func.debug_output,
.prev_di_line = 0,
.prev_di_column = 0,
@@ -2117,33 +2117,31 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const fn_info = fn_ty.fnInfo();
const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
- const callee: ?*Decl = blk: {
+ const callee: ?Decl.Index = blk: {
const func_val = func.air.value(pl_op.operand) orelse break :blk null;
const module = func.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |function| {
- const decl = module.declPtr(function.data.owner_decl);
- try decl.link.wasm.ensureInitialized(func.bin_file);
- break :blk decl;
+ _ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
+ break :blk function.data.owner_decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
defer func_type.deinit(func.gpa);
- const atom = &ext_decl.link.wasm;
- try atom.ensureInitialized(func.bin_file);
- ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
+ const atom = func.bin_file.getAtomPtr(atom_index);
+ const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
atom.getSymbolIndex().?,
ext_decl.getExternFn().?.lib_name,
- ext_decl.fn_link.wasm.type_index,
+ type_index,
);
- break :blk ext_decl;
+ break :blk extern_fn.data.owner_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
- const decl = module.declPtr(decl_ref.data);
- try decl.link.wasm.ensureInitialized(func.bin_file);
- break :blk decl;
+ _ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
+ break :blk decl_ref.data;
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@@ -2164,7 +2162,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
}
if (callee) |direct| {
- try func.addLabel(.call, direct.link.wasm.sym_index);
+ const atom_index = func.bin_file.decls.get(direct).?;
+ try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);
} else {
// in this case we call a function pointer
// so load its value onto the stack
@@ -2477,7 +2476,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.dwarf => |dwarf| {
const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
- try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{
+ try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
.wasm_local = arg.local.value,
});
},
@@ -2760,9 +2759,10 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
}
module.markDeclAlive(decl);
- try decl.link.wasm.ensureInitialized(func.bin_file);
+ const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
+ const atom = func.bin_file.getAtom(atom_index);
- const target_sym_index = decl.link.wasm.sym_index;
+ const target_sym_index = atom.sym_index;
if (decl.ty.zigTypeTag() == .Fn) {
try func.bin_file.addTableFunction(target_sym_index);
return WValue{ .function_index = target_sym_index };
@@ -5547,7 +5547,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
break :blk .nop;
},
};
- try func.debug_output.dwarf.genVarDbgInfo(name, ty, .wasm, func.mod_fn.owner_decl, is_ptr, loc);
+ try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
func.finishAir(inst, .none, &.{});
}
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 71d21d2797..a340ac5da8 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -11,8 +11,8 @@ const leb128 = std.leb;
/// Contains our list of instructions
mir: Mir,
-/// Reference to the file handler
-bin_file: *link.File,
+/// Reference to the Wasm module linker
+bin_file: *link.File.Wasm,
/// Possible error message. When set, the value is allocated and
/// must be freed manually.
error_msg: ?*Module.ErrorMsg = null,
@@ -21,7 +21,7 @@ code: *std.ArrayList(u8),
/// List of allocated locals.
locals: []const u8,
/// The declaration that code is being generated for.
-decl: *Module.Decl,
+decl_index: Module.Decl.Index,
// Debug information
/// Holds the debug information for this emission
@@ -252,8 +252,8 @@ fn offset(self: Emit) u32 {
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
std.debug.assert(emit.error_msg == null);
- // TODO: Determine the source location.
- emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.allocator, emit.decl.srcLoc(), format, args);
+ const mod = emit.bin_file.base.options.module.?;
+ emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
return error.EmitFail;
}
@@ -304,8 +304,9 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
const global_offset = emit.offset();
try emit.code.appendSlice(&buf);
- // globals can have index 0 as it represents the stack pointer
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.index = label,
.offset = global_offset,
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
@@ -361,7 +362,9 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (label != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = call_offset,
.index = label,
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
@@ -387,7 +390,9 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
try emit.code.appendSlice(&buf);
if (symbol_index != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = index_offset,
.index = symbol_index,
.relocation_type = .R_WASM_TABLE_INDEX_SLEB,
@@ -399,7 +404,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
const mem_offset = emit.offset() + 1;
- const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
+ const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
if (is_wasm32) {
try emit.code.append(std.wasm.opcode(.i32_const));
var buf: [5]u8 = undefined;
@@ -413,7 +418,9 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
}
if (mem.pointer != 0) {
- try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
+ const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
+ const atom = emit.bin_file.getAtomPtr(atom_index);
+ try atom.relocs.append(emit.bin_file.base.allocator, .{
.offset = mem_offset,
.index = mem.pointer,
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index df24fe5e7d..c11ea4e63e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -2668,12 +2668,13 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
switch (ptr) {
.linker_load => |load_struct| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
- const mod = self.bin_file.options.module.?;
- const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
- const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
- fn_owner_decl.link.macho.getSymbolIndex().?
- else
- fn_owner_decl.link.coff.getSymbolIndex().?;
+ const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk macho_file.getAtom(atom).getSymbolIndex().?;
+ } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
+ const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ break :blk coff_file.getAtom(atom).getSymbolIndex().?;
+ } else unreachable;
const flags: u2 = switch (load_struct.type) {
.got => 0b00,
.direct => 0b01,
@@ -3835,7 +3836,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
},
else => unreachable, // not a valid function parameter
};
- try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
+ try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc);
},
.plan9 => {},
.none => {},
@@ -3875,7 +3876,7 @@ fn genVarDbgInfo(
break :blk .nop;
},
};
- try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc);
+ try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc);
},
.plan9 => {},
.none => {},
@@ -3995,19 +3996,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
- const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try fn_owner_decl.link.elf.ensureInitialized(elf_file);
- const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+ const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
+ const atom = elf_file.getAtom(atom_index);
+ const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = got_addr },
});
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try fn_owner_decl.link.coff.ensureInitialized(coff_file);
- const sym_index = fn_owner_decl.link.coff.getSymbolIndex().?;
+ const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@@ -4023,8 +4024,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try fn_owner_decl.link.macho.ensureInitialized(macho_file);
- const sym_index = fn_owner_decl.link.macho.getSymbolIndex().?;
+ const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
@@ -4040,11 +4041,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = undefined,
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(func.owner_decl);
+ const decl_block_index = try p9.seeDecl(func.owner_decl);
+ const decl_block = p9.getDeclBlock(decl_block_index);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
- const got_index = fn_owner_decl.link.plan9.got_index.?;
+ const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
@@ -4080,15 +4082,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
});
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
+ const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
+ const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
- .data = .{
- .relocation = .{
- .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
- .sym_index = sym_index,
- },
- },
+ .data = .{ .relocation = .{
+ .atom_index = atom_index,
+ .sym_index = sym_index,
+ } },
});
} else {
return self.fail("TODO implement calling extern functions", .{});
@@ -6719,23 +6721,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- try decl.link.elf.ensureInitialized(elf_file);
- return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
+ const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
+ const atom = elf_file.getAtom(atom_index);
+ return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
- try decl.link.macho.ensureInitialized(macho_file);
+ const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.macho.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
- try decl.link.coff.ensureInitialized(coff_file);
+ const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
+ const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
return MCValue{ .linker_load = .{
.type = .got,
- .sym_index = decl.link.coff.getSymbolIndex().?,
+ .sym_index = sym_index,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
- try p9.seeDecl(decl_index);
- const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
+ const decl_block_index = try p9.seeDecl(decl_index);
+ const decl_block = p9.getDeclBlock(decl_block_index);
+ const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
@@ -6748,8 +6754,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
};
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
- const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
- return MCValue{ .memory = vaddr };
+ return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
return MCValue{ .linker_load = .{
.type = .direct,
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig
index af3ed5e053..c4f9b4eb42 100644
--- a/src/arch/x86_64/Emit.zig
+++ b/src/arch/x86_64/Emit.zig
@@ -1001,8 +1001,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(macho_file, .{
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
@@ -1011,8 +1011,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try atom.addRelocation(coff_file, .{
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
@@ -1140,9 +1140,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(macho_file, .{
+ try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
@@ -1152,9 +1152,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
- const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+ const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
- try atom.addRelocation(coff_file, .{
+ try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
.type = .direct,
.target = target,
.offset = offset,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index eb0ae1b1f6..2f721e1b4b 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -16,7 +16,6 @@ const trace = @import("../tracy.zig").trace;
const LazySrcLoc = Module.LazySrcLoc;
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
-const CType = @import("../type.zig").CType;
const target_util = @import("../target.zig");
const libcFloatPrefix = target_util.libcFloatPrefix;
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index c528abdd7c..e19c70f322 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -19,7 +19,6 @@ const Liveness = @import("../Liveness.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const LazySrcLoc = Module.LazySrcLoc;
-const CType = @import("../type.zig").CType;
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
@@ -11043,8 +11042,8 @@ fn backendSupportsF128(target: std.Target) bool {
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
return switch (scalar_ty.tag()) {
.f16 => backendSupportsF16(target),
- .f80 => (CType.longdouble.sizeInBits(target) == 80) and backendSupportsF80(target),
- .f128 => (CType.longdouble.sizeInBits(target) == 128) and backendSupportsF128(target),
+ .f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
+ .f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
else => true,
};
}
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index e1af8c847f..c5a3d57d07 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -49,7 +49,7 @@ pub const DeclGen = struct {
spv: *SpvModule,
/// The decl we are currently generating code for.
- decl: *Decl,
+ decl_index: Decl.Index,
/// The intermediate code of the declaration we are currently generating. Note: If
/// the declaration is not a function, this value will be undefined!
@@ -59,6 +59,8 @@ pub const DeclGen = struct {
/// Note: If the declaration is not a function, this value will be undefined!
liveness: Liveness,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+
/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{},
@@ -133,14 +135,20 @@ pub const DeclGen = struct {
/// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
/// only set when `gen` is called.
- pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen {
+ pub fn init(
+ allocator: Allocator,
+ module: *Module,
+ spv: *SpvModule,
+ ids: *const std.AutoHashMap(Decl.Index, IdResult),
+ ) DeclGen {
return .{
.gpa = allocator,
.module = module,
.spv = spv,
- .decl = undefined,
+ .decl_index = undefined,
.air = undefined,
.liveness = undefined,
+ .ids = ids,
.next_arg_index = undefined,
.current_block_label_id = undefined,
.error_msg = undefined,
@@ -150,9 +158,9 @@ pub const DeclGen = struct {
/// Generate the code for `decl`. If a reportable error occurred during code generation,
/// a message is returned by this function. Callee owns the memory. If this function
/// returns such a reportable error, it is valid to be called again for a different decl.
- pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
+ pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
// Reset internal resources, we don't want to re-allocate these.
- self.decl = decl;
+ self.decl_index = decl_index;
self.air = air;
self.liveness = liveness;
self.args.items.len = 0;
@@ -194,7 +202,7 @@ pub const DeclGen = struct {
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
@setCold(true);
const src = LazySrcLoc.nodeOffset(0);
- const src_loc = src.toSrcLoc(self.decl);
+ const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
assert(self.error_msg == null);
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
return error.CodegenFail;
@@ -332,7 +340,7 @@ pub const DeclGen = struct {
};
const decl = self.module.declPtr(fn_decl_index);
self.module.markDeclAlive(decl);
- return decl.fn_link.spirv.id.toRef();
+ return self.ids.get(fn_decl_index).?.toRef();
}
const target = self.getTarget();
@@ -553,8 +561,8 @@ pub const DeclGen = struct {
}
fn genDecl(self: *DeclGen) !void {
- const decl = self.decl;
- const result_id = decl.fn_link.spirv.id;
+ const result_id = self.ids.get(self.decl_index).?;
+ const decl = self.module.declPtr(self.decl_index);
if (decl.val.castTag(.function)) |_| {
assert(decl.ty.zigTypeTag() == .Fn);
@@ -945,7 +953,7 @@ pub const DeclGen = struct {
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
- const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
+ const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
try self.func.body.emit(self.spv.gpa, .OpLine, .{
.file = src_fname_id,
.line = dbg_stmt.line,
@@ -1106,7 +1114,7 @@ pub const DeclGen = struct {
assert(as.errors.items.len != 0);
assert(self.error_msg == null);
const loc = LazySrcLoc.nodeOffset(0);
- const src_loc = loc.toSrcLoc(self.decl);
+ const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
diff --git a/src/link.zig b/src/link.zig
index 668c5b72e3..2b3ce51667 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -261,39 +261,6 @@ pub const File = struct {
/// of this linking operation.
lock: ?Cache.Lock = null,
- pub const LinkBlock = union {
- elf: Elf.TextBlock,
- coff: Coff.Atom,
- macho: MachO.Atom,
- plan9: Plan9.DeclBlock,
- c: void,
- wasm: Wasm.DeclBlock,
- spirv: void,
- nvptx: void,
- };
-
- pub const LinkFn = union {
- elf: Dwarf.SrcFn,
- coff: Coff.SrcFn,
- macho: Dwarf.SrcFn,
- plan9: void,
- c: void,
- wasm: Wasm.FnData,
- spirv: SpirV.FnData,
- nvptx: void,
- };
-
- pub const Export = union {
- elf: Elf.Export,
- coff: Coff.Export,
- macho: MachO.Export,
- plan9: Plan9.Export,
- c: void,
- wasm: Wasm.Export,
- spirv: void,
- nvptx: void,
- };
-
/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
@@ -580,22 +547,23 @@ pub const File = struct {
}
}
- pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
+ pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
+ const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1,
});
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
- return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl);
+ return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
}
switch (base.tag) {
- .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
- .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
- .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
- .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl),
- .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl),
- .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl),
+ .coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl_index),
+ .elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl_index),
+ .macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl_index),
+ .c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index),
+ .wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl_index),
+ .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl_index),
.spirv, .nvptx => {},
}
}
diff --git a/src/link/C.zig b/src/link/C.zig
index 8b05b8b22d..02e5cadfbc 100644
--- a/src/link/C.zig
+++ b/src/link/C.zig
@@ -219,12 +219,12 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi
code.shrinkAndFree(module.gpa, code.items.len);
}
-pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
}
pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index dee3c7c381..2922e783e1 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -79,13 +79,13 @@ entry_addr: ?u32 = null,
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -124,9 +124,9 @@ const Entry = struct {
sym_index: u32,
};
-const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
-const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
+const BaseRelocationTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const default_file_alignment: u16 = 0x200;
const default_size_of_stack_reserve: u32 = 0x1000000;
@@ -137,7 +137,7 @@ const default_size_of_heap_commit: u32 = 0x1000;
const Section = struct {
header: coff.SectionHeader,
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -154,7 +154,34 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u16,
+    /// A list of all export aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, coff_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
};
pub const PtrWidth = enum {
@@ -168,11 +195,6 @@ pub const PtrWidth = enum {
};
}
};
-pub const SrcFn = void;
-
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
pub const SymbolWithLoc = struct {
// Index into the respective symbol table.
@@ -271,11 +293,7 @@ pub fn deinit(self: *Coff) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
-
+ self.atoms.deinit(gpa);
self.locals.deinit(gpa);
self.globals.deinit(gpa);
@@ -297,7 +315,15 @@ pub fn deinit(self: *Coff) void {
self.imports.deinit(gpa);
self.imports_free_list.deinit(gpa);
self.imports_table.deinit(gpa);
- self.decls.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
+
self.atom_by_index_table.deinit(gpa);
{
@@ -461,17 +487,18 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
// TODO: enforce order by increasing VM addresses in self.sections container.
// This is required by the loader anyhow as far as I can tell.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id + 1 + next_sect_id];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
next_header.virtual_address += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ if (maybe_last_atom_index) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
@@ -480,14 +507,15 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
header.virtual_size = increased_size;
}
-fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1;
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
     // We use these to indicate our intention to update metadata, placing the new atom,
     // and possibly removing a free list node.
     // It would be simpler to do it inside the for loop below, but that would cause a
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -503,7 +531,8 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -531,34 +560,43 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, default_file_alignment);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.value + last_atom.size) - header.virtual_address;
} else 0;
- log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getSectionName(header), header.pointer_to_raw_data, new_offset });
+ log.debug("moving {s} from 0x{x} to 0x{x}", .{
+ self.getSectionName(header),
+ header.pointer_to_raw_data,
+ new_offset,
+ });
const amt = try self.base.file.?.copyRangeAll(
header.pointer_to_raw_data,
self.base.file.?,
@@ -577,26 +615,34 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
header.virtual_size = @max(header.virtual_size, needed_size);
header.size_of_raw_data = needed_size;
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
}
- atom.size = new_atom_size;
- atom.alignment = alignment;
-
- if (atom.prev) |prev| {
- prev.next = atom.next;
- }
- if (atom.next) |next| {
- next.prev = atom.prev;
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = alignment;
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
+ }
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
+ }
+
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -701,24 +747,37 @@ pub fn allocateImportEntry(self: *Coff, target: SymbolWithLoc) !u32 {
return index;
}
-fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *Coff) !Atom.Index {
const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
+
+fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated GOT atom at 0x{x}", .{sym.value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = 0,
@@ -732,49 +791,46 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
.ABSOLUTE => {},
.DEBUG => unreachable, // not possible
- else => try atom.addBaseRelocation(self, 0),
+ else => try Atom.addBaseRelocation(self, atom_index, 0),
}
- return atom;
+ return atom_index;
}
-fn createImportAtom(self: *Coff) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+fn createImportAtom(self: *Coff) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- try self.managed_atoms.append(gpa, atom);
-
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
- sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
+ sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
log.debug("allocated import atom at 0x{x}", .{sym.value});
- return atom;
+ return atom_index;
}
-fn growAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
+fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
-fn shrinkAtom(self: *Coff, atom: *Atom, new_block_size: u32) void {
+fn shrinkAtom(self: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
+fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
@@ -784,18 +840,18 @@ fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
file_offset + code.len,
});
try self.base.file.?.pwriteAll(code, file_offset);
- try self.resolveRelocs(atom);
+ try self.resolveRelocs(atom_index);
}
-fn writePtrWidthAtom(self: *Coff, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
switch (self.ptr_width) {
.p32 => {
var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
.p64 => {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
},
}
}
@@ -815,7 +871,8 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.value < addr) continue;
reloc.dirty = true;
@@ -823,24 +880,26 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
}
}
-fn resolveRelocs(self: *Coff, atom: *Atom) !void {
- const relocs = self.relocs.get(atom) orelse return;
+fn resolveRelocs(self: *Coff, atom_index: Atom.Index) !void {
+ const relocs = self.relocs.get(atom_index) orelse return;
- log.debug("relocating '{s}'", .{atom.getName(self)});
+ log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(atom, self);
+ try reloc.resolve(atom_index, self);
}
}
-fn freeAtom(self: *Coff, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
-
- // Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
+ log.debug("freeAtom {d}", .{atom_index});
const gpa = self.base.allocator;
+
+ // Remove any relocs and base relocs associated with this Atom
+ Atom.freeRelocations(self, atom_index);
+
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sect_id = @enumToInt(sym.section_number) - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
@@ -849,45 +908,46 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(gpa, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -910,7 +970,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
- atom.sym_index = 0;
+ self.getAtomPtr(atom_index).sym_index = 0;
}
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@@ -927,15 +987,10 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.coff;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(&decl.link.coff);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -979,11 +1034,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
}
const unnamed_consts = gop.value_ptr;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(gpa, atom);
+ const atom_index = try self.createAtom();
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
@@ -993,11 +1044,15 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
};
defer gpa.free(sym_name);
- try self.setSymbolName(atom.getSymbolPtr(self), sym_name);
- atom.getSymbolPtr(self).section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
+ {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbolPtr(self);
+ try self.setSymbolName(sym, sym_name);
+ sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
+ }
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -1010,17 +1065,18 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
};
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len);
- atom.getSymbolPtr(self).value = try self.allocateAtom(atom, atom.size, atom.alignment);
- errdefer self.freeAtom(atom);
+ atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@@ -1047,14 +1103,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
}
}
- const atom = &decl.link.coff;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -1064,7 +1115,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.coff.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -1082,7 +1133,20 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {
+pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
+fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
@@ -1117,14 +1181,11 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_index = decl_ptr.*.?;
-
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sect_index = decl_metadata.section;
const code_len = @intCast(u32, code.len);
- const atom = &decl.link.coff;
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
@@ -1135,7 +1196,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment});
@@ -1143,49 +1204,43 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
+ self.shrinkAtom(atom_index, code_len);
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1);
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
-}
-
-fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_base_relocs = self.base_relocs.fetchRemove(atom);
- if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
+ try self.writeAtom(atom_index, code);
}
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
const gpa = self.base.allocator;
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
- for (unnamed_consts.items) |atom| {
- self.freeAtom(atom);
+ for (unnamed_consts.items) |atom_index| {
+ self.freeAtom(atom_index);
}
unnamed_consts.clearAndFree(gpa);
}
@@ -1200,11 +1255,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchRemove(decl_index)) |kv| {
- if (kv.value) |_| {
- self.freeAtom(&decl.link.coff);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
}
@@ -1257,16 +1312,10 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.coff;
-
- if (atom.getSymbolIndex() == null) return;
-
- const gop = try self.decls.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = self.getDeclOutputSection(decl);
- }
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
log.debug("adding new export '{s}'", .{exp.options.name});
@@ -1301,9 +1350,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.coff.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.coff.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -1326,16 +1375,15 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *Coff, exp: Export) void {
+pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
const gpa = self.base.allocator;
-
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{name});
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
sym.* = .{
.name = [_]u8{0} ** 8,
@@ -1345,9 +1393,9 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.storage_class = .NULL,
.number_of_aux_symbols = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -1355,6 +1403,8 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
.file = null,
};
}
+
+ sym_index.* = 0;
}
fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
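
The export changes above replace the `sym_index` that used to live on each `Export` with a per-decl list of symbol indices (`decl_metadata.exports`), found by name when needed. The sketch below imitates only `deleteDeclExport`'s recycle-and-zero step; unlike the real code, which recovers names from the COFF symbol table, it stores the name alongside the index so the example stays self-contained (`ExportList` and `ExportEntry` are invented types):

    const std = @import("std");

    // Invented, simplified export bookkeeping: the real linker stores only the
    // symbol index and looks its name up in the symbol table.
    const ExportEntry = struct { name: []const u8, sym_index: u32 };

    const ExportList = struct {
        items: std.ArrayListUnmanaged(ExportEntry) = .{},

        fn getExportPtr(self: *ExportList, name: []const u8) ?*u32 {
            for (self.items.items) |*exp| {
                if (std.mem.eql(u8, exp.name, name)) return &exp.sym_index;
            }
            return null;
        }
    };

    test "deleting an export recycles its symbol index" {
        const gpa = std.testing.allocator;
        var exports = ExportList{};
        defer exports.items.deinit(gpa);
        var locals_free_list: std.ArrayListUnmanaged(u32) = .{};
        defer locals_free_list.deinit(gpa);

        try exports.items.append(gpa, .{ .name = "foo", .sym_index = 7 });

        // Mirrors deleteDeclExport: find the slot, recycle the symbol index,
        // then zero the slot so later lookups treat it as unused.
        const sym_index = exports.getExportPtr("foo").?;
        locals_free_list.append(gpa, sym_index.*) catch {};
        sym_index.* = 0;

        try std.testing.expectEqual(@as(u32, 0), exports.items.items[0].sym_index);
        try std.testing.expectEqual(@as(usize, 1), locals_free_list.items.len);
    }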
@@ -1419,9 +1469,10 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
if (self.imports_table.contains(global)) continue;
const import_index = try self.allocateImportEntry(global);
- const import_atom = try self.createImportAtom();
+ const import_atom_index = try self.createImportAtom();
+ const import_atom = self.getAtom(import_atom_index);
self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(import_atom);
+ try self.writePtrWidthAtom(import_atom_index);
}
if (build_options.enable_logging) {
@@ -1455,22 +1506,14 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
}
}
-pub fn getDeclVAddr(
- self: *Coff,
- decl_index: Module.Decl.Index,
- reloc_info: link.File.RelocInfo,
-) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
+pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
assert(self.llvm_object == null);
- try decl.link.coff.ensureInitialized(self);
- const sym_index = decl.link.coff.getSymbolIndex().?;
-
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = .direct,
.target = target,
.offset = @intCast(u32, reloc_info.offset),
@@ -1478,7 +1521,7 @@ pub fn getDeclVAddr(
.pcrel = false,
.length = 3,
});
- try atom.addBaseRelocation(self, @intCast(u32, reloc_info.offset));
+ try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -1505,10 +1548,10 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
return global_index;
}
-pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = module;
- _ = decl;
+ _ = decl_index;
log.debug("TODO implement updateDeclLineNumber", .{});
}
@@ -1529,7 +1572,8 @@ fn writeBaseRelocations(self: *Coff) !void {
var it = self.base_relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
+ const atom = self.getAtom(atom_index);
const offsets = entry.value_ptr.*;
for (offsets.items) |offset| {
@@ -1613,7 +1657,8 @@ fn writeImportTable(self: *Coff) !void {
const gpa = self.base.allocator;
const section = self.sections.get(self.idata_section_index.?);
- const last_atom = section.last_atom orelse return;
+ const last_atom_index = section.last_atom_index orelse return;
+ const last_atom = self.getAtom(last_atom_index);
const iat_rva = section.header.virtual_address;
const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer
@@ -2051,27 +2096,37 @@ pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
+pub fn getAtom(self: *const Coff, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Coff, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor.
/// Returns null on failure.
-pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_loc.file == null); // TODO linking with object files
return self.atom_by_index_table.get(sym_loc.sym_index);
}
/// Returns GOT atom that references `sym_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_loc) orelse return null;
const got_entry = self.got_entries.items[got_index];
- return self.getAtomForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
}
/// Returns import atom that references `sym_loc` if one exists.
/// Returns null otherwise.
-pub fn getImportAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
+pub fn getImportAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const imports_index = self.imports_table.get(sym_loc) orelse return null;
const imports_entry = self.imports.items[imports_index];
- return self.getAtomForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
+ return self.getAtomIndexForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
}
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {
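
Taken together, the Coff.zig changes above all apply one transformation: atoms are addressed by `Atom.Index` into a central `atoms` list (read back through `getAtom`/`getAtomPtr`) instead of by `*Atom` pointers owned by the `Decl`. The main payoff is that an index stays valid when the backing list grows, whereas a stored pointer would dangle after a reallocation. A minimal sketch of that property with invented names (`Atoms` is not the linker's real container):

    const std = @import("std");

    const Atom = struct {
        size: u32 = 0,
        pub const Index = usize; // u32 in the real linker
    };

    const Atoms = struct {
        list: std.ArrayListUnmanaged(Atom) = .{},

        fn createAtom(self: *Atoms, gpa: std.mem.Allocator) !Atom.Index {
            const index = self.list.items.len;
            try self.list.append(gpa, .{});
            return index;
        }

        // By-value copy: safe to keep around across further appends.
        fn getAtom(self: Atoms, index: Atom.Index) Atom {
            return self.list.items[index];
        }

        // Pointer into the backing array: only valid until the next append,
        // which is why the diff re-fetches via getAtomPtr at each mutation site.
        fn getAtomPtr(self: *Atoms, index: Atom.Index) *Atom {
            return &self.list.items[index];
        }
    };

    test "an atom index survives reallocation of the atom list" {
        const gpa = std.testing.allocator;
        var atoms = Atoms{};
        defer atoms.list.deinit(gpa);

        const first = try atoms.createAtom(gpa);
        atoms.getAtomPtr(first).size = 16;

        // Force the ArrayList to grow (and almost certainly reallocate).
        var i: usize = 0;
        while (i < 1000) : (i += 1) _ = try atoms.createAtom(gpa);

        try std.testing.expectEqual(@as(u32, 16), atoms.getAtom(first).size);
    }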
diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig
index 78824eac1d..80c04a8fa1 100644
--- a/src/link/Coff/Atom.zig
+++ b/src/link/Coff/Atom.zig
@@ -27,23 +27,10 @@ alignment: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`.
-prev: ?*Atom,
-next: ?*Atom,
+prev_index: ?Index,
+next_index: ?Index,
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
-};
-
-pub fn ensureInitialized(self: *Atom, coff_file: *Coff) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.sym_index = try coff_file.allocateSymbol();
- try coff_file.atom_by_index_table.putNoClobber(coff_file.base.allocator, self.sym_index, self);
-}
+pub const Index = u32;
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
@@ -85,7 +72,8 @@ pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
/// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = coff_file.getAtom(next_index);
const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value;
} else {
@@ -97,7 +85,8 @@ pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = coff_file.getAtom(next_index);
const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value;
@@ -107,22 +96,33 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
return surplus >= Coff.min_text_capacity;
}
-pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Relocation) !void {
+pub fn addRelocation(coff_file: *Coff, atom_index: Index, reloc: Relocation) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
- const gop = try coff_file.relocs.getOrPut(gpa, self);
+ const gop = try coff_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}
-pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
+pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void {
const gpa = coff_file.base.allocator;
- log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{ offset, self.sym_index });
- const gop = try coff_file.base_relocs.getOrPut(gpa, self);
+ log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
+ offset,
+ coff_file.getAtom(atom_index).getSymbolIndex().?,
+ });
+ const gop = try coff_file.base_relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
+
+pub fn freeRelocations(coff_file: *Coff, atom_index: Index) void {
+ const gpa = coff_file.base.allocator;
+ var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_base_relocs = coff_file.base_relocs.fetchRemove(atom_index);
+ if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
+}
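
With atoms identified by index, the relocation tables kept on the linker side can be ordinary hash maps keyed by `Atom.Index`, and `addRelocation`/`freeRelocations` become free functions taking the linker plus an index rather than methods on `*Atom`. A reduced sketch of that bookkeeping, with the map passed explicitly instead of living on a `Coff` struct (`RelocTable` and `Reloc` are illustrative names):

    const std = @import("std");

    const AtomIndex = usize; // Atom.Index is u32 in the real code
    const Reloc = struct { offset: u32 };
    const RelocTable = std.AutoHashMapUnmanaged(AtomIndex, std.ArrayListUnmanaged(Reloc));

    fn addRelocation(gpa: std.mem.Allocator, relocs: *RelocTable, atom: AtomIndex, reloc: Reloc) !void {
        // Same getOrPut shape as Atom.addRelocation: the first reloc creates the list.
        const gop = try relocs.getOrPut(gpa, atom);
        if (!gop.found_existing) gop.value_ptr.* = .{};
        try gop.value_ptr.append(gpa, reloc);
    }

    fn freeRelocations(gpa: std.mem.Allocator, relocs: *RelocTable, atom: AtomIndex) void {
        // fetchRemove hands the list back so it can be deinitialized here.
        var removed = relocs.fetchRemove(atom);
        if (removed) |*kv| kv.value.deinit(gpa);
    }

    test "relocations are keyed by atom index" {
        const gpa = std.testing.allocator;
        var relocs: RelocTable = .{};
        defer relocs.deinit(gpa);

        try addRelocation(gpa, &relocs, 3, .{ .offset = 0x10 });
        try addRelocation(gpa, &relocs, 3, .{ .offset = 0x20 });
        try std.testing.expectEqual(@as(usize, 2), relocs.get(3).?.items.len);

        freeRelocations(gpa, &relocs, 3);
        try std.testing.expect(relocs.get(3) == null);
    }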
diff --git a/src/link/Coff/Relocation.zig b/src/link/Coff/Relocation.zig
index 12a34b332d..1ba1d7a1c1 100644
--- a/src/link/Coff/Relocation.zig
+++ b/src/link/Coff/Relocation.zig
@@ -46,33 +46,35 @@ length: u2,
dirty: bool = true,
/// Returns an Atom which is the target node of this relocation edge (if any).
-pub fn getTargetAtom(self: Relocation, coff_file: *Coff) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, coff_file: *const Coff) ?Atom.Index {
switch (self.type) {
.got,
.got_page,
.got_pageoff,
- => return coff_file.getGotAtomForSymbol(self.target),
+ => return coff_file.getGotAtomIndexForSymbol(self.target),
.direct,
.page,
.pageoff,
- => return coff_file.getAtomForSymbol(self.target),
+ => return coff_file.getAtomIndexForSymbol(self.target),
.import,
.import_page,
.import_pageoff,
- => return coff_file.getImportAtomForSymbol(self.target),
+ => return coff_file.getImportAtomIndexForSymbol(self.target),
}
}
-pub fn resolve(self: *Relocation, atom: *Atom, coff_file: *Coff) !void {
+pub fn resolve(self: *Relocation, atom_index: Atom.Index, coff_file: *Coff) !void {
+ const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);
const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header;
const source_vaddr = source_sym.value + self.offset;
const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address;
- const target_atom = self.getTargetAtom(coff_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(coff_file) orelse return;
+ const target_atom = coff_file.getAtom(target_atom_index);
const target_vaddr = target_atom.getSymbol(coff_file).value;
const target_vaddr_with_addend = target_vaddr + self.addend;
@@ -107,7 +109,7 @@ const Context = struct {
image_base: u64,
};
-fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveAarch64(self: Relocation, ctx: Context, coff_file: *Coff) !void {
var buffer: [@sizeOf(u64)]u8 = undefined;
switch (self.length) {
2 => {
@@ -197,7 +199,7 @@ fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
}
}
-fn resolveX86(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
+fn resolveX86(self: Relocation, ctx: Context, coff_file: *Coff) !void {
switch (self.type) {
.got_page => unreachable,
.got_pageoff => unreachable,
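
In Relocation.zig, `getTargetAtomIndex` is a dispatch over the relocation type onto three different lookup tables (direct, GOT, import), any of which may legitimately have no entry yet; `resolve` then simply skips relocations whose target atom is unknown. The toy function below mirrors only that dispatch-plus-optional shape; the enum and parameters are invented for the example:

    const std = @import("std");

    const AtomIndex = usize;
    const RelocKind = enum { direct, got, import };

    // Each relocation kind consults a different table; a missing entry simply
    // means "not resolvable yet", not an error.
    fn getTargetAtomIndex(kind: RelocKind, direct: ?AtomIndex, got: ?AtomIndex, imports: ?AtomIndex) ?AtomIndex {
        return switch (kind) {
            .direct => direct,
            .got => got,
            .import => imports,
        };
    }

    test "unresolved targets are skipped rather than resolved" {
        try std.testing.expectEqual(@as(?AtomIndex, 5), getTargetAtomIndex(.got, null, 5, null));
        try std.testing.expect(getTargetAtomIndex(.import, 1, 2, null) == null);
    }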
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 1b65bbb04b..a3d0aa8a53 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -18,31 +18,36 @@ const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Module = @import("../Module.zig");
-const Value = @import("../value.zig").Value;
+const StringTable = @import("strtab.zig").StringTable;
const Type = @import("../type.zig").Type;
+const Value = @import("../value.zig").Value;
allocator: Allocator,
bin_file: *File,
ptr_width: PtrWidth,
target: std.Target,
-/// A list of `File.LinkFn` whose Line Number Programs have surplus capacity.
-/// This is the same concept as `text_block_free_list`; see those doc comments.
-dbg_line_fn_free_list: std.AutoHashMapUnmanaged(*SrcFn, void) = .{},
-dbg_line_fn_first: ?*SrcFn = null,
-dbg_line_fn_last: ?*SrcFn = null,
+/// A list of `Atom`s whose Line Number Programs have surplus capacity.
+/// This is the same concept as `Section.free_list` in Elf; see those doc comments.
+src_fn_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+src_fn_first_index: ?Atom.Index = null,
+src_fn_last_index: ?Atom.Index = null,
+src_fns: std.ArrayListUnmanaged(Atom) = .{},
+src_fn_decls: AtomTable = .{},
/// A list of `Atom`s whose corresponding .debug_info tags have surplus capacity.
/// This is the same concept as `text_block_free_list`; see those doc comments.
-atom_free_list: std.AutoHashMapUnmanaged(*Atom, void) = .{},
-atom_first: ?*Atom = null,
-atom_last: ?*Atom = null,
+di_atom_free_list: std.AutoHashMapUnmanaged(Atom.Index, void) = .{},
+di_atom_first_index: ?Atom.Index = null,
+di_atom_last_index: ?Atom.Index = null,
+di_atoms: std.ArrayListUnmanaged(Atom) = .{},
+di_atom_decls: AtomTable = .{},
abbrev_table_offset: ?u64 = null,
/// TODO replace with InternPool
/// Table of debug symbol names.
-strtab: std.ArrayListUnmanaged(u8) = .{},
+strtab: StringTable(.strtab) = .{},
/// Quick lookup array of all defined source files referenced by at least one Decl.
/// They will end up in the DWARF debug_line header as two lists:
@@ -50,22 +55,23 @@ strtab: std.ArrayListUnmanaged(u8) = .{},
/// * []file_names
di_files: std.AutoArrayHashMapUnmanaged(*const Module.File, void) = .{},
-/// List of atoms that are owned directly by the DWARF module.
-/// TODO convert links in DebugInfoAtom into indices and make
-/// sure every atom is owned by this module.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
-
global_abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
-pub const Atom = struct {
- /// Previous/next linked list pointers.
- /// This is the linked list node for this Decl's corresponding .debug_info tag.
- prev: ?*Atom,
- next: ?*Atom,
- /// Offset into .debug_info pointing to the tag for this Decl.
+const AtomTable = std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index);
+
+const Atom = struct {
+ /// Offset into .debug_info pointing to the tag for this Decl, or
+ /// offset from the beginning of the Debug Line Program header that contains this function.
off: u32,
- /// Size of the .debug_info tag for this Decl, not including padding.
+ /// Size of the .debug_info tag for this Decl, not including padding, or
+ /// size of the line number program component belonging to this function, not
+ /// including padding.
len: u32,
+
+ prev_index: ?Index,
+ next_index: ?Index,
+
+ pub const Index = u32;
};
/// Represents state of the analysed Decl.
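
Dwarf.zig now uses a single `Atom` shape for both bookkeeping kinds: the old `SrcFn` (line-program slices) and the .debug_info atoms become entries in `src_fns` and `di_atoms`, addressed by `Atom.Index`, with the `AtomTable` maps tying each `Module.Decl.Index` to its atom. Judging by calls like `self.getAtomPtr(.src_fn, index)` later in the diff, the accessors take a kind tag to pick the list; a guessed-at sketch of that selection, with invented names:

    const std = @import("std");

    const Kind = enum { src_fn, di_atom };

    const Atom = struct {
        off: u32 = 0,
        len: u32 = 0,
        prev_index: ?Index = null,
        next_index: ?Index = null,
        pub const Index = usize; // u32 in the real code
    };

    const FakeDwarf = struct {
        src_fns: std.ArrayListUnmanaged(Atom) = .{},
        di_atoms: std.ArrayListUnmanaged(Atom) = .{},

        // The comptime kind tag selects which backing list the index refers to.
        fn getAtomPtr(self: *FakeDwarf, comptime kind: Kind, index: Atom.Index) *Atom {
            return switch (kind) {
                .src_fn => &self.src_fns.items[index],
                .di_atom => &self.di_atoms.items[index],
            };
        }
    };

    test "the kind tag picks the backing list" {
        const gpa = std.testing.allocator;
        var dwarf = FakeDwarf{};
        defer dwarf.src_fns.deinit(gpa);
        defer dwarf.di_atoms.deinit(gpa);

        try dwarf.src_fns.append(gpa, .{});
        try dwarf.di_atoms.append(gpa, .{});

        dwarf.getAtomPtr(.src_fn, 0).len = 8;
        try std.testing.expectEqual(@as(u32, 8), dwarf.getAtomPtr(.src_fn, 0).len);
        try std.testing.expectEqual(@as(u32, 0), dwarf.getAtomPtr(.di_atom, 0).len);
    }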
@@ -75,6 +81,7 @@ pub const Atom = struct {
pub const DeclState = struct {
gpa: Allocator,
mod: *Module,
+ di_atom_decls: *const AtomTable,
dbg_line: std.ArrayList(u8),
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
@@ -88,10 +95,11 @@ pub const DeclState = struct {
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
- fn init(gpa: Allocator, mod: *Module) DeclState {
+ fn init(gpa: Allocator, mod: *Module, di_atom_decls: *const AtomTable) DeclState {
return .{
.gpa = gpa,
.mod = mod,
+ .di_atom_decls = di_atom_decls,
.dbg_line = std.ArrayList(u8).init(gpa),
.dbg_info = std.ArrayList(u8).init(gpa),
.abbrev_type_arena = std.heap.ArenaAllocator.init(gpa),
@@ -119,11 +127,11 @@ pub const DeclState = struct {
/// Adds local type relocation of the form: @offset => @this + addend
/// @this signifies the offset within the .debug_abbrev section of the containing atom.
- fn addTypeRelocLocal(self: *DeclState, atom: *const Atom, offset: u32, addend: u32) !void {
+ fn addTypeRelocLocal(self: *DeclState, atom_index: Atom.Index, offset: u32, addend: u32) !void {
log.debug("{x}: @this + {x}", .{ offset, addend });
try self.abbrev_relocs.append(self.gpa, .{
.target = null,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = addend,
});
@@ -132,13 +140,13 @@ pub const DeclState = struct {
/// Adds global type relocation of the form: @offset => @symbol + 0
/// @symbol signifies a type abbreviation positioned somewhere in the .debug_abbrev section
/// which we use as our target of the relocation.
- fn addTypeRelocGlobal(self: *DeclState, atom: *const Atom, ty: Type, offset: u32) !void {
+ fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
- .atom = atom,
+ .atom_index = atom_index,
.type = ty,
.offset = undefined,
});
@@ -153,7 +161,7 @@ pub const DeclState = struct {
log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{
.target = resolv,
- .atom = atom,
+ .atom_index = atom_index,
.offset = offset,
.addend = 0,
});
@@ -162,7 +170,7 @@ pub const DeclState = struct {
fn addDbgInfoType(
self: *DeclState,
module: *Module,
- atom: *Atom,
+ atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
@@ -227,7 +235,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.bool, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.bool, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -239,7 +247,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const offset = abi_size - payload_ty.abiSize(target);
try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
@@ -270,7 +278,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
var buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
const ptr_ty = ty.slicePtrFieldType(buf);
- try self.addTypeRelocGlobal(atom, ptr_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(6);
dbg_info_buffer.appendAssumeCapacity(0);
@@ -282,7 +290,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(ptr_bytes);
@@ -294,7 +302,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
}
},
.Array => {
@@ -305,13 +313,13 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, ty.childType(), @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
// DW.AT.subrange_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, Type.usize, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.count, DW.FORM.udata
const len = ty.arrayLenIncludingSentinel();
try leb128.writeULEB128(dbg_info_buffer.writer(), len);
@@ -339,7 +347,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -371,7 +379,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
const field_off = ty.structFieldOffset(field_index, target);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
@@ -454,7 +462,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const inner_union_index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(inner_union_index + 4);
- try self.addTypeRelocLocal(atom, @intCast(u32, inner_union_index), 5);
+ try self.addTypeRelocLocal(atom_index, @intCast(u32, inner_union_index), 5);
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_offset);
}
@@ -481,7 +489,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, field.ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try dbg_info_buffer.append(0);
}
@@ -498,7 +506,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, union_obj.tag_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, union_obj.tag_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), tag_offset);
@@ -541,7 +549,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, payload_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), payload_off);
@@ -554,7 +562,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
- try self.addTypeRelocGlobal(atom, error_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, error_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.sdata
try leb128.writeULEB128(dbg_info_buffer.writer(), error_off);
@@ -587,12 +595,11 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
switch (loc) {
@@ -637,7 +644,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
+ try self.addTypeRelocGlobal(atom_index, ty, @intCast(u32, index)); // DW.AT.type, DW.FORM.ref4
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -645,13 +652,12 @@ pub const DeclState = struct {
self: *DeclState,
name: [:0]const u8,
ty: Type,
- tag: File.Tag,
owner_decl: Module.Decl.Index,
is_ptr: bool,
loc: DbgInfoLoc,
) error{OutOfMemory}!void {
const dbg_info = &self.dbg_info;
- const atom = getDbgInfoAtom(tag, self.mod, owner_decl);
+ const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@enumToInt(AbbrevKind.variable));
const target = self.mod.getTarget();
@@ -781,7 +787,7 @@ pub const DeclState = struct {
try dbg_info.ensureUnusedCapacity(5 + name_with_null.len);
const index = dbg_info.items.len;
try dbg_info.resize(index + 4); // dw.at.type, dw.form.ref4
- try self.addTypeRelocGlobal(atom, child_ty, @intCast(u32, index));
+ try self.addTypeRelocGlobal(atom_index, child_ty, @intCast(u32, index));
dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT.name, DW.FORM.string
}
@@ -814,7 +820,7 @@ pub const DeclState = struct {
};
pub const AbbrevEntry = struct {
- atom: *const Atom,
+ atom_index: Atom.Index,
type: Type,
offset: u32,
};
@@ -823,7 +829,7 @@ pub const AbbrevRelocation = struct {
/// If target is null, we deal with a local relocation that is based on simple offset + addend
/// only.
target: ?u32,
- atom: *const Atom,
+ atom_index: Atom.Index,
offset: u32,
addend: u32,
};
@@ -840,26 +846,6 @@ pub const ExprlocRelocation = struct {
offset: u32,
};
-pub const SrcFn = struct {
- /// Offset from the beginning of the Debug Line Program header that contains this function.
- off: u32,
- /// Size of the line number program component belonging to this function, not
- /// including padding.
- len: u32,
-
- /// Points to the previous and next neighbors, based on the offset from .debug_line.
- /// This can be used to find, for example, the capacity of this `SrcFn`.
- prev: ?*SrcFn,
- next: ?*SrcFn,
-
- pub const empty: SrcFn = .{
- .off = 0,
- .len = 0,
- .prev = null,
- .next = null,
- };
-};
-
pub const PtrWidth = enum { p32, p64 };
pub const AbbrevKind = enum(u8) {
@@ -909,16 +895,18 @@ pub fn init(allocator: Allocator, bin_file: *File, target: std.Target) Dwarf {
pub fn deinit(self: *Dwarf) void {
const gpa = self.allocator;
- self.dbg_line_fn_free_list.deinit(gpa);
- self.atom_free_list.deinit(gpa);
+
+ self.src_fn_free_list.deinit(gpa);
+ self.src_fns.deinit(gpa);
+ self.src_fn_decls.deinit(gpa);
+
+ self.di_atom_free_list.deinit(gpa);
+ self.di_atoms.deinit(gpa);
+ self.di_atom_decls.deinit(gpa);
+
self.strtab.deinit(gpa);
self.di_files.deinit(gpa);
self.global_abbrev_relocs.deinit(gpa);
-
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
}
/// Initializes Decl's state and its matching output buffers.
@@ -934,15 +922,19 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
log.debug("initDeclState {s}{*}", .{ decl_name, decl });
const gpa = self.allocator;
- var decl_state = DeclState.init(gpa, mod);
+ var decl_state = DeclState.init(gpa, mod, &self.di_atom_decls);
errdefer decl_state.deinit();
const dbg_line_buffer = &decl_state.dbg_line;
const dbg_info_buffer = &decl_state.dbg_info;
+ const di_atom_index = try self.getOrCreateAtomForDecl(.di_atom, decl_index);
+
assert(decl.has_tv);
switch (decl.ty.zigTypeTag()) {
.Fn => {
+ _ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureTotalCapacity(26);
@@ -1002,8 +994,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
dbg_info_buffer.items.len += 4; // DW.AT.high_pc, DW.FORM.data4
//
if (fn_ret_has_bits) {
- const atom = getDbgInfoAtom(self.bin_file.tag, mod, decl_index);
- try decl_state.addTypeRelocGlobal(atom, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
+ try decl_state.addTypeRelocGlobal(di_atom_index, fn_ret_type, @intCast(u32, dbg_info_buffer.items.len));
dbg_info_buffer.items.len += 4; // DW.AT.type, DW.FORM.ref4
}
@@ -1075,31 +1066,28 @@ pub fn commitDeclState(
// This logic is nearly identical to the logic below in `updateDeclDebugInfo` for
// `TextBlock` and the .debug_info. If you are editing this logic, you
// probably need to edit that logic too.
- const src_fn = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable, // TODO
- };
+ const src_fn_index = self.src_fn_decls.get(decl_index).?;
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.len = @intCast(u32, dbg_line_buffer.items.len);
- if (self.dbg_line_fn_last) |last| blk: {
- if (src_fn == last) break :blk;
- if (src_fn.next) |next| {
+ if (self.src_fn_last_index) |last_index| blk: {
+ if (src_fn_index == last_index) break :blk;
+ if (src_fn.next_index) |next_index| {
+ const next = self.getAtomPtr(.src_fn, next_index);
// Update existing function - non-last item.
if (src_fn.off + src_fn.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (src_fn.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = src_fn.next;
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.src_fn, prev_index).next_index = src_fn.next_index;
}
- next.prev = src_fn.prev;
- src_fn.next = null;
+ next.prev_index = src_fn.prev_index;
+ src_fn.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = &elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = &elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, src_fn.len);
},
@@ -1111,39 +1099,48 @@ pub fn commitDeclState(
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, src_fn.off, 0, &.{}, src_fn.len);
},
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
- } else if (src_fn.prev == null) {
+ } else if (src_fn.prev_index == null) {
// Append new function.
// TODO Look at the free list before appending at the end.
- src_fn.prev = last;
- last.next = src_fn;
- self.dbg_line_fn_last = src_fn;
+ src_fn.prev_index = last_index;
+ const last = self.getAtomPtr(.src_fn, last_index);
+ last.next_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first function of the Line Number Program.
- self.dbg_line_fn_first = src_fn;
- self.dbg_line_fn_last = src_fn;
+ self.src_fn_first_index = src_fn_index;
+ self.src_fn_last_index = src_fn_index;
src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes(&[0][]u8{}, &[0][]u8{}));
}
- const last_src_fn = self.dbg_line_fn_last.?;
+ const last_src_fn_index = self.src_fn_last_index.?;
+ const last_src_fn = self.getAtom(.src_fn, last_src_fn_index);
const needed_size = last_src_fn.off + last_src_fn.len;
- const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0;
+ const prev_padding_size: u32 = if (src_fn.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.src_fn, prev_index);
+ break :blk src_fn.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (src_fn.next_index) |next_index| blk: {
+ const next = self.getAtom(.src_fn, next_index);
+ break :blk next.off - (src_fn.off + src_fn.len);
+ } else 0;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_line section.
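
The block above is ordinary doubly-linked-list maintenance, just expressed through optional indices (`prev_index`/`next_index`) and `getAtomPtr` instead of raw node pointers: a function whose line program outgrew its slot is detached from its place and re-appended after the current last atom with a fresh offset. The sketch below isolates that move-to-end step; `List`/`Node` are invented, and the NOP back-filling and free-list updates are left out:

    const std = @import("std");

    const Index = usize;

    const Node = struct {
        off: u32 = 0,
        len: u32 = 0,
        prev_index: ?Index = null,
        next_index: ?Index = null,
    };

    const List = struct {
        nodes: std.ArrayListUnmanaged(Node) = .{},
        first_index: ?Index = null,
        last_index: ?Index = null,

        fn at(self: *List, index: Index) *Node {
            return &self.nodes.items[index];
        }

        // Same shape as the grow path above: detach `index` from its current
        // position and re-append it after the last node with a fresh offset.
        fn moveToEnd(self: *List, index: Index, pad: u32) void {
            if (self.last_index.? == index) return;
            const node = self.at(index);
            if (node.prev_index) |prev| self.at(prev).next_index = node.next_index;
            if (node.next_index) |next| self.at(next).prev_index = node.prev_index;
            if (self.first_index.? == index) self.first_index = node.next_index;

            const last_index = self.last_index.?;
            const last = self.at(last_index);
            last.next_index = index;
            node.prev_index = last_index;
            node.next_index = null;
            self.last_index = index;
            node.off = last.off + last.len + pad;
        }
    };

    test "a grown node moves to the end and gets a fresh offset" {
        const gpa = std.testing.allocator;
        var list = List{};
        defer list.nodes.deinit(gpa);

        // Three nodes laid out back to back: 0 -> 1 -> 2.
        try list.nodes.append(gpa, .{ .off = 0, .len = 4, .next_index = 1 });
        try list.nodes.append(gpa, .{ .off = 4, .len = 4, .prev_index = 0, .next_index = 2 });
        try list.nodes.append(gpa, .{ .off = 8, .len = 4, .prev_index = 1 });
        list.first_index = 0;
        list.last_index = 2;

        list.moveToEnd(1, 0);

        try std.testing.expectEqual(@as(?Index, 2), list.nodes.items[0].next_index);
        try std.testing.expectEqual(@as(?Index, 1), list.last_index);
        try std.testing.expectEqual(@as(u32, 12), list.nodes.items[1].off);
    }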
@@ -1152,7 +1149,7 @@ pub fn commitDeclState(
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_line_sect = elf_file.sections.items[shdr_index];
+ const debug_line_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_line_sect.sh_offset + src_fn.off;
try pwriteDbgLineNops(
elf_file.base.file.?,
@@ -1180,7 +1177,7 @@ pub fn commitDeclState(
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const atom = wasm_file.debug_line_atom.?;
+ const atom = wasm_file.getAtomPtr(wasm_file.debug_line_atom.?);
const debug_line = &atom.code;
const segment_size = debug_line.items.len;
if (needed_size != segment_size) {
@@ -1212,7 +1209,7 @@ pub fn commitDeclState(
if (dbg_info_buffer.items.len == 0)
return;
- const atom = getDbgInfoAtom(self.bin_file.tag, module, decl_index);
+ const di_atom_index = self.di_atom_decls.get(decl_index).?;
if (decl_state.abbrev_table.items.len > 0) {
// Now we emit the .debug_info types of the Decl. These will count towards the size of
// the buffer, so we have to do it before computing the offset, and we can't perform the actual
@@ -1234,12 +1231,12 @@ pub fn commitDeclState(
if (deferred) continue;
symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
- try decl_state.addDbgInfoType(module, atom, ty);
+ try decl_state.addDbgInfoType(module, di_atom_index, ty);
}
}
log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
if (reloc.target) |target| {
@@ -1260,11 +1257,12 @@ pub fn commitDeclState(
try self.global_abbrev_relocs.append(gpa, .{
.target = null,
.offset = reloc.offset,
- .atom = reloc.atom,
+ .atom_index = reloc.atom_index,
.addend = reloc.addend,
});
} else {
- const value = symbol.atom.off + symbol.offset + reloc.addend;
+ const atom = self.getAtom(.di_atom, symbol.atom_index);
+ const value = atom.off + symbol.offset + reloc.addend;
log.debug("{x}: [() => {x}] (%{d}, '{}')", .{ reloc.offset, value, target, ty.fmtDebug() });
mem.writeInt(
u32,
@@ -1274,10 +1272,11 @@ pub fn commitDeclState(
);
}
} else {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
mem.writeInt(
u32,
dbg_info_buffer.items[reloc.offset..][0..@sizeOf(u32)],
- reloc.atom.off + reloc.offset + reloc.addend,
+ atom.off + reloc.offset + reloc.addend,
target_endian,
);
}
@@ -1293,7 +1292,7 @@ pub fn commitDeclState(
.got_load => .got_load,
},
.target = reloc.target,
- .offset = reloc.offset + atom.off,
+ .offset = reloc.offset + self.getAtom(.di_atom, di_atom_index).off,
.addend = 0,
.prev_vaddr = 0,
});
@@ -1303,10 +1302,10 @@ pub fn commitDeclState(
}
log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
}
-fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
+fn updateDeclDebugInfoAllocation(self: *Dwarf, atom_index: Atom.Index, len: u32) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1315,24 +1314,26 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
// probably need to edit that logic too.
const gpa = self.allocator;
+ const atom = self.getAtomPtr(.di_atom, atom_index);
atom.len = len;
- if (self.atom_last) |last| blk: {
- if (atom == last) break :blk;
- if (atom.next) |next| {
+ if (self.di_atom_last_index) |last_index| blk: {
+ if (atom_index == last_index) break :blk;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(.di_atom, next_index);
// Update existing Decl - non-last item.
if (atom.off + atom.len + min_nop_size > next.off) {
// It grew too big, so we move it to a new location.
- if (atom.prev) |prev| {
- self.atom_free_list.put(gpa, prev, {}) catch {};
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ self.di_atom_free_list.put(gpa, prev_index, {}) catch {};
+ self.getAtomPtr(.di_atom, prev_index).next_index = atom.next_index;
}
- next.prev = atom.prev;
- atom.next = null;
+ next.prev_index = atom.prev_index;
+ atom.next_index = null;
// Populate where it used to be with NOPs.
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, &[0]u8{}, atom.len, false);
},
@@ -1344,37 +1345,40 @@ fn updateDeclDebugInfoAllocation(self: *Dwarf, atom: *Atom, len: u32) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info_index = wasm_file.debug_info_atom.?;
+ const debug_info = &wasm_file.getAtomPtr(debug_info_index).code;
try writeDbgInfoNopsToArrayList(gpa, debug_info, atom.off, 0, &.{0}, atom.len, false);
},
else => unreachable,
}
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
- } else if (atom.prev == null) {
+ } else if (atom.prev_index == null) {
// Append new Decl.
// TODO Look at the free list before appending at the end.
- atom.prev = last;
- last.next = atom;
- self.atom_last = atom;
+ atom.prev_index = last_index;
+ const last = self.getAtomPtr(.di_atom, last_index);
+ last.next_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = last.off + padToIdeal(last.len);
}
} else {
// This is the first Decl of the .debug_info
- self.atom_first = atom;
- self.atom_last = atom;
+ self.di_atom_first_index = atom_index;
+ self.di_atom_last_index = atom_index;
atom.off = @intCast(u32, padToIdeal(self.dbgInfoHeaderBytes()));
}
}
-fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void {
+fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []const u8) !void {
const tracy = trace(@src());
defer tracy.end();
@@ -1383,14 +1387,22 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
// probably need to edit that logic too.
const gpa = self.allocator;
- const last_decl = self.atom_last.?;
+ const atom = self.getAtom(.di_atom, atom_index);
+ const last_decl_index = self.di_atom_last_index.?;
+ const last_decl = self.getAtom(.di_atom, last_decl_index);
// +1 for a trailing zero to end the children of the decl tag.
const needed_size = last_decl.off + last_decl.len + 1;
- const prev_padding_size: u32 = if (atom.prev) |prev| atom.off - (prev.off + prev.len) else 0;
- const next_padding_size: u32 = if (atom.next) |next| next.off - (atom.off + atom.len) else 0;
+ const prev_padding_size: u32 = if (atom.prev_index) |prev_index| blk: {
+ const prev = self.getAtom(.di_atom, prev_index);
+ break :blk atom.off - (prev.off + prev.len);
+ } else 0;
+ const next_padding_size: u32 = if (atom.next_index) |next_index| blk: {
+ const next = self.getAtom(.di_atom, next_index);
+ break :blk next.off - (atom.off + atom.len);
+ } else 0;
// To end the children of the decl tag.
- const trailing_zero = atom.next == null;
+ const trailing_zero = atom.next_index == null;
// We only have support for one compilation unit so far, so the offsets are directly
// from the .debug_info section.
@@ -1399,7 +1411,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_info_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const debug_info_sect = elf_file.sections.items[shdr_index];
+ const debug_info_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_info_sect.sh_offset + atom.off;
try pwriteDbgInfoNops(
elf_file.base.file.?,
@@ -1430,7 +1442,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const info_atom = wasm_file.debug_info_atom.?;
- const debug_info = &info_atom.code;
+ const debug_info = &wasm_file.getAtomPtr(info_atom).code;
const segment_size = debug_info.items.len;
if (needed_size != segment_size) {
log.debug(" needed size does not equal allocated size: {d}", .{needed_size});
@@ -1458,10 +1470,15 @@ fn writeDeclDebugInfo(self: *Dwarf, atom: *Atom, dbg_info_buf: []const u8) !void
}
}
-pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const atom_index = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
+ const atom = self.getAtom(.src_fn, atom_index);
+ if (atom.len == 0) return;
+
+ const decl = module.declPtr(decl_index);
const func = decl.val.castTag(.function).?.data;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
@@ -1475,79 +1492,81 @@ pub fn updateDeclLineNumber(self: *Dwarf, decl: *const Module.Decl) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const shdr = elf_file.sections.items[elf_file.debug_line_section_index.?];
- const file_pos = shdr.sh_offset + decl.fn_link.elf.off + self.getRelocDbgLineOff();
+ const shdr = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
+ const file_pos = shdr.sh_offset + atom.off + self.getRelocDbgLineOff();
try elf_file.base.file.?.pwriteAll(&data, file_pos);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
const sect = d_sym.getSection(d_sym.debug_line_section_index.?);
- const file_pos = sect.offset + decl.fn_link.macho.off + self.getRelocDbgLineOff();
+ const file_pos = sect.offset + atom.off + self.getRelocDbgLineOff();
try d_sym.file.pwriteAll(&data, file_pos);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const offset = decl.fn_link.wasm.src_fn.off + self.getRelocDbgLineOff();
- const atom = wasm_file.debug_line_atom.?;
- mem.copy(u8, atom.code.items[offset..], &data);
+ const offset = atom.off + self.getRelocDbgLineOff();
+ const line_atom_index = wasm_file.debug_line_atom.?;
+ mem.copy(u8, wasm_file.getAtomPtr(line_atom_index).code.items[offset..], &data);
},
else => unreachable,
}
}
-pub fn freeAtom(self: *Dwarf, atom: *Atom) void {
- if (self.atom_first == atom) {
- self.atom_first = atom.next;
- }
- if (self.atom_last == atom) {
- // TODO shrink the .debug_info section size here
- self.atom_last = atom.prev;
- }
-
- if (atom.prev) |prev| {
- prev.next = atom.next;
-
- // TODO the free list logic like we do for text blocks above
- } else {
- atom.prev = null;
- }
-
- if (atom.next) |next| {
- next.prev = atom.prev;
- } else {
- atom.next = null;
- }
-}
-
-pub fn freeDecl(self: *Dwarf, decl: *Module.Decl) void {
- // TODO make this logic match freeTextBlock. Maybe abstract the logic out since the same thing
- // is desired for both.
+pub fn freeDecl(self: *Dwarf, decl_index: Module.Decl.Index) void {
const gpa = self.allocator;
- const fn_link = switch (self.bin_file.tag) {
- .elf => &decl.fn_link.elf,
- .macho => &decl.fn_link.macho,
- .wasm => &decl.fn_link.wasm.src_fn,
- else => unreachable,
- };
- _ = self.dbg_line_fn_free_list.remove(fn_link);
- if (fn_link.prev) |prev| {
- self.dbg_line_fn_free_list.put(gpa, prev, {}) catch {};
- prev.next = fn_link.next;
- if (fn_link.next) |next| {
- next.prev = prev;
- } else {
- self.dbg_line_fn_last = prev;
+ // Free SrcFn atom
+ if (self.src_fn_decls.fetchRemove(decl_index)) |kv| {
+ const src_fn_index = kv.value;
+ const src_fn = self.getAtom(.src_fn, src_fn_index);
+ _ = self.src_fn_free_list.remove(src_fn_index);
+
+ if (src_fn.prev_index) |prev_index| {
+ self.src_fn_free_list.put(gpa, prev_index, {}) catch {};
+ const prev = self.getAtomPtr(.src_fn, prev_index);
+ prev.next_index = src_fn.next_index;
+ if (src_fn.next_index) |next_index| {
+ self.getAtomPtr(.src_fn, next_index).prev_index = prev_index;
+ } else {
+ self.src_fn_last_index = prev_index;
+ }
+ } else if (src_fn.next_index) |next_index| {
+ self.src_fn_first_index = next_index;
+ self.getAtomPtr(.src_fn, next_index).prev_index = null;
+ }
+ if (self.src_fn_first_index == src_fn_index) {
+ self.src_fn_first_index = src_fn.next_index;
+ }
+ if (self.src_fn_last_index == src_fn_index) {
+ self.src_fn_last_index = src_fn.prev_index;
}
- } else if (fn_link.next) |next| {
- self.dbg_line_fn_first = next;
- next.prev = null;
}
- if (self.dbg_line_fn_first == fn_link) {
- self.dbg_line_fn_first = fn_link.next;
- }
- if (self.dbg_line_fn_last == fn_link) {
- self.dbg_line_fn_last = fn_link.prev;
+
+ // Free DI atom
+ if (self.di_atom_decls.fetchRemove(decl_index)) |kv| {
+ const di_atom_index = kv.value;
+ const di_atom = self.getAtomPtr(.di_atom, di_atom_index);
+
+ if (self.di_atom_first_index == di_atom_index) {
+ self.di_atom_first_index = di_atom.next_index;
+ }
+ if (self.di_atom_last_index == di_atom_index) {
+ // TODO shrink the .debug_info section size here
+ self.di_atom_last_index = di_atom.prev_index;
+ }
+
+ if (di_atom.prev_index) |prev_index| {
+ self.getAtomPtr(.di_atom, prev_index).next_index = di_atom.next_index;
+ // TODO the free list logic like we do for SrcFn above
+ } else {
+ di_atom.prev_index = null;
+ }
+
+ if (di_atom.next_index) |next_index| {
+ self.getAtomPtr(.di_atom, next_index).prev_index = di_atom.prev_index;
+ } else {
+ di_atom.next_index = null;
+ }
}
}
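
One small pattern in `freeDecl` (and in `commitDeclState` above) is worth calling out: additions to `src_fn_free_list`/`di_atom_free_list` use `catch {}`, because the free lists only record indices whose neighbourhood has spare capacity, so failing to note one merely loses a reuse opportunity. A tiny illustration of that best-effort bookkeeping (everything outside the map type is invented):

    const std = @import("std");

    const Index = usize; // Atom.Index is u32 in the real code

    test "free-list bookkeeping is best-effort" {
        const gpa = std.testing.allocator;

        // Same container as src_fn_free_list / di_atom_free_list: a set of indices.
        var free_list: std.AutoHashMapUnmanaged(Index, void) = .{};
        defer free_list.deinit(gpa);

        // An allocation failure here would only cost a reuse opportunity,
        // hence `catch {}` instead of propagating the error.
        free_list.put(gpa, 7, {}) catch {};
        try std.testing.expect(free_list.contains(7));

        // Removing an index that is absent (or already removed) is also harmless.
        _ = free_list.remove(7);
        _ = free_list.remove(7);
        try std.testing.expect(!free_list.contains(7));
    }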
@@ -1690,7 +1709,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_abbrev_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, false);
- const debug_abbrev_sect = elf_file.sections.items[shdr_index];
+ const debug_abbrev_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_abbrev_sect.sh_offset + abbrev_offset;
try elf_file.base.file.?.pwriteAll(&abbrev_buf, file_pos);
},
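
The repeated `elf_file.sections.items[...]` to `elf_file.sections.items(.shdr)[...]` change suggests the ELF section list has become a `std.MultiArrayList`, which stores each field of the element struct as its own dense slice. A small sketch of that access pattern; the `Section` layout below is invented, with only the `shdr` field name taken from the diff:

    const std = @import("std");

    const Section = struct {
        shdr: struct { sh_offset: u64, sh_size: u64 },
        last_atom_index: ?usize = null,
    };

    test "field-wise access into a MultiArrayList" {
        const gpa = std.testing.allocator;
        var sections: std.MultiArrayList(Section) = .{};
        defer sections.deinit(gpa);

        try sections.append(gpa, .{
            .shdr = .{ .sh_offset = 0x1000, .sh_size = 64 },
        });

        // items(.shdr) yields a slice of only the header field, so indexing it
        // reads like the old `sections.items[i]` while touching less memory.
        const shdrs = sections.items(.shdr);
        try std.testing.expectEqual(@as(u64, 0x1000), shdrs[0].sh_offset);
    }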
@@ -1704,7 +1723,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_abbrev = &wasm_file.debug_abbrev_atom.?.code;
+ const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code;
try debug_abbrev.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_abbrev.items, &abbrev_buf);
},
@@ -1770,11 +1789,11 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
}
// Write the form for the compile unit, which must match the abbrev table above.
- const name_strp = try self.makeString(module.root_pkg.root_src_path);
+ const name_strp = try self.strtab.insert(self.allocator, module.root_pkg.root_src_path);
var compile_unit_dir_buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const compile_unit_dir = resolveCompilationDir(module, &compile_unit_dir_buffer);
- const comp_dir_strp = try self.makeString(compile_unit_dir);
- const producer_strp = try self.makeString(link.producer_string);
+ const comp_dir_strp = try self.strtab.insert(self.allocator, compile_unit_dir);
+ const producer_strp = try self.strtab.insert(self.allocator, link.producer_string);
di_buf.appendAssumeCapacity(@enumToInt(AbbrevKind.compile_unit));
if (self.bin_file.tag == .macho) {
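
`makeString` is gone in favour of the shared `StringTable` from link/strtab.zig, which interns strings and hands back stable offsets used here as the strp values. Only `insert` is visible in this diff, so the sketch below is a deliberately simplified interner with the same observable behaviour (duplicate inserts return the original offset); `SimpleStrtab` and its internals are invented:

    const std = @import("std");

    const SimpleStrtab = struct {
        buffer: std.ArrayListUnmanaged(u8) = .{},
        lookup: std.StringHashMapUnmanaged(usize) = .{},

        // Returns the byte offset of `string` in the NUL-separated buffer,
        // reusing the existing offset if the string was inserted before.
        fn insert(self: *SimpleStrtab, gpa: std.mem.Allocator, string: []const u8) !usize {
            if (self.lookup.get(string)) |off| return off;
            const off = self.buffer.items.len;
            try self.buffer.appendSlice(gpa, string);
            try self.buffer.append(gpa, 0);
            const owned = try gpa.dupe(u8, string);
            errdefer gpa.free(owned);
            try self.lookup.put(gpa, owned, off);
            return off;
        }

        fn deinit(self: *SimpleStrtab, gpa: std.mem.Allocator) void {
            self.buffer.deinit(gpa);
            var it = self.lookup.keyIterator();
            while (it.next()) |key| gpa.free(key.*);
            self.lookup.deinit(gpa);
        }
    };

    test "identical strings intern to the same offset" {
        const gpa = std.testing.allocator;
        var strtab = SimpleStrtab{};
        defer strtab.deinit(gpa);

        const a = try strtab.insert(gpa, "main.zig");
        const b = try strtab.insert(gpa, "builtin.zig");
        const c = try strtab.insert(gpa, "main.zig");

        try std.testing.expectEqual(a, c);
        try std.testing.expect(a != b);
        try std.testing.expectEqualStrings("builtin.zig", std.mem.sliceTo(strtab.buffer.items[b..], 0));
    }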
@@ -1805,7 +1824,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
const file_pos = debug_info_sect.sh_offset;
try pwriteDbgInfoNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt, false);
},
@@ -1817,7 +1836,7 @@ pub fn writeDbgInfoHeader(self: *Dwarf, module: *Module, low_pc: u64, high_pc: u
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = &wasm_file.debug_info_atom.?.code;
+ const debug_info = &wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
try writeDbgInfoNopsToArrayList(self.allocator, debug_info, 0, 0, di_buf.items, jmp_amt, false);
},
else => unreachable,
@@ -2124,7 +2143,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_aranges_section_index.?;
try elf_file.growNonAllocSection(shdr_index, needed_size, 16, false);
- const debug_aranges_sect = elf_file.sections.items[shdr_index];
+ const debug_aranges_sect = elf_file.sections.items(.shdr)[shdr_index];
const file_pos = debug_aranges_sect.sh_offset;
try elf_file.base.file.?.pwriteAll(di_buf.items, file_pos);
},
@@ -2138,7 +2157,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_ranges = &wasm_file.debug_ranges_atom.?.code;
+ const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code;
try debug_ranges.resize(wasm_file.base.allocator, needed_size);
mem.copy(u8, debug_ranges.items, di_buf.items);
},
@@ -2275,19 +2294,23 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const needed_with_padding = padToIdeal(needed_bytes);
const delta = needed_with_padding - dbg_line_prg_off;
- var src_fn = self.dbg_line_fn_first.?;
- const last_fn = self.dbg_line_fn_last.?;
+ const first_fn_index = self.src_fn_first_index.?;
+ const first_fn = self.getAtom(.src_fn, first_fn_index);
+ const last_fn_index = self.src_fn_last_index.?;
+ const last_fn = self.getAtom(.src_fn, last_fn_index);
- var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - src_fn.off);
+ var src_fn_index = first_fn_index;
+
+ var buffer = try gpa.alloc(u8, last_fn.off + last_fn.len - first_fn.off);
defer gpa.free(buffer);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
const shdr_index = elf_file.debug_line_section_index.?;
- const needed_size = elf_file.sections.items[shdr_index].sh_size + delta;
+ const needed_size = elf_file.sections.items(.shdr)[shdr_index].sh_size + delta;
try elf_file.growNonAllocSection(shdr_index, needed_size, 1, true);
- const file_pos = elf_file.sections.items[shdr_index].sh_offset + src_fn.off;
+ const file_pos = elf_file.sections.items(.shdr)[shdr_index].sh_offset + first_fn.off;
const amt = try elf_file.base.file.?.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2299,7 +2322,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
const sect_index = d_sym.debug_line_section_index.?;
const needed_size = @intCast(u32, d_sym.getSection(sect_index).size + delta);
try d_sym.growSection(sect_index, needed_size, true);
- const file_pos = d_sym.getSection(sect_index).offset + src_fn.off;
+ const file_pos = d_sym.getSection(sect_index).offset + first_fn.off;
const amt = try d_sym.file.preadAll(buffer, file_pos);
if (amt != buffer.len) return error.InputOutput;
@@ -2308,19 +2331,20 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = &wasm_file.debug_line_atom.?.code;
- mem.copy(u8, buffer, debug_line.items[src_fn.off..]);
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
+ mem.copy(u8, buffer, debug_line.items[first_fn.off..]);
try debug_line.resize(self.allocator, debug_line.items.len + delta);
- mem.copy(u8, debug_line.items[src_fn.off + delta ..], buffer);
+ mem.copy(u8, debug_line.items[first_fn.off + delta ..], buffer);
},
else => unreachable,
}
while (true) {
+ const src_fn = self.getAtomPtr(.src_fn, src_fn_index);
src_fn.off += delta;
- if (src_fn.next) |next| {
- src_fn = next;
+ if (src_fn.next_index) |next_index| {
+ src_fn_index = next_index;
} else break;
}
}
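
The loop just above is the crux of the header-resize path: once the .debug_line header grows by delta bytes, every function's line-program fragment shifts, so the code walks the index-linked src_fn list and bumps each offset. A minimal, self-contained sketch of that walk, using a toy fragment shape with illustrative names rather than the real Atom type:

const LineFragment = struct { off: u32, next_index: ?u32 };

// Shift every fragment in an index-linked list by delta bytes,
// mirroring the `while (true)` loop in writeDbgLineHeader above.
fn shiftByDelta(fragments: []LineFragment, first_index: u32, delta: u32) void {
    var index: ?u32 = first_index;
    while (index) |i| {
        fragments[i].off += delta;
        index = fragments[i].next_index;
    }
}
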
@@ -2346,7 +2370,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_line_sect = elf_file.sections.items[elf_file.debug_line_section_index.?];
+ const debug_line_sect = elf_file.sections.items(.shdr)[elf_file.debug_line_section_index.?];
const file_pos = debug_line_sect.sh_offset;
try pwriteDbgLineNops(elf_file.base.file.?, file_pos, 0, di_buf.items, jmp_amt);
},
@@ -2358,7 +2382,7 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_line = wasm_file.debug_line_atom.?.code;
+ const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
writeDbgLineNopsBuffered(debug_line.items, 0, 0, di_buf.items, jmp_amt);
},
else => unreachable,
@@ -2366,22 +2390,26 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
}
fn getDebugInfoOff(self: Dwarf) ?u32 {
- const first = self.atom_first orelse return null;
+ const first_index = self.di_atom_first_index orelse return null;
+ const first = self.getAtom(.di_atom, first_index);
return first.off;
}
fn getDebugInfoEnd(self: Dwarf) ?u32 {
- const last = self.atom_last orelse return null;
+ const last_index = self.di_atom_last_index orelse return null;
+ const last = self.getAtom(.di_atom, last_index);
return last.off + last.len;
}
fn getDebugLineProgramOff(self: Dwarf) ?u32 {
- const first = self.dbg_line_fn_first orelse return null;
+ const first_index = self.src_fn_first_index orelse return null;
+ const first = self.getAtom(.src_fn, first_index);
return first.off;
}
fn getDebugLineProgramEnd(self: Dwarf) ?u32 {
- const last = self.dbg_line_fn_last orelse return null;
+ const last_index = self.src_fn_last_index orelse return null;
+ const last = self.getAtom(.src_fn, last_index);
return last.off + last.len;
}
@@ -2435,15 +2463,6 @@ fn getRelocDbgInfoSubprogramHighPC(self: Dwarf) u32 {
return dbg_info_low_pc_reloc_index + self.ptrWidthBytes();
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Dwarf, bytes: []const u8) !u32 {
- try self.strtab.ensureUnusedCapacity(self.allocator, bytes.len + 1);
- const result = self.strtab.items.len;
- self.strtab.appendSliceAssumeCapacity(bytes);
- self.strtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
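
The removed makeString above appended unconditionally (hence its "TODO Improve this to use a table"); its callers are now routed through self.strtab.insert, a deduplicating string table, so re-inserting the same name later yields the same .debug_str offset instead of appending another copy. Below is a minimal sketch of that interning idea, assuming the usual buffer-plus-hash-map layout; the names are illustrative and this is not the actual StringTable these linkers import from strtab.zig:

const std = @import("std");

const SimpleStrtab = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},
    table: std.StringHashMapUnmanaged(u32) = .{},

    // Return the existing offset if bytes was interned before; otherwise
    // append a 0-terminated copy and remember where it starts.
    fn insert(self: *SimpleStrtab, gpa: std.mem.Allocator, bytes: []const u8) !u32 {
        if (self.table.get(bytes)) |off| return off;
        const off = @intCast(u32, self.buffer.items.len);
        try self.buffer.ensureUnusedCapacity(gpa, bytes.len + 1);
        self.buffer.appendSliceAssumeCapacity(bytes);
        self.buffer.appendAssumeCapacity(0);
        // Key on an owned copy so the map stays valid if buffer reallocates.
        try self.table.putNoClobber(gpa, try gpa.dupe(u8, bytes), off);
        return off;
    }
};
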
@@ -2465,29 +2484,20 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
}
error_set.names = names;
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = .{
- .prev = null,
- .next = null,
- .off = 0,
- .len = 0,
- };
-
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer);
- try self.managed_atoms.append(gpa, atom);
+ const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
- try self.updateDeclDebugInfoAllocation(atom, @intCast(u32, dbg_info_buffer.items.len));
+ try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
log.debug("writeDeclDebugInfo in flushModule", .{});
- try self.writeDeclDebugInfo(atom, dbg_info_buffer.items);
+ try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
const file_pos = blk: {
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- const debug_info_sect = &elf_file.sections.items[elf_file.debug_info_section_index.?];
+ const debug_info_sect = &elf_file.sections.items(.shdr)[elf_file.debug_info_section_index.?];
break :blk debug_info_sect.sh_offset;
},
.macho => {
@@ -2502,22 +2512,23 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
};
var buf: [@sizeOf(u32)]u8 = undefined;
- mem.writeInt(u32, &buf, atom.off, self.target.cpu.arch.endian());
+ mem.writeInt(u32, &buf, self.getAtom(.di_atom, di_atom_index).off, self.target.cpu.arch.endian());
while (self.global_abbrev_relocs.popOrNull()) |reloc| {
+ const atom = self.getAtom(.di_atom, reloc.atom_index);
switch (self.bin_file.tag) {
.elf => {
const elf_file = self.bin_file.cast(File.Elf).?;
- try elf_file.base.file.?.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try elf_file.base.file.?.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.macho => {
const d_sym = self.bin_file.cast(File.MachO).?.getDebugSymbols().?;
- try d_sym.file.pwriteAll(&buf, file_pos + reloc.atom.off + reloc.offset);
+ try d_sym.file.pwriteAll(&buf, file_pos + atom.off + reloc.offset);
},
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
- const debug_info = wasm_file.debug_info_atom.?.code;
- mem.copy(u8, debug_info.items[reloc.atom.off + reloc.offset ..], &buf);
+ const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
+ mem.copy(u8, debug_info.items[atom.off + reloc.offset ..], &buf);
},
else => unreachable,
}
@@ -2635,12 +2646,62 @@ fn addDbgInfoErrorSet(
try dbg_info_buffer.append(0);
}
-fn getDbgInfoAtom(tag: File.Tag, mod: *Module, decl_index: Module.Decl.Index) *Atom {
- const decl = mod.declPtr(decl_index);
- return switch (tag) {
- .elf => &decl.link.elf.dbg_info_atom,
- .macho => &decl.link.macho.dbg_info_atom,
- .wasm => &decl.link.wasm.dbg_info_atom,
- else => unreachable,
+const Kind = enum { src_fn, di_atom };
+
+fn createAtom(self: *Dwarf, comptime kind: Kind) !Atom.Index {
+ const index = blk: {
+ switch (kind) {
+ .src_fn => {
+ const index = @intCast(Atom.Index, self.src_fns.items.len);
+ _ = try self.src_fns.addOne(self.allocator);
+ break :blk index;
+ },
+ .di_atom => {
+ const index = @intCast(Atom.Index, self.di_atoms.items.len);
+ _ = try self.di_atoms.addOne(self.allocator);
+ break :blk index;
+ },
+ }
+ };
+ const atom = self.getAtomPtr(kind, index);
+ atom.* = .{
+ .off = 0,
+ .len = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ return index;
+}
+
+fn getOrCreateAtomForDecl(self: *Dwarf, comptime kind: Kind, decl_index: Module.Decl.Index) !Atom.Index {
+ switch (kind) {
+ .src_fn => {
+ const gop = try self.src_fn_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ .di_atom => {
+ const gop = try self.di_atom_decls.getOrPut(self.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try self.createAtom(kind);
+ }
+ return gop.value_ptr.*;
+ },
+ }
+}
+
+fn getAtom(self: *const Dwarf, comptime kind: Kind, index: Atom.Index) Atom {
+ return switch (kind) {
+ .src_fn => self.src_fns.items[index],
+ .di_atom => self.di_atoms.items[index],
+ };
+}
+
+fn getAtomPtr(self: *Dwarf, comptime kind: Kind, index: Atom.Index) *Atom {
+ return switch (kind) {
+ .src_fn => &self.src_fns.items[index],
+ .di_atom => &self.di_atoms.items[index],
};
}
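
The Kind/getAtom/getAtomPtr helpers above capture the pattern this whole diff moves toward: atoms live in flat arrays (src_fns, di_atoms) and are referred to by Atom.Index, with raw pointers re-derived only at the point of use. A toy sketch of why that matters, assuming the backing store is a growable array; the names are illustrative, not the real types:

const std = @import("std");

const Node = struct {
    off: u32,
    len: u32,
    prev_index: ?u32,
    next_index: ?u32,
};

const Pool = struct {
    nodes: std.ArrayListUnmanaged(Node) = .{},

    fn create(self: *Pool, gpa: std.mem.Allocator) !u32 {
        const index = @intCast(u32, self.nodes.items.len);
        // addOne may reallocate nodes.items, which is exactly why callers
        // keep the returned index rather than a *Node.
        const node = try self.nodes.addOne(gpa);
        node.* = .{ .off = 0, .len = 0, .prev_index = null, .next_index = null };
        return index;
    }

    fn getPtr(self: *Pool, index: u32) *Node {
        return &self.nodes.items[index]; // only valid until the next create()
    }
};

Because create may grow the array, anything long-lived (the prev_index/next_index links, the per-Decl tables) stores indices and calls getPtr again whenever it needs to mutate a node.
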
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 2c55e55f83..45952da6c0 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,43 +1,89 @@
const Elf = @This();
const std = @import("std");
+const build_options = @import("build_options");
const builtin = @import("builtin");
+const assert = std.debug.assert;
+const elf = std.elf;
+const fs = std.fs;
+const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
-const assert = std.debug.assert;
-const Allocator = std.mem.Allocator;
-const fs = std.fs;
-const elf = std.elf;
-const log = std.log.scoped(.link);
-const Atom = @import("Elf/Atom.zig");
-const Module = @import("../Module.zig");
+const codegen = @import("../codegen.zig");
+const glibc = @import("../glibc.zig");
+const link = @import("../link.zig");
+const lldMain = @import("../main.zig").lldMain;
+const musl = @import("../musl.zig");
+const target_util = @import("../target.zig");
+const trace = @import("../tracy.zig").trace;
+
+const Air = @import("../Air.zig");
+const Allocator = std.mem.Allocator;
+pub const Atom = @import("Elf/Atom.zig");
+const Cache = @import("../Cache.zig");
const Compilation = @import("../Compilation.zig");
const Dwarf = @import("Dwarf.zig");
-const codegen = @import("../codegen.zig");
-const lldMain = @import("../main.zig").lldMain;
-const trace = @import("../tracy.zig").trace;
-const Package = @import("../Package.zig");
-const Value = @import("../value.zig").Value;
-const Type = @import("../type.zig").Type;
-const TypedValue = @import("../TypedValue.zig");
-const link = @import("../link.zig");
const File = link.File;
-const build_options = @import("build_options");
-const target_util = @import("../target.zig");
-const glibc = @import("../glibc.zig");
-const musl = @import("../musl.zig");
-const Cache = @import("../Cache.zig");
-const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
-
-pub const TextBlock = Atom;
+const Module = @import("../Module.zig");
+const Package = @import("../Package.zig");
+const StringTable = @import("strtab.zig").StringTable;
+const Type = @import("../type.zig").Type;
+const TypedValue = @import("../TypedValue.zig");
+const Value = @import("../value.zig").Value;
const default_entry_addr = 0x8000000;
pub const base_tag: File.Tag = .elf;
+const Section = struct {
+ shdr: elf.Elf64_Shdr,
+ phdr_index: u16,
+
+ /// Index of the last allocated atom in this section.
+ last_atom_index: ?Atom.Index = null,
+
+ /// A list of atoms that have surplus capacity. This list can have false
+ /// positives, since functions grow and shrink over time and are only
+ /// sometimes added to or removed from the free list.
+ ///
+ /// An atom has surplus capacity when its overcapacity value is greater than
+ /// padToIdeal(minimum_atom_size). That is, when it has so much extra
+ /// capacity that we could fit a small new symbol in it, itself with
+ /// ideal_capacity or more.
+ ///
+ /// Ideal capacity is defined by size + (size / ideal_factor).
+ ///
+ /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
+ /// overcapacity can be negative. A simple way to have negative overcapacity is to
+ /// allocate a fresh atom, which will have ideal capacity, and then grow it
+ /// by 1 byte. It will then have -1 overcapacity.
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
+};
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ shdr: u16,
+ /// A list of all export aliases of this Decl.
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, elf_file: *const Elf, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, elf_file: *Elf, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, elf_file.getGlobalName(exp.*))) return exp;
+ }
+ return null;
+ }
+};
+
base: File,
dwarf: ?Dwarf = null,
@@ -48,12 +94,12 @@ llvm_object: ?*LlvmObject = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
-sections: std.ArrayListUnmanaged(elf.Elf64_Shdr) = std.ArrayListUnmanaged(elf.Elf64_Shdr){},
+sections: std.MultiArrayList(Section) = .{},
shdr_table_offset: ?u64 = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
-program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = std.ArrayListUnmanaged(elf.Elf64_Phdr){},
+program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
phdr_table_offset: ?u64 = null,
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_load_re_index: ?u16 = null,
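
Replacing sections with a std.MultiArrayList(Section) is what turns every sections.items[i] access elsewhere in this diff into sections.items(.shdr)[i], sections.items(.free_list)[i], and so on: each field of Section becomes its own contiguous column. A small self-contained sketch of that access pattern with a toy row type (not the real Section):

const std = @import("std");

const Row = struct { size: u64, phdr_index: u16 };

test "MultiArrayList column access" {
    const gpa = std.testing.allocator;
    var rows: std.MultiArrayList(Row) = .{};
    defer rows.deinit(gpa);

    try rows.append(gpa, .{ .size = 0x100, .phdr_index = 1 });
    try rows.append(gpa, .{ .size = 0x40, .phdr_index = 2 });

    // items(.field) yields a slice of just that column, the same shape the
    // diff relies on in expressions like self.sections.items(.shdr)[i].
    const sizes = rows.items(.size);
    try std.testing.expectEqual(@as(u64, 0x40), sizes[1]);

    // slice().len replaces the old sections.items.len.
    try std.testing.expectEqual(@as(usize, 2), rows.slice().len);
}
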
@@ -65,12 +111,10 @@ phdr_load_ro_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Write flag
phdr_load_rw_index: ?u16 = null,
-phdr_shdr_table: std.AutoHashMapUnmanaged(u16, u16) = .{},
-
entry_addr: ?u64 = null,
page_size: u32,
-shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
+shstrtab: StringTable(.strtab) = .{},
shstrtab_index: ?u16 = null,
symtab_section_index: ?u16 = null,
@@ -113,39 +157,14 @@ debug_line_header_dirty: bool = false,
error_flags: File.ErrorFlags = File.ErrorFlags{},
-/// Pointer to the last allocated atom
-atoms: std.AutoHashMapUnmanaged(u16, *TextBlock) = .{},
-
-/// A list of text blocks that have surplus capacity. This list can have false
-/// positives, as functions grow and shrink over time, only sometimes being added
-/// or removed from the freelist.
-///
-/// A text block has surplus capacity when its overcapacity value is greater than
-/// padToIdeal(minimum_text_block_size). That is, when it has so
-/// much extra capacity, that we could fit a small new symbol in it, itself with
-/// ideal_capacity or more.
-///
-/// Ideal capacity is defined by size + (size / ideal_factor)
-///
-/// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
-/// overcapacity can be negative. A simple way to have negative overcapacity is to
-/// allocate a fresh text block, which will have ideal capacity, and then grow it
-/// by 1 byte. It will then have -1 overcapacity.
-atom_free_lists: std.AutoHashMapUnmanaged(u16, std.ArrayListUnmanaged(*TextBlock)) = .{},
-
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
+/// Table of tracked Decls.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
/// List of atoms that are owned directly by the linker.
-/// Currently these are only atoms that are the result of linking
-/// object files. Atoms which take part in incremental linking are
-/// at present owned by Module.Decl.
-/// TODO consolidate this.
-managed_atoms: std.ArrayListUnmanaged(*TextBlock) = .{},
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *TextBlock) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
+
+/// Table of atoms indexed by the symbol index.
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -173,15 +192,8 @@ unnamed_const_atoms: UnnamedConstTable = .{},
/// this will be a table indexed by index into the list of Atoms.
relocs: RelocTable = .{},
-const Reloc = struct {
- target: u32,
- offset: u64,
- addend: u32,
- prev_vaddr: u64,
-};
-
-const RelocTable = std.AutoHashMapUnmanaged(*TextBlock, std.ArrayListUnmanaged(Reloc));
-const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*TextBlock));
+const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Reloc));
+const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
@@ -190,15 +202,11 @@ const ideal_factor = 3;
/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
-const minimum_text_block_size = 64;
-pub const min_text_capacity = padToIdeal(minimum_text_block_size);
+const minimum_atom_size = 64;
+pub const min_text_capacity = padToIdeal(minimum_atom_size);
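
For a concrete sense of these constants: ideal_factor is 3 (visible in the hunk context above), so, assuming Elf's padToIdeal matches the Dwarf one shown earlier in this diff, padToIdeal(minimum_atom_size) is 64 +| 64 / 3 = 64 + 21 = 85. That 85-byte min_text_capacity is the threshold used further down (keep_free_list_node) to decide whether an atom's leftover capacity is still worth tracking on a section's free list.
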
pub const PtrWidth = enum { p32, p64 };
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Elf {
assert(options.target.ofmt == .elf);
@@ -230,16 +238,19 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// There must always be a null section in index 0
try self.sections.append(allocator, .{
- .sh_name = 0,
- .sh_type = elf.SHT_NULL,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 0,
- .sh_entsize = 0,
+ .shdr = .{
+ .sh_name = 0,
+ .sh_type = elf.SHT_NULL,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 0,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
try self.populateMissingMetadata();
@@ -286,75 +297,67 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
}
pub fn deinit(self: *Elf) void {
+ const gpa = self.base.allocator;
+
if (build_options.have_llvm) {
- if (self.llvm_object) |llvm_object| llvm_object.destroy(self.base.allocator);
+ if (self.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- self.sections.deinit(self.base.allocator);
- self.program_headers.deinit(self.base.allocator);
- self.shstrtab.deinit(self.base.allocator);
- self.local_symbols.deinit(self.base.allocator);
- self.global_symbols.deinit(self.base.allocator);
- self.global_symbol_free_list.deinit(self.base.allocator);
- self.local_symbol_free_list.deinit(self.base.allocator);
- self.offset_table_free_list.deinit(self.base.allocator);
- self.offset_table.deinit(self.base.allocator);
- self.phdr_shdr_table.deinit(self.base.allocator);
- self.decls.deinit(self.base.allocator);
+ for (self.sections.items(.free_list)) |*free_list| {
+ free_list.deinit(gpa);
+ }
+ self.sections.deinit(gpa);
+
+ self.program_headers.deinit(gpa);
+ self.shstrtab.deinit(gpa);
+ self.local_symbols.deinit(gpa);
+ self.global_symbols.deinit(gpa);
+ self.global_symbol_free_list.deinit(gpa);
+ self.local_symbol_free_list.deinit(gpa);
+ self.offset_table_free_list.deinit(gpa);
+ self.offset_table.deinit(gpa);
- self.atoms.deinit(self.base.allocator);
{
- var it = self.atom_free_lists.valueIterator();
- while (it.next()) |free_list| {
- free_list.deinit(self.base.allocator);
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
}
- self.atom_free_lists.deinit(self.base.allocator);
+ self.decls.deinit(gpa);
}
- for (self.managed_atoms.items) |atom| {
- self.base.allocator.destroy(atom);
- }
- self.managed_atoms.deinit(self.base.allocator);
+ self.atoms.deinit(gpa);
+ self.atom_by_index_table.deinit(gpa);
{
var it = self.unnamed_const_atoms.valueIterator();
while (it.next()) |atoms| {
- atoms.deinit(self.base.allocator);
+ atoms.deinit(gpa);
}
- self.unnamed_const_atoms.deinit(self.base.allocator);
+ self.unnamed_const_atoms.deinit(gpa);
}
{
var it = self.relocs.valueIterator();
while (it.next()) |relocs| {
- relocs.deinit(self.base.allocator);
+ relocs.deinit(gpa);
}
- self.relocs.deinit(self.base.allocator);
+ self.relocs.deinit(gpa);
}
- self.atom_by_index_table.deinit(self.base.allocator);
-
if (self.dwarf) |*dw| {
dw.deinit();
}
}
pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- try decl.link.elf.ensureInitialized(self);
- const target = decl.link.elf.getSymbolIndex().?;
-
- const vaddr = self.local_symbols.items[target].st_value;
- const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
- const gop = try self.relocs.getOrPut(self.base.allocator, atom);
- if (!gop.found_existing) {
- gop.value_ptr.* = .{};
- }
- try gop.value_ptr.append(self.base.allocator, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const this_atom = self.getAtom(this_atom_index);
+ const target = this_atom.getSymbolIndex().?;
+ const vaddr = this_atom.getSymbol(self).st_value;
+ const atom_index = self.getAtomIndexForSymbol(reloc_info.parent_atom_index).?;
+ try Atom.addRelocation(self, atom_index, .{
.target = target,
.offset = reloc_info.offset,
.addend = reloc_info.addend,
@@ -375,7 +378,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.shdr_table_offset) |off| {
const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
- const tight_size = self.sections.items.len * shdr_size;
+ const tight_size = self.sections.slice().len * shdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -385,7 +388,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
if (self.phdr_table_offset) |off| {
const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
- const tight_size = self.sections.items.len * phdr_size;
+ const tight_size = self.sections.slice().len * phdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
@@ -393,7 +396,7 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
}
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
const increased_size = padToIdeal(section.sh_size);
const test_end = section.sh_offset + increased_size;
if (end > section.sh_offset and start < test_end) {
@@ -420,7 +423,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
if (self.phdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
}
- for (self.sections.items) |section| {
+ for (self.sections.items(.shdr)) |section| {
if (section.sh_offset <= start) continue;
if (section.sh_offset < min_pos) min_pos = section.sh_offset;
}
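
allocatedSize and detectAllocCollision above are the two halves of findFreeSpace: propose a candidate file offset, then keep bumping it past whatever it would overlap until nothing collides. A simplified, self-contained sketch of that scan; the real code also pads each chunk via padToIdeal and checks the program/section header tables, all omitted here:

const std = @import("std");

// chunks holds [start, end) file ranges that are already occupied.
fn findFreeSpaceSimplified(chunks: []const [2]u64, object_size: u64, min_alignment: u64) u64 {
    var start: u64 = 0;
    outer: while (true) {
        start = std.mem.alignForwardGeneric(u64, start, min_alignment);
        const end = start + object_size;
        for (chunks) |chunk| {
            if (end > chunk[0] and start < chunk[1]) {
                // Collision: retry just past this chunk.
                start = chunk[1];
                continue :outer;
            }
        }
        return start;
    }
}
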
@@ -439,31 +442,10 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 {
return start;
}
-/// TODO Improve this to use a table.
-fn makeString(self: *Elf, bytes: []const u8) !u32 {
- try self.shstrtab.ensureUnusedCapacity(self.base.allocator, bytes.len + 1);
- const result = self.shstrtab.items.len;
- self.shstrtab.appendSliceAssumeCapacity(bytes);
- self.shstrtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
-}
-
-pub fn getString(self: Elf, str_off: u32) []const u8 {
- assert(str_off < self.shstrtab.items.len);
- return mem.sliceTo(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off), 0);
-}
-
-fn updateString(self: *Elf, old_str_off: u32, new_name: []const u8) !u32 {
- const existing_name = self.getString(old_str_off);
- if (mem.eql(u8, existing_name, new_name)) {
- return old_str_off;
- }
- return self.makeString(new_name);
-}
-
pub fn populateMissingMetadata(self: *Elf) !void {
assert(self.llvm_object == null);
+ const gpa = self.base.allocator;
const small_ptr = switch (self.ptr_width) {
.p32 => true,
.p64 => false,
@@ -477,7 +459,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
const off = self.findFreeSpace(file_size, p_align);
log.debug("found PT_LOAD RE free space 0x{x} to 0x{x}", .{ off, off + file_size });
const entry_addr: u64 = self.entry_addr orelse if (self.base.options.target.cpu.arch == .spu_2) @as(u64, 0) else default_entry_addr;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -487,7 +469,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_X | elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_re_index.?, .{});
self.entry_addr = null;
self.phdr_table_dirty = true;
}
@@ -504,7 +485,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
// we'll need to re-use that function anyway, in case the GOT grows and overlaps something
// else in virtual memory.
const got_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -527,7 +508,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RO free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rodata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -537,7 +518,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_ro_index.?, .{});
self.phdr_table_dirty = true;
}
@@ -551,7 +531,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
log.debug("found PT_LOAD RW free space 0x{x} to 0x{x}", .{ off, off + file_size });
// TODO Same as for GOT
const rwdata_addr: u32 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
- try self.program_headers.append(self.base.allocator, .{
+ try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = file_size,
@@ -561,148 +541,145 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p_align = p_align,
.p_flags = elf.PF_R | elf.PF_W,
});
- try self.atom_free_lists.putNoClobber(self.base.allocator, self.phdr_load_rw_index.?, .{});
self.phdr_table_dirty = true;
}
if (self.shstrtab_index == null) {
- self.shstrtab_index = @intCast(u16, self.sections.items.len);
- assert(self.shstrtab.items.len == 0);
- try self.shstrtab.append(self.base.allocator, 0); // need a 0 at position 0
- const off = self.findFreeSpace(self.shstrtab.items.len, 1);
- log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.items.len });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".shstrtab"),
- .sh_type = elf.SHT_STRTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = self.shstrtab.items.len,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ self.shstrtab_index = @intCast(u16, self.sections.slice().len);
+ assert(self.shstrtab.buffer.items.len == 0);
+ try self.shstrtab.buffer.append(gpa, 0); // need a 0 at position 0
+ const off = self.findFreeSpace(self.shstrtab.buffer.items.len, 1);
+ log.debug("found shstrtab free space 0x{x} to 0x{x}", .{ off, off + self.shstrtab.buffer.items.len });
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".shstrtab"),
+ .sh_type = elf.SHT_STRTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = self.shstrtab.buffer.items.len,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shstrtab_dirty = true;
self.shdr_table_dirty = true;
}
if (self.text_section_index == null) {
- self.text_section_index = @intCast(u16, self.sections.items.len);
+ self.text_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".text"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".text"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_re_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_re_index.?,
- self.text_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.got_section_index == null) {
- self.got_section_index = @intCast(u16, self.sections.items.len);
+ self.got_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_got_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".got"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".got"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_got_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_got_index.?,
- self.got_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.rodata_section_index == null) {
- self.rodata_section_index = @intCast(u16, self.sections.items.len);
+ self.rodata_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_ro_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".rodata"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".rodata"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_ro_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_ro_index.?,
- self.rodata_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.data_section_index == null) {
- self.data_section_index = @intCast(u16, self.sections.items.len);
+ self.data_section_index = @intCast(u16, self.sections.slice().len);
const phdr = &self.program_headers.items[self.phdr_load_rw_index.?];
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".data"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = @as(u16, ptr_size),
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".data"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_WRITE | elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = @as(u16, ptr_size),
+ .sh_entsize = 0,
+ },
+ .phdr_index = self.phdr_load_rw_index.?,
});
- try self.phdr_shdr_table.putNoClobber(
- self.base.allocator,
- self.phdr_load_rw_index.?,
- self.data_section_index.?,
- );
self.shdr_table_dirty = true;
}
if (self.symtab_section_index == null) {
- self.symtab_section_index = @intCast(u16, self.sections.items.len);
+ self.symtab_section_index = @intCast(u16, self.sections.slice().len);
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.base.options.symbol_count_hint * each_size;
const off = self.findFreeSpace(file_size, min_align);
log.debug("found symtab free space 0x{x} to 0x{x}", .{ off, off + file_size });
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".symtab"),
- .sh_type = elf.SHT_SYMTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size,
- // The section header index of the associated string table.
- .sh_link = self.shstrtab_index.?,
- .sh_info = @intCast(u32, self.local_symbols.items.len),
- .sh_addralign = min_align,
- .sh_entsize = each_size,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".symtab"),
+ .sh_type = elf.SHT_SYMTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size,
+ // The section header index of the associated string table.
+ .sh_link = self.shstrtab_index.?,
+ .sh_info = @intCast(u32, self.local_symbols.items.len),
+ .sh_addralign = min_align,
+ .sh_entsize = each_size,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
try self.writeSymbol(0);
@@ -710,27 +687,30 @@ pub fn populateMissingMetadata(self: *Elf) !void {
if (self.dwarf) |*dw| {
if (self.debug_str_section_index == null) {
- self.debug_str_section_index = @intCast(u16, self.sections.items.len);
- assert(dw.strtab.items.len == 0);
- try dw.strtab.append(self.base.allocator, 0);
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_str"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
- .sh_addr = 0,
- .sh_offset = 0,
- .sh_size = 0,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 1,
+ self.debug_str_section_index = @intCast(u16, self.sections.slice().len);
+ assert(dw.strtab.buffer.items.len == 0);
+ try dw.strtab.buffer.append(gpa, 0);
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_str"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_MERGE | elf.SHF_STRINGS,
+ .sh_addr = 0,
+ .sh_offset = 0,
+ .sh_size = 0,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 1,
+ },
+ .phdr_index = undefined,
});
self.debug_strtab_dirty = true;
self.shdr_table_dirty = true;
}
if (self.debug_info_section_index == null) {
- self.debug_info_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_info_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 200;
const p_align = 1;
@@ -739,24 +719,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_info"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_info"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_info_header_dirty = true;
}
if (self.debug_abbrev_section_index == null) {
- self.debug_abbrev_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_abbrev_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 128;
const p_align = 1;
@@ -765,24 +748,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_abbrev"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_abbrev"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_abbrev_section_dirty = true;
}
if (self.debug_aranges_section_index == null) {
- self.debug_aranges_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_aranges_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 160;
const p_align = 16;
@@ -791,24 +777,27 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_aranges"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_aranges"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_aranges_section_dirty = true;
}
if (self.debug_line_section_index == null) {
- self.debug_line_section_index = @intCast(u16, self.sections.items.len);
+ self.debug_line_section_index = @intCast(u16, self.sections.slice().len);
const file_size_hint = 250;
const p_align = 1;
@@ -817,17 +806,20 @@ pub fn populateMissingMetadata(self: *Elf) !void {
off,
off + file_size_hint,
});
- try self.sections.append(self.base.allocator, .{
- .sh_name = try self.makeString(".debug_line"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size_hint,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = p_align,
- .sh_entsize = 0,
+ try self.sections.append(gpa, .{
+ .shdr = .{
+ .sh_name = try self.shstrtab.insert(gpa, ".debug_line"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size_hint,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = p_align,
+ .sh_entsize = 0,
+ },
+ .phdr_index = undefined,
});
self.shdr_table_dirty = true;
self.debug_line_header_dirty = true;
@@ -843,7 +835,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
.p64 => @alignOf(elf.Elf64_Shdr),
};
if (self.shdr_table_offset == null) {
- self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
+ self.shdr_table_offset = self.findFreeSpace(self.sections.slice().len * shsize, shalign);
self.shdr_table_dirty = true;
}
@@ -874,7 +866,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
 // offset + its filesize.
var max_file_offset: u64 = 0;
- for (self.sections.items) |shdr| {
+ for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_offset + shdr.sh_size > max_file_offset) {
max_file_offset = shdr.sh_offset + shdr.sh_size;
}
@@ -884,15 +876,18 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
}
-fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u64) !void {
+fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
// TODO Also detect virtual address collisions.
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const phdr = &self.program_headers.items[phdr_index];
+ const maybe_last_atom_index = self.sections.items(.last_atom_index)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const existing_size = if (self.atoms.get(phdr_index)) |last| blk: {
+ const existing_size = if (maybe_last_atom_index) |last_atom_index| blk: {
+ const last = self.getAtom(last_atom_index);
const sym = last.getSymbol(self);
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else if (shdr_index == self.got_section_index.?) blk: {
@@ -900,8 +895,8 @@ fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u
} else 0;
shdr.sh_size = 0;
- log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
- self.getString(shdr.sh_name),
+ log.debug("new '{?s}' file offset 0x{x} to 0x{x}", .{
+ self.shstrtab.get(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
@@ -927,7 +922,7 @@ pub fn growNonAllocSection(
min_alignment: u32,
requires_file_copy: bool,
) !void {
- const shdr = &self.sections.items[shdr_index];
+ const shdr = &self.sections.items(.shdr)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
const existing_size = if (self.symtab_section_index.? == shdr_index) blk: {
@@ -940,7 +935,7 @@ pub fn growNonAllocSection(
shdr.sh_size = 0;
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, min_alignment);
- log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getString(shdr.sh_name), shdr.sh_offset, new_offset });
+ log.debug("moving '{?s}' from 0x{x} to 0x{x}", .{ self.shstrtab.get(shdr.sh_name), shdr.sh_offset, new_offset });
if (requires_file_copy) {
const amt = try self.base.file.?.copyRangeAll(
@@ -1011,6 +1006,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
}
+ const gpa = self.base.allocator;
var sub_prog_node = prog_node.start("ELF Flush", 0);
sub_prog_node.activate();
defer sub_prog_node.end();
@@ -1029,12 +1025,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
{
var it = self.relocs.iterator();
while (it.next()) |entry| {
- const atom = entry.key_ptr.*;
+ const atom_index = entry.key_ptr.*;
const relocs = entry.value_ptr.*;
+ const atom = self.getAtom(atom_index);
const source_sym = atom.getSymbol(self);
- const source_shdr = self.sections.items[source_sym.st_shndx];
+ const source_shdr = self.sections.items(.shdr)[source_sym.st_shndx];
- log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)});
+ log.debug("relocating '{?s}'", .{self.shstrtab.get(source_sym.st_name)});
for (relocs.items) |*reloc| {
const target_sym = self.local_symbols.items[reloc.target];
@@ -1045,10 +1042,10 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const section_offset = (source_sym.st_value + reloc.offset) - source_shdr.sh_addr;
const file_offset = source_shdr.sh_offset + section_offset;
- log.debug(" ({x}: [() => 0x{x}] ({s}))", .{
+ log.debug(" ({x}: [() => 0x{x}] ({?s}))", .{
reloc.offset,
target_vaddr,
- self.getString(target_sym.st_name),
+ self.shstrtab.get(target_sym.st_name),
});
switch (self.ptr_width) {
@@ -1126,8 +1123,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = progHeaderTo32(self.program_headers.items[i]);
@@ -1138,8 +1135,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
- defer self.base.allocator.free(buf);
+ const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
+ defer gpa.free(buf);
for (buf) |*phdr, i| {
phdr.* = self.program_headers.items[i];
@@ -1155,20 +1152,20 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
{
const shdr_index = self.shstrtab_index.?;
- if (self.shstrtab_dirty or self.shstrtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, self.shstrtab.items.len, 1, false);
- const shstrtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
+ if (self.shstrtab_dirty or self.shstrtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, self.shstrtab.buffer.items.len, 1, false);
+ const shstrtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(self.shstrtab.buffer.items, shstrtab_sect.sh_offset);
self.shstrtab_dirty = false;
}
}
if (self.dwarf) |dwarf| {
const shdr_index = self.debug_str_section_index.?;
- if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items[shdr_index].sh_size) {
- try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
- const debug_strtab_sect = self.sections.items[shdr_index];
- try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
+ if (self.debug_strtab_dirty or dwarf.strtab.buffer.items.len != self.sections.items(.shdr)[shdr_index].sh_size) {
+ try self.growNonAllocSection(shdr_index, dwarf.strtab.buffer.items.len, 1, false);
+ const debug_strtab_sect = self.sections.items(.shdr)[shdr_index];
+ try self.base.file.?.pwriteAll(dwarf.strtab.buffer.items, debug_strtab_sect.sh_offset);
self.debug_strtab_dirty = false;
}
}
@@ -1183,7 +1180,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
.p64 => @alignOf(elf.Elf64_Shdr),
};
const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
- const needed_size = self.sections.items.len * shsize;
+ const needed_size = self.sections.slice().len * shsize;
if (needed_size > allocated_size) {
self.shdr_table_offset = null; // free the space
@@ -1192,12 +1189,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
switch (self.ptr_width) {
.p32 => {
- const buf = try self.base.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf32_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = sectHeaderTo32(self.sections.items[i]);
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = sectHeaderTo32(slice.items(.shdr)[i]);
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, shdr);
}
@@ -1205,12 +1203,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
},
.p64 => {
- const buf = try self.base.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
- defer self.base.allocator.free(buf);
+ const slice = self.sections.slice();
+ const buf = try gpa.alloc(elf.Elf64_Shdr, slice.len);
+ defer gpa.free(buf);
for (buf) |*shdr, i| {
- shdr.* = self.sections.items[i];
- log.debug("writing section {s}: {}", .{ self.getString(shdr.sh_name), shdr.* });
+ shdr.* = slice.items(.shdr)[i];
+ log.debug("writing section {?s}: {}", .{ self.shstrtab.get(shdr.sh_name), shdr.* });
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, shdr);
}
@@ -2021,7 +2020,7 @@ fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
index += 2;
- const e_shnum = @intCast(u16, self.sections.items.len);
+ const e_shnum = @intCast(u16, self.sections.slice().len);
mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
index += 2;
@@ -2033,124 +2032,145 @@ fn writeElfHeader(self: *Elf) !void {
try self.base.file.?.pwriteAll(hdr_buf[0..index], 0);
}
-fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
- const local_sym = text_block.getSymbol(self);
- const name_str_index = local_sym.st_name;
- const name = self.getString(name_str_index);
- log.debug("freeTextBlock {*} ({s})", .{ text_block, name });
+fn freeAtom(self: *Elf, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
+ log.debug("freeAtom {d} ({s})", .{ atom_index, atom.getName(self) });
- self.freeRelocationsForTextBlock(text_block);
+ Atom.freeRelocations(self, atom_index);
- const free_list = self.atom_free_lists.getPtr(phdr_index).?;
+ const gpa = self.base.allocator;
+ const shndx = atom.getSymbol(self).st_shndx;
+ const free_list = &self.sections.items(.free_list)[shndx];
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == text_block) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == text_block.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- if (self.atoms.getPtr(phdr_index)) |last_block| {
- if (last_block.* == text_block) {
- if (text_block.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[shndx];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- last_block.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- _ = self.atoms.fetchRemove(phdr_index);
+ maybe_last_atom_index.* = null;
}
}
}
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
- free_list.append(self.base.allocator, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- text_block.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- text_block.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
- const local_sym_index = text_block.getSymbolIndex().?;
- self.local_symbol_free_list.append(self.base.allocator, local_sym_index) catch {};
+ const local_sym_index = atom.getSymbolIndex().?;
+
+ self.local_symbol_free_list.append(gpa, local_sym_index) catch {};
self.local_symbols.items[local_sym_index].st_info = 0;
+ self.local_symbols.items[local_sym_index].st_shndx = 0;
_ = self.atom_by_index_table.remove(local_sym_index);
- text_block.local_sym_index = 0;
+ self.getAtomPtr(atom_index).local_sym_index = 0;
- self.offset_table_free_list.append(self.base.allocator, text_block.offset_table_index) catch {};
-
- if (self.dwarf) |*dw| {
- dw.freeAtom(&text_block.dbg_info_atom);
- }
+ self.offset_table_free_list.append(self.base.allocator, atom.offset_table_index) catch {};
}
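
Stripped of the symbol and free-list bookkeeping, freeAtom above is an unlink from a doubly linked list that is threaded through indices instead of pointers. A self-contained sketch of just the prev/next fix-up, with illustrative shapes rather than the real Atom:

const ListNode = struct { prev_index: ?u32 = null, next_index: ?u32 = null };

fn unlink(nodes: []ListNode, index: u32) void {
    const prev_index = nodes[index].prev_index;
    const next_index = nodes[index].next_index;
    if (prev_index) |p| nodes[p].next_index = next_index;
    if (next_index) |n| nodes[n].prev_index = prev_index;
    nodes[index].prev_index = null;
    nodes[index].next_index = null;
}
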
-fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr_index: u16) void {
+fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = text_block;
+ _ = atom_index;
_ = new_block_size;
- _ = phdr_index;
}
-fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const sym = text_block.getSymbol(self);
+fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
- const need_realloc = !align_ok or new_block_size > text_block.capacity(self);
+ const need_realloc = !align_ok or new_block_size > atom.capacity(self);
if (!need_realloc) return sym.st_value;
- return self.allocateTextBlock(text_block, new_block_size, alignment, phdr_index);
+ return self.allocateAtom(atom_index, new_block_size, alignment);
}
-fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
- const phdr = &self.program_headers.items[phdr_index];
- const shdr = &self.sections.items[shdr_index];
- const new_block_ideal_capacity = padToIdeal(new_block_size);
+pub fn createAtom(self: *Elf) !Atom.Index {
+ const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const local_sym_index = try self.allocateLocalSymbol();
+ const offset_table_index = try self.allocateGotOffset();
+ try self.atom_by_index_table.putNoClobber(gpa, local_sym_index, atom_index);
+ atom.* = .{
+ .local_sym_index = local_sym_index,
+ .offset_table_index = offset_table_index,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ local_sym_index, atom_index });
+ return atom_index;
+}
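
createAtom also seeds the symbol-index to atom-index mapping (atom_by_index_table) that lookups such as getAtomIndexForSymbol, used from getDeclVAddr earlier in this diff, rely on. A tiny sketch of that mapping in isolation; the concrete indices are made up for illustration:

const std = @import("std");

test "symbol index to atom index mapping" {
    const gpa = std.testing.allocator;
    var atom_by_sym: std.AutoHashMapUnmanaged(u32, u32) = .{};
    defer atom_by_sym.deinit(gpa);

    const local_sym_index: u32 = 7; // would come from allocateLocalSymbol()
    const atom_index: u32 = 0; // atoms.items.len captured before addOne()
    try atom_by_sym.putNoClobber(gpa, local_sym_index, atom_index);

    try std.testing.expectEqual(@as(?u32, atom_index), atom_by_sym.get(local_sym_index));
}
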
- // We use these to indicate our intention to update metadata, placing the new block,
+fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
+ const sym = atom.getSymbol(self);
+ const phdr_index = self.sections.items(.phdr_index)[sym.st_shndx];
+ const phdr = &self.program_headers.items[phdr_index];
+ const shdr = &self.sections.items(.shdr)[sym.st_shndx];
+ const free_list = &self.sections.items(.free_list)[sym.st_shndx];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sym.st_shndx];
+ const new_atom_ideal_capacity = padToIdeal(new_block_size);
+
+ // We use these to indicate our intention to update metadata, placing the new atom,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var block_placement: ?*TextBlock = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
- var free_list = self.atom_free_lists.get(phdr_index).?;
// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_block = free_list.items[i];
- // We now have a pointer to a live text block that has too much capacity.
- // Is it enough that we could fit this new text block?
- const sym = big_block.getSymbol(self);
- const capacity = big_block.capacity(self);
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
+ // We now have a pointer to a live atom that has too much capacity.
+ // Is it enough that we could fit this new atom?
+ const big_atom_sym = big_atom.getSymbol(self);
+ const capacity = big_atom.capacity(self);
const ideal_capacity = padToIdeal(capacity);
- const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity;
- const capacity_end_vaddr = sym.st_value + capacity;
- const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
+ const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity;
+ const capacity_end_vaddr = big_atom_sym.st_value + capacity;
+ const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
- if (!big_block.freeListEligible(self)) {
+ if (!big_atom.freeListEligible(self)) {
_ = free_list.swapRemove(i);
} else {
i += 1;
@@ -2164,29 +2184,33 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = big_block;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (self.atoms.get(phdr_index)) |last| {
- const sym = last.getSymbol(self);
- const ideal_capacity = padToIdeal(sym.st_size);
- const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
+ const last_sym = last.getSymbol(self);
+ const ideal_capacity = padToIdeal(last_sym.st_size);
+ const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible.
- block_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk phdr.p_vaddr;
}
};
- const expand_text_section = block_placement == null or block_placement.?.next == null;
- if (expand_text_section) {
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
+ if (expand_section) {
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
- try self.growAllocSection(shdr_index, phdr_index, needed_size);
- _ = try self.atoms.put(self.base.allocator, phdr_index, text_block);
+ try self.growAllocSection(sym.st_shndx, needed_size);
+ maybe_last_atom_index.* = atom_index;
if (self.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
@@ -2201,23 +2225,28 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
}
shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
- // This function can also reallocate a text block.
+ // This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
}
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
}
- if (block_placement) |big_block| {
- text_block.prev = big_block;
- text_block.next = big_block.next;
- big_block.next = text_block;
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- text_block.prev = null;
- text_block.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -2272,15 +2301,10 @@ pub fn allocateGotOffset(self: *Elf) !u32 {
return index;
}
-fn freeRelocationsForTextBlock(self: *Elf, text_block: *TextBlock) void {
- var removed_relocs = self.relocs.fetchRemove(text_block);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
-}
-
fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
- self.freeTextBlock(atom, self.phdr_load_ro_index.?);
+ self.freeAtom(atom);
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@@ -2295,43 +2319,57 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchRemove(decl_index)) |kv| {
- if (kv.value) |index| {
- self.freeTextBlock(&decl.link.elf, index);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
if (self.dwarf) |*dw| {
- dw.freeDecl(decl);
+ dw.freeDecl(decl_index);
}
}
-fn getDeclPhdrIndex(self: *Elf, decl: *Module.Decl) !u16 {
+pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .shdr = self.getDeclShdrIndex(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
+fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
- const phdr_index: u16 = blk: {
+ const shdr_index: u16 = blk: {
if (val.isUndefDeep()) {
// TODO in release-fast and release-small, we should put undef in .bss
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
switch (zig_ty) {
// TODO: what if this is a function pointer?
- .Fn => break :blk self.phdr_load_re_index.?,
+ .Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
- break :blk self.phdr_load_rw_index.?;
+ break :blk self.data_section_index.?;
}
- break :blk self.phdr_load_ro_index.?;
+ break :blk self.rodata_section_index.?;
},
}
};
- return phdr_index;
+ return shdr_index;
}
fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym {
+ const gpa = self.base.allocator;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
@@ -2341,60 +2379,65 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = decl_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
- const local_sym = decl.link.elf.getSymbolPtr(self);
- if (local_sym.st_size != 0) {
- const capacity = decl.link.elf.capacity(self);
+ const shdr_index = decl_metadata.shdr;
+ if (atom.getSymbol(self).st_size != 0) {
+ const local_sym = atom.getSymbolPtr(self);
+ local_sym.st_name = try self.shstrtab.insert(gpa, decl_name);
+ local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
+
+ const capacity = atom.capacity(self);
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
+
if (need_realloc) {
- const vaddr = try self.growTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
+ const vaddr = try self.growAtom(atom_index, code.len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
log.debug(" (writing new offset table entry)", .{});
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
} else if (code.len < local_sym.st_size) {
- self.shrinkTextBlock(&decl.link.elf, code.len, phdr_index);
+ self.shrinkAtom(atom_index, code.len);
}
local_sym.st_size = code.len;
- local_sym.st_name = try self.updateString(local_sym.st_name, decl_name);
- local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
- local_sym.st_other = 0;
- local_sym.st_shndx = shdr_index;
- // TODO this write could be avoided if no fields of the symbol were changed.
- try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
- } else {
- const name_str_index = try self.makeString(decl_name);
- const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(&decl.link.elf, phdr_index);
- log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
+ // TODO this write could be avoided if no fields of the symbol were changed.
+ try self.writeSymbol(atom.getSymbolIndex().?);
+ } else {
+ const local_sym = atom.getSymbolPtr(self);
local_sym.* = .{
- .st_name = name_str_index,
+ .st_name = try self.shstrtab.insert(gpa, decl_name),
.st_info = (elf.STB_LOCAL << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
+ .st_value = 0,
+ .st_size = 0,
};
- self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
+ const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
+ log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, vaddr });
- try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
- try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
+ self.offset_table.items[atom.offset_table_index] = vaddr;
+ local_sym.st_value = vaddr;
+ local_sym.st_size = code.len;
+
+ try self.writeSymbol(atom.getSymbolIndex().?);
+ try self.writeOffsetTableEntry(atom.offset_table_index);
}
+ const local_sym = atom.getSymbolPtr(self);
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
return local_sym;
@@ -2413,15 +2456,10 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.elf;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForTextBlock(atom);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2483,16 +2521,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
}
- assert(!self.unnamed_const_atoms.contains(decl_index));
-
- const atom = &decl.link.elf;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForTextBlock(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2509,14 +2540,14 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}, &code_buffer, .{
.dwarf = ds,
}, .{
- .parent_atom_index = decl.link.elf.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.elf.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
@@ -2545,41 +2576,35 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
- var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+ const gpa = self.base.allocator;
+
+ var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
- const gop = try self.unnamed_const_atoms.getOrPut(self.base.allocator, decl_index);
+ const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
const unnamed_consts = gop.value_ptr;
- const atom = try self.base.allocator.create(TextBlock);
- errdefer self.base.allocator.destroy(atom);
- atom.* = TextBlock.empty;
- // TODO for unnamed consts we don't need GOT offset/entry allocated
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(self.base.allocator, atom);
-
+ const decl = mod.declPtr(decl_index);
const name_str_index = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
- defer self.base.allocator.free(decl_name);
-
+ defer gpa.free(decl_name);
const index = unnamed_consts.items.len;
- const name = try std.fmt.allocPrint(self.base.allocator, "__unnamed_{s}_{d}", .{ decl_name, index });
- defer self.base.allocator.free(name);
-
- break :blk try self.makeString(name);
+ const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
+ defer gpa.free(name);
+ break :blk try self.shstrtab.insert(gpa, name);
};
- const name = self.getString(name_str_index);
+ const name = self.shstrtab.get(name_str_index).?;
+
+ const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = {},
}, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -2592,31 +2617,27 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
- const phdr_index = self.phdr_load_ro_index.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
- const vaddr = try self.allocateTextBlock(atom, code.len, required_alignment, phdr_index);
- errdefer self.freeTextBlock(atom, phdr_index);
+ const shdr_index = self.rodata_section_index.?;
+ const phdr_index = self.sections.items(.phdr_index)[shdr_index];
+ const local_sym = self.getAtom(atom_index).getSymbolPtr(self);
+ local_sym.st_name = name_str_index;
+ local_sym.st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = shdr_index;
+ local_sym.st_size = code.len;
+ local_sym.st_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
- log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
+ log.debug("allocated text block for {s} at 0x{x}", .{ name, local_sym.st_value });
- const local_sym = atom.getSymbolPtr(self);
- local_sym.* = .{
- .st_name = name_str_index,
- .st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT,
- .st_other = 0,
- .st_shndx = shdr_index,
- .st_value = vaddr,
- .st_size = code.len,
- };
-
- try self.writeSymbol(atom.getSymbolIndex().?);
- try unnamed_consts.append(self.base.allocator, atom);
+ try self.writeSymbol(self.getAtom(atom_index).getSymbolIndex().?);
+ try unnamed_consts.append(gpa, atom_index);
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
- const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
+ const file_offset = self.sections.items(.shdr)[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
- return atom.getSymbolIndex().?;
+ return self.getAtom(atom_index).getSymbolIndex().?;
}
pub fn updateDeclExports(
@@ -2635,20 +2656,16 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();
+ const gpa = self.base.allocator;
+
const decl = module.declPtr(decl_index);
- const atom = &decl.link.elf;
-
- if (atom.getSymbolIndex() == null) return;
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
- try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
+ const shdr_index = decl_metadata.shdr;
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = try self.getDeclPhdrIndex(decl);
- }
- const phdr_index = gop.value_ptr.*.?;
- const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
+ try self.global_symbols.ensureUnusedCapacity(gpa, exports.len);
for (exports) |exp| {
if (exp.options.section) |section_name| {
@@ -2681,10 +2698,10 @@ pub fn updateDeclExports(
},
};
const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
- if (exp.link.elf.sym_index) |i| {
+ if (decl_metadata.getExport(self, exp.options.name)) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
- .st_name = try self.updateString(sym.st_name, exp.options.name),
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
@@ -2692,30 +2709,29 @@ pub fn updateDeclExports(
.st_size = decl_sym.st_size,
};
} else {
- const name = try self.makeString(exp.options.name);
const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
+ try decl_metadata.exports.append(gpa, @intCast(u32, i));
self.global_symbols.items[i] = .{
- .st_name = name,
+ .st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
.st_value = decl_sym.st_value,
.st_size = decl_sym.st_size,
};
-
- exp.link.elf.sym_index = @intCast(u32, i);
}
}
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
@@ -2723,16 +2739,18 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl)
if (self.llvm_object) |_| return;
if (self.dwarf) |*dw| {
- try dw.updateDeclLineNumber(decl);
+ try dw.updateDeclLineNumber(mod, decl_index);
}
}
-pub fn deleteExport(self: *Elf, exp: Export) void {
+pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void {
if (self.llvm_object) |_| return;
-
- const sym_index = exp.sym_index orelse return;
- self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};
- self.global_symbols.items[sym_index].st_info = 0;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
+ const sym_index = metadata.getExportPtr(self, name) orelse return;
+ log.debug("deleting export '{s}'", .{name});
+ self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {};
+ self.global_symbols.items[sym_index.*].st_info = 0;
+ sym_index.* = 0;
}
fn writeProgHeader(self: *Elf, index: usize) !void {
@@ -2761,7 +2779,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
switch (self.ptr_width) {
.p32 => {
var shdr: [1]elf.Elf32_Shdr = undefined;
- shdr[0] = sectHeaderTo32(self.sections.items[index]);
+ shdr[0] = sectHeaderTo32(self.sections.items(.shdr)[index]);
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf32_Shdr, &shdr[0]);
}
@@ -2769,7 +2787,7 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
return self.base.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
},
.p64 => {
- var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
+ var shdr = [1]elf.Elf64_Shdr{self.sections.items(.shdr)[index]};
if (foreign_endian) {
mem.byteSwapAllFields(elf.Elf64_Shdr, &shdr[0]);
}
@@ -2783,11 +2801,11 @@ fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
const entry_size: u16 = self.archPtrWidthBytes();
if (self.offset_table_count_dirty) {
const needed_size = self.offset_table.items.len * entry_size;
- try self.growAllocSection(self.got_section_index.?, self.phdr_got_index.?, needed_size);
+ try self.growAllocSection(self.got_section_index.?, needed_size);
self.offset_table_count_dirty = false;
}
const endian = self.base.options.target.cpu.arch.endian();
- const shdr = &self.sections.items[self.got_section_index.?];
+ const shdr = &self.sections.items(.shdr)[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
switch (entry_size) {
2 => {
@@ -2813,7 +2831,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
const tracy = trace(@src());
defer tracy.end();
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
// Make sure we are not pointlessly writing symbol data that will have to get relocated
// due to running out of space.
if (self.local_symbols.items.len != syms_sect.sh_info) {
@@ -2835,7 +2853,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
.p64 => syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index,
};
const local = self.local_symbols.items[index];
- log.debug("writing symbol {d}, '{s}' at 0x{x}", .{ index, self.getString(local.st_name), off });
+ log.debug("writing symbol {d}, '{?s}' at 0x{x}", .{ index, self.shstrtab.get(local.st_name), off });
log.debug(" ({})", .{local});
switch (self.ptr_width) {
.p32 => {
@@ -2865,7 +2883,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
}
fn writeAllGlobalSymbols(self: *Elf) !void {
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const syms_sect = &self.sections.items(.shdr)[self.symtab_section_index.?];
const sym_size: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
@@ -3215,10 +3233,58 @@ const CsuObjects = struct {
fn logSymtab(self: Elf) void {
log.debug("locals:", .{});
for (self.local_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
log.debug("globals:", .{});
for (self.global_symbols.items) |sym, id| {
- log.debug(" {d}: {s}: @{x} in {d}", .{ id, self.getString(sym.st_name), sym.st_value, sym.st_shndx });
+ log.debug(" {d}: {?s}: @{x} in {d}", .{ id, self.shstrtab.get(sym.st_name), sym.st_value, sym.st_shndx });
}
}
+
+pub fn getProgramHeader(self: *const Elf, shdr_index: u16) elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return self.program_headers.items[index];
+}
+
+pub fn getProgramHeaderPtr(self: *Elf, shdr_index: u16) *elf.Elf64_Phdr {
+ const index = self.sections.items(.phdr_index)[shdr_index];
+ return &self.program_headers.items[index];
+}
+
+/// Returns a pointer to the symbol at sym_index.
+pub fn getSymbolPtr(self: *Elf, sym_index: u32) *elf.Elf64_Sym {
+ return &self.local_symbols.items[sym_index];
+}
+
+/// Returns the symbol at sym_index.
+pub fn getSymbol(self: *const Elf, sym_index: u32) elf.Elf64_Sym {
+ return self.local_symbols.items[sym_index];
+}
+
+/// Returns the name of the symbol at sym_index.
+pub fn getSymbolName(self: *const Elf, sym_index: u32) []const u8 {
+ const sym = self.local_symbols.items[sym_index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
+/// Returns the name of the global symbol at index.
+pub fn getGlobalName(self: *const Elf, index: u32) []const u8 {
+ const sym = self.global_symbols.items[index];
+ return self.shstrtab.get(sym.st_name).?;
+}
+
+pub fn getAtom(self: *const Elf, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
+pub fn getAtomPtr(self: *Elf, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
+/// Returns the atom index if the symbol references an atom.
+/// Returns null otherwise.
+pub fn getAtomIndexForSymbol(self: *Elf, sym_index: u32) ?Atom.Index {
+ return self.atom_by_index_table.get(sym_index);
+}
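
The ELF changes above amount to one pattern: heap-allocated `*TextBlock`/`*Atom` pointers become stable `Atom.Index` handles into a flat `atoms` list, with `getAtom`/`getAtomPtr` accessors and an `atom_by_index_table` keyed by the local symbol index. Below is a minimal standalone sketch of that pattern, not part of the patch: `Linker`, `next_sym_index`, and the trimmed `Atom` fields are simplified stand-ins, and it assumes the same Zig version as the diff (old two-argument `@intCast`, pre-0.11 container init).

const std = @import("std");

const Atom = struct {
    pub const Index = u32;

    local_sym_index: u32,
    prev_index: ?Index,
    next_index: ?Index,
};

const Linker = struct {
    gpa: std.mem.Allocator,
    atoms: std.ArrayListUnmanaged(Atom) = .{},
    atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
    next_sym_index: u32 = 1, // stand-in for allocateLocalSymbol()

    fn createAtom(self: *Linker) !Atom.Index {
        // The new handle is simply the atom's position in the flat list.
        const atom_index = @intCast(Atom.Index, self.atoms.items.len);
        const atom = try self.atoms.addOne(self.gpa);
        const local_sym_index = self.next_sym_index;
        self.next_sym_index += 1;
        try self.atom_by_index_table.putNoClobber(self.gpa, local_sym_index, atom_index);
        atom.* = .{
            .local_sym_index = local_sym_index,
            .prev_index = null,
            .next_index = null,
        };
        return atom_index;
    }

    // Value accessor: the copy stays valid across later createAtom() calls.
    fn getAtom(self: *const Linker, atom_index: Atom.Index) Atom {
        return self.atoms.items[atom_index];
    }

    // Pointer accessor: only valid until the next operation that may grow `atoms`.
    fn getAtomPtr(self: *Linker, atom_index: Atom.Index) *Atom {
        return &self.atoms.items[atom_index];
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    var linker = Linker{ .gpa = gpa_state.allocator() };
    defer {
        linker.atoms.deinit(linker.gpa);
        linker.atom_by_index_table.deinit(linker.gpa);
    }

    const a = try linker.createAtom();
    const b = try linker.createAtom();
    // Chain b after a through indices rather than pointers.
    linker.getAtomPtr(a).next_index = b;
    linker.getAtomPtr(b).prev_index = a;
    std.debug.print("atom {d} -> atom {d}\n", .{ a, linker.getAtom(a).next_index.? });
}

The design point is that indices survive reallocation of the backing `ArrayListUnmanaged`, so they can be stored in free lists, hash maps, and prev/next links; only pointers obtained through `getAtomPtr` are transient.
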
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index caeb3bfbc5..4ab304ef71 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -4,7 +4,6 @@ const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
-const Dwarf = @import("../Dwarf.zig");
const Elf = @import("../Elf.zig");
/// Each decl always gets a local symbol with the fully qualified name.
@@ -20,44 +19,33 @@ offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
-prev: ?*Atom,
-next: ?*Atom,
+prev_index: ?Index,
+next_index: ?Index,
-dbg_info_atom: Dwarf.Atom,
+pub const Index = u32;
-pub const empty = Atom{
- .local_sym_index = 0,
- .offset_table_index = undefined,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
+pub const Reloc = struct {
+ target: u32,
+ offset: u64,
+ addend: u32,
+ prev_vaddr: u64,
};
-pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.local_sym_index = try elf_file.allocateLocalSymbol();
- self.offset_table_index = try elf_file.allocateGotOffset();
- try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self);
-}
-
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.local_sym_index == 0) return null;
return self.local_sym_index;
}
-pub fn getSymbol(self: Atom, elf_file: *Elf) elf.Elf64_Sym {
- const sym_index = self.getSymbolIndex().?;
- return elf_file.local_symbols.items[sym_index];
+pub fn getSymbol(self: Atom, elf_file: *const Elf) elf.Elf64_Sym {
+ return elf_file.getSymbol(self.getSymbolIndex().?);
}
pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
- const sym_index = self.getSymbolIndex().?;
- return &elf_file.local_symbols.items[sym_index];
+ return elf_file.getSymbolPtr(self.getSymbolIndex().?);
}
-pub fn getName(self: Atom, elf_file: *Elf) []const u8 {
- const sym = self.getSymbol();
- return elf_file.getString(sym.st_name);
+pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
+ return elf_file.getSymbolName(self.getSymbolIndex().?);
}
pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
@@ -72,9 +60,10 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
-pub fn capacity(self: Atom, elf_file: *Elf) u64 {
+pub fn capacity(self: Atom, elf_file: *const Elf) u64 {
const self_sym = self.getSymbol(elf_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = elf_file.getAtom(next_index);
const next_sym = next.getSymbol(elf_file);
return next_sym.st_value - self_sym.st_value;
} else {
@@ -83,9 +72,10 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
}
}
-pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
+pub fn freeListEligible(self: Atom, elf_file: *const Elf) bool {
// No need to keep a free list node for the last block.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = elf_file.getAtom(next_index);
const self_sym = self.getSymbol(elf_file);
const next_sym = next.getSymbol(elf_file);
const cap = next_sym.st_value - self_sym.st_value;
@@ -94,3 +84,17 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
const surplus = cap - ideal_cap;
return surplus >= Elf.min_text_capacity;
}
+
+pub fn addRelocation(elf_file: *Elf, atom_index: Index, reloc: Reloc) !void {
+ const gpa = elf_file.base.allocator;
+ const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{};
+ }
+ try gop.value_ptr.append(gpa, reloc);
+}
+
+pub fn freeRelocations(elf_file: *Elf, atom_index: Index) void {
+ var removed_relocs = elf_file.relocs.fetchRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(elf_file.base.allocator);
+}
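
In the same spirit, Atom.zig drops per-atom relocation state: relocations live in a table on the linker keyed by `Atom.Index` and are managed through `addRelocation`/`freeRelocations` above. The following is a hedged standalone sketch of that side-table shape, not part of the patch; `AtomIndex`, the trimmed `Reloc`, and the free functions are stand-ins for the real `Elf.relocs` field and `Atom` methods.

const std = @import("std");

const AtomIndex = u32;

const Reloc = struct {
    target: u32,
    offset: u64,
    addend: u32,
};

const RelocTable = std.AutoHashMapUnmanaged(AtomIndex, std.ArrayListUnmanaged(Reloc));

fn addRelocation(gpa: std.mem.Allocator, relocs: *RelocTable, atom_index: AtomIndex, reloc: Reloc) !void {
    // Lazily create the per-atom list on first use, mirroring the getOrPut pattern above.
    const gop = try relocs.getOrPut(gpa, atom_index);
    if (!gop.found_existing) {
        gop.value_ptr.* = .{};
    }
    try gop.value_ptr.append(gpa, reloc);
}

fn freeRelocations(gpa: std.mem.Allocator, relocs: *RelocTable, atom_index: AtomIndex) void {
    // Removing the table entry frees every relocation recorded for this atom.
    var removed = relocs.fetchRemove(atom_index);
    if (removed) |*kv| kv.value.deinit(gpa);
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var relocs: RelocTable = .{};
    defer {
        var it = relocs.valueIterator();
        while (it.next()) |list| list.deinit(gpa);
        relocs.deinit(gpa);
    }

    try addRelocation(gpa, &relocs, 0, .{ .target = 1, .offset = 8, .addend = 0 });
    freeRelocations(gpa, &relocs, 0);
}

The MachO changes that follow apply the same keying by `Atom.Index` to the rebase, binding, and lazy-binding tables.
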
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 543cb473d7..24ef275c5b 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -66,7 +66,7 @@ const Section = struct {
// TODO is null here necessary, or can we do away with tracking via section
// size in incremental context?
- last_atom: ?*Atom = null,
+ last_atom_index: ?Atom.Index = null,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@@ -83,7 +83,7 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
- free_list: std.ArrayListUnmanaged(*Atom) = .{},
+ free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};
base: File,
@@ -140,8 +140,8 @@ locals_free_list: std.ArrayListUnmanaged(u32) = .{},
globals_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u32 = null,
-dyld_private_atom: ?*Atom = null,
-stub_helper_preamble_atom: ?*Atom = null,
+dyld_private_atom_index: ?Atom.Index = null,
+stub_helper_preamble_atom_index: ?Atom.Index = null,
strtab: StringTable(.strtab) = .{},
@@ -164,10 +164,10 @@ segment_table_dirty: bool = false,
cold_start: bool = true,
/// List of atoms that are either synthetic or map directly to the Zig source program.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Table of atoms indexed by the symbol index.
-atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
+atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@@ -210,11 +210,36 @@ bindings: BindingTable = .{},
/// this will be a table indexed by index into the list of Atoms.
lazy_bindings: BindingTable = .{},
-/// Table of Decls that are currently alive.
-/// We store them here so that we can properly dispose of any allocated
-/// memory within the atom in the incremental linker.
-/// TODO consolidate this.
-decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, ?u8) = .{},
+/// Table of tracked Decls.
+decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
+const DeclMetadata = struct {
+ atom: Atom.Index,
+ section: u8,
+ /// A list of all export aliases of this Decl.
+ /// TODO do we actually need this at all?
+ exports: std.ArrayListUnmanaged(u32) = .{},
+
+ fn getExport(m: DeclMetadata, macho_file: *const MachO, name: []const u8) ?u32 {
+ for (m.exports.items) |exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+
+ fn getExportPtr(m: *DeclMetadata, macho_file: *MachO, name: []const u8) ?*u32 {
+ for (m.exports.items) |*exp| {
+ if (mem.eql(u8, name, macho_file.getSymbolName(.{
+ .sym_index = exp.*,
+ .file = null,
+ }))) return exp;
+ }
+ return null;
+ }
+};
const Entry = struct {
target: SymbolWithLoc,
@@ -229,8 +254,8 @@ const Entry = struct {
return macho_file.getSymbolPtr(.{ .sym_index = entry.sym_index, .file = null });
}
- pub fn getAtom(entry: Entry, macho_file: *MachO) ?*Atom {
- return macho_file.getAtomForSymbol(.{ .sym_index = entry.sym_index, .file = null });
+ pub fn getAtomIndex(entry: Entry, macho_file: *MachO) ?Atom.Index {
+ return macho_file.getAtomIndexForSymbol(.{ .sym_index = entry.sym_index, .file = null });
}
pub fn getName(entry: Entry, macho_file: *MachO) []const u8 {
@@ -238,10 +263,10 @@ const Entry = struct {
}
};
-const BindingTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Atom.Binding));
-const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
-const RebaseTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
-const RelocationTable = std.AutoArrayHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
+const BindingTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Binding));
+const UnnamedConstTable = std.AutoArrayHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
+const RebaseTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
+const RelocationTable = std.AutoArrayHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const PendingUpdate = union(enum) {
resolve_undef: u32,
@@ -286,10 +311,6 @@ pub const default_pagezero_vmsize: u64 = 0x100000000;
/// potential future extensions.
pub const default_headerpad_size: u32 = 0x1000;
-pub const Export = struct {
- sym_index: ?u32 = null,
-};
-
pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
assert(options.target.ofmt == .macho);
@@ -547,8 +568,8 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
try self.allocateSpecialSymbols();
- for (self.relocs.keys()) |atom| {
- try atom.resolveRelocations(self);
+ for (self.relocs.keys()) |atom_index| {
+ try Atom.resolveRelocations(self, atom_index);
}
if (build_options.enable_logging) {
@@ -999,18 +1020,19 @@ pub fn parseDependentLibs(self: *MachO, syslibroot: ?[]const u8, dependent_libs:
}
}
-pub fn writeAtom(self: *MachO, atom: *Atom, code: []const u8) !void {
+pub fn writeAtom(self: *MachO, atom_index: Atom.Index, code: []const u8) !void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const section = self.sections.get(sym.n_sect - 1);
const file_offset = section.header.offset + sym.n_value - section.header.addr;
log.debug("writing atom for symbol {s} at file offset 0x{x}", .{ atom.getName(self), file_offset });
try self.base.file.?.pwriteAll(code, file_offset);
- try atom.resolveRelocations(self);
+ try Atom.resolveRelocations(self, atom_index);
}
-fn writePtrWidthAtom(self: *MachO, atom: *Atom) !void {
+fn writePtrWidthAtom(self: *MachO, atom_index: Atom.Index) !void {
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
- try self.writeAtom(atom, &buffer);
+ try self.writeAtom(atom_index, &buffer);
}
fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
@@ -1026,7 +1048,8 @@ fn markRelocsDirtyByTarget(self: *MachO, target: SymbolWithLoc) void {
fn markRelocsDirtyByAddress(self: *MachO, addr: u64) void {
for (self.relocs.values()) |*relocs| {
for (relocs.items) |*reloc| {
- const target_atom = reloc.getTargetAtom(self) orelse continue;
+ const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
+ const target_atom = self.getAtom(target_atom_index);
const target_sym = target_atom.getSymbol(self);
if (target_sym.n_value < addr) continue;
reloc.dirty = true;
@@ -1053,26 +1076,38 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
}
}
-pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
+pub fn createAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
+ const atom_index = @intCast(Atom.Index, self.atoms.items.len);
+ const atom = try self.atoms.addOne(gpa);
+ const sym_index = try self.allocateSymbol();
+ try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
+ atom.* = .{
+ .sym_index = sym_index,
+ .file = null,
+ .size = 0,
+ .alignment = 0,
+ .prev_index = null,
+ .next_index = null,
+ };
+ log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
+ return atom_index;
+}
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
-
- try self.managed_atoms.append(gpa, atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.got_section_index.? + 1;
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated GOT atom at 0x{x}", .{sym.n_value});
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1087,45 +1122,39 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const target_sym = self.getSymbol(target);
if (target_sym.undf()) {
- try atom.addBinding(self, .{
+ try Atom.addBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
} else {
- try atom.addRebase(self, 0);
+ try Atom.addRebase(self, atom_index, 0);
}
- return atom;
+ return atom_index;
}
pub fn createDyldPrivateAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.dyld_private_atom != null) return;
+ if (self.dyld_private_atom_index != null) return;
- const gpa = self.base.allocator;
-
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.data_section_index.? + 1;
- self.dyld_private_atom = atom;
+ self.dyld_private_atom_index = atom_index;
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
}
pub fn createStubHelperPreambleAtom(self: *MachO) !void {
if (self.dyld_stub_binder_index == null) return;
- if (self.stub_helper_preamble_atom != null) return;
+ if (self.stub_helper_preamble_atom_index != null) return;
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
@@ -1134,22 +1163,23 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
.aarch64 => 6 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.stub_helper_section_index.? + 1;
- const dyld_private_sym_index = self.dyld_private_atom.?.getSymbolIndex().?;
+ const dyld_private_sym_index = if (self.dyld_private_atom_index) |dyld_index|
+ self.getAtom(dyld_index).getSymbolIndex().?
+ else
+ unreachable;
const code = try gpa.alloc(u8, size);
defer gpa.free(code);
@@ -1168,7 +1198,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
code[9] = 0xff;
code[10] = 0x25;
- try atom.addRelocations(self, 2, .{ .{
+ try Atom.addRelocations(self, atom_index, 2, .{ .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 3,
@@ -1208,7 +1238,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
// br x16
mem.writeIntLittle(u32, code[20..][0..4], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 4, .{ .{
+ try Atom.addRelocations(self, atom_index, 4, .{ .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = dyld_private_sym_index, .file = null },
.offset = 0,
@@ -1241,16 +1271,14 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
else => unreachable,
}
- self.stub_helper_preamble_atom = atom;
+ self.stub_helper_preamble_atom_index = atom_index;
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
}
-pub fn createStubHelperAtom(self: *MachO) !*Atom {
+pub fn createStubHelperAtom(self: *MachO) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1258,16 +1286,14 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable,
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1277,6 +1303,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
defer gpa.free(code);
mem.set(u8, code, 0);
+ const stub_helper_preamble_atom_sym_index = if (self.stub_helper_preamble_atom_index) |stub_index|
+ self.getAtom(stub_index).getSymbolIndex().?
+ else
+ unreachable;
+
switch (arch) {
.x86_64 => {
// pushq
@@ -1285,9 +1316,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
// jmpq
code[5] = 0xe9;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 6,
.addend = 0,
.pcrel = true,
@@ -1308,9 +1339,9 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.b(0).toU32());
// Next 4 bytes 8..12 are just a placeholder populated in `populateLazyBindOffsetsInStubHelper`.
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
- .target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
+ .target = .{ .sym_index = stub_helper_preamble_atom_sym_index, .file = null },
.offset = 4,
.addend = 0,
.pcrel = true,
@@ -1320,29 +1351,24 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
-pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom {
- const gpa = self.base.allocator;
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !Atom.Index {
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
sym.n_sect = self.la_symbol_ptr_section_index.? + 1;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -1354,22 +1380,20 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, 0);
- try atom.addLazyBinding(self, .{
+ try Atom.addRebase(self, atom_index, 0);
+ try Atom.addLazyBinding(self, atom_index, .{
.target = self.getGlobal(self.getSymbolName(target)).?,
.offset = 0,
});
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
+ sym.n_value = try self.allocateAtom(atom_index, atom.size, @alignOf(u64));
log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) });
- try self.writePtrWidthAtom(atom);
+ try self.writePtrWidthAtom(atom_index);
- return atom;
+ return atom_index;
}
-pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
+pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !Atom.Index {
const gpa = self.base.allocator;
const arch = self.base.options.target.cpu.arch;
const size: u4 = switch (arch) {
@@ -1377,9 +1401,8 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
- const atom = try gpa.create(Atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
+ const atom_index = try self.createAtom();
+ const atom = self.getAtomPtr(atom_index);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
@@ -1387,7 +1410,6 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable, // unhandled architecture type
};
- errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@@ -1403,7 +1425,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
code[0] = 0xff;
code[1] = 0x25;
- try atom.addRelocation(self, .{
+ try Atom.addRelocation(self, atom_index, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = .{ .sym_index = laptr_sym_index, .file = null },
.offset = 2,
@@ -1424,7 +1446,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
// br x16
mem.writeIntLittle(u32, code[8..12], aarch64.Instruction.br(.x16).toU32());
- try atom.addRelocations(self, 2, .{
+ try Atom.addRelocations(self, atom_index, 2, .{
.{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
.target = .{ .sym_index = laptr_sym_index, .file = null },
@@ -1446,13 +1468,11 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
else => unreachable,
}
- try self.managed_atoms.append(gpa, atom);
-
- sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
+ sym.n_value = try self.allocateAtom(atom_index, size, atom.alignment);
log.debug("allocated stub atom at 0x{x}", .{sym.n_value});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
- return atom;
+ return atom_index;
}
pub fn createMhExecuteHeaderSymbol(self: *MachO) !void {
@@ -1586,9 +1606,12 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
if (self.stubs_table.contains(global)) break :blk;
const stub_index = try self.allocateStubEntry(global);
- const stub_helper_atom = try self.createStubHelperAtom();
- const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
- const stub_atom = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_helper_atom_index = try self.createStubHelperAtom();
+ const stub_helper_atom = self.getAtom(stub_helper_atom_index);
+ const laptr_atom_index = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
+ const laptr_atom = self.getAtom(laptr_atom_index);
+ const stub_atom_index = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
+ const stub_atom = self.getAtom(stub_atom_index);
self.stubs.items[stub_index].sym_index = stub_atom.getSymbolIndex().?;
self.markRelocsDirtyByTarget(global);
}
@@ -1686,10 +1709,11 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
// Add dyld_stub_binder as the final GOT entry.
const got_index = try self.allocateGotEntry(global);
- const got_atom = try self.createGotAtom(global);
+ const got_atom_index = try self.createGotAtom(global);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
pub fn deinit(self: *MachO) void {
@@ -1739,12 +1763,12 @@ pub fn deinit(self: *MachO) void {
}
self.sections.deinit(gpa);
- for (self.managed_atoms.items) |atom| {
- gpa.destroy(atom);
- }
- self.managed_atoms.deinit(gpa);
+ self.atoms.deinit(gpa);
if (self.base.options.module) |_| {
+ for (self.decls.values()) |*m| {
+ m.exports.deinit(gpa);
+ }
self.decls.deinit(gpa);
} else {
assert(self.decls.count() == 0);
@@ -1778,14 +1802,14 @@ pub fn deinit(self: *MachO) void {
self.lazy_bindings.deinit(gpa);
}
-fn freeAtom(self: *MachO, atom: *Atom) void {
- log.debug("freeAtom {*}", .{atom});
-
+fn freeAtom(self: *MachO, atom_index: Atom.Index) void {
const gpa = self.base.allocator;
+ log.debug("freeAtom {d}", .{atom_index});
// Remove any relocs and base relocs associated with this Atom
- self.freeRelocationsForAtom(atom);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
var already_have_free_list_node = false;
@@ -1793,45 +1817,46 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
var i: usize = 0;
// TODO turn free_list into a hash map
while (i < free_list.items.len) {
- if (free_list.items[i] == atom) {
+ if (free_list.items[i] == atom_index) {
_ = free_list.swapRemove(i);
continue;
}
- if (free_list.items[i] == atom.prev) {
+ if (free_list.items[i] == atom.prev_index) {
already_have_free_list_node = true;
}
i += 1;
}
}
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
- if (maybe_last_atom.*) |last_atom| {
- if (last_atom == atom) {
- if (atom.prev) |prev| {
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ if (last_atom_index == atom_index) {
+ if (atom.prev_index) |prev_index| {
// TODO shrink the section size here
- maybe_last_atom.* = prev;
+ maybe_last_atom_index.* = prev_index;
} else {
- maybe_last_atom.* = null;
+ maybe_last_atom_index.* = null;
}
}
}
- if (atom.prev) |prev| {
- prev.next = atom.next;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
- if (!already_have_free_list_node and prev.freeListEligible(self)) {
+ if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
// The free list is heuristics, it doesn't have to be perfect, so we can ignore
// the OOM here.
- free_list.append(gpa, prev) catch {};
+ free_list.append(gpa, prev_index) catch {};
}
} else {
- atom.prev = null;
+ self.getAtomPtr(atom_index).prev_index = null;
}
- if (atom.next) |next| {
- next.prev = atom.prev;
+ if (atom.next_index) |next_index| {
+ self.getAtomPtr(next_index).prev_index = atom.prev_index;
} else {
- atom.next = null;
+ self.getAtomPtr(atom_index).next_index = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
@@ -1859,27 +1884,24 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
self.locals.items[sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
- atom.sym_index = 0;
-
- if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ self.getAtomPtr(atom_index).sym_index = 0;
}
-fn shrinkAtom(self: *MachO, atom: *Atom, new_block_size: u64) void {
+fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
_ = self;
- _ = atom;
+ _ = atom_index;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
-fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value;
- return self.allocateAtom(atom, new_atom_size, alignment);
+ return self.allocateAtom(atom_index, new_atom_size, alignment);
}
pub fn allocateSymbol(self: *MachO) !u32 {
@@ -1986,15 +2008,12 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeUnnamedConsts(decl_index);
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ self.freeUnnamedConsts(decl_index);
+ Atom.freeRelocations(self, atom_index);
+
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2024,13 +2043,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const addr = try self.updateDeclCode(decl_index, code);
if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
}
// Since we updated the vaddr and the size, each corresponding export symbol also
@@ -2065,14 +2078,10 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
log.debug("allocating symbol indexes for {?s}", .{name});
- const atom = try gpa.create(Atom);
- errdefer gpa.destroy(atom);
- atom.* = Atom.empty;
- try atom.ensureInitialized(self);
- try self.managed_atoms.append(gpa, atom);
+ const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
- .parent_atom_index = atom.getSymbolIndex().?,
+ .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -2085,24 +2094,25 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
+ const atom = self.getAtomPtr(atom_index);
atom.size = code.len;
atom.alignment = required_alignment;
// TODO: work out logic for disambiguating functions from function pointers
- // const sect_id = self.getDeclOutputSection(decl);
+ // const sect_id = self.getDeclOutputSection(decl_index);
const sect_id = self.data_const_section_index.?;
const symbol = atom.getSymbolPtr(self);
symbol.n_strx = name_str_index;
symbol.n_type = macho.N_SECT;
symbol.n_sect = sect_id + 1;
- symbol.n_value = try self.allocateAtom(atom, code.len, required_alignment);
- errdefer self.freeAtom(atom);
+ symbol.n_value = try self.allocateAtom(atom_index, code.len, required_alignment);
+ errdefer self.freeAtom(atom_index);
- try unnamed_consts.append(gpa, atom);
+ try unnamed_consts.append(gpa, atom_index);
log.debug("allocated atom for {?s} at 0x{x}", .{ name, symbol.n_value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbolIndex().?;
}
@@ -2129,14 +2139,9 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}
}
- const atom = &decl.link.macho;
- try atom.ensureInitialized(self);
- const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
- if (gop.found_existing) {
- self.freeRelocationsForAtom(atom);
- } else {
- gop.value_ptr.* = null;
- }
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ Atom.freeRelocations(self, atom_index);
+ const atom = self.getAtom(atom_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -2155,14 +2160,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}, &code_buffer, .{
.dwarf = ds,
}, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
- .parent_atom_index = decl.link.macho.getSymbolIndex().?,
+ .parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
@@ -2176,13 +2181,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
const addr = try self.updateDeclCode(decl_index, code);
if (decl_state) |*ds| {
- try self.d_sym.?.dwarf.commitDeclState(
- module,
- decl_index,
- addr,
- decl.link.macho.size,
- ds,
- );
+ try self.d_sym.?.dwarf.commitDeclState(module, decl_index, addr, atom.size, ds);
}
// Since we updated the vaddr and the size, each corresponding export symbol also
@@ -2190,7 +2189,20 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
-fn getDeclOutputSection(self: *MachO, decl: *Module.Decl) u8 {
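+/// Returns the atom index tracked for `decl_index`, creating a fresh atom (and its decl metadata entry) the first time the decl is seen.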
+pub fn getOrCreateAtomForDecl(self: *MachO, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = .{
+ .atom = try self.createAtom(),
+ .section = self.getDeclOutputSection(decl_index),
+ .exports = .{},
+ };
+ }
+ return gop.value_ptr.atom;
+}
+
+fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const val = decl.val;
const zig_ty = ty.zigTypeTag();
@@ -2341,13 +2353,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
const sym_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(sym_name);
- const atom = &decl.link.macho;
- const sym_index = atom.getSymbolIndex().?; // Atom was not initialized
- const decl_ptr = self.decls.getPtr(decl_index).?;
- if (decl_ptr.* == null) {
- decl_ptr.* = self.getDeclOutputSection(decl);
- }
- const sect_id = decl_ptr.*.?;
+ const decl_metadata = self.decls.get(decl_index).?;
+ const atom_index = decl_metadata.atom;
+ const atom = self.getAtom(atom_index);
+ const sym_index = atom.getSymbolIndex().?;
+ const sect_id = decl_metadata.section;
const code_len = code.len;
if (atom.size != 0) {
@@ -2357,11 +2367,11 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const capacity = decl.link.macho.capacity(self);
+ const capacity = atom.capacity(self);
const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, sym.n_value, required_alignment);
if (need_realloc) {
- const vaddr = try self.growAtom(atom, code_len, required_alignment);
+ const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ sym_name, sym.n_value, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
@@ -2369,19 +2379,19 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
- const got_atom = self.getGotAtomForSymbol(got_target).?;
+ const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
} else if (code_len < atom.size) {
- self.shrinkAtom(atom, code_len);
- } else if (atom.next == null) {
+ self.shrinkAtom(atom_index, code_len);
+ } else if (atom.next_index == null) {
const header = &self.sections.items(.header)[sect_id];
const segment = self.getSegment(sect_id);
const needed_size = (sym.n_value + code_len) - segment.vmaddr;
header.size = needed_size;
}
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
} else {
const name_str_index = try self.strtab.insert(gpa, sym_name);
const sym = atom.getSymbolPtr(self);
@@ -2390,32 +2400,32 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
sym.n_sect = sect_id + 1;
sym.n_desc = 0;
- const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
- errdefer self.freeAtom(atom);
+ const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
+ errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, vaddr });
log.debug(" (required alignment 0x{x})", .{required_alignment});
- atom.size = code_len;
+ self.getAtomPtr(atom_index).size = code_len;
sym.n_value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const got_index = try self.allocateGotEntry(got_target);
- const got_atom = try self.createGotAtom(got_target);
+ const got_atom_index = try self.createGotAtom(got_target);
+ const got_atom = self.getAtom(got_atom_index);
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
- try self.writePtrWidthAtom(got_atom);
+ try self.writePtrWidthAtom(got_atom_index);
}
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
- try self.writeAtom(atom, code);
+ try self.writeAtom(atom_index, code);
return atom.getSymbol(self).n_value;
}
-pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
- _ = module;
+pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
if (self.d_sym) |*d_sym| {
- try d_sym.dwarf.updateDeclLineNumber(decl);
+ try d_sym.dwarf.updateDeclLineNumber(module, decl_index);
}
}
@@ -2432,22 +2442,17 @@ pub fn updateDeclExports(
if (self.llvm_object) |llvm_object|
return llvm_object.updateDeclExports(module, decl_index, exports);
}
+
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
- const atom = &decl.link.macho;
-
- if (atom.getSymbolIndex() == null) return;
-
- const gop = try self.decls.getOrPut(gpa, decl_index);
- if (!gop.found_existing) {
- gop.value_ptr.* = self.getDeclOutputSection(decl);
- }
-
+ const atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
+ const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});
@@ -2485,9 +2490,9 @@ pub fn updateDeclExports(
continue;
}
- const sym_index = exp.link.macho.sym_index orelse blk: {
+ const sym_index = decl_metadata.getExport(self, exp_name) orelse blk: {
const sym_index = try self.allocateSymbol();
- exp.link.macho.sym_index = sym_index;
+ try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
@@ -2535,16 +2540,18 @@ pub fn updateDeclExports(
}
}
-pub fn deleteExport(self: *MachO, exp: Export) void {
+pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void {
if (self.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const metadata = self.decls.getPtr(decl_index) orelse return;
const gpa = self.base.allocator;
+ const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name});
+ defer gpa.free(exp_name);
+ const sym_index = metadata.getExportPtr(self, exp_name) orelse return;
- const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
+ const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
- const sym_name = self.getSymbolName(sym_loc);
- log.debug("deleting export '{s}'", .{sym_name});
+ log.debug("deleting export '{s}'", .{exp_name});
assert(sym.sect() and sym.ext());
sym.* = .{
.n_strx = 0,
@@ -2553,9 +2560,9 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.n_desc = 0,
.n_value = 0,
};
- self.locals_free_list.append(gpa, sym_index) catch {};
+ self.locals_free_list.append(gpa, sym_index.*) catch {};
- if (self.resolver.fetchRemove(sym_name)) |entry| {
+ if (self.resolver.fetchRemove(exp_name)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{
@@ -2563,17 +2570,8 @@ pub fn deleteExport(self: *MachO, exp: Export) void {
.file = null,
};
}
-}
-fn freeRelocationsForAtom(self: *MachO, atom: *Atom) void {
- var removed_relocs = self.relocs.fetchOrderedRemove(atom);
- if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
- var removed_rebases = self.rebases.fetchOrderedRemove(atom);
- if (removed_rebases) |*rebases| rebases.value.deinit(self.base.allocator);
- var removed_bindings = self.bindings.fetchOrderedRemove(atom);
- if (removed_bindings) |*bindings| bindings.value.deinit(self.base.allocator);
- var removed_lazy_bindings = self.lazy_bindings.fetchOrderedRemove(atom);
- if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(self.base.allocator);
+ sym_index.* = 0;
}
fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
@@ -2594,29 +2592,25 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
- if (self.decls.fetchSwapRemove(decl_index)) |kv| {
- if (kv.value) |_| {
- self.freeAtom(&decl.link.macho);
- self.freeUnnamedConsts(decl_index);
- }
+ if (self.decls.fetchSwapRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ self.freeAtom(kv.value.atom);
+ self.freeUnnamedConsts(decl_index);
+ kv.value.exports.deinit(self.base.allocator);
}
if (self.d_sym) |*d_sym| {
- d_sym.dwarf.freeDecl(decl);
+ d_sym.dwarf.freeDecl(decl_index);
}
}
pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: File.RelocInfo) !u64 {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
-
assert(self.llvm_object == null);
- try decl.link.macho.ensureInitialized(self);
- const sym_index = decl.link.macho.getSymbolIndex().?;
-
- const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
- try atom.addRelocation(self, .{
+ const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
+ const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
+ const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
+ try Atom.addRelocation(self, atom_index, .{
.type = switch (self.base.options.target.cpu.arch) {
.aarch64 => @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_UNSIGNED),
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
@@ -2628,7 +2622,7 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
.pcrel = false,
.length = 3,
});
- try atom.addRebase(self, @intCast(u32, reloc_info.offset));
+ try Atom.addRebase(self, atom_index, @intCast(u32, reloc_info.offset));
return 0;
}
@@ -2860,34 +2854,36 @@ fn moveSectionInVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void
// TODO: enforce order by increasing VM addresses in self.sections container.
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
const index = @intCast(u8, sect_id + 1 + next_sect_id);
- const maybe_last_atom = &self.sections.items(.last_atom)[index];
const next_segment = self.getSegmentPtr(index);
next_header.addr += diff;
next_segment.vmaddr += diff;
- if (maybe_last_atom.*) |last_atom| {
- var atom = last_atom;
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[index];
+ if (maybe_last_atom_index.*) |last_atom_index| {
+ var atom_index = last_atom_index;
while (true) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbolPtr(self);
sym.n_value += diff;
- if (atom.prev) |prev| {
- atom = prev;
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
} else break;
}
}
}
}
-fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64 {
+fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
const tracy = trace(@src());
defer tracy.end();
+ const atom = self.getAtom(atom_index);
const sect_id = atom.getSymbol(self).n_sect - 1;
const segment = self.getSegmentPtr(sect_id);
const header = &self.sections.items(.header)[sect_id];
const free_list = &self.sections.items(.free_list)[sect_id];
- const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
+ const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
const requires_padding = blk: {
if (!header.isCode()) break :blk false;
if (header.isSymbolStubs()) break :blk false;
@@ -2901,7 +2897,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?*Atom = null;
+ var atom_placement: ?Atom.Index = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
@@ -2909,7 +2905,8 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
var vaddr = blk: {
var i: usize = 0;
while (i < free_list.items.len) {
- const big_atom = free_list.items[i];
+ const big_atom_index = free_list.items[i];
+ const big_atom = self.getAtom(big_atom_index);
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const sym = big_atom.getSymbol(self);
@@ -2937,30 +2934,35 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
const keep_free_list_node = remaining_capacity >= min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom;
+ atom_placement = big_atom_index;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
- } else if (maybe_last_atom.*) |last| {
+ } else if (maybe_last_atom_index.*) |last_index| {
+ const last = self.getAtom(last_index);
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
- atom_placement = last;
+ atom_placement = last_index;
break :blk new_start_vaddr;
} else {
break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment);
}
};
- const expand_section = atom_placement == null or atom_placement.?.next == null;
+ const expand_section = if (atom_placement) |placement_index|
+ self.getAtom(placement_index).next_index == null
+ else
+ true;
if (expand_section) {
const sect_capacity = self.allocatedSize(header.offset);
const needed_size = (vaddr + new_atom_size) - segment.vmaddr;
if (needed_size > sect_capacity) {
const new_offset = self.findFreeSpace(needed_size, self.page_size);
- const current_size = if (maybe_last_atom.*) |last_atom| blk: {
+ const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
+ const last_atom = self.getAtom(last_atom_index);
const sym = last_atom.getSymbol(self);
break :blk (sym.n_value + last_atom.size) - segment.vmaddr;
} else 0;
@@ -2992,7 +2994,7 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
header.size = needed_size;
segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
- maybe_last_atom.* = atom;
+ maybe_last_atom_index.* = atom_index;
self.segment_table_dirty = true;
}
@@ -3001,21 +3003,31 @@ fn allocateAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !
if (header.@"align" < align_pow) {
header.@"align" = align_pow;
}
-
- if (atom.prev) |prev| {
- prev.next = atom.next;
- }
- if (atom.next) |next| {
- next.prev = atom.prev;
+ {
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.size = new_atom_size;
+ atom_ptr.alignment = @intCast(u32, alignment);
}
- if (atom_placement) |big_atom| {
- atom.prev = big_atom;
- atom.next = big_atom.next;
- big_atom.next = atom;
+ if (atom.prev_index) |prev_index| {
+ const prev = self.getAtomPtr(prev_index);
+ prev.next_index = atom.next_index;
+ }
+ if (atom.next_index) |next_index| {
+ const next = self.getAtomPtr(next_index);
+ next.prev_index = atom.prev_index;
+ }
+
+ if (atom_placement) |big_atom_index| {
+ const big_atom = self.getAtomPtr(big_atom_index);
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = big_atom_index;
+ atom_ptr.next_index = big_atom.next_index;
+ big_atom.next_index = atom_index;
} else {
- atom.prev = null;
- atom.next = null;
+ const atom_ptr = self.getAtomPtr(atom_index);
+ atom_ptr.prev_index = null;
+ atom_ptr.next_index = null;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -3155,7 +3167,8 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (self.rebases.keys()) |atom, i| {
+ for (self.rebases.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3184,7 +3197,8 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const gpa = self.base.allocator;
const slice = self.sections.slice();
- for (raw_bindings.keys()) |atom, i| {
+ for (raw_bindings.keys()) |atom_index, i| {
+ const atom = self.getAtom(atom_index);
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
@@ -3359,7 +3373,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
if (lazy_bind.size() == 0) return;
const stub_helper_section_index = self.stub_helper_section_index.?;
- assert(self.stub_helper_preamble_atom != null);
+ assert(self.stub_helper_preamble_atom_index != null);
const section = self.sections.get(stub_helper_section_index);
@@ -3369,10 +3383,11 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
else => unreachable,
};
const header = section.header;
- var atom = section.last_atom.?;
+ var atom_index = section.last_atom_index.?;
var index: usize = lazy_bind.offsets.items.len;
while (index > 0) : (index -= 1) {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const file_offset = header.offset + sym.n_value - header.addr + stub_offset;
const bind_offset = lazy_bind.offsets.items[index - 1];
@@ -3385,7 +3400,7 @@ fn populateLazyBindOffsetsInStubHelper(self: *MachO, lazy_bind: LazyBind) !void
try self.base.file.?.pwriteAll(mem.asBytes(&bind_offset), file_offset);
- atom = atom.prev.?;
+ atom_index = atom.prev_index.?;
}
}
@@ -3828,25 +3843,35 @@ pub fn getOrPutGlobalPtr(self: *MachO, name: []const u8) !GetOrPutGlobalPtrResul
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}
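+/// Returns a copy of the `Atom` at `atom_index`; asserts the index is in bounds.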
+pub fn getAtom(self: *MachO, atom_index: Atom.Index) Atom {
+ assert(atom_index < self.atoms.items.len);
+ return self.atoms.items[atom_index];
+}
+
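+/// Returns a mutable pointer to the `Atom` at `atom_index`; asserts the index is in bounds.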
+pub fn getAtomPtr(self: *MachO, atom_index: Atom.Index) *Atom {
+ assert(atom_index < self.atoms.items.len);
+ return &self.atoms.items[atom_index];
+}
+
/// Returns atom if there is an atom referenced by the symbol described by `sym_with_loc` descriptor.
/// Returns null on failure.
-pub fn getAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_with_loc.file == null);
return self.atom_by_index_table.get(sym_with_loc.sym_index);
}
/// Returns GOT atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getGotAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getGotAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_with_loc) orelse return null;
- return self.got_entries.items[got_index].getAtom(self);
+ return self.got_entries.items[got_index].getAtomIndex(self);
}
/// Returns stubs atom that references `sym_with_loc` if one exists.
/// Returns null otherwise.
-pub fn getStubsAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom {
+pub fn getStubsAtomIndexForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?Atom.Index {
const stubs_index = self.stubs_table.get(sym_with_loc) orelse return null;
- return self.stubs.items[stubs_index].getAtom(self);
+ return self.stubs.items[stubs_index].getAtomIndex(self);
}
/// Returns symbol location corresponding to the set entrypoint.
@@ -4232,26 +4257,31 @@ pub fn logAtoms(self: *MachO) void {
log.debug("atoms:", .{});
const slice = self.sections.slice();
- for (slice.items(.last_atom)) |last, i| {
- var atom = last orelse continue;
+ for (slice.items(.last_atom_index)) |last_atom_index, i| {
+ var atom_index = last_atom_index orelse continue;
const header = slice.items(.header)[i];
- while (atom.prev) |prev| {
- atom = prev;
+ while (true) {
+ const atom = self.getAtom(atom_index);
+ if (atom.prev_index) |prev_index| {
+ atom_index = prev_index;
+ } else break;
}
log.debug("{s},{s}", .{ header.segName(), header.sectName() });
while (true) {
- self.logAtom(atom);
- if (atom.next) |next| {
- atom = next;
+ self.logAtom(atom_index);
+ const atom = self.getAtom(atom_index);
+ if (atom.next_index) |next_index| {
+ atom_index = next_index;
} else break;
}
}
}
-pub fn logAtom(self: *MachO, atom: *const Atom) void {
+pub fn logAtom(self: *MachO, atom_index: Atom.Index) void {
+ const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
const sym_name = atom.getName(self);
log.debug(" ATOM(%{?d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index f15958b3df..5fb94b7c13 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -13,7 +13,6 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
-const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Relocation = @import("Relocation.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;
@@ -39,10 +38,11 @@ size: u64,
alignment: u32,
/// Points to the previous and next neighbours
-next: ?*Atom,
-prev: ?*Atom,
+/// TODO use the same trick as with symbols: reserve index 0 as null atom
+next_index: ?Index,
+prev_index: ?Index,
-dbg_info_atom: Dwarf.Atom,
+pub const Index = u32;
pub const Binding = struct {
target: SymbolWithLoc,
@@ -54,22 +54,6 @@ pub const SymbolAtOffset = struct {
offset: u64,
};
-pub const empty = Atom{
- .sym_index = 0,
- .file = null,
- .size = 0,
- .alignment = 0,
- .prev = null,
- .next = null,
- .dbg_info_atom = undefined,
-};
-
-pub fn ensureInitialized(self: *Atom, macho_file: *MachO) !void {
- if (self.getSymbolIndex() != null) return; // Already initialized
- self.sym_index = try macho_file.allocateSymbol();
- try macho_file.atom_by_index_table.putNoClobber(macho_file.base.allocator, self.sym_index, self);
-}
-
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
return self.sym_index;
@@ -108,7 +92,8 @@ pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
const self_sym = self.getSymbol(macho_file);
- if (self.next) |next| {
+ if (self.next_index) |next_index| {
+ const next = macho_file.getAtom(next_index);
const next_sym = next.getSymbol(macho_file);
return next_sym.n_value - self_sym.n_value;
} else {
@@ -120,7 +105,8 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
// No need to keep a free list node for the last atom.
- const next = self.next orelse return false;
+ const next_index = self.next_index orelse return false;
+ const next = macho_file.getAtom(next_index);
const self_sym = self.getSymbol(macho_file);
const next_sym = next.getSymbol(macho_file);
const cap = next_sym.n_value - self_sym.n_value;
@@ -130,19 +116,19 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
return surplus >= MachO.min_text_capacity;
}
-pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
- return self.addRelocations(macho_file, 1, .{reloc});
+pub fn addRelocation(macho_file: *MachO, atom_index: Index, reloc: Relocation) !void {
+ return addRelocations(macho_file, atom_index, 1, .{reloc});
}
pub fn addRelocations(
- self: *Atom,
macho_file: *MachO,
+ atom_index: Index,
comptime count: comptime_int,
relocs: [count]Relocation,
) !void {
const gpa = macho_file.base.allocator;
const target = macho_file.base.options.target;
- const gop = try macho_file.relocs.getOrPut(gpa, self);
+ const gop = try macho_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
@@ -156,56 +142,72 @@ pub fn addRelocations(
}
}
-pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
+pub fn addRebase(macho_file: *MachO, atom_index: Index, offset: u32) !void {
const gpa = macho_file.base.allocator;
- log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, self.getSymbolIndex() });
- const gop = try macho_file.rebases.getOrPut(gpa, self);
+ const atom = macho_file.getAtom(atom_index);
+ log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
+ const gop = try macho_file.rebases.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}
-pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.bindings.getOrPut(gpa, self);
+ const gop = try macho_file.bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
+pub fn addLazyBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
const gpa = macho_file.base.allocator;
+ const atom = macho_file.getAtom(atom_index);
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
- self.getSymbolIndex(),
+ atom.getSymbolIndex(),
});
- const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
+ const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}
-pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
- const relocs = macho_file.relocs.get(self) orelse return;
- const source_sym = self.getSymbol(macho_file);
+pub fn resolveRelocations(macho_file: *MachO, atom_index: Index) !void {
+ const atom = macho_file.getAtom(atom_index);
+ const relocs = macho_file.relocs.get(atom_index) orelse return;
+ const source_sym = atom.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
- log.debug("relocating '{s}'", .{self.getName(macho_file)});
+ log.debug("relocating '{s}'", .{atom.getName(macho_file)});
for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
- try reloc.resolve(self, macho_file, file_offset);
+ try reloc.resolve(macho_file, atom_index, file_offset);
reloc.dirty = false;
}
}
+
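+/// Removes and frees all relocations, rebases, bindings and lazy bindings recorded for `atom_index`.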
+pub fn freeRelocations(macho_file: *MachO, atom_index: Index) void {
+ const gpa = macho_file.base.allocator;
+ var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
+ if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
+ var removed_rebases = macho_file.rebases.fetchOrderedRemove(atom_index);
+ if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
+ var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
+ if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
+ var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
+ if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
+}
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 7c22f441cd..0a5c8b0372 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -82,11 +82,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}
if (self.debug_str_section_index == null) {
- assert(self.dwarf.strtab.items.len == 0);
- try self.dwarf.strtab.append(self.allocator, 0);
+ assert(self.dwarf.strtab.buffer.items.len == 0);
+ try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
- @intCast(u32, self.dwarf.strtab.items.len),
+ @intCast(u32, self.dwarf.strtab.buffer.items.len),
0,
);
self.debug_string_table_dirty = true;
@@ -291,10 +291,10 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
{
const sect_index = self.debug_str_section_index.?;
- if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
- const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
+ if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
+ const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
try self.growSection(sect_index, needed_size, false);
- try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
+ try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}
diff --git a/src/link/MachO/Relocation.zig b/src/link/MachO/Relocation.zig
index ca6bf9d681..07e5cf1aa2 100644
--- a/src/link/MachO/Relocation.zig
+++ b/src/link/MachO/Relocation.zig
@@ -29,33 +29,35 @@ pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
}
}
-pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
+pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.type)) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.type)) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
- => return macho_file.getGotAtomForSymbol(self.target),
+ => return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
else => unreachable,
}
- if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
- return macho_file.getAtomForSymbol(self.target);
+ if (macho_file.getStubsAtomIndexForSymbol(self.target)) |stubs_atom| return stubs_atom;
+ return macho_file.getAtomIndexForSymbol(self.target);
}
-pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
+pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
+ const atom = macho_file.getAtom(atom_index);
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;
- const target_atom = self.getTargetAtom(macho_file) orelse return;
+ const target_atom_index = self.getTargetAtomIndex(macho_file) orelse return;
+ const target_atom = macho_file.getAtom(target_atom_index);
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;
log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 4cb346aa47..81fae399ef 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -3596,7 +3596,8 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
man.hash.addOptionalBytes(options.sysroot);
try man.addOptionalFile(options.entitlements);
- // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
+ // We don't actually care whether it's a cache hit or miss; we just
+ // need the digest and the lock.
_ = try man.hit();
digest = man.final();
@@ -4177,9 +4178,11 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
log.debug("failed to save linking hash digest file: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
- man.writeManifest() catch |err| {
- log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
- };
+ if (man.have_exclusive_lock) {
+ man.writeManifest() catch |err| {
+ log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
+ };
+ }
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
macho_file.base.lock = man.toOwnedLock();
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index a8b8caafab..87e3ca5c22 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -21,14 +21,7 @@ const Allocator = std.mem.Allocator;
const log = std.log.scoped(.link);
const assert = std.debug.assert;
-const FnDeclOutput = struct {
- /// this code is modified when relocated so it is mutable
- code: []u8,
- /// this might have to be modified in the linker, so thats why its mutable
- lineinfo: []u8,
- start_line: u32,
- end_line: u32,
-};
+pub const base_tag = .plan9;
base: link.File,
sixtyfour_bit: bool,
@@ -101,6 +94,9 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{},
syms_index_free_list: std.ArrayListUnmanaged(usize) = .{},
+decl_blocks: std.ArrayListUnmanaged(DeclBlock) = .{},
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
+
const Reloc = struct {
target: Module.Decl.Index,
offset: u64,
@@ -115,6 +111,42 @@ const Bases = struct {
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: DeclBlock, code: []const u8 }));
+pub const PtrWidth = enum { p32, p64 };
+
+pub const DeclBlock = struct {
+ type: aout.Sym.Type,
+ /// offset in the text or data sects
+ offset: ?u64,
+ /// offset into syms
+ sym_index: ?usize,
+ /// offset into got
+ got_index: ?usize,
+
+ pub const Index = u32;
+};
+
+const DeclMetadata = struct {
+ index: DeclBlock.Index,
+ exports: std.ArrayListUnmanaged(usize) = .{},
+
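+ /// Returns the index into `p9.syms` of this decl's export named `name`, or null if no such export exists.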
+ fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize {
+ for (m.exports.items) |exp| {
+ const sym = p9.syms.items[exp];
+ if (mem.eql(u8, name, sym.name)) return exp;
+ }
+ return null;
+ }
+};
+
+const FnDeclOutput = struct {
+ /// this code is modified when relocated so it is mutable
+ code: []u8,
+ /// this might have to be modified in the linker, so that's why it's mutable
+ lineinfo: []u8,
+ start_line: u32,
+ end_line: u32,
+};
+
fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
return addr + switch (t) {
.T, .t, .l, .L => self.bases.text,
@@ -127,22 +159,6 @@ fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
return self.getAddr(s.value, s.type);
}
-pub const DeclBlock = struct {
- type: aout.Sym.Type,
- /// offset in the text or data sects
- offset: ?u64,
- /// offset into syms
- sym_index: ?usize,
- /// offset into got
- got_index: ?usize,
- pub const empty = DeclBlock{
- .type = .t,
- .offset = null,
- .sym_index = null,
- .got_index = null,
- };
-};
-
pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
return switch (arch) {
.x86_64 => .{
@@ -164,8 +180,6 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
};
}
-pub const PtrWidth = enum { p32, p64 };
-
pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -271,7 +285,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
@@ -313,11 +327,11 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.end_line = end_line,
};
try self.putFn(decl_index, out);
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -387,7 +401,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
}
}
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index });
@@ -414,28 +428,31 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
self.base.allocator.free(old_entry.value);
}
- return self.updateFinish(decl);
+ return self.updateFinish(decl_index);
}
/// called at the end of update{Decl,Func}
-fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
+fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
+ const decl = self.base.options.module.?.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag() == .Fn);
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
+
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
// write the internal linker metadata
- decl.link.plan9.type = sym_t;
+ decl_block.type = sym_t;
// write the symbol
// we already have the got index
const sym: aout.Sym = .{
.value = undefined, // the value of stuff gets filled in in flushModule
- .type = decl.link.plan9.type,
+ .type = decl_block.type,
.name = mem.span(decl.name),
};
- if (decl.link.plan9.sym_index) |s| {
+ if (decl_block.sym_index) |s| {
self.syms.items[s] = sym;
} else {
const s = try self.allocateSymbolIndex();
- decl.link.plan9.sym_index = s;
+ decl_block.sym_index = s;
self.syms.items[s] = sym;
}
}
@@ -550,6 +567,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
{
@@ -568,16 +586,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(text_i, .t);
text_i += out.code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeIntNative(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off));
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
}
@@ -598,6 +616,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
+ const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const code = entry.value_ptr.*;
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
@@ -606,15 +625,15 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
iovecs_i += 1;
const off = self.getAddr(data_i, .d);
data_i += code.len;
- decl.link.plan9.offset = off;
+ decl_block.offset = off;
if (!self.sixtyfour_bit) {
- mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
} else {
- mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
+ mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
}
- self.syms.items[decl.link.plan9.sym_index.?].value = off;
+ self.syms.items[decl_block.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
- try self.addDeclExports(mod, decl, exports.items);
+ try self.addDeclExports(mod, decl_index, exports.items);
}
}
// write the unnamed constants after the other data decls
@@ -676,7 +695,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
for (kv.value_ptr.items) |reloc| {
const target_decl_index = reloc.target;
const target_decl = mod.declPtr(target_decl_index);
- const target_decl_offset = target_decl.link.plan9.offset.?;
+ const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index);
+ const target_decl_offset = target_decl_block.offset.?;
const offset = reloc.offset;
const addend = reloc.addend;
@@ -709,28 +729,36 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
fn addDeclExports(
self: *Plan9,
module: *Module,
- decl: *Module.Decl,
+ decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
+ const metadata = self.decls.getPtr(decl_index).?;
+ const decl_block = self.getDeclBlock(metadata.index);
+
for (exports) |exp| {
// plan9 does not support custom sections
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
- try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
+ try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
+ self.base.allocator,
+ module.declPtr(decl_index).srcLoc(),
+ "plan9 does not support extra sections",
+ .{},
+ ));
break;
}
}
const sym = .{
- .value = decl.link.plan9.offset.?,
- .type = decl.link.plan9.type.toGlobal(),
+ .value = decl_block.offset.?,
+ .type = decl_block.type.toGlobal(),
.name = exp.options.name,
};
- if (exp.link.plan9) |i| {
+ if (metadata.getExport(self, exp.options.name)) |i| {
self.syms.items[i] = sym;
} else {
try self.syms.append(self.base.allocator, sym);
- exp.link.plan9 = self.syms.items.len - 1;
+ try metadata.exports.append(self.base.allocator, self.syms.items.len - 1);
}
}
}
@@ -760,13 +788,18 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
self.base.allocator.free(removed_entry.value);
}
}
- if (decl.link.plan9.got_index) |i| {
- // TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
- self.got_index_free_list.append(self.base.allocator, i) catch {};
- }
- if (decl.link.plan9.sym_index) |i| {
- self.syms_index_free_list.append(self.base.allocator, i) catch {};
- self.syms.items[i] = aout.Sym.undefined_symbol;
+ if (self.decls.fetchRemove(decl_index)) |const_kv| {
+ var kv = const_kv;
+ const decl_block = self.getDeclBlock(kv.value.index);
+ if (decl_block.got_index) |i| {
+ // TODO: if this catch {} is hit, an assertion in flushModule will be triggered because got_index_free_list will have the wrong length
+ self.got_index_free_list.append(self.base.allocator, i) catch {};
+ }
+ if (decl_block.sym_index) |i| {
+ self.syms_index_free_list.append(self.base.allocator, i) catch {};
+ self.syms.items[i] = aout.Sym.undefined_symbol;
+ }
+ kv.value.exports.deinit(self.base.allocator);
}
self.freeUnnamedConsts(decl_index);
{
@@ -786,12 +819,30 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
unnamed_consts.clearAndFree(self.base.allocator);
}
-pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void {
- const mod = self.base.options.module.?;
- const decl = mod.declPtr(decl_index);
- if (decl.link.plan9.got_index == null) {
- decl.link.plan9.got_index = self.allocateGotIndex();
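+/// Appends a new, empty `DeclBlock` (no offset, symbol or GOT index yet) and returns its index.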
+fn createDeclBlock(self: *Plan9) !DeclBlock.Index {
+ const gpa = self.base.allocator;
+ const index = @intCast(DeclBlock.Index, self.decl_blocks.items.len);
+ const decl_block = try self.decl_blocks.addOne(gpa);
+ decl_block.* = .{
+ .type = .t,
+ .offset = null,
+ .sym_index = null,
+ .got_index = null,
+ };
+ return index;
+}
+
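+/// Ensures `decl_index` has associated metadata, allocating a `DeclBlock` and GOT index the first time the decl is seen, and returns the block index.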
+pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !DeclBlock.Index {
+ const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ const index = try self.createDeclBlock();
+ self.getDeclBlockPtr(index).got_index = self.allocateGotIndex();
+ gop.value_ptr.* = .{
+ .index = index,
+ .exports = .{},
+ };
}
+ return gop.value_ptr.index;
}
pub fn updateDeclExports(
@@ -800,7 +851,7 @@ pub fn updateDeclExports(
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
- try self.seeDecl(decl_index);
+ _ = try self.seeDecl(decl_index);
// we do all the things in flush
_ = module;
_ = exports;
@@ -842,10 +893,17 @@ pub fn deinit(self: *Plan9) void {
self.syms_index_free_list.deinit(gpa);
self.file_segments.deinit(gpa);
self.path_arena.deinit();
+ self.decl_blocks.deinit(gpa);
+
+ {
+ var it = self.decls.iterator();
+ while (it.next()) |entry| {
+ entry.value_ptr.exports.deinit(gpa);
+ }
+ self.decls.deinit(gpa);
+ }
}
-pub const Export = ?usize;
-pub const base_tag = .plan9;
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
if (options.use_llvm)
return error.LLVMBackendDoesNotSupportPlan9;
@@ -911,20 +969,19 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
}
- const mod = self.base.options.module.?;
-
// write the data symbols
{
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
- }
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ try self.writeSym(writer, self.syms.items[exp_i]);
+ };
}
}
}
@@ -943,16 +1000,17 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
var submap_it = symidx_and_submap.functions.iterator();
while (submap_it.next()) |entry| {
const decl_index = entry.key_ptr.*;
- const decl = mod.declPtr(decl_index);
- const sym = self.syms.items[decl.link.plan9.sym_index.?];
+ const decl_metadata = self.decls.get(decl_index).?;
+ const decl_block = self.getDeclBlock(decl_metadata.index);
+ const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
- for (exports.items) |e| {
- const s = self.syms.items[e.link.plan9.?];
+ for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
+ const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;
try self.writeSym(writer, s);
- }
+ };
}
}
}
@@ -960,10 +1018,10 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
/// Must be called only after a successful call to `updateDecl`.
-pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
_ = self;
_ = mod;
- _ = decl;
+ _ = decl_index;
}
pub fn getDeclVAddr(
@@ -1004,3 +1062,11 @@ pub fn getDeclVAddr(
});
return undefined;
}
+
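+/// Returns a copy of the `DeclBlock` at `index`.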
+pub fn getDeclBlock(self: *const Plan9, index: DeclBlock.Index) DeclBlock {
+ return self.decl_blocks.items[index];
+}
+
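+/// Returns a mutable pointer to the `DeclBlock` at `index`.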
+fn getDeclBlockPtr(self: *Plan9, index: DeclBlock.Index) *DeclBlock {
+ return &self.decl_blocks.items[index];
+}
diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig
index 7dbd3a42ce..14a29e4498 100644
--- a/src/link/SpirV.zig
+++ b/src/link/SpirV.zig
@@ -42,13 +42,6 @@ const SpvModule = @import("../codegen/spirv/Module.zig");
const spec = @import("../codegen/spirv/spec.zig");
const IdResult = spec.IdResult;
-// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
-pub const FnData = struct {
- // We're going to fill these in flushModule, and we're going to fill them unconditionally,
- // so just set it to undefined.
- id: IdResult = undefined,
-};
-
base: link.File,
/// This linker backend does not try to incrementally link output SPIR-V code.
@@ -209,16 +202,19 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// so that we can access them before processing them.
// TODO: We're allocating an ID unconditionally now, are there
// declarations which don't generate a result?
- // TODO: fn_link is used here, but thats probably not the right field. It will work anyway though.
+ var ids = std.AutoHashMap(Module.Decl.Index, IdResult).init(self.base.allocator);
+ defer ids.deinit();
+ try ids.ensureTotalCapacity(@intCast(u32, self.decl_table.count()));
+
for (self.decl_table.keys()) |decl_index| {
const decl = module.declPtr(decl_index);
if (decl.has_tv) {
- decl.fn_link.spirv.id = spv.allocId();
+ ids.putAssumeCapacityNoClobber(decl_index, spv.allocId());
}
}
// Now, actually generate the code for all declarations.
- var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv);
+ var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv, &ids);
defer decl_gen.deinit();
var it = self.decl_table.iterator();
@@ -231,7 +227,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
const liveness = entry.value_ptr.liveness;
// Note, if `decl` is not a function, air/liveness may be undefined.
- if (try decl_gen.gen(decl, air, liveness)) |msg| {
+ if (try decl_gen.gen(decl_index, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl_index, msg);
return; // TODO: Attempt to generate more decls?
}
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 31dfb87659..9d20412788 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -9,7 +9,7 @@ const fs = std.fs;
const leb = std.leb;
const log = std.log.scoped(.link);
-const Atom = @import("Wasm/Atom.zig");
+pub const Atom = @import("Wasm/Atom.zig");
const Dwarf = @import("Dwarf.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
@@ -31,10 +31,7 @@ const Object = @import("Wasm/Object.zig");
const Archive = @import("Wasm/Archive.zig");
const types = @import("Wasm/types.zig");
-pub const base_tag = link.File.Tag.wasm;
-
-/// deprecated: Use `@import("Wasm/Atom.zig");`
-pub const DeclBlock = Atom;
+pub const base_tag: link.File.Tag = .wasm;
base: link.File,
/// Output name of the file
@@ -47,18 +44,19 @@ llvm_object: ?*LlvmObject = null,
/// TODO: Allow setting this through a flag?
host_name: []const u8 = "env",
/// List of all `Decl` that are currently alive.
-/// This is ment for bookkeeping so we can safely cleanup all codegen memory
-/// when calling `deinit`
-decls: std.AutoHashMapUnmanaged(Module.Decl.Index, void) = .{},
+/// Each index maps to the corresponding `Atom.Index`.
+decls: std.AutoHashMapUnmanaged(Module.Decl.Index, Atom.Index) = .{},
+/// Mapping between an `Atom` and its type index representing the Wasm
+/// type of the function signature.
+atom_types: std.AutoHashMapUnmanaged(Atom.Index, u32) = .{},
/// List of all symbols generated by Zig code.
symbols: std.ArrayListUnmanaged(Symbol) = .{},
/// List of symbol indexes which are free to be used.
symbols_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Maps atoms to their segment index
-atoms: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
-/// Atoms managed and created by the linker. This contains atoms
-/// from object files, and not Atoms generated by a Decl.
-managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
+atoms: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
+/// List of all atoms.
+managed_atoms: std.ArrayListUnmanaged(Atom) = .{},
/// Represents the index into `segments` where the 'code' section
/// lives.
code_section_index: ?u32 = null,
@@ -148,7 +146,7 @@ undefs: std.StringArrayHashMapUnmanaged(SymbolLoc) = .{},
/// Maps a symbol's location to an atom. This can be used to find meta
/// data of a symbol, such as its size, or its offset to perform a relocation.
/// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped.
-symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, *Atom) = .{},
+symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, Atom.Index) = .{},
/// Maps a symbol's location to its export name, which may differ from the decl's name
/// which does the exporting.
/// Note: The value represents the offset into the string table, rather than the actual string.
@@ -165,14 +163,14 @@ error_table_symbol: ?u32 = null,
// unit contains Zig code. The lifetime of these atoms are extended
// until the end of the compiler's lifetime. Meaning they're not freed
// during `flush()` in incremental-mode.
-debug_info_atom: ?*Atom = null,
-debug_line_atom: ?*Atom = null,
-debug_loc_atom: ?*Atom = null,
-debug_ranges_atom: ?*Atom = null,
-debug_abbrev_atom: ?*Atom = null,
-debug_str_atom: ?*Atom = null,
-debug_pubnames_atom: ?*Atom = null,
-debug_pubtypes_atom: ?*Atom = null,
+debug_info_atom: ?Atom.Index = null,
+debug_line_atom: ?Atom.Index = null,
+debug_loc_atom: ?Atom.Index = null,
+debug_ranges_atom: ?Atom.Index = null,
+debug_abbrev_atom: ?Atom.Index = null,
+debug_str_atom: ?Atom.Index = null,
+debug_pubnames_atom: ?Atom.Index = null,
+debug_pubtypes_atom: ?Atom.Index = null,
pub const Segment = struct {
alignment: u32,
@@ -180,19 +178,6 @@ pub const Segment = struct {
offset: u32,
};
-pub const FnData = struct {
- /// Reference to the wasm type that represents this function.
- type_index: u32,
- /// Contains debug information related to this function.
- /// For Wasm, the offset is relative to the code-section.
- src_fn: Dwarf.SrcFn,
-
- pub const empty: FnData = .{
- .type_index = undefined,
- .src_fn = Dwarf.SrcFn.empty,
- };
-};
-
pub const Export = struct {
sym_index: ?u32 = null,
};
@@ -434,10 +419,10 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// at the end during `initializeCallCtorsFunction`.
}
- if (!options.strip and options.module != null) {
- wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
- try wasm_bin.initDebugSections();
- }
+ // if (!options.strip and options.module != null) {
+ // wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
+ // try wasm_bin.initDebugSections();
+ // }
return wasm_bin;
}
@@ -478,6 +463,7 @@ fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !Symbol
try wasm.globals.put(wasm.base.allocator, name_offset, loc);
return loc;
}
+
/// Initializes symbols and atoms for the debug sections
/// Initialization is only done when compiling Zig code.
/// When Zig is invoked as a linker instead, the atoms
@@ -520,6 +506,36 @@ fn parseObjectFile(wasm: *Wasm, path: []const u8) !bool {
return true;
}
+/// Returns the `Atom.Index` corresponding to the given `Module.Decl.Index`.
+/// When no atom exists for the decl yet, a new `Atom` is created and its index is returned.
+/// A newly created atom is empty, with default fields as specified by `Atom.empty`.
+pub fn getOrCreateAtomForDecl(wasm: *Wasm, decl_index: Module.Decl.Index) !Atom.Index {
+ const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
+ if (!gop.found_existing) {
+ gop.value_ptr.* = try wasm.createAtom();
+ }
+ return gop.value_ptr.*;
+}
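+
+// Call-site sketch of the index-based pattern (as done in `updateDecl` and
+// `updateFunc`; `wasm` and `decl_index` are assumed to be in scope):
+//
+//   const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+//   const atom = wasm.getAtomPtr(atom_index);
+//   atom.clear();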
+
+/// Creates a new empty `Atom` and returns its `Atom.Index`
+fn createAtom(wasm: *Wasm) !Atom.Index {
+ const index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
+ atom.* = Atom.empty;
+ atom.sym_index = try wasm.allocateSymbol();
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, .{ .file = null, .index = atom.sym_index }, index);
+
+ return index;
+}
+
+pub inline fn getAtom(wasm: *const Wasm, index: Atom.Index) Atom {
+ return wasm.managed_atoms.items[index];
+}
+
+pub inline fn getAtomPtr(wasm: *Wasm, index: Atom.Index) *Atom {
+ return &wasm.managed_atoms.items[index];
+}
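+
+// Note: `getAtom` returns a copy of the atom, while `getAtomPtr` returns a
+// pointer into `managed_atoms`. Such a pointer is only valid until the next
+// operation that may grow `managed_atoms` (e.g. `createAtom`), so call sites
+// re-derive it from the `Atom.Index` when in doubt.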
+
/// Parses an archive file and will then parse each object file
/// that was found in the archive file.
/// Returns false when the file is not an archive file.
@@ -861,15 +877,16 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ // TODO: Can we use `createAtom` here while also reusing the symbol
+ // from `createSyntheticSymbol`?
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| {
@@ -877,15 +894,14 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = Atom.empty;
atom.sym_index = loc.index;
atom.alignment = 1;
- try wasm.parseAtom(atom, .{ .data = .synthetic });
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.parseAtom(atom_index, .{ .data = .synthetic });
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
}
}
@@ -924,16 +940,6 @@ pub fn deinit(wasm: *Wasm) void {
if (wasm.llvm_object) |llvm_object| llvm_object.destroy(gpa);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- decl.link.wasm.deinit(gpa);
- }
- } else {
- assert(wasm.decls.count() == 0);
- }
-
for (wasm.func_types.items) |*func_type| {
func_type.deinit(gpa);
}
@@ -949,6 +955,7 @@ pub fn deinit(wasm: *Wasm) void {
}
wasm.decls.deinit(gpa);
+ wasm.atom_types.deinit(gpa);
wasm.symbols.deinit(gpa);
wasm.symbols_free_list.deinit(gpa);
wasm.globals.deinit(gpa);
@@ -958,9 +965,8 @@ pub fn deinit(wasm: *Wasm) void {
wasm.symbol_atom.deinit(gpa);
wasm.export_names.deinit(gpa);
wasm.atoms.deinit(gpa);
- for (wasm.managed_atoms.items) |managed_atom| {
- managed_atom.deinit(gpa);
- gpa.destroy(managed_atom);
+ for (wasm.managed_atoms.items) |*managed_atom| {
+ managed_atom.deinit(wasm);
}
wasm.managed_atoms.deinit(gpa);
wasm.segments.deinit(gpa);
@@ -1018,18 +1024,24 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
- const atom = &decl.link.wasm;
- try atom.ensureInitialized(wasm);
- const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
- if (gop.found_existing) {
- atom.clear();
- } else gop.value_ptr.* = {};
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
- var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
- defer if (decl_state) |*ds| ds.deinit();
+ // var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
+ // defer if (decl_state) |*ds| ds.deinit();
var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
defer code_writer.deinit();
+ // const result = try codegen.generateFunction(
+ // &wasm.base,
+ // decl.srcLoc(),
+ // func,
+ // air,
+ // liveness,
+ // &code_writer,
+ // if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ // );
const result = try codegen.generateFunction(
&wasm.base,
decl.srcLoc(),
@@ -1037,7 +1049,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
air,
liveness,
&code_writer,
- if (decl_state) |*ds| .{ .dwarf = ds } else .none,
+ .none,
);
const code = switch (result) {
@@ -1049,19 +1061,19 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
},
};
- if (wasm.dwarf) |*dwarf| {
- try dwarf.commitDeclState(
- mod,
- decl_index,
- // Actual value will be written after relocation.
- // For Wasm, this is the offset relative to the code section
- // which isn't known until flush().
- 0,
- code.len,
- &decl_state.?,
- );
- }
- return wasm.finishUpdateDecl(decl, code);
+ // if (wasm.dwarf) |*dwarf| {
+ // try dwarf.commitDeclState(
+ // mod,
+ // decl_index,
+ // // Actual value will be written after relocation.
+ // // For Wasm, this is the offset relative to the code section
+ // // which isn't known until flush().
+ // 0,
+ // code.len,
+ // &decl_state.?,
+ // );
+ // }
+ return wasm.finishUpdateDecl(decl_index, code);
}
// Generate code for the Decl, storing it in memory to be later written to
@@ -1084,17 +1096,14 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
return;
}
- const atom = &decl.link.wasm;
- try atom.ensureInitialized(wasm);
- const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
- if (gop.found_existing) {
- atom.clear();
- } else gop.value_ptr.* = {};
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.clear();
if (decl.isExtern()) {
const variable = decl.getVariable().?;
const name = mem.sliceTo(decl.name, 0);
- return wasm.addOrUpdateImport(name, decl.link.wasm.sym_index, variable.lib_name, null);
+ return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null);
}
const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
@@ -1107,7 +1116,7 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
.{ .ty = decl.ty, .val = val },
&code_writer,
.none,
- .{ .parent_atom_index = decl.link.wasm.sym_index },
+ .{ .parent_atom_index = atom.sym_index },
);
const code = switch (res) {
@@ -1119,26 +1128,29 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
},
};
- return wasm.finishUpdateDecl(decl, code);
+ return wasm.finishUpdateDecl(decl_index, code);
}
-pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl: *const Module.Decl) !void {
+pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !void {
if (wasm.llvm_object) |_| return;
if (wasm.dwarf) |*dw| {
const tracy = trace(@src());
defer tracy.end();
+ const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
- try dw.updateDeclLineNumber(decl);
+ try dw.updateDeclLineNumber(mod, decl_index);
}
}
-fn finishUpdateDecl(wasm: *Wasm, decl: *Module.Decl, code: []const u8) !void {
+fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8) !void {
const mod = wasm.base.options.module.?;
- const atom: *Atom = &decl.link.wasm;
+ const decl = mod.declPtr(decl_index);
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = &wasm.symbols.items[atom.sym_index];
const full_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(full_name);
@@ -1204,48 +1216,51 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
const decl = mod.declPtr(decl_index);
// Create and initialize a new local symbol and atom
- const local_index = decl.link.wasm.locals.items.len;
+ const atom_index = try wasm.createAtom();
+ const parent_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const parent_atom = wasm.getAtomPtr(parent_atom_index);
+ const local_index = parent_atom.locals.items.len;
+ try parent_atom.locals.append(wasm.base.allocator, atom_index);
const fqdn = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(fqdn);
const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index });
defer wasm.base.allocator.free(name);
-
- const atom = try decl.link.wasm.locals.addOne(wasm.base.allocator);
- atom.* = Atom.empty;
- try atom.ensureInitialized(wasm);
- atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
- wasm.symbols.items[atom.sym_index] = .{
- .name = try wasm.string_table.put(wasm.base.allocator, name),
- .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
- .tag = .data,
- .index = undefined,
- };
-
- try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
-
var value_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer value_bytes.deinit();
- const result = try codegen.generateSymbol(
- &wasm.base,
- decl.srcLoc(),
- tv,
- &value_bytes,
- .none,
- .{
- .parent_atom_index = atom.sym_index,
- .addend = null,
- },
- );
- const code = switch (result) {
- .ok => value_bytes.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- try mod.failed_decls.put(mod.gpa, decl_index, em);
- return error.AnalysisFail;
- },
+ const code = code: {
+ const atom = wasm.getAtomPtr(atom_index);
+ atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
+ wasm.symbols.items[atom.sym_index] = .{
+ .name = try wasm.string_table.put(wasm.base.allocator, name),
+ .flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
+ .tag = .data,
+ .index = undefined,
+ };
+ try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
+
+ const result = try codegen.generateSymbol(
+ &wasm.base,
+ decl.srcLoc(),
+ tv,
+ &value_bytes,
+ .none,
+ .{
+ .parent_atom_index = atom.sym_index,
+ .addend = null,
+ },
+ );
+ break :code switch (result) {
+ .ok => value_bytes.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ try mod.failed_decls.put(mod.gpa, decl_index, em);
+ return error.AnalysisFail;
+ },
+ };
};
+ const atom = wasm.getAtomPtr(atom_index);
atom.size = @intCast(u32, code.len);
try atom.code.appendSlice(wasm.base.allocator, code);
return atom.sym_index;
@@ -1293,10 +1308,13 @@ pub fn getDeclVAddr(
) !u64 {
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- try decl.link.wasm.ensureInitialized(wasm);
- const target_symbol_index = decl.link.wasm.sym_index;
+
+ const target_atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const target_symbol_index = wasm.getAtom(target_atom_index).sym_index;
+
assert(reloc_info.parent_atom_index != 0);
- const atom = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
if (decl.ty.zigTypeTag() == .Fn) {
assert(reloc_info.addend == 0); // addend not allowed for function relocations
@@ -1324,9 +1342,10 @@ pub fn getDeclVAddr(
return target_symbol_index;
}
-pub fn deleteExport(wasm: *Wasm, exp: Export) void {
+pub fn deleteDeclExport(wasm: *Wasm, decl_index: Module.Decl.Index) void {
if (wasm.llvm_object) |_| return;
- const sym_index = exp.sym_index orelse return;
+ const atom_index = wasm.decls.get(decl_index) orelse return;
+ const sym_index = wasm.getAtom(atom_index).sym_index;
const loc: SymbolLoc = .{ .file = null, .index = sym_index };
const symbol = loc.getSymbol(wasm);
const symbol_name = wasm.string_table.get(symbol.name);
@@ -1352,7 +1371,8 @@ pub fn updateDeclExports(
}
const decl = mod.declPtr(decl_index);
- if (decl.link.wasm.getSymbolIndex() == null) return; // unititialized
+ const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
+ const atom = wasm.getAtom(atom_index);
for (exports) |exp| {
if (exp.options.section) |section| {
@@ -1367,7 +1387,7 @@ pub fn updateDeclExports(
const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name);
if (wasm.globals.getPtr(export_name)) |existing_loc| {
- if (existing_loc.index == decl.link.wasm.sym_index) continue;
+ if (existing_loc.index == atom.sym_index) continue;
const existing_sym: Symbol = existing_loc.getSymbol(wasm).*;
const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak;
@@ -1388,15 +1408,16 @@ pub fn updateDeclExports(
} else if (exp_is_weak) {
continue; // to-be-exported symbol is weak, so we keep the existing symbol
} else {
- existing_loc.index = decl.link.wasm.sym_index;
+ // TODO: Revisit this; why was this needed?
+ existing_loc.index = atom.sym_index;
existing_loc.file = null;
- exp.link.wasm.sym_index = existing_loc.index;
+ // exp.link.wasm.sym_index = existing_loc.index;
}
}
- const exported_decl = mod.declPtr(exp.exported_decl);
- const sym_index = exported_decl.link.wasm.sym_index;
- const sym_loc = exported_decl.link.wasm.symbolLoc();
+ const exported_atom_index = try wasm.getOrCreateAtomForDecl(exp.exported_decl);
+ const exported_atom = wasm.getAtom(exported_atom_index);
+ const sym_loc = exported_atom.symbolLoc();
const symbol = sym_loc.getSymbol(wasm);
switch (exp.options.linkage) {
.Internal => {
@@ -1432,7 +1453,6 @@ pub fn updateDeclExports(
// if the symbol was previously undefined, remove it as an import
_ = wasm.imports.remove(sym_loc);
_ = wasm.undefs.swapRemove(exp.options.name);
- exp.link.wasm.sym_index = sym_index;
}
}
@@ -1442,11 +1462,13 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
}
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
- const atom = &decl.link.wasm;
+ const atom_index = wasm.decls.get(decl_index).?;
+ const atom = wasm.getAtomPtr(atom_index);
wasm.symbols_free_list.append(wasm.base.allocator, atom.sym_index) catch {};
_ = wasm.decls.remove(decl_index);
wasm.symbols.items[atom.sym_index].tag = .dead;
- for (atom.locals.items) |local_atom| {
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtom(local_atom_index);
const local_symbol = &wasm.symbols.items[local_atom.sym_index];
local_symbol.tag = .dead; // also for any local symbol
wasm.symbols_free_list.append(wasm.base.allocator, local_atom.sym_index) catch {};
@@ -1460,12 +1482,20 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
_ = wasm.resolved_symbols.swapRemove(atom.symbolLoc());
_ = wasm.symbol_atom.remove(atom.symbolLoc());
- if (wasm.dwarf) |*dwarf| {
- dwarf.freeDecl(decl);
- dwarf.freeAtom(&atom.dbg_info_atom);
- }
+ // if (wasm.dwarf) |*dwarf| {
+ // dwarf.freeDecl(decl_index);
+ // }
- atom.deinit(wasm.base.allocator);
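+ // Unlink the atom from the segment's doubly linked list so its neighbors
+ // remain connected after this decl is freed.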
+ if (atom.next) |next_atom_index| {
+ const next_atom = wasm.getAtomPtr(next_atom_index);
+ next_atom.prev = atom.prev;
+ atom.next = null;
+ }
+ if (atom.prev) |prev_index| {
+ const prev_atom = wasm.getAtomPtr(prev_index);
+ prev_atom.next = atom.next;
+ atom.prev = null;
+ }
}
/// Appends a new entry to the indirect function table
@@ -1572,7 +1602,7 @@ const Kind = union(enum) {
initialized,
synthetic,
},
- function: FnData,
+ function: void,
/// Returns the segment name the data kind represents.
/// Asserts `kind` has its active tag set to `data`.
@@ -1587,15 +1617,17 @@ const Kind = union(enum) {
};
/// Parses an Atom and inserts its metadata into the corresponding sections.
-fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
+fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol = (SymbolLoc{ .file = null, .index = atom.sym_index }).getSymbol(wasm);
const final_index: u32 = switch (kind) {
- .function => |fn_data| result: {
+ .function => result: {
const index = @intCast(u32, wasm.functions.count() + wasm.imported_functions_count);
+ const type_index = wasm.atom_types.get(atom_index).?;
try wasm.functions.putNoClobber(
wasm.base.allocator,
.{ .file = null, .index = index },
- .{ .type_index = fn_data.type_index },
+ .{ .type_index = type_index },
);
symbol.tag = .function;
symbol.index = index;
@@ -1662,18 +1694,20 @@ fn parseAtom(wasm: *Wasm, atom: *Atom, kind: Kind) !void {
const segment: *Segment = &wasm.segments.items[final_index];
segment.alignment = std.math.max(segment.alignment, atom.alignment);
- try wasm.appendAtomAtIndex(final_index, atom);
+ try wasm.appendAtomAtIndex(final_index, atom_index);
}
/// From a given index, append the given `Atom` at the back of the linked list.
/// Simply inserts it into the map of atoms when it doesn't exist yet.
-pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom: *Atom) !void {
- if (wasm.atoms.getPtr(index)) |last| {
- last.*.next = atom;
- atom.prev = last.*;
- last.* = atom;
+pub fn appendAtomAtIndex(wasm: *Wasm, index: u32, atom_index: Atom.Index) !void {
+ const atom = wasm.getAtomPtr(atom_index);
+ if (wasm.atoms.getPtr(index)) |last_index_ptr| {
+ const last = wasm.getAtomPtr(last_index_ptr.*);
+ last.*.next = atom_index;
+ atom.prev = last_index_ptr.*;
+ last_index_ptr.* = atom_index;
} else {
- try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom);
+ try wasm.atoms.putNoClobber(wasm.base.allocator, index, atom_index);
}
}
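+
+// Note: `atoms` maps a segment index to the most recently appended atom, so a
+// segment's full atom list is walked backwards through `prev`, e.g.:
+//
+//   var atom_index = wasm.atoms.get(segment_index).?;
+//   while (true) {
+//       const atom = wasm.getAtom(atom_index);
+//       // ...use atom...
+//       atom_index = atom.prev orelse break;
+//   }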
@@ -1683,16 +1717,17 @@ fn allocateDebugAtoms(wasm: *Wasm) !void {
if (wasm.dwarf == null) return;
const allocAtom = struct {
- fn f(bin: *Wasm, maybe_index: *?u32, atom: *Atom) !void {
+ fn f(bin: *Wasm, maybe_index: *?u32, atom_index: Atom.Index) !void {
const index = maybe_index.* orelse idx: {
const index = @intCast(u32, bin.segments.items.len);
try bin.appendDummySegment();
maybe_index.* = index;
break :idx index;
};
+ const atom = bin.getAtomPtr(atom_index);
atom.size = @intCast(u32, atom.code.items.len);
bin.symbols.items[atom.sym_index].index = index;
- try bin.appendAtomAtIndex(index, atom);
+ try bin.appendAtomAtIndex(index, atom_index);
}
}.f;
@@ -1714,15 +1749,16 @@ fn allocateAtoms(wasm: *Wasm) !void {
var it = wasm.atoms.iterator();
while (it.next()) |entry| {
const segment = &wasm.segments.items[entry.key_ptr.*];
- var atom: *Atom = entry.value_ptr.*.getFirst();
+ var atom_index = entry.value_ptr.*;
var offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
const symbol_loc = atom.symbolLoc();
if (wasm.code_section_index) |index| {
if (index == entry.key_ptr.*) {
if (!wasm.resolved_symbols.contains(symbol_loc)) {
// only allocate resolved function body's.
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
continue;
}
}
@@ -1736,8 +1772,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
atom.size,
});
offset += atom.size;
- try wasm.symbol_atom.put(wasm.base.allocator, symbol_loc, atom); // Update atom pointers
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
}
segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
}
@@ -1871,8 +1906,8 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
symbol.index = func_index;
// create the atom that will be output into the final binary
- const atom = try wasm.base.allocator.create(Atom);
- errdefer wasm.base.allocator.destroy(atom);
+ const atom_index = @intCast(Atom.Index, wasm.managed_atoms.items.len);
+ const atom = try wasm.managed_atoms.addOne(wasm.base.allocator);
atom.* = .{
.size = @intCast(u32, function_body.items.len),
.offset = 0,
@@ -1882,15 +1917,14 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
.next = null,
.prev = null,
.code = function_body.moveToUnmanaged(),
- .dbg_info_atom = undefined,
};
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom);
- try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom);
+ try wasm.appendAtomAtIndex(wasm.code_section_index.?, atom_index);
+ try wasm.symbol_atom.putNoClobber(wasm.base.allocator, loc, atom_index);
// `allocateAtoms` has already been called, set the atom's offset manually.
// This is fine to do manually as we insert the atom at the very end.
- atom.offset = atom.prev.?.offset + atom.prev.?.size;
+ const prev_atom = wasm.getAtom(atom.prev.?);
+ atom.offset = prev_atom.offset + prev_atom.size;
}
fn setupImports(wasm: *Wasm) !void {
@@ -2093,7 +2127,8 @@ fn setupExports(wasm: *Wasm) !void {
break :blk try wasm.string_table.put(wasm.base.allocator, sym_name);
};
const exp: types.Export = if (symbol.tag == .data) exp: {
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
const va = atom.getVA(wasm, symbol);
const global_index = @intCast(u32, wasm.imported_globals_count + wasm.wasm_globals.items.len);
try wasm.wasm_globals.append(wasm.base.allocator, .{
@@ -2198,7 +2233,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0; // for simplicity we store the entire VA into atom's offset.
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
}
@@ -2231,7 +2267,8 @@ fn setupMemory(wasm: *Wasm) !void {
const segment_index = wasm.data_segments.get(".synthetic").?;
const segment = &wasm.segments.items[segment_index];
segment.offset = 0;
- const atom = wasm.symbol_atom.get(loc).?;
+ const atom_index = wasm.symbol_atom.get(loc).?;
+ const atom = wasm.getAtomPtr(atom_index);
atom.offset = @intCast(u32, memory_ptr);
}
@@ -2357,15 +2394,14 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
// and then return said symbol's index. The final table will be populated
// during `flush` when we know all possible error names.
- // As sym_index '0' is reserved, we use it for our stack pointer symbol
- const symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
+ const sym_index = atom.sym_index;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table");
- const symbol = &wasm.symbols.items[symbol_index];
+ const symbol = &wasm.symbols.items[sym_index];
symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2374,20 +2410,11 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
};
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
- const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
+ try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
- atom.sym_index = symbol_index;
- atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- const loc = atom.symbolLoc();
- try wasm.resolved_symbols.put(wasm.base.allocator, loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, loc, atom);
-
- log.debug("Error name table was created with symbol index: ({d})", .{symbol_index});
- wasm.error_table_symbol = symbol_index;
- return symbol_index;
+ log.debug("Error name table was created with symbol index: ({d})", .{sym_index});
+ wasm.error_table_symbol = sym_index;
+ return sym_index;
}
/// Populates the error name table, when `error_table_symbol` is not null.
@@ -2396,22 +2423,17 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
/// The table is what is being pointed to within the runtime bodies that are generated.
fn populateErrorNameTable(wasm: *Wasm) !void {
const symbol_index = wasm.error_table_symbol orelse return;
- const atom: *Atom = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = symbol_index }).?;
+ const atom = wasm.getAtomPtr(atom_index);
+
// Rather than creating a symbol for each individual error name,
// we create a symbol for the entire region of error names. We then calculate
// the pointers into the list using addends which are appended to the relocation.
- const names_atom = try wasm.base.allocator.create(Atom);
- names_atom.* = Atom.empty;
- try wasm.managed_atoms.append(wasm.base.allocator, names_atom);
- const names_symbol_index = wasm.symbols_free_list.popOrNull() orelse blk: {
- const index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :blk index;
- };
- names_atom.sym_index = names_symbol_index;
+ const names_atom_index = try wasm.createAtom();
+ const names_atom = wasm.getAtomPtr(names_atom_index);
names_atom.alignment = 1;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_names");
- const names_symbol = &wasm.symbols.items[names_symbol_index];
+ const names_symbol = &wasm.symbols.items[names_atom.sym_index];
names_symbol.* = .{
.name = sym_name,
.tag = .data,
@@ -2435,7 +2457,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
try atom.code.writer(wasm.base.allocator).writeIntLittle(u32, len - 1);
// create relocation to the error name
try atom.relocs.append(wasm.base.allocator, .{
- .index = names_symbol_index,
+ .index = names_atom.sym_index,
.relocation_type = .R_WASM_MEMORY_ADDR_I32,
.offset = offset,
.addend = @intCast(i32, addend),
@@ -2454,61 +2476,53 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
const name_loc = names_atom.symbolLoc();
try wasm.resolved_symbols.put(wasm.base.allocator, name_loc, {});
- try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom);
+ try wasm.symbol_atom.put(wasm.base.allocator, name_loc, names_atom_index);
// link the atoms with the rest of the binary so they can be allocated
// and relocations will be performed.
- try wasm.parseAtom(atom, .{ .data = .read_only });
- try wasm.parseAtom(names_atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
+ try wasm.parseAtom(names_atom_index, .{ .data = .read_only });
}
/// From a given index variable, creates a new debug section.
/// This initializes the index, appends a new segment,
/// and finally, creates a managed `Atom`.
-pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !*Atom {
+pub fn createDebugSectionForIndex(wasm: *Wasm, index: *?u32, name: []const u8) !Atom.Index {
const new_index = @intCast(u32, wasm.segments.items.len);
index.* = new_index;
try wasm.appendDummySegment();
- const sym_index = wasm.symbols_free_list.popOrNull() orelse idx: {
- const tmp_index = @intCast(u32, wasm.symbols.items.len);
- _ = try wasm.symbols.addOne(wasm.base.allocator);
- break :idx tmp_index;
- };
- wasm.symbols.items[sym_index] = .{
+ const atom_index = try wasm.createAtom();
+ const atom = wasm.getAtomPtr(atom_index);
+ wasm.symbols.items[atom.sym_index] = .{
.tag = .section,
.name = try wasm.string_table.put(wasm.base.allocator, name),
.index = 0,
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
};
- const atom = try wasm.base.allocator.create(Atom);
- atom.* = Atom.empty;
atom.alignment = 1; // debug sections are always 1-byte-aligned
- atom.sym_index = sym_index;
- try wasm.managed_atoms.append(wasm.base.allocator, atom);
- try wasm.symbol_atom.put(wasm.base.allocator, atom.symbolLoc(), atom);
- return atom;
+ return atom_index;
}
fn resetState(wasm: *Wasm) void {
for (wasm.segment_info.values()) |segment_info| {
wasm.base.allocator.free(segment_info.name);
}
- if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
- const atom = &decl.link.wasm;
- atom.next = null;
- atom.prev = null;
- for (atom.locals.items) |*local_atom| {
- local_atom.next = null;
- local_atom.prev = null;
- }
+ var atom_it = wasm.decls.valueIterator();
+ while (atom_it.next()) |atom_index| {
+ const atom = wasm.getAtomPtr(atom_index.*);
+ atom.next = null;
+ atom.prev = null;
+
+ for (atom.locals.items) |local_atom_index| {
+ const local_atom = wasm.getAtomPtr(local_atom_index);
+ local_atom.next = null;
+ local_atom.prev = null;
}
}
+
wasm.functions.clearRetainingCapacity();
wasm.exports.clearRetainingCapacity();
wasm.segments.clearRetainingCapacity();
@@ -2805,28 +2819,29 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.setupStart();
try wasm.setupImports();
if (wasm.base.options.module) |mod| {
- var decl_it = wasm.decls.keyIterator();
- while (decl_it.next()) |decl_index_ptr| {
- const decl = mod.declPtr(decl_index_ptr.*);
+ var decl_it = wasm.decls.iterator();
+ while (decl_it.next()) |entry| {
+ const decl = mod.declPtr(entry.key_ptr.*);
if (decl.isExtern()) continue;
- const atom = &decl.*.link.wasm;
+ const atom_index = entry.value_ptr.*;
if (decl.ty.zigTypeTag() == .Fn) {
- try wasm.parseAtom(atom, .{ .function = decl.fn_link.wasm });
+ try wasm.parseAtom(atom_index, .function);
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
} else if (variable.init.isUndefDeep()) {
- try wasm.parseAtom(atom, .{ .data = .uninitialized });
+ try wasm.parseAtom(atom_index, .{ .data = .uninitialized });
} else {
- try wasm.parseAtom(atom, .{ .data = .initialized });
+ try wasm.parseAtom(atom_index, .{ .data = .initialized });
}
} else {
- try wasm.parseAtom(atom, .{ .data = .read_only });
+ try wasm.parseAtom(atom_index, .{ .data = .read_only });
}
// also parse atoms for a decl's locals
- for (atom.locals.items) |*local_atom| {
- try wasm.parseAtom(local_atom, .{ .data = .read_only });
+ const atom = wasm.getAtomPtr(atom_index);
+ for (atom.locals.items) |local_atom_index| {
+ try wasm.parseAtom(local_atom_index, .{ .data = .read_only });
}
}
@@ -3071,20 +3086,22 @@ fn writeToFile(
var code_section_size: u32 = 0;
if (wasm.code_section_index) |code_index| {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom_index = wasm.atoms.get(code_index).?;
// The code section must be sorted in line with the function order.
var sorted_atoms = try std.ArrayList(*Atom).initCapacity(wasm.base.allocator, wasm.functions.count());
defer sorted_atoms.deinit();
while (true) {
+ var atom = wasm.getAtomPtr(atom_index);
if (wasm.resolved_symbols.contains(atom.symbolLoc())) {
if (!is_obj) {
atom.resolveRelocs(wasm);
}
sorted_atoms.appendAssumeCapacity(atom);
}
- atom = atom.next orelse break;
+ atom_index = atom.prev orelse break;
}
const atom_sort_fn = struct {
@@ -3124,11 +3141,11 @@ fn writeToFile(
// do not output 'bss' section unless we import memory and therefore
// want to guarantee the data is zero initialized
if (!import_memory and std.mem.eql(u8, entry.key_ptr.*, ".bss")) continue;
- const atom_index = entry.value_ptr.*;
- const segment = wasm.segments.items[atom_index];
+ const segment_index = entry.value_ptr.*;
+ const segment = wasm.segments.items[segment_index];
if (segment.size == 0) continue; // do not emit empty segments
segment_count += 1;
- var atom: *Atom = wasm.atoms.getPtr(atom_index).?.*.getFirst();
+ var atom_index = wasm.atoms.get(segment_index).?;
// flag and index to memory section (currently, there can only be 1 memory section in wasm)
try leb.writeULEB128(binary_writer, @as(u32, 0));
@@ -3139,6 +3156,7 @@ fn writeToFile(
// fill in the offset table and the data segments
var current_offset: u32 = 0;
while (true) {
+ const atom = wasm.getAtomPtr(atom_index);
if (!is_obj) {
atom.resolveRelocs(wasm);
}
@@ -3154,8 +3172,8 @@ fn writeToFile(
try binary_writer.writeAll(atom.code.items);
current_offset += atom.size;
- if (atom.next) |next| {
- atom = next;
+ if (atom.prev) |prev| {
+ atom_index = prev;
} else {
// also pad with zeroes when last atom to ensure
// segments are aligned.
@@ -3197,15 +3215,15 @@ fn writeToFile(
}
if (!wasm.base.options.strip) {
- if (wasm.dwarf) |*dwarf| {
- const mod = wasm.base.options.module.?;
- try dwarf.writeDbgAbbrev();
- // for debug info and ranges, the address is always 0,
- // as locations are always offsets relative to 'code' section.
- try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
- try dwarf.writeDbgAranges(0, code_section_size);
- try dwarf.writeDbgLineHeader();
- }
+ // if (wasm.dwarf) |*dwarf| {
+ // const mod = wasm.base.options.module.?;
+ // try dwarf.writeDbgAbbrev();
+ // // for debug info and ranges, the address is always 0,
+ // // as locations are always offsets relative to 'code' section.
+ // try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
+ // try dwarf.writeDbgAranges(0, code_section_size);
+ // try dwarf.writeDbgLineHeader();
+ // }
var debug_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer debug_bytes.deinit();
@@ -3228,11 +3246,11 @@ fn writeToFile(
for (debug_sections) |item| {
if (item.index) |index| {
- var atom = wasm.atoms.get(index).?.getFirst();
+ var atom = wasm.getAtomPtr(wasm.atoms.get(index).?);
while (true) {
atom.resolveRelocs(wasm);
try debug_bytes.appendSlice(atom.code.items);
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
try emitDebugSection(&binary_bytes, debug_bytes.items, item.name);
debug_bytes.clearRetainingCapacity();
@@ -3964,7 +3982,8 @@ fn emitSymbolTable(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table:
if (symbol.isDefined()) {
try leb.writeULEB128(writer, symbol.index);
- const atom = wasm.symbol_atom.get(sym_loc).?;
+ const atom_index = wasm.symbol_atom.get(sym_loc).?;
+ const atom = wasm.getAtom(atom_index);
try leb.writeULEB128(writer, @as(u32, atom.offset));
try leb.writeULEB128(writer, @as(u32, atom.size));
}
@@ -4042,7 +4061,7 @@ fn emitCodeRelocations(
const reloc_start = binary_bytes.items.len;
var count: u32 = 0;
- var atom: *Atom = wasm.atoms.get(code_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(code_index).?);
// for each atom, we calculate the uleb size and append that
var size_offset: u32 = 5; // account for code section size leb128
while (true) {
@@ -4060,7 +4079,7 @@ fn emitCodeRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
if (count == 0) return;
var buf: [5]u8 = undefined;
@@ -4091,7 +4110,7 @@ fn emitDataRelocations(
// for each atom, we calculate the uleb size and append that
var size_offset: u32 = 5; // account for code section size leb128
for (wasm.data_segments.values()) |segment_index| {
- var atom: *Atom = wasm.atoms.get(segment_index).?.getFirst();
+ var atom: *Atom = wasm.getAtomPtr(wasm.atoms.get(segment_index).?);
while (true) {
size_offset += getULEB128Size(atom.size);
for (atom.relocs.items) |relocation| {
@@ -4110,7 +4129,7 @@ fn emitDataRelocations(
}
log.debug("Emit relocation: {}", .{relocation});
}
- atom = atom.next orelse break;
+ atom = if (atom.prev) |prev| wasm.getAtomPtr(prev) else break;
}
}
if (count == 0) return;
@@ -4149,3 +4168,13 @@ pub fn putOrGetFuncType(wasm: *Wasm, func_type: std.wasm.Type) !u32 {
});
return index;
}
+
+/// For the given `decl_index`, stores the corresponding type representing the function signature.
+/// Asserts declaration has an associated `Atom`.
+/// Returns the index into the list of types.
+pub fn storeDeclType(wasm: *Wasm, decl_index: Module.Decl.Index, func_type: std.wasm.Type) !u32 {
+ const atom_index = wasm.decls.get(decl_index).?;
+ const index = try wasm.putOrGetFuncType(func_type);
+ try wasm.atom_types.put(wasm.base.allocator, atom_index, index);
+ return index;
+}
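+
+// Flow sketch (the caller is assumed to be the function codegen path): the
+// signature's type index is stored per atom here, and `flushModule` later
+// reads it back when parsing the function atom:
+//
+//   _ = try wasm.storeDeclType(decl_index, func_type);
+//   // ...later, parseAtom(atom_index, .function) uses wasm.atom_types.get(atom_index).?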
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index 20f847e475..e719f8dfcc 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -4,7 +4,6 @@ const std = @import("std");
const types = @import("types.zig");
const Wasm = @import("../Wasm.zig");
const Symbol = @import("Symbol.zig");
-const Dwarf = @import("../Dwarf.zig");
const leb = std.leb;
const log = std.log.scoped(.link);
@@ -30,17 +29,17 @@ file: ?u16,
/// Next atom in relation to this atom.
/// When null, this atom is the last atom
-next: ?*Atom,
+next: ?Atom.Index,
/// Previous atom in relation to this atom.
/// is null when this atom is the first in its order
-prev: ?*Atom,
+prev: ?Atom.Index,
/// Contains atoms local to a decl, all managed by this `Atom`.
/// When the parent atom is being freed, it will also do so for all local atoms.
-locals: std.ArrayListUnmanaged(Atom) = .{},
+locals: std.ArrayListUnmanaged(Atom.Index) = .{},
-/// Represents the debug Atom that holds all debug information of this Atom.
-dbg_info_atom: Dwarf.Atom,
+/// Index referencing an `Atom` in the linker's list of managed atoms; an alias for `u32`.
+pub const Index = u32;
/// Represents a default empty wasm `Atom`
pub const empty: Atom = .{
@@ -51,18 +50,15 @@ pub const empty: Atom = .{
.prev = null,
.size = 0,
.sym_index = 0,
- .dbg_info_atom = undefined,
};
/// Frees all resources owned by this `Atom`.
-pub fn deinit(atom: *Atom, gpa: Allocator) void {
+pub fn deinit(atom: *Atom, wasm: *Wasm) void {
+ const gpa = wasm.base.allocator;
atom.relocs.deinit(gpa);
atom.code.deinit(gpa);
-
- for (atom.locals.items) |*local| {
- local.deinit(gpa);
- }
atom.locals.deinit(gpa);
+ atom.* = undefined;
}
/// Sets the length of relocations and code to '0',
@@ -83,24 +79,11 @@ pub fn format(atom: Atom, comptime fmt: []const u8, options: std.fmt.FormatOptio
});
}
-/// Returns the first `Atom` from a given atom
-pub fn getFirst(atom: *Atom) *Atom {
- var tmp = atom;
- while (tmp.prev) |prev| tmp = prev;
- return tmp;
-}
-
/// Returns the location of the symbol that represents this `Atom`
pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
return .{ .file = atom.file, .index = atom.sym_index };
}
-pub fn ensureInitialized(atom: *Atom, wasm_bin: *Wasm) !void {
- if (atom.getSymbolIndex() != null) return; // already initialized
- atom.sym_index = try wasm_bin.allocateSymbol();
- try wasm_bin.symbol_atom.putNoClobber(wasm_bin.base.allocator, atom.symbolLoc(), atom);
-}
-
pub fn getSymbolIndex(atom: Atom) ?u32 {
if (atom.sym_index == 0) return null;
return atom.sym_index;
@@ -203,20 +186,28 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
if (symbol.isUndefined()) {
return 0;
}
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
+ // This can only occur during incremental compilation, when a relocation
+ // still points to a freed decl. It is fine to emit the value 0 here,
+ // as no actual code will reference it.
+ return 0;
+ };
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
return @intCast(u32, va + relocation.addend);
},
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
.R_WASM_SECTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
return @intCast(u32, rel_value);
},
.R_WASM_FUNCTION_OFFSET_I32 => {
- const target_atom = wasm_bin.symbol_atom.get(target_loc) orelse {
+ const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
return @bitCast(u32, @as(i32, -1));
};
+ const target_atom = wasm_bin.getAtom(target_atom_index);
const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
return @intCast(u32, rel_value);
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 8f49d68712..7d4f6a4e36 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -901,14 +901,9 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
};
- const atom = try gpa.create(Atom);
+ const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
+ const atom = try wasm_bin.managed_atoms.addOne(gpa);
atom.* = Atom.empty;
- errdefer {
- atom.deinit(gpa);
- gpa.destroy(atom);
- }
-
- try wasm_bin.managed_atoms.append(gpa, atom);
atom.file = object_index;
atom.size = relocatable_data.size;
atom.alignment = relocatable_data.getAlignment(object);
@@ -938,12 +933,12 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
.index = relocatable_data.getIndex(),
})) |symbols| {
atom.sym_index = symbols.pop();
- try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom_index);
// symbols referencing the same atom will be added as alias
// or as 'parent' when they are global.
while (symbols.popOrNull()) |idx| {
- try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom);
+ try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom_index);
const alias_symbol = object.symtable[idx];
if (alias_symbol.isGlobal()) {
atom.sym_index = idx;
@@ -956,7 +951,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
segment.alignment = std.math.max(segment.alignment, atom.alignment);
}
- try wasm_bin.appendAtomAtIndex(final_index, atom);
+ try wasm_bin.appendAtomAtIndex(final_index, atom_index);
log.debug("Parsed into atom: '{s}' at segment index {d}", .{ object.string_table.get(object.symtable[atom.sym_index].name), final_index });
}
}
diff --git a/src/main.zig b/src/main.zig
index 72e7e094e6..f634c259ff 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -3915,6 +3915,7 @@ pub const usage_build =
;
pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
+ var color: Color = .auto;
var prominent_compile_errors: bool = false;
// We want to release all the locks before executing the child process, so we make a nice
@@ -4117,6 +4118,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
// Here we borrow main package's table and will replace it with a fresh
// one after this process completes.
main_pkg.fetchAndAddDependencies(
+ arena,
&thread_pool,
&http_client,
build_directory,
@@ -4125,6 +4127,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
&dependencies_source,
&build_roots_source,
"",
+ color,
) catch |err| switch (err) {
error.PackageFetchFailed => process.exit(1),
else => |e| return e,
@@ -4361,12 +4364,12 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
};
defer gpa.free(source_code);
- var tree = std.zig.parse(gpa, source_code) catch |err| {
+ var tree = Ast.parse(gpa, source_code, .zig) catch |err| {
fatal("error parsing stdin: {}", .{err});
};
defer tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, tree.errors, tree, "", color);
+ try printErrsMsgToStdErr(gpa, arena, tree, "", color);
var has_ast_error = false;
if (check_ast_flag) {
const Module = @import("Module.zig");
@@ -4566,10 +4569,10 @@ fn fmtPathFile(
// Add to set after no longer possible to get error.IsDir.
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
- var tree = try std.zig.parse(fmt.gpa, source_code);
+ var tree = try Ast.parse(fmt.gpa, source_code, .zig);
defer tree.deinit(fmt.gpa);
- try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree.errors, tree, file_path, fmt.color);
+ try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree, file_path, fmt.color);
if (tree.errors.len != 0) {
fmt.any_error = true;
return;
@@ -4649,14 +4652,14 @@ fn fmtPathFile(
}
}
-fn printErrsMsgToStdErr(
+pub fn printErrsMsgToStdErr(
gpa: mem.Allocator,
arena: mem.Allocator,
- parse_errors: []const Ast.Error,
tree: Ast,
path: []const u8,
color: Color,
) !void {
+ const parse_errors: []const Ast.Error = tree.errors;
var i: usize = 0;
while (i < parse_errors.len) : (i += 1) {
const parse_error = parse_errors[i];
@@ -5312,11 +5315,11 @@ pub fn cmdAstCheck(
file.pkg = try Package.create(gpa, "root", null, file.sub_file_path);
defer file.pkg.destroy(gpa);
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, file.sub_file_path, color);
+ try printErrsMsgToStdErr(gpa, arena, file.tree, file.sub_file_path, color);
if (file.tree.errors.len != 0) {
process.exit(1);
}
@@ -5438,11 +5441,11 @@ pub fn cmdChangelist(
file.source = source;
file.source_loaded = true;
- file.tree = try std.zig.parse(gpa, file.source);
+ file.tree = try Ast.parse(gpa, file.source, .zig);
file.tree_loaded = true;
defer file.tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, old_source_file, .auto);
+ try printErrsMsgToStdErr(gpa, arena, file.tree, old_source_file, .auto);
if (file.tree.errors.len != 0) {
process.exit(1);
}
@@ -5476,10 +5479,10 @@ pub fn cmdChangelist(
if (new_amt != new_stat.size)
return error.UnexpectedEndOfFile;
- var new_tree = try std.zig.parse(gpa, new_source);
+ var new_tree = try Ast.parse(gpa, new_source, .zig);
defer new_tree.deinit(gpa);
- try printErrsMsgToStdErr(gpa, arena, new_tree.errors, new_tree, new_source_file, .auto);
+ try printErrsMsgToStdErr(gpa, arena, new_tree, new_source_file, .auto);
if (new_tree.errors.len != 0) {
process.exit(1);
}
diff --git a/src/mingw.zig b/src/mingw.zig
index 1fee8e90a4..06880743c6 100644
--- a/src/mingw.zig
+++ b/src/mingw.zig
@@ -106,6 +106,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.msvcrt_os_lib => {
const extra_flags = try arena.dupe([]const u8, &[_][]const u8{
"-DHAVE_CONFIG_H",
+ "-D__LIBMSVCRT__",
"-D__LIBMSVCRT_OS__",
"-I",
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 6e8923bed9..e5fc8815ed 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -332,6 +332,7 @@ const Writer = struct {
.float_cast,
.int_cast,
.ptr_cast,
+ .qual_cast,
.truncate,
.align_cast,
.div_exact,
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 47a21f5b5c..a6715d161c 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -4519,7 +4519,10 @@ fn transCreateNodeAssign(
defer block_scope.deinit();
const tmp = try block_scope.makeMangledName(c, "tmp");
- const rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
+ var rhs_node = try transExpr(c, &block_scope.base, rhs, .used);
+ if (!exprIsBooleanType(lhs) and isBoolRes(rhs_node)) {
+ rhs_node = try Tag.bool_to_int.create(c.arena, rhs_node);
+ }
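+ // Illustrative effect (assuming `Tag.bool_to_int` renders as `@boolToInt`):
+ // for C input `x = a && b;` where `x` is an `int`, the temporary is now
+ // initialized with `@boolToInt(a and b)` instead of a `bool` value.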
const tmp_decl = try Tag.var_simple.create(c.arena, .{ .name = tmp, .init = rhs_node });
try block_scope.statements.append(tmp_decl);
diff --git a/src/type.zig b/src/type.zig
index c675cd225d..a13e30cb4c 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2937,24 +2937,24 @@ pub const Type = extern union {
.anyframe_T,
=> return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
- .c_short => return AbiAlignmentAdvanced{ .scalar = CType.short.alignment(target) },
- .c_ushort => return AbiAlignmentAdvanced{ .scalar = CType.ushort.alignment(target) },
- .c_int => return AbiAlignmentAdvanced{ .scalar = CType.int.alignment(target) },
- .c_uint => return AbiAlignmentAdvanced{ .scalar = CType.uint.alignment(target) },
- .c_long => return AbiAlignmentAdvanced{ .scalar = CType.long.alignment(target) },
- .c_ulong => return AbiAlignmentAdvanced{ .scalar = CType.ulong.alignment(target) },
- .c_longlong => return AbiAlignmentAdvanced{ .scalar = CType.longlong.alignment(target) },
- .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = CType.ulonglong.alignment(target) },
- .c_longdouble => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .c_short => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.short) },
+ .c_ushort => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ushort) },
+ .c_int => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.int) },
+ .c_uint => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.uint) },
+ .c_long => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.long) },
+ .c_ulong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulong) },
+ .c_longlong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longlong) },
+ .c_ulonglong => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.ulonglong) },
+ .c_longdouble => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
.f16 => return AbiAlignmentAdvanced{ .scalar = 2 },
- .f32 => return AbiAlignmentAdvanced{ .scalar = CType.float.alignment(target) },
- .f64 => switch (CType.double.sizeInBits(target)) {
- 64 => return AbiAlignmentAdvanced{ .scalar = CType.double.alignment(target) },
+ .f32 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.float) },
+ .f64 => switch (target.c_type_bit_size(.double)) {
+ 64 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.double) },
else => return AbiAlignmentAdvanced{ .scalar = 8 },
},
- .f80 => switch (CType.longdouble.sizeInBits(target)) {
- 80 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .f80 => switch (target.c_type_bit_size(.longdouble)) {
+ 80 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -2964,8 +2964,8 @@ pub const Type = extern union {
return AbiAlignmentAdvanced{ .scalar = abiAlignment(u80_ty, target) };
},
},
- .f128 => switch (CType.longdouble.sizeInBits(target)) {
- 128 => return AbiAlignmentAdvanced{ .scalar = CType.longdouble.alignment(target) },
+ .f128 => switch (target.c_type_bit_size(.longdouble)) {
+ 128 => return AbiAlignmentAdvanced{ .scalar = target.c_type_alignment(.longdouble) },
else => return AbiAlignmentAdvanced{ .scalar = 16 },
},
@@ -3434,21 +3434,22 @@ pub const Type = extern union {
else => return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) },
},
- .c_short => return AbiSizeAdvanced{ .scalar = @divExact(CType.short.sizeInBits(target), 8) },
- .c_ushort => return AbiSizeAdvanced{ .scalar = @divExact(CType.ushort.sizeInBits(target), 8) },
- .c_int => return AbiSizeAdvanced{ .scalar = @divExact(CType.int.sizeInBits(target), 8) },
- .c_uint => return AbiSizeAdvanced{ .scalar = @divExact(CType.uint.sizeInBits(target), 8) },
- .c_long => return AbiSizeAdvanced{ .scalar = @divExact(CType.long.sizeInBits(target), 8) },
- .c_ulong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulong.sizeInBits(target), 8) },
- .c_longlong => return AbiSizeAdvanced{ .scalar = @divExact(CType.longlong.sizeInBits(target), 8) },
- .c_ulonglong => return AbiSizeAdvanced{ .scalar = @divExact(CType.ulonglong.sizeInBits(target), 8) },
+ .c_short => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.short) },
+ .c_ushort => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ushort) },
+ .c_int => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.int) },
+ .c_uint => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.uint) },
+ .c_long => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.long) },
+ .c_ulong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulong) },
+ .c_longlong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longlong) },
+ .c_ulonglong => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.ulonglong) },
+ .c_longdouble => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
.f16 => return AbiSizeAdvanced{ .scalar = 2 },
.f32 => return AbiSizeAdvanced{ .scalar = 4 },
.f64 => return AbiSizeAdvanced{ .scalar = 8 },
.f128 => return AbiSizeAdvanced{ .scalar = 16 },
- .f80 => switch (CType.longdouble.sizeInBits(target)) {
- 80 => return AbiSizeAdvanced{ .scalar = std.mem.alignForward(10, CType.longdouble.alignment(target)) },
+ .f80 => switch (target.c_type_bit_size(.longdouble)) {
+ 80 => return AbiSizeAdvanced{ .scalar = target.c_type_byte_size(.longdouble) },
else => {
var payload: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
@@ -3458,14 +3459,6 @@ pub const Type = extern union {
return AbiSizeAdvanced{ .scalar = abiSize(u80_ty, target) };
},
},
- .c_longdouble => switch (CType.longdouble.sizeInBits(target)) {
- 16 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f16, target) },
- 32 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f32, target) },
- 64 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f64, target) },
- 80 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f80, target) },
- 128 => return AbiSizeAdvanced{ .scalar = abiSize(Type.f128, target) },
- else => unreachable,
- },
// TODO revisit this when we have the concept of the error tag type
.anyerror_void_error_union,
@@ -3748,15 +3741,15 @@ pub const Type = extern union {
.manyptr_const_u8_sentinel_0,
=> return target.cpu.arch.ptrBitWidth(),
- .c_short => return CType.short.sizeInBits(target),
- .c_ushort => return CType.ushort.sizeInBits(target),
- .c_int => return CType.int.sizeInBits(target),
- .c_uint => return CType.uint.sizeInBits(target),
- .c_long => return CType.long.sizeInBits(target),
- .c_ulong => return CType.ulong.sizeInBits(target),
- .c_longlong => return CType.longlong.sizeInBits(target),
- .c_ulonglong => return CType.ulonglong.sizeInBits(target),
- .c_longdouble => return CType.longdouble.sizeInBits(target),
+ .c_short => return target.c_type_bit_size(.short),
+ .c_ushort => return target.c_type_bit_size(.ushort),
+ .c_int => return target.c_type_bit_size(.int),
+ .c_uint => return target.c_type_bit_size(.uint),
+ .c_long => return target.c_type_bit_size(.long),
+ .c_ulong => return target.c_type_bit_size(.ulong),
+ .c_longlong => return target.c_type_bit_size(.longlong),
+ .c_ulonglong => return target.c_type_bit_size(.ulonglong),
+ .c_longdouble => return target.c_type_bit_size(.longdouble),
.error_set,
.error_set_single,
@@ -4631,14 +4624,14 @@ pub const Type = extern union {
.i128 => return .{ .signedness = .signed, .bits = 128 },
.usize => return .{ .signedness = .unsigned, .bits = target.cpu.arch.ptrBitWidth() },
.isize => return .{ .signedness = .signed, .bits = target.cpu.arch.ptrBitWidth() },
- .c_short => return .{ .signedness = .signed, .bits = CType.short.sizeInBits(target) },
- .c_ushort => return .{ .signedness = .unsigned, .bits = CType.ushort.sizeInBits(target) },
- .c_int => return .{ .signedness = .signed, .bits = CType.int.sizeInBits(target) },
- .c_uint => return .{ .signedness = .unsigned, .bits = CType.uint.sizeInBits(target) },
- .c_long => return .{ .signedness = .signed, .bits = CType.long.sizeInBits(target) },
- .c_ulong => return .{ .signedness = .unsigned, .bits = CType.ulong.sizeInBits(target) },
- .c_longlong => return .{ .signedness = .signed, .bits = CType.longlong.sizeInBits(target) },
- .c_ulonglong => return .{ .signedness = .unsigned, .bits = CType.ulonglong.sizeInBits(target) },
+ .c_short => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.short) },
+ .c_ushort => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ushort) },
+ .c_int => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.int) },
+ .c_uint => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.uint) },
+ .c_long => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.long) },
+ .c_ulong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulong) },
+ .c_longlong => return .{ .signedness = .signed, .bits = target.c_type_bit_size(.longlong) },
+ .c_ulonglong => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
.enum_full, .enum_nonexhaustive => ty = ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
@@ -4724,7 +4717,7 @@ pub const Type = extern union {
.f64 => 64,
.f80 => 80,
.f128, .comptime_float => 128,
- .c_longdouble => CType.longdouble.sizeInBits(target),
+ .c_longdouble => target.c_type_bit_size(.longdouble),
else => unreachable,
};
@@ -6689,536 +6682,3 @@ pub const Type = extern union {
/// to packed struct layout to find out all the places in the codebase you need to edit!
pub const packed_struct_layout_version = 2;
};
-
-pub const CType = enum {
- short,
- ushort,
- int,
- uint,
- long,
- ulong,
- longlong,
- ulonglong,
- longdouble,
-
- // We don't have a `c_float`/`c_double` type in Zig, but these
- // are useful for querying target-correct alignment and checking
- // whether C's double is f64 or f32
- float,
- double,
-
- pub fn sizeInBits(self: CType, target: Target) u16 {
- switch (target.os.tag) {
- .freestanding, .other => switch (target.cpu.arch) {
- .msp430 => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .float, .long, .ulong => return 32,
- .longlong, .ulonglong, .double, .longdouble => return 64,
- },
- .avr => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float, .double, .longdouble => return 32,
- .longlong, .ulonglong => return 64,
- },
- .tce, .tcele => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
- .float, .double, .longdouble => return 32,
- },
- .mips64, .mips64el => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 128,
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 80,
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
-
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => return 128,
- },
-
- .riscv32,
- .riscv64,
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .sparc,
- .sparc64,
- .sparcel,
- .wasm32,
- .wasm64,
- => return 128,
-
- else => return 64,
- },
- },
- },
-
- .linux,
- .freebsd,
- .netbsd,
- .dragonfly,
- .openbsd,
- .wasi,
- .emscripten,
- .plan9,
- .solaris,
- .haiku,
- .ananas,
- .fuchsia,
- .minix,
- => switch (target.cpu.arch) {
- .msp430 => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float => return 32,
- .longlong, .ulonglong, .double, .longdouble => return 64,
- },
- .avr => switch (self) {
- .short, .ushort, .int, .uint => return 16,
- .long, .ulong, .float, .double, .longdouble => return 32,
- .longlong, .ulonglong => return 64,
- },
- .tce, .tcele => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
- .float, .double, .longdouble => return 32,
- },
- .mips64, .mips64el => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 80,
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return target.cpu.arch.ptrBitWidth(),
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
-
- .powerpc,
- .powerpcle,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => switch (target.os.tag) {
- .freebsd, .netbsd, .openbsd => return 64,
- else => return 128,
- },
- },
-
- .powerpc64,
- .powerpc64le,
- => switch (target.abi) {
- .musl,
- .musleabi,
- .musleabihf,
- .muslx32,
- => return 64,
- else => switch (target.os.tag) {
- .freebsd, .openbsd => return 64,
- else => return 128,
- },
- },
-
- .riscv32,
- .riscv64,
- .aarch64,
- .aarch64_be,
- .aarch64_32,
- .s390x,
- .mips64,
- .mips64el,
- .sparc,
- .sparc64,
- .sparcel,
- .wasm32,
- .wasm64,
- => return 128,
-
- else => return 64,
- },
- },
- },
-
- .windows, .uefi => switch (target.cpu.arch) {
- .x86 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
- else => return 64,
- },
- },
- .x86_64 => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.abi) {
- .cygnus => return 64,
- else => return 32,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 80,
- else => return 64,
- },
- },
- else => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => return 32,
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 64,
- },
- },
-
- .macos, .ios, .tvos, .watchos => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.cpu.arch) {
- .x86, .arm, .aarch64_32 => return 32,
- .x86_64 => switch (target.abi) {
- .gnux32, .muslx32 => return 32,
- else => return 64,
- },
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => switch (target.cpu.arch) {
- .x86 => switch (target.abi) {
- .android => return 64,
- else => return 80,
- },
- .x86_64 => return 80,
- else => return 64,
- },
- },
-
- .nvcl, .cuda => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong => switch (target.cpu.arch) {
- .nvptx => return 32,
- .nvptx64 => return 64,
- else => return 64,
- },
- .longlong, .ulonglong, .double => return 64,
- .longdouble => return 64,
- },
-
- .amdhsa, .amdpal => switch (self) {
- .short, .ushort => return 16,
- .int, .uint, .float => return 32,
- .long, .ulong, .longlong, .ulonglong, .double => return 64,
- .longdouble => return 128,
- },
-
- .cloudabi,
- .kfreebsd,
- .lv2,
- .zos,
- .rtems,
- .nacl,
- .aix,
- .ps4,
- .ps5,
- .elfiamcu,
- .mesa3d,
- .contiki,
- .hermit,
- .hurd,
- .opencl,
- .glsl450,
- .vulkan,
- .driverkit,
- .shadermodel,
- => @panic("TODO specify the C integer and float type sizes for this OS"),
- }
- }
-
- pub fn alignment(self: CType, target: Target) u16 {
-
- // Overrides for unusual alignments
- switch (target.cpu.arch) {
- .avr => switch (self) {
- .short, .ushort => return 2,
- else => return 1,
- },
- .x86 => switch (target.os.tag) {
- .windows, .uefi => switch (self) {
- .longlong, .ulonglong, .double => return 8,
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
- else => return 8,
- },
- else => {},
- },
- else => {},
- },
- else => {},
- }
-
- // Next-power-of-two-aligned, up to a maximum.
- return @min(
- std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
- switch (target.cpu.arch) {
- .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
- .netbsd => switch (target.abi) {
- .gnueabi,
- .gnueabihf,
- .eabi,
- .eabihf,
- .android,
- .musleabi,
- .musleabihf,
- => 8,
-
- else => @as(u16, 4),
- },
- .ios, .tvos, .watchos => 4,
- else => 8,
- },
-
- .msp430,
- .avr,
- => 2,
-
- .arc,
- .csky,
- .x86,
- .xcore,
- .dxil,
- .loongarch32,
- .tce,
- .tcele,
- .le32,
- .amdil,
- .hsail,
- .spir,
- .spirv32,
- .kalimba,
- .shave,
- .renderscript32,
- .ve,
- .spu_2,
- => 4,
-
- .aarch64_32,
- .amdgcn,
- .amdil64,
- .bpfel,
- .bpfeb,
- .hexagon,
- .hsail64,
- .loongarch64,
- .m68k,
- .mips,
- .mipsel,
- .sparc,
- .sparcel,
- .sparc64,
- .lanai,
- .le64,
- .nvptx,
- .nvptx64,
- .r600,
- .s390x,
- .spir64,
- .spirv64,
- .renderscript64,
- => 8,
-
- .aarch64,
- .aarch64_be,
- .mips64,
- .mips64el,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- .riscv32,
- .riscv64,
- .x86_64,
- .wasm32,
- .wasm64,
- => 16,
- },
- );
- }
-
- pub fn preferredAlignment(self: CType, target: Target) u16 {
-
- // Overrides for unusual alignments
- switch (target.cpu.arch) {
- .arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
- .netbsd => switch (target.abi) {
- .gnueabi,
- .gnueabihf,
- .eabi,
- .eabihf,
- .android,
- .musleabi,
- .musleabihf,
- => {},
-
- else => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- },
- .ios, .tvos, .watchos => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- else => {},
- },
- .arc => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- .avr => switch (self) {
- .int, .uint, .long, .ulong, .float, .longdouble => return 1,
- .short, .ushort => return 2,
- .double => return 4,
- .longlong, .ulonglong => return 8,
- },
- .x86 => switch (target.os.tag) {
- .windows, .uefi => switch (self) {
- .longdouble => switch (target.abi) {
- .gnu, .gnuilp32, .cygnus => return 4,
- else => return 8,
- },
- else => {},
- },
- else => switch (self) {
- .longdouble => return 4,
- else => {},
- },
- },
- else => {},
- }
-
- // Next-power-of-two-aligned, up to a maximum.
- return @min(
- std.math.ceilPowerOfTwoAssert(u16, (self.sizeInBits(target) + 7) / 8),
- switch (target.cpu.arch) {
- .msp430 => @as(u16, 2),
-
- .csky,
- .xcore,
- .dxil,
- .loongarch32,
- .tce,
- .tcele,
- .le32,
- .amdil,
- .hsail,
- .spir,
- .spirv32,
- .kalimba,
- .shave,
- .renderscript32,
- .ve,
- .spu_2,
- => 4,
-
- .arc,
- .arm,
- .armeb,
- .avr,
- .thumb,
- .thumbeb,
- .aarch64_32,
- .amdgcn,
- .amdil64,
- .bpfel,
- .bpfeb,
- .hexagon,
- .hsail64,
- .x86,
- .loongarch64,
- .m68k,
- .mips,
- .mipsel,
- .sparc,
- .sparcel,
- .sparc64,
- .lanai,
- .le64,
- .nvptx,
- .nvptx64,
- .r600,
- .s390x,
- .spir64,
- .spirv64,
- .renderscript64,
- => 8,
-
- .aarch64,
- .aarch64_be,
- .mips64,
- .mips64el,
- .powerpc,
- .powerpcle,
- .powerpc64,
- .powerpc64le,
- .riscv32,
- .riscv64,
- .x86_64,
- .wasm32,
- .wasm64,
- => 16,
- },
- );
- }
-};
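
Editorial note: the target-specific C type size and alignment tables deleted above are not lost; the call sites earlier in this diff query them through std.Target instead (target.c_type_bit_size, target.c_type_byte_size). A minimal sketch of the replacement usage, assuming only the helper names visible in this patch — the program itself is illustrative and not part of the change:

const std = @import("std");
const builtin = @import("builtin");

pub fn main() void {
    // builtin.target is the std.Target the program was compiled for.
    const target = builtin.target;
    // C ABI type sizes are now resolved per target by std.Target helpers.
    std.debug.print("c_long: {d} bits ({d} bytes), c_longdouble: {d} bits\n", .{
        target.c_type_bit_size(.long),
        target.c_type_byte_size(.long),
        target.c_type_bit_size(.longdouble),
    });
}
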
diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm
index e64e05fa13..ef7d7891c9 100644
Binary files a/stage1/zig1.wasm and b/stage1/zig1.wasm differ
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index 8a97b3cbcd..b82bfab99e 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -1125,3 +1125,21 @@ test "returning an opaque type from a function" {
};
try expect(S.foo(123).b == 123);
}
+
+test "orelse coercion as function argument" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+
+ const Loc = struct { start: i32 = -1 };
+ const Container = struct {
+ a: ?Loc = null,
+ fn init(a: Loc) @This() {
+ return .{
+ .a = a,
+ };
+ }
+ };
+ var optional: ?Loc = .{};
+ var foo = Container.init(optional orelse .{});
+ try expect(foo.a.?.start == -1);
+}
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index b2a6cc5a50..f30290eb91 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -896,3 +896,18 @@ test "optional error union return type" {
};
try expect(1234 == try S.foo().?);
}
+
+test "optional error set return type" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const E = error{ A, B };
+ const S = struct {
+ fn foo(return_null: bool) ?E {
+ return if (return_null) null else E.A;
+ }
+ };
+
+ try expect(null == S.foo(true));
+ try expect(E.A == S.foo(false).?);
+}
diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig
index ef4487d9b9..cfe948ac02 100644
--- a/test/behavior/sizeof_and_typeof.zig
+++ b/test/behavior/sizeof_and_typeof.zig
@@ -292,3 +292,12 @@ test "@sizeOf optional of previously unresolved union" {
const Node = union { a: usize };
try expect(@sizeOf(?Node) == @sizeOf(Node) + @alignOf(Node));
}
+
+test "@offsetOf zero-bit field" {
+ const S = packed struct {
+ a: u32,
+ b: u0,
+ c: u32,
+ };
+ try expect(@offsetOf(S, "b") == @offsetOf(S, "c"));
+}
diff --git a/test/cases/compile_errors/assigning_to_struct_or_union_fields_that_are_not_optionals_with_a_function_that_returns_an_optional.zig b/test/cases/compile_errors/assigning_to_struct_or_union_fields_that_are_not_optionals_with_a_function_that_returns_an_optional.zig
index 762eb284f2..530e5ffb74 100644
--- a/test/cases/compile_errors/assigning_to_struct_or_union_fields_that_are_not_optionals_with_a_function_that_returns_an_optional.zig
+++ b/test/cases/compile_errors/assigning_to_struct_or_union_fields_that_are_not_optionals_with_a_function_that_returns_an_optional.zig
@@ -20,4 +20,4 @@ export fn entry() void {
//
// :11:27: error: expected type 'u8', found '?u8'
// :11:27: note: cannot convert optional to payload type
-// :11:27: note: consider using `.?`, `orelse`, or `if`
+// :11:27: note: consider using '.?', 'orelse', or 'if'
diff --git a/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig b/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
new file mode 100644
index 0000000000..efc3f556a9
--- /dev/null
+++ b/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
@@ -0,0 +1,21 @@
+const std = @import("std");
+const MyStruct = struct {
+ a: i32,
+ b: i32,
+
+ pub fn getA(self: *List) i32 {
+ return self.items(.c);
+ }
+};
+const List = std.MultiArrayList(MyStruct);
+pub export fn entry() void {
+ var list = List{};
+ _ = MyStruct.getA(&list);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :7:28: error: no field named 'c' in enum 'meta.FieldEnum(tmp.MyStruct)'
+// :?:?: note: enum declared here
diff --git a/test/cases/compile_errors/discarding_error_value.zig b/test/cases/compile_errors/discarding_error_value.zig
index 6dfe0be231..c24d517d3e 100644
--- a/test/cases/compile_errors/discarding_error_value.zig
+++ b/test/cases/compile_errors/discarding_error_value.zig
@@ -10,4 +10,4 @@ fn foo() !void {
// target=native
//
// :2:12: error: error is discarded
-// :2:12: note: consider using `try`, `catch`, or `if`
+// :2:12: note: consider using 'try', 'catch', or 'if'
diff --git a/test/cases/compile_errors/helpful_return_type_error_message.zig b/test/cases/compile_errors/helpful_return_type_error_message.zig
index b8e48036de..871e948537 100644
--- a/test/cases/compile_errors/helpful_return_type_error_message.zig
+++ b/test/cases/compile_errors/helpful_return_type_error_message.zig
@@ -26,7 +26,7 @@ export fn quux() u32 {
// :11:15: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(tmp.bar)).Fn.return_type.?).ErrorUnion.error_set!u32'
// :10:17: note: function cannot return an error
// :11:15: note: cannot convert error union to payload type
-// :11:15: note: consider using `try`, `catch`, or `if`
+// :11:15: note: consider using 'try', 'catch', or 'if'
// :15:14: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(tmp.bar)).Fn.return_type.?).ErrorUnion.error_set!u32'
// :15:14: note: cannot convert error union to payload type
-// :15:14: note: consider using `try`, `catch`, or `if`
+// :15:14: note: consider using 'try', 'catch', or 'if'
diff --git a/test/cases/compile_errors/ignored_deferred_function_call.zig b/test/cases/compile_errors/ignored_deferred_function_call.zig
index 05c4373705..b318baa16c 100644
--- a/test/cases/compile_errors/ignored_deferred_function_call.zig
+++ b/test/cases/compile_errors/ignored_deferred_function_call.zig
@@ -8,4 +8,4 @@ fn bar() anyerror!i32 { return 0; }
// target=native
//
// :2:14: error: error is ignored
-// :2:14: note: consider using `try`, `catch`, or `if`
+// :2:14: note: consider using 'try', 'catch', or 'if'
diff --git a/test/cases/compile_errors/ignored_expression_in_while_continuation.zig b/test/cases/compile_errors/ignored_expression_in_while_continuation.zig
index d7de0aac57..d108903c82 100644
--- a/test/cases/compile_errors/ignored_expression_in_while_continuation.zig
+++ b/test/cases/compile_errors/ignored_expression_in_while_continuation.zig
@@ -18,8 +18,8 @@ fn bad() anyerror!void {
// target=native
//
// :2:24: error: error is ignored
-// :2:24: note: consider using `try`, `catch`, or `if`
+// :2:24: note: consider using 'try', 'catch', or 'if'
// :6:25: error: error is ignored
-// :6:25: note: consider using `try`, `catch`, or `if`
+// :6:25: note: consider using 'try', 'catch', or 'if'
// :10:25: error: error is ignored
-// :10:25: note: consider using `try`, `catch`, or `if`
+// :10:25: note: consider using 'try', 'catch', or 'if'
diff --git a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig
index 9cc5ed3a42..242454e859 100644
--- a/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig
+++ b/test/cases/compile_errors/increase_pointer_alignment_in_ptrCast.zig
@@ -11,3 +11,4 @@ export fn entry() u32 {
// :3:17: error: cast increases pointer alignment
// :3:32: note: '*u8' has alignment '1'
// :3:26: note: '*u32' has alignment '4'
+// :3:17: note: consider using '@alignCast'
diff --git a/test/cases/compile_errors/inline_call_runtime_value_to_comptime_param.zig b/test/cases/compile_errors/inline_call_runtime_value_to_comptime_param.zig
new file mode 100644
index 0000000000..cddd91384b
--- /dev/null
+++ b/test/cases/compile_errors/inline_call_runtime_value_to_comptime_param.zig
@@ -0,0 +1,17 @@
+inline fn needComptime(comptime a: u64) void {
+ if (a != 0) @compileError("foo");
+}
+fn acceptRuntime(value: u64) void {
+ needComptime(value);
+}
+pub export fn entry() void {
+ var value: u64 = 0;
+ acceptRuntime(value);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :5:18: error: unable to resolve comptime value
+// :5:18: note: parameter is comptime
diff --git a/test/cases/compile_errors/invalid_decltest.zig b/test/cases/compile_errors/invalid_decltest.zig
new file mode 100644
index 0000000000..cde984f366
--- /dev/null
+++ b/test/cases/compile_errors/invalid_decltest.zig
@@ -0,0 +1,13 @@
+export fn foo() void {
+ const a = 1;
+ struct {
+ test a {}
+ };
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :4:14: error: cannot test a local constant
+// :2:11: note: local constant declared here
diff --git a/test/cases/compile_errors/invalid_member_of_builtin_enum.zig b/test/cases/compile_errors/invalid_member_of_builtin_enum.zig
index 3edb17ffbf..b0a176d792 100644
--- a/test/cases/compile_errors/invalid_member_of_builtin_enum.zig
+++ b/test/cases/compile_errors/invalid_member_of_builtin_enum.zig
@@ -1,6 +1,6 @@
const builtin = @import("std").builtin;
export fn entry() void {
- const foo = builtin.Mode.x86;
+ const foo = builtin.OptimizeMode.x86;
_ = foo;
}
@@ -8,5 +8,5 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :3:30: error: enum 'builtin.Mode' has no member named 'x86'
+// :3:38: error: enum 'builtin.OptimizeMode' has no member named 'x86'
// :?:18: note: enum declared here
diff --git a/test/cases/compile_errors/invalid_qualcast.zig b/test/cases/compile_errors/invalid_qualcast.zig
new file mode 100644
index 0000000000..20b223b727
--- /dev/null
+++ b/test/cases/compile_errors/invalid_qualcast.zig
@@ -0,0 +1,12 @@
+pub export fn entry() void {
+ var a: [*:0]const volatile u16 = undefined;
+ _ = @qualCast([*]u16, a);
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:9: error: '@qualCast' can only modify 'const' and 'volatile' qualifiers
+// :3:9: note: expected type '[*]const volatile u16'
+// :3:9: note: got type '[*:0]const volatile u16'
diff --git a/test/cases/compile_errors/issue_5618_coercion_of_optional_anyopaque_to_anyopaque_must_fail.zig b/test/cases/compile_errors/issue_5618_coercion_of_optional_anyopaque_to_anyopaque_must_fail.zig
index f4716bc24d..95bba054b3 100644
--- a/test/cases/compile_errors/issue_5618_coercion_of_optional_anyopaque_to_anyopaque_must_fail.zig
+++ b/test/cases/compile_errors/issue_5618_coercion_of_optional_anyopaque_to_anyopaque_must_fail.zig
@@ -10,5 +10,5 @@ export fn foo() void {
//
// :4:9: error: expected type '*anyopaque', found '?*anyopaque'
// :4:9: note: cannot convert optional to payload type
-// :4:9: note: consider using `.?`, `orelse`, or `if`
+// :4:9: note: consider using '.?', 'orelse', or 'if'
// :4:9: note: '?*anyopaque' could have null values which are illegal in type '*anyopaque'
diff --git a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
index a2fea4ff11..eedef01234 100644
--- a/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
+++ b/test/cases/compile_errors/ptrCast_discards_const_qualifier.zig
@@ -9,3 +9,4 @@ export fn entry() void {
// target=native
//
// :3:15: error: cast discards const qualifier
+// :3:15: note: consider using '@qualCast'
diff --git a/test/cases/compile_errors/regression_test_2980_base_type_u32_is_not_type_checked_properly_when_assigning_a_value_within_a_struct.zig b/test/cases/compile_errors/regression_test_2980_base_type_u32_is_not_type_checked_properly_when_assigning_a_value_within_a_struct.zig
index 09c496211a..1b951528bb 100644
--- a/test/cases/compile_errors/regression_test_2980_base_type_u32_is_not_type_checked_properly_when_assigning_a_value_within_a_struct.zig
+++ b/test/cases/compile_errors/regression_test_2980_base_type_u32_is_not_type_checked_properly_when_assigning_a_value_within_a_struct.zig
@@ -20,4 +20,4 @@ export fn entry() void {
//
// :12:25: error: expected type 'u32', found '@typeInfo(@typeInfo(@TypeOf(tmp.get_uval)).Fn.return_type.?).ErrorUnion.error_set!u32'
// :12:25: note: cannot convert error union to payload type
-// :12:25: note: consider using `try`, `catch`, or `if`
+// :12:25: note: consider using 'try', 'catch', or 'if'
diff --git a/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr.zig b/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr.zig
index cc1d2c976a..26c1a8d9cf 100644
--- a/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr.zig
+++ b/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr.zig
@@ -17,4 +17,4 @@ pub const Container = struct {
//
// :3:36: error: expected type 'i32', found '?i32'
// :3:36: note: cannot convert optional to payload type
-// :3:36: note: consider using `.?`, `orelse`, or `if`
+// :3:36: note: consider using '.?', 'orelse', or 'if'
diff --git a/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr_generic_call.zig b/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr_generic_call.zig
index 897675d448..471f9cca04 100644
--- a/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr_generic_call.zig
+++ b/test/cases/compile_errors/result_location_incompatibility_mismatching_handle_is_ptr_generic_call.zig
@@ -17,4 +17,4 @@ pub const Container = struct {
//
// :3:36: error: expected type 'i32', found '?i32'
// :3:36: note: cannot convert optional to payload type
-// :3:36: note: consider using `.?`, `orelse`, or `if`
+// :3:36: note: consider using '.?', 'orelse', or 'if'
diff --git a/test/link/bss/build.zig b/test/link/bss/build.zig
index 76e9bdb305..0df9c1d323 100644
--- a/test/link/bss/build.zig
+++ b/test/link/bss/build.zig
@@ -1,12 +1,15 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
- const exe = b.addExecutable("bss", "main.zig");
+ const exe = b.addExecutable(.{
+ .name = "bss",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
- exe.setBuildMode(mode);
const run = exe.run();
run.expectStdOutEqual("0, 1, 0\n");
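
Editorial note: the remaining build.zig changes in this diff repeat the same mechanical migration per test: std.build.Builder becomes std.Build, and standardReleaseOptions/setBuildMode/setTarget are replaced by standardOptimizeOption/standardTargetOptions plus an options struct passed to addExecutable/addStaticLibrary/addSharedLibrary/addTest. A minimal sketch of the new-style script, with placeholder names ("demo", "main.zig") that are not taken from this patch:

const std = @import("std");

pub fn build(b: *std.Build) void {
    // Optimize mode and target are requested up front...
    const optimize = b.standardOptimizeOption(.{});
    const target = b.standardTargetOptions(.{});

    // ...and passed in the options struct instead of via setBuildMode/setTarget.
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "main.zig" },
        .optimize = optimize,
        .target = target,
    });
    exe.install();
}
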
diff --git a/test/link/common_symbols/build.zig b/test/link/common_symbols/build.zig
index 2f9f892e86..ee9dd94ebd 100644
--- a/test/link/common_symbols/build.zig
+++ b/test/link/common_symbols/build.zig
@@ -1,14 +1,19 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = .{},
+ });
lib_a.addCSourceFiles(&.{ "c.c", "a.c", "b.c" }, &.{"-fcommon"});
- lib_a.setBuildMode(mode);
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
test_exe.linkLibrary(lib_a);
const test_step = b.step("test", "Test it");
diff --git a/test/link/common_symbols_alignment/build.zig b/test/link/common_symbols_alignment/build.zig
index a62d86af4f..f6efdc784b 100644
--- a/test/link/common_symbols_alignment/build.zig
+++ b/test/link/common_symbols_alignment/build.zig
@@ -1,14 +1,21 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
+ const target = b.standardTargetOptions(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
lib_a.addCSourceFiles(&.{"a.c"}, &.{"-fcommon"});
- lib_a.setBuildMode(mode);
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkLibrary(lib_a);
const test_step = b.step("test", "Test it");
diff --git a/test/link/interdependent_static_c_libs/build.zig b/test/link/interdependent_static_c_libs/build.zig
index bd1b6100da..d8962a8e08 100644
--- a/test/link/interdependent_static_c_libs/build.zig
+++ b/test/link/interdependent_static_c_libs/build.zig
@@ -1,20 +1,30 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
+ const target = b.standardTargetOptions(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
lib_a.addCSourceFile("a.c", &[_][]const u8{});
- lib_a.setBuildMode(mode);
lib_a.addIncludePath(".");
- const lib_b = b.addStaticLibrary("b", null);
+ const lib_b = b.addStaticLibrary(.{
+ .name = "b",
+ .optimize = optimize,
+ .target = target,
+ });
lib_b.addCSourceFile("b.c", &[_][]const u8{});
- lib_b.setBuildMode(mode);
lib_b.addIncludePath(".");
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkLibrary(lib_a);
test_exe.linkLibrary(lib_b);
test_exe.addIncludePath(".");
diff --git a/test/link/macho/bugs/13056/build.zig b/test/link/macho/bugs/13056/build.zig
index 751a7c4db6..662fd25c92 100644
--- a/test/link/macho/bugs/13056/build.zig
+++ b/test/link/macho/bugs/13056/build.zig
@@ -1,8 +1,7 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const target_info = std.zig.system.NativeTargetInfo.detect(target) catch unreachable;
@@ -11,7 +10,10 @@ pub fn build(b: *Builder) void {
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
exe.addIncludePath(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/include" }) catch unreachable);
exe.addIncludePath(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/include/c++/v1" }) catch unreachable);
@@ -20,7 +22,6 @@ pub fn build(b: *Builder) void {
"-nostdinc++",
});
exe.addObjectFile(std.fs.path.join(b.allocator, &.{ sdk.path, "/usr/lib/libc++.tbd" }) catch unreachable);
- exe.setBuildMode(mode);
const run_cmd = exe.run();
run_cmd.expectStdErrEqual("x: 5\n");
diff --git a/test/link/macho/bugs/13457/build.zig b/test/link/macho/bugs/13457/build.zig
index 2de8c01c6a..3560b4a168 100644
--- a/test/link/macho/bugs/13457/build.zig
+++ b/test/link/macho/bugs/13457/build.zig
@@ -1,16 +1,17 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", "main.zig");
- exe.setBuildMode(mode);
- exe.setTarget(target);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
const run = exe.runEmulatable();
test_step.dependOn(&run.step);
diff --git a/test/link/macho/dead_strip/build.zig b/test/link/macho/dead_strip/build.zig
index 25759f5619..d82c81edca 100644
--- a/test/link/macho/dead_strip/build.zig
+++ b/test/link/macho/dead_strip/build.zig
@@ -1,9 +1,7 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
@@ -11,7 +9,7 @@ pub fn build(b: *Builder) void {
{
// Without -dead_strip, we expect `iAmUnused` symbol present
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
const check = exe.checkObject(.macho);
check.checkInSymtab();
@@ -24,7 +22,7 @@ pub fn build(b: *Builder) void {
{
// With -dead_strip, no `iAmUnused` symbol should be present
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.link_gc_sections = true;
const check = exe.checkObject(.macho);
@@ -37,11 +35,17 @@ pub fn build(b: *Builder) void {
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const exe = b.addExecutable("test", null);
+fn createScenario(
+ b: *std.Build,
+ optimize: std.builtin.OptimizeMode,
+ target: std.zig.CrossTarget,
+) *std.Build.CompileStep {
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibC();
return exe;
}
diff --git a/test/link/macho/dead_strip_dylibs/build.zig b/test/link/macho/dead_strip_dylibs/build.zig
index efdaf191bd..8b62cec6e6 100644
--- a/test/link/macho/dead_strip_dylibs/build.zig
+++ b/test/link/macho/dead_strip_dylibs/build.zig
@@ -1,16 +1,14 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
{
// Without -dead_strip_dylibs we expect `-la` to include liba.dylib in the final executable
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, optimize);
const check = exe.checkObject(.macho);
check.checkStart("cmd LOAD_DYLIB");
@@ -27,7 +25,7 @@ pub fn build(b: *Builder) void {
{
// With -dead_strip_dylibs, we should include liba.dylib as it's unreachable
- const exe = createScenario(b, mode);
+ const exe = createScenario(b, optimize);
exe.dead_strip_dylibs = true;
const run_cmd = exe.run();
@@ -36,10 +34,12 @@ pub fn build(b: *Builder) void {
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode) *LibExeObjectStep {
- const exe = b.addExecutable("test", null);
+fn createScenario(b: *std.Build, optimize: std.builtin.OptimizeMode) *std.Build.CompileStep {
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkFramework("Cocoa");
return exe;
diff --git a/test/link/macho/dylib/build.zig b/test/link/macho/dylib/build.zig
index a5baf255c6..7a1e2d862c 100644
--- a/test/link/macho/dylib/build.zig
+++ b/test/link/macho/dylib/build.zig
@@ -1,16 +1,18 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setBuildMode(mode);
- dylib.setTarget(target);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
dylib.install();
@@ -24,9 +26,11 @@ pub fn build(b: *Builder) void {
test_step.dependOn(&check_dylib.step);
- const exe = b.addExecutable("main", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkSystemLibrary("a");
exe.linkLibC();
diff --git a/test/link/macho/empty/build.zig b/test/link/macho/empty/build.zig
index ab016fd4bd..586da1511b 100644
--- a/test/link/macho/empty/build.zig
+++ b/test/link/macho/empty/build.zig
@@ -1,21 +1,22 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
exe.addCSourceFile("empty.c", &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibC();
- const run_cmd = std.build.EmulatableRunStep.create(b, "run", exe);
+ const run_cmd = std.Build.EmulatableRunStep.create(b, "run", exe);
run_cmd.expectStdOutEqual("Hello!\n");
test_step.dependOn(&run_cmd.step);
}
diff --git a/test/link/macho/entry/build.zig b/test/link/macho/entry/build.zig
index 0ecca14aa2..4504da9c6c 100644
--- a/test/link/macho/entry/build.zig
+++ b/test/link/macho/entry/build.zig
@@ -1,15 +1,16 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("main", null);
- exe.setTarget(.{ .os_tag = .macos });
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = .{ .os_tag = .macos },
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.entry_symbol_name = "_non_main";
diff --git a/test/link/macho/headerpad/build.zig b/test/link/macho/headerpad/build.zig
index 0730a01d44..3ef17573f8 100644
--- a/test/link/macho/headerpad/build.zig
+++ b/test/link/macho/headerpad/build.zig
@@ -1,17 +1,15 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
{
// Test -headerpad_max_install_names
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_max_install_names = true;
const check = exe.checkObject(.macho);
@@ -36,7 +34,7 @@ pub fn build(b: *Builder) void {
{
// Test -headerpad
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_size = 0x10000;
const check = exe.checkObject(.macho);
@@ -52,7 +50,7 @@ pub fn build(b: *Builder) void {
{
// Test both flags with -headerpad overriding -headerpad_max_install_names
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_max_install_names = true;
exe.headerpad_size = 0x10000;
@@ -69,7 +67,7 @@ pub fn build(b: *Builder) void {
{
// Test both flags with -headerpad_max_install_names overriding -headerpad
- const exe = simpleExe(b, mode);
+ const exe = simpleExe(b, optimize);
exe.headerpad_size = 0x1000;
exe.headerpad_max_install_names = true;
@@ -94,9 +92,11 @@ pub fn build(b: *Builder) void {
}
}
-fn simpleExe(b: *Builder, mode: std.builtin.Mode) *LibExeObjectStep {
- const exe = b.addExecutable("main", null);
- exe.setBuildMode(mode);
+fn simpleExe(b: *std.Build, optimize: std.builtin.OptimizeMode) *std.Build.CompileStep {
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.linkFramework("CoreFoundation");
diff --git a/test/link/macho/linksection/build.zig b/test/link/macho/linksection/build.zig
index 9204499803..227d4eeb63 100644
--- a/test/link/macho/linksection/build.zig
+++ b/test/link/macho/linksection/build.zig
@@ -1,15 +1,18 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = std.zig.CrossTarget{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const obj = b.addObject("test", "main.zig");
- obj.setBuildMode(mode);
- obj.setTarget(target);
+ const obj = b.addObject(.{
+ .name = "test",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
const check = obj.checkObject(.macho);
@@ -19,7 +22,7 @@ pub fn build(b: *std.build.Builder) void {
check.checkInSymtab();
check.checkNext("{*} (__TEXT,__TestFn) external _testFn");
- if (mode == .Debug) {
+ if (optimize == .Debug) {
check.checkInSymtab();
check.checkNext("{*} (__TEXT,__TestGenFnA) _main.testGenericFn__anon_{*}");
}
diff --git a/test/link/macho/needed_framework/build.zig b/test/link/macho/needed_framework/build.zig
index 4315935941..8b6e3dd87f 100644
--- a/test/link/macho/needed_framework/build.zig
+++ b/test/link/macho/needed_framework/build.zig
@@ -1,18 +1,18 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
// -dead_strip_dylibs
// -needed_framework Cocoa
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkFrameworkNeeded("Cocoa");
exe.dead_strip_dylibs = true;
diff --git a/test/link/macho/needed_library/build.zig b/test/link/macho/needed_library/build.zig
index a314fd2201..92a73d22b7 100644
--- a/test/link/macho/needed_library/build.zig
+++ b/test/link/macho/needed_library/build.zig
@@ -1,27 +1,30 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
dylib.install();
// -dead_strip_dylibs
// -needed-la
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibC();
exe.linkSystemLibraryNeeded("a");
exe.addLibraryPath(b.pathFromRoot("zig-out/lib"));
diff --git a/test/link/macho/objc/build.zig b/test/link/macho/objc/build.zig
index d7fd872f77..10d293baab 100644
--- a/test/link/macho/objc/build.zig
+++ b/test/link/macho/objc/build.zig
@@ -1,21 +1,22 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addIncludePath(".");
exe.addCSourceFile("Foo.m", &[0][]const u8{});
exe.addCSourceFile("test.m", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
// TODO when we figure out how to ship framework stubs for cross-compilation,
// populate paths to the sysroot here.
exe.linkFramework("Foundation");
- const run_cmd = std.build.EmulatableRunStep.create(b, "run", exe);
+ const run_cmd = std.Build.EmulatableRunStep.create(b, "run", exe);
test_step.dependOn(&run_cmd.step);
}
diff --git a/test/link/macho/objcpp/build.zig b/test/link/macho/objcpp/build.zig
index 767578e225..2a3459be50 100644
--- a/test/link/macho/objcpp/build.zig
+++ b/test/link/macho/objcpp/build.zig
@@ -1,17 +1,18 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
b.default_step.dependOn(&exe.step);
exe.addIncludePath(".");
exe.addCSourceFile("Foo.mm", &[0][]const u8{});
exe.addCSourceFile("test.mm", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibCpp();
// TODO when we figure out how to ship framework stubs for cross-compilation,
// populate paths to the sysroot here.
diff --git a/test/link/macho/pagezero/build.zig b/test/link/macho/pagezero/build.zig
index 5a7044d960..0a8471b919 100644
--- a/test/link/macho/pagezero/build.zig
+++ b/test/link/macho/pagezero/build.zig
@@ -1,17 +1,18 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
{
- const exe = b.addExecutable("pagezero", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "pagezero",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.pagezero_size = 0x4000;
@@ -29,9 +30,11 @@ pub fn build(b: *Builder) void {
}
{
- const exe = b.addExecutable("no_pagezero", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "no_pagezero",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.pagezero_size = 0;
diff --git a/test/link/macho/search_strategy/build.zig b/test/link/macho/search_strategy/build.zig
index e556b5bb23..62757f885b 100644
--- a/test/link/macho/search_strategy/build.zig
+++ b/test/link/macho/search_strategy/build.zig
@@ -1,9 +1,7 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
@@ -11,7 +9,7 @@ pub fn build(b: *Builder) void {
{
// -search_dylibs_first
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.search_strategy = .dylibs_first;
const check = exe.checkObject(.macho);
@@ -26,40 +24,51 @@ pub fn build(b: *Builder) void {
{
// -search_paths_first
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.search_strategy = .paths_first;
- const run = std.build.EmulatableRunStep.create(b, "run", exe);
+ const run = std.Build.EmulatableRunStep.create(b, "run", exe);
run.cwd = b.pathFromRoot(".");
run.expectStdOutEqual("Hello world");
test_step.dependOn(&run.step);
}
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const static = b.addStaticLibrary("a", null);
- static.setTarget(target);
- static.setBuildMode(mode);
+fn createScenario(
+ b: *std.Build,
+ optimize: std.builtin.OptimizeMode,
+ target: std.zig.CrossTarget,
+) *std.Build.CompileStep {
+ const static = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
static.addCSourceFile("a.c", &.{});
static.linkLibC();
- static.override_dest_dir = std.build.InstallDir{
+ static.override_dest_dir = std.Build.InstallDir{
.custom = "static",
};
static.install();
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
- dylib.override_dest_dir = std.build.InstallDir{
+ dylib.override_dest_dir = std.Build.InstallDir{
.custom = "dynamic",
};
dylib.install();
- const exe = b.addExecutable("main", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkSystemLibraryName("a");
exe.linkLibC();
diff --git a/test/link/macho/stack_size/build.zig b/test/link/macho/stack_size/build.zig
index 91c44baf52..3529a134eb 100644
--- a/test/link/macho/stack_size/build.zig
+++ b/test/link/macho/stack_size/build.zig
@@ -1,16 +1,17 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("main", null);
- exe.setTarget(target);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("main.c", &.{});
exe.linkLibC();
exe.stack_size = 0x100000000;
diff --git a/test/link/macho/strict_validation/build.zig b/test/link/macho/strict_validation/build.zig
index 0ea150252c..408076657b 100644
--- a/test/link/macho/strict_validation/build.zig
+++ b/test/link/macho/strict_validation/build.zig
@@ -1,18 +1,19 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("main", "main.zig");
- exe.setBuildMode(mode);
- exe.setTarget(target);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
exe.linkLibC();
const check_exe = exe.checkObject(.macho);
diff --git a/test/link/macho/tls/build.zig b/test/link/macho/tls/build.zig
index 031a05cedf..c77588cb5d 100644
--- a/test/link/macho/tls/build.zig
+++ b/test/link/macho/tls/build.zig
@@ -1,19 +1,23 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
- const lib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- lib.setBuildMode(mode);
- lib.setTarget(target);
+ const lib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
lib.addCSourceFile("a.c", &.{});
lib.linkLibC();
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
- test_exe.setTarget(target);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkLibrary(lib);
test_exe.linkLibC();
diff --git a/test/link/macho/unwind_info/build.zig b/test/link/macho/unwind_info/build.zig
index cc00854465..408f762f5d 100644
--- a/test/link/macho/unwind_info/build.zig
+++ b/test/link/macho/unwind_info/build.zig
@@ -1,26 +1,24 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
- testUnwindInfo(b, test_step, mode, target, false);
- testUnwindInfo(b, test_step, mode, target, true);
+ testUnwindInfo(b, test_step, optimize, target, false);
+ testUnwindInfo(b, test_step, optimize, target, true);
}
fn testUnwindInfo(
- b: *Builder,
- test_step: *std.build.Step,
- mode: std.builtin.Mode,
+ b: *std.Build,
+ test_step: *std.Build.Step,
+ optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
dead_strip: bool,
) void {
- const exe = createScenario(b, mode, target);
+ const exe = createScenario(b, optimize, target);
exe.link_gc_sections = dead_strip;
const check = exe.checkObject(.macho);
@@ -52,8 +50,16 @@ fn testUnwindInfo(
test_step.dependOn(&run_cmd.step);
}
-fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const exe = b.addExecutable("test", null);
+fn createScenario(
+ b: *std.Build,
+ optimize: std.builtin.OptimizeMode,
+ target: std.zig.CrossTarget,
+) *std.Build.CompileStep {
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&exe.step);
exe.addIncludePath(".");
exe.addCSourceFiles(&[_][]const u8{
@@ -61,8 +67,6 @@ fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarg
"simple_string.cpp",
"simple_string_owner.cpp",
}, &[0][]const u8{});
- exe.setBuildMode(mode);
- exe.setTarget(target);
exe.linkLibCpp();
return exe;
}
diff --git a/test/link/macho/uuid/build.zig b/test/link/macho/uuid/build.zig
index 314febdb20..6a68263fbf 100644
--- a/test/link/macho/uuid/build.zig
+++ b/test/link/macho/uuid/build.zig
@@ -1,8 +1,6 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
@@ -27,23 +25,23 @@ pub fn build(b: *Builder) void {
}
fn testUuid(
- b: *Builder,
- test_step: *std.build.Step,
- mode: std.builtin.Mode,
+ b: *std.Build,
+ test_step: *std.Build.Step,
+ optimize: std.builtin.OptimizeMode,
target: std.zig.CrossTarget,
comptime exp: []const u8,
) void {
// The calculated UUID value is independent of debug info and so it should
// stay the same across builds.
{
- const dylib = simpleDylib(b, mode, target);
+ const dylib = simpleDylib(b, optimize, target);
const check_dylib = dylib.checkObject(.macho);
check_dylib.checkStart("cmd UUID");
check_dylib.checkNext("uuid " ++ exp);
test_step.dependOn(&check_dylib.step);
}
{
- const dylib = simpleDylib(b, mode, target);
+ const dylib = simpleDylib(b, optimize, target);
dylib.strip = true;
const check_dylib = dylib.checkObject(.macho);
check_dylib.checkStart("cmd UUID");
@@ -52,10 +50,17 @@ fn testUuid(
}
}
-fn simpleDylib(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
- const dylib = b.addSharedLibrary("test", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+fn simpleDylib(
+ b: *std.Build,
+ optimize: std.builtin.OptimizeMode,
+ target: std.zig.CrossTarget,
+) *std.Build.CompileStep {
+ const dylib = b.addSharedLibrary(.{
+ .name = "test",
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
dylib.addCSourceFile("test.c", &.{});
dylib.linkLibC();
return dylib;
diff --git a/test/link/macho/weak_framework/build.zig b/test/link/macho/weak_framework/build.zig
index 44675a15f8..ca28458d77 100644
--- a/test/link/macho/weak_framework/build.zig
+++ b/test/link/macho/weak_framework/build.zig
@@ -1,16 +1,16 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkFrameworkWeak("Cocoa");
diff --git a/test/link/macho/weak_library/build.zig b/test/link/macho/weak_library/build.zig
index 79f67bd7df..de5aa45e30 100644
--- a/test/link/macho/weak_library/build.zig
+++ b/test/link/macho/weak_library/build.zig
@@ -1,25 +1,28 @@
const std = @import("std");
-const Builder = std.build.Builder;
-const LibExeObjectStep = std.build.LibExeObjStep;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target: std.zig.CrossTarget = .{ .os_tag = .macos };
const test_step = b.step("test", "Test the program");
test_step.dependOn(b.getInstallStep());
- const dylib = b.addSharedLibrary("a", null, b.version(1, 0, 0));
- dylib.setTarget(target);
- dylib.setBuildMode(mode);
+ const dylib = b.addSharedLibrary(.{
+ .name = "a",
+ .version = .{ .major = 1, .minor = 0, .patch = 0 },
+ .target = target,
+ .optimize = optimize,
+ });
dylib.addCSourceFile("a.c", &.{});
dylib.linkLibC();
dylib.install();
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .target = target,
+ .optimize = optimize,
+ });
exe.addCSourceFile("main.c", &[0][]const u8{});
- exe.setTarget(target);
- exe.setBuildMode(mode);
exe.linkLibC();
exe.linkSystemLibraryWeak("a");
exe.addLibraryPath(b.pathFromRoot("zig-out/lib"));
diff --git a/test/link/static_lib_as_system_lib/build.zig b/test/link/static_lib_as_system_lib/build.zig
index f39f3fac2a..b6cf32d711 100644
--- a/test/link/static_lib_as_system_lib/build.zig
+++ b/test/link/static_lib_as_system_lib/build.zig
@@ -1,17 +1,23 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
+ const target = b.standardTargetOptions(.{});
- const lib_a = b.addStaticLibrary("a", null);
+ const lib_a = b.addStaticLibrary(.{
+ .name = "a",
+ .optimize = optimize,
+ .target = target,
+ });
lib_a.addCSourceFile("a.c", &[_][]const u8{});
- lib_a.setBuildMode(mode);
lib_a.addIncludePath(".");
lib_a.install();
- const test_exe = b.addTest("main.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
test_exe.linkSystemLibrary("a"); // force linking liba.a as -la
test_exe.addSystemIncludePath(".");
const search_path = std.fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "lib" }) catch unreachable;
diff --git a/test/link/wasm/archive/build.zig b/test/link/wasm/archive/build.zig
index 7efa88999a..342c4c08d1 100644
--- a/test/link/wasm/archive/build.zig
+++ b/test/link/wasm/archive/build.zig
@@ -1,17 +1,17 @@
const std = @import("std");
-const Builder = std.build.Builder;
-
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
// The code in question will pull in compiler-rt,
// and therefore link with its archive file.
- const lib = b.addSharedLibrary("main", "main.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/basic-features/build.zig b/test/link/wasm/basic-features/build.zig
index 2c565f9263..9f57066518 100644
--- a/test/link/wasm/basic-features/build.zig
+++ b/test/link/wasm/basic-features/build.zig
@@ -1,14 +1,18 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
-
+pub fn build(b: *std.Build) void {
// Library with explicitly set cpu features
- const lib = b.addSharedLibrary("lib", "main.zig", .unversioned);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- lib.target.cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp };
- lib.target.cpu_features_add.addFeature(0); // index 0 == atomics (see std.Target.wasm.Features)
- lib.setBuildMode(mode);
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ .target = .{
+ .cpu_arch = .wasm32,
+ .cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp },
+ .cpu_features_add = std.Target.wasm.featureSet(&.{.atomics}),
+ .os_tag = .freestanding,
+ },
+ });
lib.use_llvm = false;
lib.use_lld = false;
diff --git a/test/link/wasm/bss/build.zig b/test/link/wasm/bss/build.zig
index e234a3f402..1017e70a71 100644
--- a/test/link/wasm/bss/build.zig
+++ b/test/link/wasm/bss/build.zig
@@ -1,15 +1,15 @@
const std = @import("std");
-const Builder = std.build.Builder;
-
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/export-data/build.zig b/test/link/wasm/export-data/build.zig
index 283566dab3..c989153e47 100644
--- a/test/link/wasm/export-data/build.zig
+++ b/test/link/wasm/export-data/build.zig
@@ -1,13 +1,15 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(.ReleaseSafe); // to make the output deterministic in address positions
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .optimize = .ReleaseSafe, // to make the output deterministic in address positions
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
lib.use_lld = false;
lib.export_symbol_names = &.{ "foo", "bar" };
lib.global_base = 0; // put data section at address 0 to make data symbols easier to parse
@@ -23,8 +25,8 @@ pub fn build(b: *Builder) void {
check_lib.checkNext("type i32");
check_lib.checkNext("mutable false");
check_lib.checkNext("i32.const {bar_address}");
- check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 0 } });
- check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 4 } });
+ check_lib.checkComputeCompare("foo_address", .{ .op = .eq, .value = .{ .literal = 4 } });
+ check_lib.checkComputeCompare("bar_address", .{ .op = .eq, .value = .{ .literal = 0 } });
check_lib.checkStart("Section export");
check_lib.checkNext("entries 3");
diff --git a/test/link/wasm/export/build.zig b/test/link/wasm/export/build.zig
index 181e77e296..69c34a320e 100644
--- a/test/link/wasm/export/build.zig
+++ b/test/link/wasm/export/build.zig
@@ -1,24 +1,33 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
- const no_export = b.addSharedLibrary("no-export", "main.zig", .unversioned);
- no_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- no_export.setBuildMode(mode);
+ const no_export = b.addSharedLibrary(.{
+ .name = "no-export",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
no_export.use_llvm = false;
no_export.use_lld = false;
- const dynamic_export = b.addSharedLibrary("dynamic", "main.zig", .unversioned);
- dynamic_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- dynamic_export.setBuildMode(mode);
+ const dynamic_export = b.addSharedLibrary(.{
+ .name = "dynamic",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
dynamic_export.rdynamic = true;
dynamic_export.use_llvm = false;
dynamic_export.use_lld = false;
- const force_export = b.addSharedLibrary("force", "main.zig", .unversioned);
- force_export.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- force_export.setBuildMode(mode);
+ const force_export = b.addSharedLibrary(.{
+ .name = "force",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ });
force_export.export_symbol_names = &.{"foo"};
force_export.use_llvm = false;
force_export.use_lld = false;
diff --git a/test/link/wasm/extern-mangle/build.zig b/test/link/wasm/extern-mangle/build.zig
index ae46117f18..19913e6eca 100644
--- a/test/link/wasm/extern-mangle/build.zig
+++ b/test/link/wasm/extern-mangle/build.zig
@@ -1,15 +1,15 @@
const std = @import("std");
-const Builder = std.build.Builder;
-
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.import_symbols = true; // import `a` and `b`
lib.rdynamic = true; // export `foo`
lib.install();
diff --git a/test/link/wasm/extern/build.zig b/test/link/wasm/extern/build.zig
index 88cce88d98..569d94091a 100644
--- a/test/link/wasm/extern/build.zig
+++ b/test/link/wasm/extern/build.zig
@@ -1,10 +1,12 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
- const exe = b.addExecutable("extern", "main.zig");
- exe.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .wasi });
- exe.setBuildMode(mode);
+pub fn build(b: *std.Build) void {
+ const exe = b.addExecutable(.{
+ .name = "extern",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .wasi },
+ });
exe.addCSourceFile("foo.c", &.{});
exe.use_llvm = false;
exe.use_lld = false;
diff --git a/test/link/wasm/function-table/build.zig b/test/link/wasm/function-table/build.zig
index f7572bd6b1..4c25d0d860 100644
--- a/test/link/wasm/function-table/build.zig
+++ b/test/link/wasm/function-table/build.zig
@@ -1,29 +1,37 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const import_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- import_table.setBuildMode(mode);
- import_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const import_table = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize,
+ });
import_table.use_llvm = false;
import_table.use_lld = false;
import_table.import_table = true;
- const export_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- export_table.setBuildMode(mode);
- export_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const export_table = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize,
+ });
export_table.use_llvm = false;
export_table.use_lld = false;
export_table.export_table = true;
- const regular_table = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- regular_table.setBuildMode(mode);
- regular_table.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const regular_table = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize,
+ });
regular_table.use_llvm = false;
regular_table.use_lld = false;
diff --git a/test/link/wasm/infer-features/build.zig b/test/link/wasm/infer-features/build.zig
index b50caf7264..d6d706a33d 100644
--- a/test/link/wasm/infer-features/build.zig
+++ b/test/link/wasm/infer-features/build.zig
@@ -1,21 +1,32 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
// Wasm Object file which we will use to infer the features from
- const c_obj = b.addObject("c_obj", null);
- c_obj.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- c_obj.target.cpu_model = .{ .explicit = &std.Target.wasm.cpu.bleeding_edge };
+ const c_obj = b.addObject(.{
+ .name = "c_obj",
+ .optimize = optimize,
+ .target = .{
+ .cpu_arch = .wasm32,
+ .cpu_model = .{ .explicit = &std.Target.wasm.cpu.bleeding_edge },
+ .os_tag = .freestanding,
+ },
+ });
c_obj.addCSourceFile("foo.c", &.{});
- c_obj.setBuildMode(mode);
// Wasm library that doesn't have any features specified. This will
// infer its featureset from other linked object files.
- const lib = b.addSharedLibrary("lib", "main.zig", .unversioned);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
- lib.target.cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp };
- lib.setBuildMode(mode);
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = .{
+ .cpu_arch = .wasm32,
+ .cpu_model = .{ .explicit = &std.Target.wasm.cpu.mvp },
+ .os_tag = .freestanding,
+ },
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.addObject(c_obj);
diff --git a/test/link/wasm/producers/build.zig b/test/link/wasm/producers/build.zig
index 7557b4fa41..2589b0dfcf 100644
--- a/test/link/wasm/producers/build.zig
+++ b/test/link/wasm/producers/build.zig
@@ -1,16 +1,16 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
-
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/segments/build.zig b/test/link/wasm/segments/build.zig
index 1b2cdf87ab..76160e905f 100644
--- a/test/link/wasm/segments/build.zig
+++ b/test/link/wasm/segments/build.zig
@@ -1,15 +1,15 @@
const std = @import("std");
-const Builder = std.build.Builder;
-
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/stack_pointer/build.zig b/test/link/wasm/stack_pointer/build.zig
index 5b67c3caa3..95c7643880 100644
--- a/test/link/wasm/stack_pointer/build.zig
+++ b/test/link/wasm/stack_pointer/build.zig
@@ -1,15 +1,15 @@
const std = @import("std");
-const Builder = std.build.Builder;
-
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/link/wasm/type/build.zig b/test/link/wasm/type/build.zig
index fbae6dc741..816b57ccab 100644
--- a/test/link/wasm/type/build.zig
+++ b/test/link/wasm/type/build.zig
@@ -1,15 +1,15 @@
const std = @import("std");
-const Builder = std.build.Builder;
-
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
test_step.dependOn(b.getInstallStep());
- const lib = b.addSharedLibrary("lib", "lib.zig", .unversioned);
- lib.setBuildMode(mode);
- lib.setTarget(.{ .cpu_arch = .wasm32, .os_tag = .freestanding });
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
lib.use_llvm = false;
lib.use_lld = false;
lib.strip = false;
diff --git a/test/src/compare_output.zig b/test/src/compare_output.zig
index 538e4023f0..edd48321c9 100644
--- a/test/src/compare_output.zig
+++ b/test/src/compare_output.zig
@@ -1,19 +1,18 @@
// This is the implementation of the test harness.
// For the actual test cases, see test/compare_output.zig.
const std = @import("std");
-const build = std.build;
const ArrayList = std.ArrayList;
const fmt = std.fmt;
const mem = std.mem;
const fs = std.fs;
-const Mode = std.builtin.Mode;
+const OptimizeMode = std.builtin.OptimizeMode;
pub const CompareOutputContext = struct {
- b: *build.Builder,
- step: *build.Step,
+ b: *std.Build,
+ step: *std.Build.Step,
test_index: usize,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
const Special = enum {
None,
@@ -102,7 +101,11 @@ pub const CompareOutputContext = struct {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
- const exe = b.addExecutable("test", null);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .target = .{},
+ .optimize = .Debug,
+ });
exe.addAssemblyFileSource(write_src.getFileSource(case.sources.items[0].filename).?);
const run = exe.run();
@@ -113,19 +116,23 @@ pub const CompareOutputContext = struct {
self.step.dependOn(&run.step);
},
Special.None => {
- for (self.modes) |mode| {
+ for (self.optimize_modes) |optimize| {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "{s} {s} ({s})", .{
"compare-output",
case.name,
- @tagName(mode),
+ @tagName(optimize),
}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
}
const basename = case.sources.items[0].filename;
- const exe = b.addExecutableSource("test", write_src.getFileSource(basename).?);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(basename).?,
+ .optimize = optimize,
+ .target = .{},
+ });
if (case.link_libc) {
exe.linkSystemLibrary("c");
}
@@ -139,13 +146,20 @@ pub const CompareOutputContext = struct {
}
},
Special.RuntimeSafety => {
+ // TODO iterate over self.optimize_modes and test this in both
+ // Debug and ReleaseSafe modes.
const annotated_case_name = fmt.allocPrint(self.b.allocator, "safety {s}", .{case.name}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
const basename = case.sources.items[0].filename;
- const exe = b.addExecutableSource("test", write_src.getFileSource(basename).?);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(basename).?,
+ .target = .{},
+ .optimize = .Debug,
+ });
if (case.link_libc) {
exe.linkSystemLibrary("c");
}
diff --git a/test/src/run_translated_c.zig b/test/src/run_translated_c.zig
index 0204272f97..2103172ed6 100644
--- a/test/src/run_translated_c.zig
+++ b/test/src/run_translated_c.zig
@@ -1,15 +1,14 @@
// This is the implementation of the test harness for running translated
// C code. For the actual test cases, see test/run_translated_c.zig.
const std = @import("std");
-const build = std.build;
const ArrayList = std.ArrayList;
const fmt = std.fmt;
const mem = std.mem;
const fs = std.fs;
pub const RunTranslatedCContext = struct {
- b: *build.Builder,
- step: *build.Step,
+ b: *std.Build,
+ step: *std.Build.Step,
test_index: usize,
test_filter: ?[]const u8,
target: std.zig.CrossTarget,
@@ -85,11 +84,14 @@ pub const RunTranslatedCContext = struct {
for (case.sources.items) |src_file| {
write_src.add(src_file.filename, src_file.source);
}
- const translate_c = b.addTranslateC(write_src.getFileSource(case.sources.items[0].filename).?);
+ const translate_c = b.addTranslateC(.{
+ .source_file = write_src.getFileSource(case.sources.items[0].filename).?,
+ .target = .{},
+ .optimize = .Debug,
+ });
translate_c.step.name = b.fmt("{s} translate-c", .{annotated_case_name});
- const exe = translate_c.addExecutable();
- exe.setTarget(self.target);
+ const exe = translate_c.addExecutable(.{});
exe.step.name = b.fmt("{s} build-exe", .{annotated_case_name});
exe.linkLibC();
const run = exe.run();
diff --git a/test/src/translate_c.zig b/test/src/translate_c.zig
index f0f6f30c57..e275ee57ee 100644
--- a/test/src/translate_c.zig
+++ b/test/src/translate_c.zig
@@ -1,7 +1,6 @@
// This is the implementation of the test harness.
// For the actual test cases, see test/translate_c.zig.
const std = @import("std");
-const build = std.build;
const ArrayList = std.ArrayList;
const fmt = std.fmt;
const mem = std.mem;
@@ -9,8 +8,8 @@ const fs = std.fs;
const CrossTarget = std.zig.CrossTarget;
pub const TranslateCContext = struct {
- b: *build.Builder,
- step: *build.Step,
+ b: *std.Build,
+ step: *std.Build.Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -108,10 +107,13 @@ pub const TranslateCContext = struct {
write_src.add(src_file.filename, src_file.source);
}
- const translate_c = b.addTranslateC(write_src.getFileSource(case.sources.items[0].filename).?);
+ const translate_c = b.addTranslateC(.{
+ .source_file = write_src.getFileSource(case.sources.items[0].filename).?,
+ .target = case.target,
+ .optimize = .Debug,
+ });
translate_c.step.name = annotated_case_name;
- translate_c.setTarget(case.target);
const check_file = translate_c.addCheckFile(case.expected_lines.items);
diff --git a/test/standalone.zig b/test/standalone.zig
index c0cb9ff02b..af972ccb86 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -102,4 +102,5 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/standalone/issue_13030/build.zig", .{ .build_modes = true });
cases.addBuildFile("test/standalone/emit_asm_and_bin/build.zig", .{});
cases.addBuildFile("test/standalone/issue_12588/build.zig", .{});
+ cases.addBuildFile("test/standalone/embed_generated_file/build.zig", .{});
}
diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig
index 64f3c08583..7c32a09bef 100644
--- a/test/standalone/brace_expansion/build.zig
+++ b/test/standalone/brace_expansion/build.zig
@@ -1,8 +1,10 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+pub fn build(b: *std.Build) void {
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
const test_step = b.step("test", "Test it");
test_step.dependOn(&main.step);
diff --git a/test/standalone/c_compiler/build.zig b/test/standalone/c_compiler/build.zig
index 240d535182..dce999d4a2 100644
--- a/test/standalone/c_compiler/build.zig
+++ b/test/standalone/c_compiler/build.zig
@@ -1,9 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
const CrossTarget = std.zig.CrossTarget;
-// TODO integrate this with the std.build executor API
+// TODO integrate this with the std.Build executor API
fn isRunnableTarget(t: CrossTarget) bool {
if (t.isNative()) return true;
@@ -11,24 +10,28 @@ fn isRunnableTarget(t: CrossTarget) bool {
t.getCpuArch() == builtin.cpu.arch);
}
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
const test_step = b.step("test", "Test the program");
- const exe_c = b.addExecutable("test_c", null);
+ const exe_c = b.addExecutable(.{
+ .name = "test_c",
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&exe_c.step);
exe_c.addCSourceFile("test.c", &[0][]const u8{});
- exe_c.setBuildMode(mode);
- exe_c.setTarget(target);
exe_c.linkLibC();
- const exe_cpp = b.addExecutable("test_cpp", null);
+ const exe_cpp = b.addExecutable(.{
+ .name = "test_cpp",
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&exe_cpp.step);
exe_cpp.addCSourceFile("test.cpp", &[0][]const u8{});
- exe_cpp.setBuildMode(mode);
- exe_cpp.setTarget(target);
exe_cpp.linkLibCpp();
switch (target.getOsTag()) {
diff --git a/test/standalone/embed_generated_file/bootloader.zig b/test/standalone/embed_generated_file/bootloader.zig
new file mode 100644
index 0000000000..dc79a847f4
--- /dev/null
+++ b/test/standalone/embed_generated_file/bootloader.zig
@@ -0,0 +1 @@
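+// Minimal freestanding entry point; exporting _start lets the bootloader link into a standalone ELF executable.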
+pub export fn _start() void {}
diff --git a/test/standalone/embed_generated_file/build.zig b/test/standalone/embed_generated_file/build.zig
new file mode 100644
index 0000000000..3b17ff0b8f
--- /dev/null
+++ b/test/standalone/embed_generated_file/build.zig
@@ -0,0 +1,28 @@
+const std = @import("std");
+
+pub fn build(b: *std.Build) void {
+ const target = b.standardTargetOptions(.{});
+ const optimize = b.standardOptimizeOption(.{});
+
+ const bootloader = b.addExecutable(.{
+ .name = "bootloader",
+ .root_source_file = .{ .path = "bootloader.zig" },
+ .target = .{
+ .cpu_arch = .x86,
+ .os_tag = .freestanding,
+ },
+ .optimize = .ReleaseSmall,
+ });
+
+ const exe = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
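+ // Register the compiled bootloader under the name "bootloader.elf"; main.zig embeds it with @embedFile.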
+ exe.addAnonymousModule("bootloader.elf", .{
+ .source_file = bootloader.getOutputSource(),
+ });
+
+ const test_step = b.step("test", "Test the program");
+ test_step.dependOn(&exe.step);
+}
diff --git a/test/standalone/embed_generated_file/main.zig b/test/standalone/embed_generated_file/main.zig
new file mode 100644
index 0000000000..31f6e8c628
--- /dev/null
+++ b/test/standalone/embed_generated_file/main.zig
@@ -0,0 +1,8 @@
+const std = @import("std");
+const blah = @embedFile("bootloader.elf");
+
+test {
+ comptime {
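+ // ELF files start with the magic bytes 0x7f 'E' 'L' 'F'; the slice checks the three bytes after the 0x7f.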
+ std.debug.assert(std.mem.eql(u8, blah[1..][0..3], "ELF"));
+ }
+}
diff --git a/test/standalone/emit_asm_and_bin/build.zig b/test/standalone/emit_asm_and_bin/build.zig
index 43b7bb791d..5345f0f538 100644
--- a/test/standalone/emit_asm_and_bin/build.zig
+++ b/test/standalone/emit_asm_and_bin/build.zig
@@ -1,8 +1,10 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+pub fn build(b: *std.Build) void {
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
main.emit_asm = .{ .emit_to = b.pathFromRoot("main.s") };
main.emit_bin = .{ .emit_to = b.pathFromRoot("main") };
diff --git a/test/standalone/empty_env/build.zig b/test/standalone/empty_env/build.zig
index 2a184dcd2e..c4b4846141 100644
--- a/test/standalone/empty_env/build.zig
+++ b/test/standalone/empty_env/build.zig
@@ -1,8 +1,11 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const main = b.addExecutable("main", "main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+pub fn build(b: *std.Build) void {
+ const main = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
const run = main.run();
run.clearEnvironment();
diff --git a/test/standalone/global_linkage/build.zig b/test/standalone/global_linkage/build.zig
index e13c0e8873..9f79c80fcf 100644
--- a/test/standalone/global_linkage/build.zig
+++ b/test/standalone/global_linkage/build.zig
@@ -1,16 +1,26 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
- const obj1 = b.addStaticLibrary("obj1", "obj1.zig");
- obj1.setBuildMode(mode);
+ const obj1 = b.addStaticLibrary(.{
+ .name = "obj1",
+ .root_source_file = .{ .path = "obj1.zig" },
+ .optimize = optimize,
+ .target = .{},
+ });
- const obj2 = b.addStaticLibrary("obj2", "obj2.zig");
- obj2.setBuildMode(mode);
+ const obj2 = b.addStaticLibrary(.{
+ .name = "obj2",
+ .root_source_file = .{ .path = "obj2.zig" },
+ .optimize = optimize,
+ .target = .{},
+ });
- const main = b.addTest("main.zig");
- main.setBuildMode(mode);
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
main.linkLibrary(obj1);
main.linkLibrary(obj2);
diff --git a/test/standalone/install_raw_hex/build.zig b/test/standalone/install_raw_hex/build.zig
index 0038c4c298..b0f938a344 100644
--- a/test/standalone/install_raw_hex/build.zig
+++ b/test/standalone/install_raw_hex/build.zig
@@ -1,8 +1,8 @@
const builtin = @import("builtin");
const std = @import("std");
-const CheckFileStep = std.build.CheckFileStep;
+const CheckFileStep = std.Build.CheckFileStep;
-pub fn build(b: *std.build.Builder) void {
+pub fn build(b: *std.Build) void {
const target = .{
.cpu_arch = .thumb,
.cpu_model = .{ .explicit = &std.Target.arm.cpu.cortex_m4 },
@@ -10,11 +10,14 @@ pub fn build(b: *std.build.Builder) void {
.abi = .gnueabihf,
};
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const elf = b.addExecutable("zig-nrf52-blink.elf", "main.zig");
- elf.setTarget(target);
- elf.setBuildMode(mode);
+ const elf = b.addExecutable(.{
+ .name = "zig-nrf52-blink.elf",
+ .root_source_file = .{ .path = "main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
const test_step = b.step("test", "Test the program");
b.default_step.dependOn(test_step);
diff --git a/test/standalone/issue_11595/build.zig b/test/standalone/issue_11595/build.zig
index d636f63ebc..c335fb73da 100644
--- a/test/standalone/issue_11595/build.zig
+++ b/test/standalone/issue_11595/build.zig
@@ -1,9 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
const CrossTarget = std.zig.CrossTarget;
-// TODO integrate this with the std.build executor API
+// TODO integrate this with the std.Build executor API
fn isRunnableTarget(t: CrossTarget) bool {
if (t.isNative()) return true;
@@ -11,12 +10,16 @@ fn isRunnableTarget(t: CrossTarget) bool {
t.getCpuArch() == builtin.cpu.arch);
}
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const exe = b.addExecutable("zigtest", "main.zig");
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "zigtest",
+ .root_source_file = .{ .path = "main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
exe.install();
const c_sources = [_][]const u8{
@@ -39,7 +42,6 @@ pub fn build(b: *Builder) void {
exe.defineCMacro("QUX", "\"Q\" \"UX\"");
exe.defineCMacro("QUUX", "\"QU\\\"UX\"");
- exe.setTarget(target);
b.default_step.dependOn(&exe.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_12588/build.zig b/test/standalone/issue_12588/build.zig
index 02fa5e1680..9f14c53e38 100644
--- a/test/standalone/issue_12588/build.zig
+++ b/test/standalone/issue_12588/build.zig
@@ -1,13 +1,15 @@
const std = @import("std");
-const Builder = std.build.Builder;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const obj = b.addObject("main", "main.zig");
- obj.setBuildMode(mode);
- obj.setTarget(target);
+ const obj = b.addObject(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
obj.emit_llvm_ir = .{ .emit_to = b.pathFromRoot("main.ll") };
obj.emit_llvm_bc = .{ .emit_to = b.pathFromRoot("main.bc") };
obj.emit_bin = .no_emit;
diff --git a/test/standalone/issue_12706/build.zig b/test/standalone/issue_12706/build.zig
index d84160a4f4..9d616477a2 100644
--- a/test/standalone/issue_12706/build.zig
+++ b/test/standalone/issue_12706/build.zig
@@ -1,9 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
const CrossTarget = std.zig.CrossTarget;
-// TODO integrate this with the std.build executor API
+// TODO integrate this with the std.Build executor API
fn isRunnableTarget(t: CrossTarget) bool {
if (t.isNative()) return true;
@@ -11,12 +10,16 @@ fn isRunnableTarget(t: CrossTarget) bool {
t.getCpuArch() == builtin.cpu.arch);
}
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const exe = b.addExecutable("main", "main.zig");
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
exe.install();
const c_sources = [_][]const u8{
@@ -26,7 +29,6 @@ pub fn build(b: *Builder) void {
exe.addCSourceFiles(&c_sources, &.{});
exe.linkLibC();
- exe.setTarget(target);
b.default_step.dependOn(&exe.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_13030/build.zig b/test/standalone/issue_13030/build.zig
index 8c05e47cf6..258d9b7db8 100644
--- a/test/standalone/issue_13030/build.zig
+++ b/test/standalone/issue_13030/build.zig
@@ -1,16 +1,17 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
const CrossTarget = std.zig.CrossTarget;
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const obj = b.addObject("main", "main.zig");
- obj.setBuildMode(mode);
-
- obj.setTarget(target);
+ const obj = b.addObject(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
b.default_step.dependOn(&obj.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig
index 733b3729c1..62ac128aab 100644
--- a/test/standalone/issue_339/build.zig
+++ b/test/standalone/issue_339/build.zig
@@ -1,7 +1,12 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const obj = b.addObject("test", "test.zig");
+pub fn build(b: *std.Build) void {
+ const obj = b.addObject(.{
+ .name = "test",
+ .root_source_file = .{ .path = "test.zig" },
+ .target = b.standardTargetOptions(.{}),
+ .optimize = b.standardOptimizeOption(.{}),
+ });
const test_step = b.step("test", "Test the program");
test_step.dependOn(&obj.step);
diff --git a/test/standalone/issue_5825/build.zig b/test/standalone/issue_5825/build.zig
index 8f43ae1358..89272280d4 100644
--- a/test/standalone/issue_5825/build.zig
+++ b/test/standalone/issue_5825/build.zig
@@ -1,22 +1,27 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
+pub fn build(b: *std.Build) void {
const target = .{
.cpu_arch = .x86_64,
.os_tag = .windows,
.abi = .msvc,
};
- const mode = b.standardReleaseOptions();
- const obj = b.addObject("issue_5825", "main.zig");
- obj.setTarget(target);
- obj.setBuildMode(mode);
+ const optimize = b.standardOptimizeOption(.{});
+ const obj = b.addObject(.{
+ .name = "issue_5825",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
- const exe = b.addExecutable("issue_5825", null);
+ const exe = b.addExecutable(.{
+ .name = "issue_5825",
+ .optimize = optimize,
+ .target = target,
+ });
exe.subsystem = .Console;
exe.linkSystemLibrary("kernel32");
exe.linkSystemLibrary("ntdll");
- exe.setTarget(target);
- exe.setBuildMode(mode);
exe.addObject(obj);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/issue_7030/build.zig b/test/standalone/issue_7030/build.zig
index ab3677370a..dc535318cc 100644
--- a/test/standalone/issue_7030/build.zig
+++ b/test/standalone/issue_7030/build.zig
@@ -1,10 +1,13 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const exe = b.addExecutable("issue_7030", "main.zig");
- exe.setTarget(.{
- .cpu_arch = .wasm32,
- .os_tag = .freestanding,
+pub fn build(b: *std.Build) void {
+ const exe = b.addExecutable(.{
+ .name = "issue_7030",
+ .root_source_file = .{ .path = "main.zig" },
+ .target = .{
+ .cpu_arch = .wasm32,
+ .os_tag = .freestanding,
+ },
});
exe.install();
b.default_step.dependOn(&exe.step);
diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig
index ece74f0e98..3089a28fd0 100644
--- a/test/standalone/issue_794/build.zig
+++ b/test/standalone/issue_794/build.zig
@@ -1,7 +1,9 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const test_artifact = b.addTest("main.zig");
+pub fn build(b: *std.Build) void {
+ const test_artifact = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ });
test_artifact.addIncludePath("a_directory");
b.default_step.dependOn(&test_artifact.step);
diff --git a/test/standalone/issue_8550/build.zig b/test/standalone/issue_8550/build.zig
index 03e8d04bfb..c3303d55db 100644
--- a/test/standalone/issue_8550/build.zig
+++ b/test/standalone/issue_8550/build.zig
@@ -1,6 +1,6 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) !void {
+pub fn build(b: *std.Build) !void {
const target = std.zig.CrossTarget{
.os_tag = .freestanding,
.cpu_arch = .arm,
@@ -8,12 +8,15 @@ pub fn build(b: *std.build.Builder) !void {
.explicit = &std.Target.arm.cpu.arm1176jz_s,
},
};
- const mode = b.standardReleaseOptions();
- const kernel = b.addExecutable("kernel", "./main.zig");
+ const optimize = b.standardOptimizeOption(.{});
+ const kernel = b.addExecutable(.{
+ .name = "kernel",
+ .root_source_file = .{ .path = "./main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
kernel.addObjectFile("./boot.S");
kernel.setLinkerScriptPath(.{ .path = "./linker.ld" });
- kernel.setBuildMode(mode);
- kernel.setTarget(target);
kernel.install();
const test_step = b.step("test", "Test it");
diff --git a/test/standalone/issue_9812/build.zig b/test/standalone/issue_9812/build.zig
index 677c589a84..4ca55ce999 100644
--- a/test/standalone/issue_9812/build.zig
+++ b/test/standalone/issue_9812/build.zig
@@ -1,9 +1,11 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) !void {
- const mode = b.standardReleaseOptions();
- const zip_add = b.addTest("main.zig");
- zip_add.setBuildMode(mode);
+pub fn build(b: *std.Build) !void {
+ const optimize = b.standardOptimizeOption(.{});
+ const zip_add = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
zip_add.addCSourceFile("vendor/kuba-zip/zip.c", &[_][]const u8{
"-std=c99",
"-fno-sanitize=undefined",
diff --git a/test/standalone/load_dynamic_library/build.zig b/test/standalone/load_dynamic_library/build.zig
index 109c742c6f..44fc37893c 100644
--- a/test/standalone/load_dynamic_library/build.zig
+++ b/test/standalone/load_dynamic_library/build.zig
@@ -1,13 +1,23 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const opts = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const target = b.standardTargetOptions(.{});
+ const optimize = b.standardOptimizeOption(.{});
- const lib = b.addSharedLibrary("add", "add.zig", b.version(1, 0, 0));
- lib.setBuildMode(opts);
+ const lib = b.addSharedLibrary(.{
+ .name = "add",
+ .root_source_file = .{ .path = "add.zig" },
+ .version = .{ .major = 1, .minor = 0 },
+ .optimize = optimize,
+ .target = target,
+ });
- const main = b.addExecutable("main", "main.zig");
- main.setBuildMode(opts);
+ const main = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
const run = main.run();
run.addArtifactArg(lib);
diff --git a/test/standalone/main_pkg_path/build.zig b/test/standalone/main_pkg_path/build.zig
index c4ac18f967..f9919d5ab5 100644
--- a/test/standalone/main_pkg_path/build.zig
+++ b/test/standalone/main_pkg_path/build.zig
@@ -1,7 +1,9 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const test_exe = b.addTest("a/test.zig");
+pub fn build(b: *std.Build) void {
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "a/test.zig" },
+ });
test_exe.setMainPkgPath(".");
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/mix_c_files/build.zig b/test/standalone/mix_c_files/build.zig
index 68486ea18d..f2dfb2093f 100644
--- a/test/standalone/mix_c_files/build.zig
+++ b/test/standalone/mix_c_files/build.zig
@@ -1,9 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
-const Builder = std.build.Builder;
const CrossTarget = std.zig.CrossTarget;
-// TODO integrate this with the std.build executor API
+// TODO integrate this with the std.Build executor API
fn isRunnableTarget(t: CrossTarget) bool {
if (t.isNative()) return true;
@@ -11,15 +10,18 @@ fn isRunnableTarget(t: CrossTarget) bool {
t.getCpuArch() == builtin.cpu.arch);
}
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const exe = b.addExecutable("test", "main.zig");
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ .target = target,
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c11"});
- exe.setBuildMode(mode);
exe.linkLibC();
- exe.setTarget(target);
b.default_step.dependOn(&exe.step);
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/mix_o_files/build.zig b/test/standalone/mix_o_files/build.zig
index d498e2e20a..2708343aa5 100644
--- a/test/standalone/mix_o_files/build.zig
+++ b/test/standalone/mix_o_files/build.zig
@@ -1,9 +1,19 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const obj = b.addObject("base64", "base64.zig");
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
- const exe = b.addExecutable("test", null);
+ const obj = b.addObject(.{
+ .name = "base64",
+ .root_source_file = .{ .path = "base64.zig" },
+ .optimize = optimize,
+ .target = .{},
+ });
+
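+ // This executable has no Zig root source file; it is built from test.c and links in the Zig base64 object.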
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .optimize = optimize,
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.addObject(obj);
exe.linkSystemLibrary("c");
diff --git a/test/standalone/options/build.zig b/test/standalone/options/build.zig
index 087aceff01..3f1e823359 100644
--- a/test/standalone/options/build.zig
+++ b/test/standalone/options/build.zig
@@ -1,12 +1,14 @@
const std = @import("std");
-pub fn build(b: *std.build.Builder) void {
+pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
- const mode = b.standardReleaseOptions();
+ const optimize = b.standardOptimizeOption(.{});
- const main = b.addTest("src/main.zig");
- main.setTarget(target);
- main.setBuildMode(mode);
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "src/main.zig" },
+ .target = target,
+ .optimize = optimize,
+ });
const options = b.addOptions();
main.addOptions("build_options", options);
diff --git a/test/standalone/pie/build.zig b/test/standalone/pie/build.zig
index d008fd31c9..d51ea27328 100644
--- a/test/standalone/pie/build.zig
+++ b/test/standalone/pie/build.zig
@@ -1,8 +1,10 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+pub fn build(b: *std.Build) void {
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
main.pie = true;
const test_step = b.step("test", "Test the program");
diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig
index 7529d106f9..5ea6c90af7 100644
--- a/test/standalone/pkg_import/build.zig
+++ b/test/standalone/pkg_import/build.zig
@@ -1,13 +1,14 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const exe = b.addExecutable("test", "test.zig");
- exe.addPackagePath("my_pkg", "pkg.zig");
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
- // This is duplicated to test that you are allowed to call
- // b.standardReleaseOptions() twice.
- exe.setBuildMode(b.standardReleaseOptions());
- exe.setBuildMode(b.standardReleaseOptions());
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = "test.zig" },
+ .optimize = optimize,
+ });
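+ // pkg.zig is exposed as the module "my_pkg", which test.zig pulls in with @import("my_pkg").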
+ exe.addAnonymousModule("my_pkg", .{ .source_file = .{ .path = "pkg.zig" } });
const run = exe.run();
diff --git a/test/standalone/shared_library/build.zig b/test/standalone/shared_library/build.zig
index 18188311c7..91f7c8a06a 100644
--- a/test/standalone/shared_library/build.zig
+++ b/test/standalone/shared_library/build.zig
@@ -1,12 +1,21 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
const target = b.standardTargetOptions(.{});
- const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
- lib.setTarget(target);
+ const lib = b.addSharedLibrary(.{
+ .name = "mathtest",
+ .root_source_file = .{ .path = "mathtest.zig" },
+ .version = .{ .major = 1, .minor = 0 },
+ .target = target,
+ .optimize = optimize,
+ });
- const exe = b.addExecutable("test", null);
- exe.setTarget(target);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .target = target,
+ .optimize = optimize,
+ });
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
exe.linkLibrary(lib);
exe.linkSystemLibrary("c");
diff --git a/test/standalone/static_c_lib/build.zig b/test/standalone/static_c_lib/build.zig
index c64ae48dba..9937888843 100644
--- a/test/standalone/static_c_lib/build.zig
+++ b/test/standalone/static_c_lib/build.zig
@@ -1,15 +1,20 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
- const foo = b.addStaticLibrary("foo", null);
+ const foo = b.addStaticLibrary(.{
+ .name = "foo",
+ .optimize = optimize,
+ .target = .{},
+ });
foo.addCSourceFile("foo.c", &[_][]const u8{});
- foo.setBuildMode(mode);
foo.addIncludePath(".");
- const test_exe = b.addTest("foo.zig");
- test_exe.setBuildMode(mode);
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "foo.zig" },
+ .optimize = optimize,
+ });
test_exe.linkLibrary(foo);
test_exe.addIncludePath(".");
diff --git a/test/standalone/test_runner_path/build.zig b/test/standalone/test_runner_path/build.zig
index 738cac9783..f073c55d4a 100644
--- a/test/standalone/test_runner_path/build.zig
+++ b/test/standalone/test_runner_path/build.zig
@@ -1,7 +1,10 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const test_exe = b.addTestExe("test", "test.zig");
+pub fn build(b: *std.Build) void {
+ const test_exe = b.addTest(.{
+ .root_source_file = .{ .path = "test.zig" },
+ .kind = .test_exe,
+ });
test_exe.test_runner = "test_runner.zig";
const test_run = test_exe.run();
diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig
index da4e8bef4b..89e07efb22 100644
--- a/test/standalone/use_alias/build.zig
+++ b/test/standalone/use_alias/build.zig
@@ -1,8 +1,10 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const main = b.addTest("main.zig");
- main.setBuildMode(b.standardReleaseOptions());
+pub fn build(b: *std.Build) void {
+ const main = b.addTest(.{
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = b.standardOptimizeOption(.{}),
+ });
main.addIncludePath(".");
const test_step = b.step("test", "Test it");
diff --git a/test/standalone/windows_spawn/build.zig b/test/standalone/windows_spawn/build.zig
index 10a1132d3a..3ebde5a50c 100644
--- a/test/standalone/windows_spawn/build.zig
+++ b/test/standalone/windows_spawn/build.zig
@@ -1,13 +1,20 @@
-const Builder = @import("std").build.Builder;
+const std = @import("std");
-pub fn build(b: *Builder) void {
- const mode = b.standardReleaseOptions();
+pub fn build(b: *std.Build) void {
+ const optimize = b.standardOptimizeOption(.{});
- const hello = b.addExecutable("hello", "hello.zig");
- hello.setBuildMode(mode);
+ const hello = b.addExecutable(.{
+ .name = "hello",
+ .root_source_file = .{ .path = "hello.zig" },
+ .optimize = optimize,
+ });
+
+ const main = b.addExecutable(.{
+ .name = "main",
+ .root_source_file = .{ .path = "main.zig" },
+ .optimize = optimize,
+ });
- const main = b.addExecutable("main", "main.zig");
- main.setBuildMode(mode);
const run = main.run();
run.addArtifactArg(hello);
diff --git a/test/tests.zig b/test/tests.zig
index 8e972b9ba6..94030ce851 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1,17 +1,17 @@
const std = @import("std");
const builtin = @import("builtin");
const debug = std.debug;
-const build = std.build;
const CrossTarget = std.zig.CrossTarget;
const io = std.io;
const fs = std.fs;
const mem = std.mem;
const fmt = std.fmt;
const ArrayList = std.ArrayList;
-const Mode = std.builtin.Mode;
-const LibExeObjStep = build.LibExeObjStep;
+const OptimizeMode = std.builtin.OptimizeMode;
+const CompileStep = std.Build.CompileStep;
const Allocator = mem.Allocator;
-const ExecError = build.Builder.ExecError;
+const ExecError = std.Build.ExecError;
+const Step = std.Build.Step;
// Cases
const compare_output = @import("compare_output.zig");
@@ -30,7 +30,7 @@ pub const CompareOutputContext = @import("src/compare_output.zig").CompareOutput
const TestTarget = struct {
target: CrossTarget = @as(CrossTarget, .{}),
- mode: std.builtin.Mode = .Debug,
+ optimize_mode: std.builtin.OptimizeMode = .Debug,
link_libc: bool = false,
single_threaded: bool = false,
disable_native: bool = false,
@@ -423,38 +423,38 @@ const test_targets = blk: {
// Do the release tests last because they take a long time
.{
- .mode = .ReleaseFast,
+ .optimize_mode = .ReleaseFast,
},
.{
.link_libc = true,
- .mode = .ReleaseFast,
+ .optimize_mode = .ReleaseFast,
},
.{
- .mode = .ReleaseFast,
+ .optimize_mode = .ReleaseFast,
.single_threaded = true,
},
.{
- .mode = .ReleaseSafe,
+ .optimize_mode = .ReleaseSafe,
},
.{
.link_libc = true,
- .mode = .ReleaseSafe,
+ .optimize_mode = .ReleaseSafe,
},
.{
- .mode = .ReleaseSafe,
+ .optimize_mode = .ReleaseSafe,
.single_threaded = true,
},
.{
- .mode = .ReleaseSmall,
+ .optimize_mode = .ReleaseSmall,
},
.{
.link_libc = true,
- .mode = .ReleaseSmall,
+ .optimize_mode = .ReleaseSmall,
},
.{
- .mode = .ReleaseSmall,
+ .optimize_mode = .ReleaseSmall,
.single_threaded = true,
},
};
@@ -462,14 +462,14 @@ const test_targets = blk: {
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
-pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addCompareOutputTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
.step = b.step("test-compare-output", "Run the compare output tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
};
compare_output.addCases(cases);
@@ -477,14 +477,14 @@ pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8, modes:
return cases.step;
}
-pub fn addStackTraceTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addStackTraceTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
const cases = b.allocator.create(StackTracesContext) catch unreachable;
cases.* = StackTracesContext{
.b = b,
.step = b.step("test-stack-traces", "Run the stack trace tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
};
stack_traces.addCases(cases);
@@ -493,9 +493,9 @@ pub fn addStackTraceTests(b: *build.Builder, test_filter: ?[]const u8, modes: []
}
pub fn addStandaloneTests(
- b: *build.Builder,
+ b: *std.Build,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
skip_non_native: bool,
enable_macos_sdk: bool,
target: std.zig.CrossTarget,
@@ -506,14 +506,14 @@ pub fn addStandaloneTests(
enable_wasmtime: bool,
enable_wine: bool,
enable_symlinks_windows: bool,
-) *build.Step {
+) *Step {
const cases = b.allocator.create(StandaloneContext) catch unreachable;
cases.* = StandaloneContext{
.b = b,
.step = b.step("test-standalone", "Run the standalone tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
.skip_non_native = skip_non_native,
.enable_macos_sdk = enable_macos_sdk,
.target = target,
@@ -532,20 +532,20 @@ pub fn addStandaloneTests(
}
pub fn addLinkTests(
- b: *build.Builder,
+ b: *std.Build,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
enable_macos_sdk: bool,
omit_stage2: bool,
enable_symlinks_windows: bool,
-) *build.Step {
+) *Step {
const cases = b.allocator.create(StandaloneContext) catch unreachable;
cases.* = StandaloneContext{
.b = b,
.step = b.step("test-link", "Run the linker tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
.skip_non_native = true,
.enable_macos_sdk = enable_macos_sdk,
.target = .{},
@@ -556,12 +556,17 @@ pub fn addLinkTests(
return cases.step;
}
-pub fn addCliTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addCliTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
_ = test_filter;
- _ = modes;
+ _ = optimize_modes;
const step = b.step("test-cli", "Test the command line interface");
- const exe = b.addExecutable("test-cli", "test/cli.zig");
+ const exe = b.addExecutable(.{
+ .name = "test-cli",
+ .root_source_file = .{ .path = "test/cli.zig" },
+ .target = .{},
+ .optimize = .Debug,
+ });
const run_cmd = exe.run();
run_cmd.addArgs(&[_][]const u8{
fs.realpathAlloc(b.allocator, b.zig_exe) catch unreachable,
@@ -572,14 +577,14 @@ pub fn addCliTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const M
return step;
}
-pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+pub fn addAssembleAndLinkTests(b: *std.Build, test_filter: ?[]const u8, optimize_modes: []const OptimizeMode) *Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
.b = b,
.step = b.step("test-asm-link", "Run the assemble and link tests"),
.test_index = 0,
.test_filter = test_filter,
- .modes = modes,
+ .optimize_modes = optimize_modes,
};
assemble_and_link.addCases(cases);
@@ -587,7 +592,7 @@ pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8, mode
return cases.step;
}
-pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
+pub fn addTranslateCTests(b: *std.Build, test_filter: ?[]const u8) *Step {
const cases = b.allocator.create(TranslateCContext) catch unreachable;
cases.* = TranslateCContext{
.b = b,
@@ -602,10 +607,10 @@ pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.St
}
pub fn addRunTranslatedCTests(
- b: *build.Builder,
+ b: *std.Build,
test_filter: ?[]const u8,
target: std.zig.CrossTarget,
-) *build.Step {
+) *Step {
const cases = b.allocator.create(RunTranslatedCContext) catch unreachable;
cases.* = .{
.b = b,
@@ -620,7 +625,7 @@ pub fn addRunTranslatedCTests(
return cases.step;
}
-pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
+pub fn addGenHTests(b: *std.Build, test_filter: ?[]const u8) *Step {
const cases = b.allocator.create(GenHContext) catch unreachable;
cases.* = GenHContext{
.b = b,
@@ -635,18 +640,18 @@ pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
}
pub fn addPkgTests(
- b: *build.Builder,
+ b: *std.Build,
test_filter: ?[]const u8,
root_src: []const u8,
name: []const u8,
desc: []const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
skip_single_threaded: bool,
skip_non_native: bool,
skip_libc: bool,
skip_stage1: bool,
skip_stage2: bool,
-) *build.Step {
+) *Step {
const step = b.step(b.fmt("test-{s}", .{name}), desc);
for (test_targets) |test_target| {
@@ -677,8 +682,8 @@ pub fn addPkgTests(
else => if (skip_stage2) continue,
};
- const want_this_mode = for (modes) |m| {
- if (m == test_target.mode) break true;
+ const want_this_mode = for (optimize_modes) |m| {
+ if (m == test_target.optimize_mode) break true;
} else false;
if (!want_this_mode) continue;
@@ -691,21 +696,23 @@ pub fn addPkgTests(
const triple_prefix = test_target.target.zigTriple(b.allocator) catch unreachable;
- const these_tests = b.addTest(root_src);
+ const these_tests = b.addTest(.{
+ .root_source_file = .{ .path = root_src },
+ .optimize = test_target.optimize_mode,
+ .target = test_target.target,
+ });
const single_threaded_txt = if (test_target.single_threaded) "single" else "multi";
const backend_txt = if (test_target.backend) |backend| @tagName(backend) else "default";
these_tests.setNamePrefix(b.fmt("{s}-{s}-{s}-{s}-{s}-{s} ", .{
name,
triple_prefix,
- @tagName(test_target.mode),
+ @tagName(test_target.optimize_mode),
libc_prefix,
single_threaded_txt,
backend_txt,
}));
these_tests.single_threaded = test_target.single_threaded;
these_tests.setFilter(test_filter);
- these_tests.setBuildMode(test_target.mode);
- these_tests.setTarget(test_target.target);
if (test_target.link_libc) {
these_tests.linkSystemLibrary("c");
}
@@ -735,13 +742,13 @@ pub fn addPkgTests(
}
pub const StackTracesContext = struct {
- b: *build.Builder,
- step: *build.Step,
+ b: *std.Build,
+ step: *Step,
test_index: usize,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
- const Expect = [@typeInfo(Mode).Enum.fields.len][]const u8;
+ const Expect = [@typeInfo(OptimizeMode).Enum.fields.len][]const u8;
pub fn addCase(self: *StackTracesContext, config: anytype) void {
if (@hasField(@TypeOf(config), "exclude")) {
@@ -755,26 +762,26 @@ pub const StackTracesContext = struct {
const exclude_os: []const std.Target.Os.Tag = &config.exclude_os;
for (exclude_os) |os| if (os == builtin.os.tag) return;
}
- for (self.modes) |mode| {
- switch (mode) {
+ for (self.optimize_modes) |optimize_mode| {
+ switch (optimize_mode) {
.Debug => {
if (@hasField(@TypeOf(config), "Debug")) {
- self.addExpect(config.name, config.source, mode, config.Debug);
+ self.addExpect(config.name, config.source, optimize_mode, config.Debug);
}
},
.ReleaseSafe => {
if (@hasField(@TypeOf(config), "ReleaseSafe")) {
- self.addExpect(config.name, config.source, mode, config.ReleaseSafe);
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSafe);
}
},
.ReleaseFast => {
if (@hasField(@TypeOf(config), "ReleaseFast")) {
- self.addExpect(config.name, config.source, mode, config.ReleaseFast);
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseFast);
}
},
.ReleaseSmall => {
if (@hasField(@TypeOf(config), "ReleaseSmall")) {
- self.addExpect(config.name, config.source, mode, config.ReleaseSmall);
+ self.addExpect(config.name, config.source, optimize_mode, config.ReleaseSmall);
}
},
}
@@ -785,7 +792,7 @@ pub const StackTracesContext = struct {
self: *StackTracesContext,
name: []const u8,
source: []const u8,
- mode: Mode,
+ optimize_mode: OptimizeMode,
mode_config: anytype,
) void {
if (@hasField(@TypeOf(mode_config), "exclude")) {
@@ -803,7 +810,7 @@ pub const StackTracesContext = struct {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "{s} {s} ({s})", .{
"stack-trace",
name,
- @tagName(mode),
+ @tagName(optimize_mode),
}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
@@ -812,14 +819,18 @@ pub const StackTracesContext = struct {
const b = self.b;
const src_basename = "source.zig";
const write_src = b.addWriteFile(src_basename, source);
- const exe = b.addExecutableSource("test", write_src.getFileSource(src_basename).?);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = write_src.getFileSource(src_basename).?,
+ .optimize = optimize_mode,
+ .target = .{},
+ });
const run_and_compare = RunAndCompareStep.create(
self,
exe,
annotated_case_name,
- mode,
+ optimize_mode,
mode_config.expect,
);
@@ -829,29 +840,29 @@ pub const StackTracesContext = struct {
const RunAndCompareStep = struct {
pub const base_id = .custom;
- step: build.Step,
+ step: Step,
context: *StackTracesContext,
- exe: *LibExeObjStep,
+ exe: *CompileStep,
name: []const u8,
- mode: Mode,
+ optimize_mode: OptimizeMode,
expect_output: []const u8,
test_index: usize,
pub fn create(
context: *StackTracesContext,
- exe: *LibExeObjStep,
+ exe: *CompileStep,
name: []const u8,
- mode: Mode,
+ optimize_mode: OptimizeMode,
expect_output: []const u8,
) *RunAndCompareStep {
const allocator = context.b.allocator;
const ptr = allocator.create(RunAndCompareStep) catch unreachable;
ptr.* = RunAndCompareStep{
- .step = build.Step.init(.custom, "StackTraceCompareOutputStep", allocator, make),
+ .step = Step.init(.custom, "StackTraceCompareOutputStep", allocator, make),
.context = context,
.exe = exe,
.name = name,
- .mode = mode,
+ .optimize_mode = optimize_mode,
.expect_output = expect_output,
.test_index = context.test_index,
};
@@ -860,7 +871,7 @@ pub const StackTracesContext = struct {
return ptr;
}
- fn make(step: *build.Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(RunAndCompareStep, "step", step);
const b = self.context.b;
@@ -932,7 +943,7 @@ pub const StackTracesContext = struct {
// process result
// - keep only basename of source file path
// - replace address with symbolic string
- // - replace function name with symbolic string when mode != .Debug
+ // - replace function name with symbolic string when optimize_mode != .Debug
// - skip empty lines
const got: []const u8 = got_result: {
var buf = ArrayList(u8).init(b.allocator);
@@ -968,7 +979,7 @@ pub const StackTracesContext = struct {
// emit substituted line
try buf.appendSlice(line[pos + 1 .. marks[2] + delims[2].len]);
try buf.appendSlice(" [address]");
- if (self.mode == .Debug) {
+ if (self.optimize_mode == .Debug) {
// On certain platforms (windows) or possibly depending on how we choose to link main
// the object file extension may be present so we simply strip any extension.
if (mem.indexOfScalar(u8, line[marks[4]..marks[5]], '.')) |idot| {
@@ -1003,11 +1014,11 @@ pub const StackTracesContext = struct {
};
pub const StandaloneContext = struct {
- b: *build.Builder,
- step: *build.Step,
+ b: *std.Build,
+ step: *Step,
test_index: usize,
test_filter: ?[]const u8,
- modes: []const Mode,
+ optimize_modes: []const OptimizeMode,
skip_non_native: bool,
enable_macos_sdk: bool,
target: std.zig.CrossTarget,
@@ -1087,13 +1098,13 @@ pub const StandaloneContext = struct {
}
}
- const modes = if (features.build_modes) self.modes else &[1]Mode{.Debug};
- for (modes) |mode| {
- const arg = switch (mode) {
+ const optimize_modes = if (features.build_modes) self.optimize_modes else &[1]OptimizeMode{.Debug};
+ for (optimize_modes) |optimize_mode| {
+ const arg = switch (optimize_mode) {
.Debug => "",
- .ReleaseFast => "-Drelease-fast",
- .ReleaseSafe => "-Drelease-safe",
- .ReleaseSmall => "-Drelease-small",
+ .ReleaseFast => "-Doptimize=ReleaseFast",
+ .ReleaseSafe => "-Doptimize=ReleaseSafe",
+ .ReleaseSmall => "-Doptimize=ReleaseSmall",
};
const zig_args_base_len = zig_args.items.len;
if (arg.len > 0)
@@ -1101,7 +1112,7 @@ pub const StandaloneContext = struct {
defer zig_args.resize(zig_args_base_len) catch unreachable;
const run_cmd = b.addSystemCommand(zig_args.items);
- const log_step = b.addLog("PASS {s} ({s})", .{ annotated_case_name, @tagName(mode) });
+ const log_step = b.addLog("PASS {s} ({s})", .{ annotated_case_name, @tagName(optimize_mode) });
log_step.step.dependOn(&run_cmd.step);
self.step.dependOn(&log_step.step);
@@ -1111,17 +1122,21 @@ pub const StandaloneContext = struct {
pub fn addAllArgs(self: *StandaloneContext, root_src: []const u8, link_libc: bool) void {
const b = self.b;
- for (self.modes) |mode| {
+ for (self.optimize_modes) |optimize| {
const annotated_case_name = fmt.allocPrint(self.b.allocator, "build {s} ({s})", .{
root_src,
- @tagName(mode),
+ @tagName(optimize),
}) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) continue;
}
- const exe = b.addExecutable("test", root_src);
- exe.setBuildMode(mode);
+ const exe = b.addExecutable(.{
+ .name = "test",
+ .root_source_file = .{ .path = root_src },
+ .optimize = optimize,
+ .target = .{},
+ });
if (link_libc) {
exe.linkSystemLibrary("c");
}
@@ -1135,8 +1150,8 @@ pub const StandaloneContext = struct {
};
pub const GenHContext = struct {
- b: *build.Builder,
- step: *build.Step,
+ b: *std.Build,
+ step: *Step,
test_index: usize,
test_filter: ?[]const u8,
@@ -1163,23 +1178,23 @@ pub const GenHContext = struct {
};
const GenHCmpOutputStep = struct {
- step: build.Step,
+ step: Step,
context: *GenHContext,
- obj: *LibExeObjStep,
+ obj: *CompileStep,
name: []const u8,
test_index: usize,
case: *const TestCase,
pub fn create(
context: *GenHContext,
- obj: *LibExeObjStep,
+ obj: *CompileStep,
name: []const u8,
case: *const TestCase,
) *GenHCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
ptr.* = GenHCmpOutputStep{
- .step = build.Step.init(.Custom, "ParseCCmpOutput", allocator, make),
+ .step = Step.init(.Custom, "ParseCCmpOutput", allocator, make),
.context = context,
.obj = obj,
.name = name,
@@ -1191,7 +1206,7 @@ pub const GenHContext = struct {
return ptr;
}
- fn make(step: *build.Step) !void {
+ fn make(step: *Step) !void {
const self = @fieldParentPtr(GenHCmpOutputStep, "step", step);
const b = self.context.b;
@@ -1247,8 +1262,8 @@ pub const GenHContext = struct {
pub fn addCase(self: *GenHContext, case: *const TestCase) void {
const b = self.b;
- const mode = std.builtin.Mode.Debug;
- const annotated_case_name = fmt.allocPrint(self.b.allocator, "gen-h {s} ({s})", .{ case.name, @tagName(mode) }) catch unreachable;
+ const optimize_mode = std.builtin.OptimizeMode.Debug;
+ const annotated_case_name = fmt.allocPrint(self.b.allocator, "gen-h {s} ({s})", .{ case.name, @tagName(optimize_mode) }) catch unreachable;
if (self.test_filter) |filter| {
if (mem.indexOf(u8, annotated_case_name, filter) == null) return;
}
@@ -1259,7 +1274,7 @@ pub const GenHContext = struct {
}
const obj = b.addObjectFromWriteFileStep("test", write_src, case.sources.items[0].filename);
- obj.setBuildMode(mode);
+ obj.setBuildMode(optimize_mode);
const cmp_h = GenHCmpOutputStep.create(self, obj, annotated_case_name, case);
@@ -1333,17 +1348,20 @@ const c_abi_targets = [_]CrossTarget{
},
};
-pub fn addCAbiTests(b: *build.Builder, skip_non_native: bool, skip_release: bool) *build.Step {
+pub fn addCAbiTests(b: *std.Build, skip_non_native: bool, skip_release: bool) *Step {
const step = b.step("test-c-abi", "Run the C ABI tests");
- const modes: [2]Mode = .{ .Debug, .ReleaseFast };
+ const optimize_modes: [2]OptimizeMode = .{ .Debug, .ReleaseFast };
- for (modes[0 .. @as(u8, 1) + @boolToInt(!skip_release)]) |mode| for (c_abi_targets) |c_abi_target| {
+ for (optimize_modes[0 .. @as(u8, 1) + @boolToInt(!skip_release)]) |optimize_mode| for (c_abi_targets) |c_abi_target| {
if (skip_non_native and !c_abi_target.isNative())
continue;
- const test_step = b.addTest("test/c_abi/main.zig");
- test_step.setTarget(c_abi_target);
+ const test_step = b.addTest(.{
+ .root_source_file = .{ .path = "test/c_abi/main.zig" },
+ .optimize = optimize_mode,
+ .target = c_abi_target,
+ });
if (c_abi_target.abi != null and c_abi_target.abi.?.isMusl()) {
// TODO NativeTargetInfo insists on dynamically linking musl
// for some reason?
@@ -1351,7 +1369,6 @@ pub fn addCAbiTests(b: *build.Builder, skip_non_native: bool, skip_release: bool
}
test_step.linkLibC();
test_step.addCSourceFile("test/c_abi/cfuncs.c", &.{"-std=c99"});
- test_step.setBuildMode(mode);
if (c_abi_target.isWindows() and (c_abi_target.getCpuArch() == .x86 or builtin.target.os.tag == .linux)) {
// LTO currently incorrectly strips stdcall name-mangled functions
@@ -1363,7 +1380,7 @@ pub fn addCAbiTests(b: *build.Builder, skip_non_native: bool, skip_release: bool
test_step.setNamePrefix(b.fmt("{s}-{s}-{s} ", .{
"test-c-abi",
triple_prefix,
- @tagName(mode),
+ @tagName(optimize_mode),
}));
step.dependOn(&test_step.step);
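
For reference, the pattern applied throughout test/tests.zig above is uniform: `std.build.Builder` becomes `std.Build`, `std.builtin.Mode` becomes `std.builtin.OptimizeMode`, and artifact constructors now take an options struct up front instead of post-hoc `setBuildMode`/`setTarget` calls. A minimal before/after sketch of that migration (the artifact name and path are illustrative, not taken from this patch):

const std = @import("std");

pub fn build(b: *std.Build) void {
    // Old API, as removed by this patch:
    //     const exe = b.addExecutable("demo", "src/main.zig");
    //     exe.setBuildMode(.ReleaseSafe);
    //     exe.setTarget(.{});
    // New API, as introduced by this patch: one options struct up front.
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = .{},
        .optimize = .ReleaseSafe,
    });
    exe.install();
}
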
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 4ecb6835f5..d2db895a5a 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -3900,4 +3900,20 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub const ZERO = @as(c_int, 0);
\\pub const WORLD = @as(c_int, 0o0000123);
});
+
+ cases.add("Assign expression from bool to int",
+ \\void foo(void) {
+ \\ int a;
+ \\ if (a = 1 > 0) {}
+ \\}
+ , &[_][]const u8{
+ \\pub export fn foo() void {
+ \\ var a: c_int = undefined;
+ \\ if ((blk: {
+ \\ const tmp = @boolToInt(@as(c_int, 1) > @as(c_int, 0));
+ \\ a = tmp;
+ \\ break :blk tmp;
+ \\ }) != 0) {}
+ \\}
+ });
}
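
The new translate-c case above exercises C's assignment-as-expression semantics: assignment is a statement in Zig, so the translator stores the assigned value in a temporary inside a labeled block, breaks out of the block with that value, and then compares it against zero the way C does for any integer condition. A hand-written equivalent of the emitted pattern, as a self-contained sketch (not part of the patch):

const std = @import("std");

test "assignment used as a condition, translate-c style" {
    var a: c_int = undefined;
    // Mirrors the expected output above: assign inside a labeled block,
    // yield the assigned value, then test it against zero.
    if ((blk: {
        const tmp = @boolToInt(@as(c_int, 1) > @as(c_int, 0));
        a = tmp;
        break :blk tmp;
    }) != 0) {
        try std.testing.expectEqual(@as(c_int, 1), a);
    }
}
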
diff --git a/tools/generate_linux_syscalls.zig b/tools/generate_linux_syscalls.zig
index 67f098ac4f..11b18ae3bf 100644
--- a/tools/generate_linux_syscalls.zig
+++ b/tools/generate_linux_syscalls.zig
@@ -167,6 +167,31 @@ pub fn main() !void {
try writer.writeAll("};\n\n");
}
+ {
+ try writer.writeAll(
+ \\pub const Mips64 = enum(usize) {
+ \\ pub const Linux = 5000;
+ \\
+ \\
+ );
+
+ const table = try linux_dir.readFile("arch/mips/kernel/syscalls/syscall_n64.tbl", buf);
+ var lines = mem.tokenize(u8, table, "\n");
+ while (lines.next()) |line| {
+ if (line[0] == '#') continue;
+
+ var fields = mem.tokenize(u8, line, " \t");
+ const number = fields.next() orelse return error.Incomplete;
+ // abi is always n64
+ _ = fields.next() orelse return error.Incomplete;
+ const name = fields.next() orelse return error.Incomplete;
+ const fixed_name = if (stdlib_renames.get(name)) |fixed| fixed else name;
+
+ try writer.print(" {s} = Linux + {s},\n", .{ zig.fmtId(fixed_name), number });
+ }
+
+ try writer.writeAll("};\n\n");
+ }
{
try writer.writeAll("pub const PowerPC = enum(usize) {\n");
diff --git a/tools/update_cpu_features.zig b/tools/update_cpu_features.zig
index 647d2c5a9b..ee92d63227 100644
--- a/tools/update_cpu_features.zig
+++ b/tools/update_cpu_features.zig
@@ -1306,7 +1306,7 @@ fn usageAndExit(file: fs.File, arg0: []const u8, code: u8) noreturn {
\\
\\Updates lib/std/target/<arch>.zig from llvm/lib/Target/<arch>/<arch>.td .
\\
- \\On a less beefy system, or when debugging, compile with --single-threaded.
+ \\On a less beefy system, or when debugging, compile with -fsingle-threaded.
\\
, .{arg0}) catch std.process.exit(1);
std.process.exit(code);