Mirror of https://github.com/ziglang/zig.git (synced 2025-12-06 06:13:07 +00:00)

Commit baa877fd12: Merge branch 'master' into lzma
@@ -513,7 +513,7 @@ set(ZIG_STAGE2_SOURCES
     "${CMAKE_SOURCE_DIR}/lib/std/zig/Ast.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/zig/CrossTarget.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/zig/c_builtins.zig"
-    "${CMAKE_SOURCE_DIR}/lib/std/zig/parse.zig"
+    "${CMAKE_SOURCE_DIR}/lib/std/zig/Parse.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/zig/render.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/zig/string_literal.zig"
     "${CMAKE_SOURCE_DIR}/lib/std/zig/system.zig"
build.zig (259 lines changed)
@@ -1,19 +1,18 @@
 const std = @import("std");
 const builtin = std.builtin;
-const Builder = std.build.Builder;
 const tests = @import("test/tests.zig");
 const BufMap = std.BufMap;
 const mem = std.mem;
 const ArrayList = std.ArrayList;
 const io = std.io;
 const fs = std.fs;
-const InstallDirectoryOptions = std.build.InstallDirectoryOptions;
+const InstallDirectoryOptions = std.Build.InstallDirectoryOptions;
 const assert = std.debug.assert;
 
 const zig_version = std.builtin.Version{ .major = 0, .minor = 11, .patch = 0 };
 const stack_size = 32 * 1024 * 1024;
 
-pub fn build(b: *Builder) !void {
+pub fn build(b: *std.Build) !void {
     const release = b.option(bool, "release", "Build in release mode") orelse false;
     const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false;
     const target = t: {
@@ -23,7 +22,7 @@ pub fn build(b: *Builder) !void {
         }
         break :t b.standardTargetOptions(.{ .default_target = default_target });
     };
-    const mode: std.builtin.Mode = if (release) switch (target.getCpuArch()) {
+    const optimize: std.builtin.OptimizeMode = if (release) switch (target.getCpuArch()) {
         .wasm32 => .ReleaseSmall,
         else => .ReleaseFast,
     } else .Debug;
@@ -33,7 +32,12 @@ pub fn build(b: *Builder) !void {
 
     const test_step = b.step("test", "Run all the tests");
 
-    const docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
+    const docgen_exe = b.addExecutable(.{
+        .name = "docgen",
+        .root_source_file = .{ .path = "doc/docgen.zig" },
+        .target = .{},
+        .optimize = .Debug,
+    });
     docgen_exe.single_threaded = single_threaded;
 
     const rel_zig_exe = try fs.path.relative(b.allocator, b.build_root, b.zig_exe);
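The hunk above shows the core of the new build API: `addExecutable` now takes an options struct instead of positional name and path arguments. The following is a minimal, self-contained sketch of that migration; the "docgen" name and path come from the diff, while the rest of the build function is illustrative rather than the real compiler build script.

```zig
const std = @import("std");

// Minimal sketch of the addExecutable options-struct form introduced above.
pub fn build(b: *std.Build) void {
    const docgen_exe = b.addExecutable(.{
        .name = "docgen",
        .root_source_file = .{ .path = "doc/docgen.zig" },
        .target = .{}, // native target
        .optimize = .Debug,
    });
    // Make the default `zig build` invocation build the executable.
    b.default_step.dependOn(&docgen_exe.step);
}
```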
@@ -53,10 +57,12 @@ pub fn build(b: *Builder) !void {
     const docs_step = b.step("docs", "Build documentation");
     docs_step.dependOn(&docgen_cmd.step);
 
-    const test_cases = b.addTest("src/test.zig");
+    const test_cases = b.addTest(.{
+        .root_source_file = .{ .path = "src/test.zig" },
+        .optimize = optimize,
+    });
     test_cases.main_pkg_path = ".";
     test_cases.stack_size = stack_size;
-    test_cases.setBuildMode(mode);
     test_cases.single_threaded = single_threaded;
 
     const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"});
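The same pattern applies to `addTest` in the hunk above: the path argument and the later `setBuildMode()` call are folded into one options struct. A minimal sketch of that usage, assuming the same 0.11-era std.Build API (file path and step names are illustrative):

```zig
const std = @import("std");

// Sketch of the addTest migration: options struct in, setBuildMode() out.
pub fn build(b: *std.Build) void {
    const optimize = b.standardOptimizeOption(.{});
    const test_cases = b.addTest(.{
        .root_source_file = .{ .path = "src/test.zig" },
        .optimize = optimize,
    });
    const test_step = b.step("test", "Run the tests");
    test_step.dependOn(&test_cases.step);
}
```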
@@ -151,17 +157,15 @@ pub fn build(b: *Builder) !void {
 
     const mem_leak_frames: u32 = b.option(u32, "mem-leak-frames", "How many stack frames to print when a memory leak occurs. Tests get 2x this amount.") orelse blk: {
         if (strip == true) break :blk @as(u32, 0);
-        if (mode != .Debug) break :blk 0;
+        if (optimize != .Debug) break :blk 0;
         break :blk 4;
     };
 
-    const exe = addCompilerStep(b);
+    const exe = addCompilerStep(b, optimize, target);
     exe.strip = strip;
     exe.sanitize_thread = sanitize_thread;
     exe.build_id = b.option(bool, "build-id", "Include a build id note") orelse false;
     exe.install();
-    exe.setBuildMode(mode);
-    exe.setTarget(target);
 
     const compile_step = b.step("compile", "Build the self-hosted compiler");
     compile_step.dependOn(&exe.step);
@ -197,7 +201,7 @@ pub fn build(b: *Builder) !void {
|
||||
test_cases.linkLibC();
|
||||
}
|
||||
|
||||
const is_debug = mode == .Debug;
|
||||
const is_debug = optimize == .Debug;
|
||||
const enable_logging = b.option(bool, "log", "Enable debug logging with --debug-log") orelse is_debug;
|
||||
const enable_link_snapshots = b.option(bool, "link-snapshot", "Whether to enable linker state snapshots") orelse false;
|
||||
|
||||
@ -362,25 +366,25 @@ pub fn build(b: *Builder) !void {
|
||||
test_step.dependOn(test_cases_step);
|
||||
}
|
||||
|
||||
var chosen_modes: [4]builtin.Mode = undefined;
|
||||
var chosen_opt_modes_buf: [4]builtin.Mode = undefined;
|
||||
var chosen_mode_index: usize = 0;
|
||||
if (!skip_debug) {
|
||||
chosen_modes[chosen_mode_index] = builtin.Mode.Debug;
|
||||
chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.Debug;
|
||||
chosen_mode_index += 1;
|
||||
}
|
||||
if (!skip_release_safe) {
|
||||
chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSafe;
|
||||
chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSafe;
|
||||
chosen_mode_index += 1;
|
||||
}
|
||||
if (!skip_release_fast) {
|
||||
chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseFast;
|
||||
chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseFast;
|
||||
chosen_mode_index += 1;
|
||||
}
|
||||
if (!skip_release_small) {
|
||||
chosen_modes[chosen_mode_index] = builtin.Mode.ReleaseSmall;
|
||||
chosen_opt_modes_buf[chosen_mode_index] = builtin.Mode.ReleaseSmall;
|
||||
chosen_mode_index += 1;
|
||||
}
|
||||
const modes = chosen_modes[0..chosen_mode_index];
|
||||
const optimization_modes = chosen_opt_modes_buf[0..chosen_mode_index];
|
||||
|
||||
// run stage1 `zig fmt` on this build.zig file just to make sure it works
|
||||
test_step.dependOn(&fmt_build_zig.step);
|
||||
@ -393,7 +397,7 @@ pub fn build(b: *Builder) !void {
|
||||
"test/behavior.zig",
|
||||
"behavior",
|
||||
"Run the behavior tests",
|
||||
modes,
|
||||
optimization_modes,
|
||||
skip_single_threaded,
|
||||
skip_non_native,
|
||||
skip_libc,
|
||||
@ -407,7 +411,7 @@ pub fn build(b: *Builder) !void {
|
||||
"lib/compiler_rt.zig",
|
||||
"compiler-rt",
|
||||
"Run the compiler_rt tests",
|
||||
modes,
|
||||
optimization_modes,
|
||||
true, // skip_single_threaded
|
||||
skip_non_native,
|
||||
true, // skip_libc
|
||||
@ -421,7 +425,7 @@ pub fn build(b: *Builder) !void {
|
||||
"lib/c.zig",
|
||||
"universal-libc",
|
||||
"Run the universal libc tests",
|
||||
modes,
|
||||
optimization_modes,
|
||||
true, // skip_single_threaded
|
||||
skip_non_native,
|
||||
true, // skip_libc
|
||||
@ -429,11 +433,11 @@ pub fn build(b: *Builder) !void {
|
||||
skip_stage2_tests or true, // TODO get these all passing
|
||||
));
|
||||
|
||||
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addCompareOutputTests(b, test_filter, optimization_modes));
|
||||
test_step.dependOn(tests.addStandaloneTests(
|
||||
b,
|
||||
test_filter,
|
||||
modes,
|
||||
optimization_modes,
|
||||
skip_non_native,
|
||||
enable_macos_sdk,
|
||||
target,
|
||||
@ -446,10 +450,10 @@ pub fn build(b: *Builder) !void {
|
||||
enable_symlinks_windows,
|
||||
));
|
||||
test_step.dependOn(tests.addCAbiTests(b, skip_non_native, skip_release));
|
||||
test_step.dependOn(tests.addLinkTests(b, test_filter, modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
|
||||
test_step.dependOn(tests.addStackTraceTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addCliTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, modes));
|
||||
test_step.dependOn(tests.addLinkTests(b, test_filter, optimization_modes, enable_macos_sdk, skip_stage2_tests, enable_symlinks_windows));
|
||||
test_step.dependOn(tests.addStackTraceTests(b, test_filter, optimization_modes));
|
||||
test_step.dependOn(tests.addCliTests(b, test_filter, optimization_modes));
|
||||
test_step.dependOn(tests.addAssembleAndLinkTests(b, test_filter, optimization_modes));
|
||||
test_step.dependOn(tests.addTranslateCTests(b, test_filter));
|
||||
if (!skip_run_translated_c) {
|
||||
test_step.dependOn(tests.addRunTranslatedCTests(b, test_filter, target));
|
||||
@ -463,7 +467,7 @@ pub fn build(b: *Builder) !void {
|
||||
"lib/std/std.zig",
|
||||
"std",
|
||||
"Run the standard library tests",
|
||||
modes,
|
||||
optimization_modes,
|
||||
skip_single_threaded,
|
||||
skip_non_native,
|
||||
skip_libc,
|
||||
@ -474,7 +478,7 @@ pub fn build(b: *Builder) !void {
|
||||
try addWasiUpdateStep(b, version);
|
||||
}
|
||||
|
||||
fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
|
||||
fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
|
||||
const semver = try std.SemanticVersion.parse(version);
|
||||
|
||||
var target: std.zig.CrossTarget = .{
|
||||
@ -483,9 +487,7 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
|
||||
};
|
||||
target.cpu_features_add.addFeature(@enumToInt(std.Target.wasm.Feature.bulk_memory));
|
||||
|
||||
const exe = addCompilerStep(b);
|
||||
exe.setBuildMode(.ReleaseSmall);
|
||||
exe.setTarget(target);
|
||||
const exe = addCompilerStep(b, .ReleaseSmall, target);
|
||||
|
||||
const exe_options = b.addOptions();
|
||||
exe.addOptions("build_options", exe_options);
|
||||
@ -512,8 +514,17 @@ fn addWasiUpdateStep(b: *Builder, version: [:0]const u8) !void {
|
||||
update_zig1_step.dependOn(&run_opt.step);
|
||||
}
|
||||
|
||||
fn addCompilerStep(b: *Builder) *std.build.LibExeObjStep {
|
||||
const exe = b.addExecutable("zig", "src/main.zig");
|
||||
fn addCompilerStep(
|
||||
b: *std.Build,
|
||||
optimize: std.builtin.OptimizeMode,
|
||||
target: std.zig.CrossTarget,
|
||||
) *std.Build.CompileStep {
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "zig",
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
exe.stack_size = stack_size;
|
||||
return exe;
|
||||
}
|
||||
@ -533,9 +544,9 @@ const exe_cflags = [_][]const u8{
|
||||
};
|
||||
|
||||
fn addCmakeCfgOptionsToExe(
|
||||
b: *Builder,
|
||||
b: *std.Build,
|
||||
cfg: CMakeConfig,
|
||||
exe: *std.build.LibExeObjStep,
|
||||
exe: *std.Build.CompileStep,
|
||||
use_zig_libcxx: bool,
|
||||
) !void {
|
||||
if (exe.target.isDarwin()) {
|
||||
@ -614,7 +625,7 @@ fn addCmakeCfgOptionsToExe(
|
||||
}
|
||||
}
|
||||
|
||||
fn addStaticLlvmOptionsToExe(exe: *std.build.LibExeObjStep) !void {
|
||||
fn addStaticLlvmOptionsToExe(exe: *std.Build.CompileStep) !void {
|
||||
// Adds the Zig C++ sources which both stage1 and stage2 need.
|
||||
//
|
||||
// We need this because otherwise zig_clang_cc1_main.cpp ends up pulling
|
||||
@ -651,9 +662,9 @@ fn addStaticLlvmOptionsToExe(exe: *std.build.LibExeObjStep) !void {
|
||||
}
|
||||
|
||||
fn addCxxKnownPath(
|
||||
b: *Builder,
|
||||
b: *std.Build,
|
||||
ctx: CMakeConfig,
|
||||
exe: *std.build.LibExeObjStep,
|
||||
exe: *std.Build.CompileStep,
|
||||
objname: []const u8,
|
||||
errtxt: ?[]const u8,
|
||||
need_cpp_includes: bool,
|
||||
@ -686,7 +697,7 @@ fn addCxxKnownPath(
|
||||
}
|
||||
}
|
||||
|
||||
fn addCMakeLibraryList(exe: *std.build.LibExeObjStep, list: []const u8) void {
|
||||
fn addCMakeLibraryList(exe: *std.Build.CompileStep, list: []const u8) void {
|
||||
var it = mem.tokenize(u8, list, ";");
|
||||
while (it.next()) |lib| {
|
||||
if (mem.startsWith(u8, lib, "-l")) {
|
||||
@ -700,7 +711,7 @@ fn addCMakeLibraryList(exe: *std.build.LibExeObjStep, list: []const u8) void {
|
||||
}
|
||||
|
||||
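The function above splits a CMake-style semicolon-separated library list with `std.mem.tokenize`. A small self-contained test (added here for illustration, not part of the changeset) of that call:

```zig
const std = @import("std");

test "tokenize a semicolon-separated library list" {
    var it = std.mem.tokenize(u8, "-lpthread;/usr/lib/libz.a;-lm", ";");
    try std.testing.expectEqualStrings("-lpthread", it.next().?);
    try std.testing.expectEqualStrings("/usr/lib/libz.a", it.next().?);
    try std.testing.expectEqualStrings("-lm", it.next().?);
    try std.testing.expect(it.next() == null);
}
```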
const CMakeConfig = struct {
|
||||
llvm_linkage: std.build.LibExeObjStep.Linkage,
|
||||
llvm_linkage: std.Build.CompileStep.Linkage,
|
||||
cmake_binary_dir: []const u8,
|
||||
cmake_prefix_path: []const u8,
|
||||
cmake_static_library_prefix: []const u8,
|
||||
@ -717,7 +728,7 @@ const CMakeConfig = struct {
|
||||
|
||||
const max_config_h_bytes = 1 * 1024 * 1024;
|
||||
|
||||
fn findConfigH(b: *Builder, config_h_path_option: ?[]const u8) ?[]const u8 {
|
||||
fn findConfigH(b: *std.Build, config_h_path_option: ?[]const u8) ?[]const u8 {
|
||||
if (config_h_path_option) |path| {
|
||||
var config_h_or_err = fs.cwd().openFile(path, .{});
|
||||
if (config_h_or_err) |*file| {
|
||||
@ -763,7 +774,7 @@ fn findConfigH(b: *Builder, config_h_path_option: ?[]const u8) ?[]const u8 {
|
||||
} else unreachable; // TODO should not need `else unreachable`.
|
||||
}
|
||||
|
||||
fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
|
||||
fn parseConfigH(b: *std.Build, config_h_text: []const u8) ?CMakeConfig {
|
||||
var ctx: CMakeConfig = .{
|
||||
.llvm_linkage = undefined,
|
||||
.cmake_binary_dir = undefined,
|
||||
@ -852,7 +863,7 @@ fn parseConfigH(b: *Builder, config_h_text: []const u8) ?CMakeConfig {
|
||||
return ctx;
|
||||
}
|
||||
|
||||
fn toNativePathSep(b: *Builder, s: []const u8) []u8 {
|
||||
fn toNativePathSep(b: *std.Build, s: []const u8) []u8 {
|
||||
const duplicated = b.allocator.dupe(u8, s) catch unreachable;
|
||||
for (duplicated) |*byte| switch (byte.*) {
|
||||
'/' => byte.* = fs.path.sep,
|
||||
@ -861,166 +872,6 @@ fn toNativePathSep(b: *Builder, s: []const u8) []u8 {
|
||||
return duplicated;
|
||||
}
|
||||
|
||||
const softfloat_sources = [_][]const u8{
|
||||
"deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c",
|
||||
"deps/SoftFloat-3e/source/8086/extF80M_isSignalingNaN.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_commonNaNToExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_extF80MToCommonNaN.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_propagateNaNExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c",
|
||||
"deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c",
|
||||
"deps/SoftFloat-3e/source/f128M_add.c",
|
||||
"deps/SoftFloat-3e/source/f128M_div.c",
|
||||
"deps/SoftFloat-3e/source/f128M_eq.c",
|
||||
"deps/SoftFloat-3e/source/f128M_eq_signaling.c",
|
||||
"deps/SoftFloat-3e/source/f128M_le.c",
|
||||
"deps/SoftFloat-3e/source/f128M_le_quiet.c",
|
||||
"deps/SoftFloat-3e/source/f128M_lt.c",
|
||||
"deps/SoftFloat-3e/source/f128M_lt_quiet.c",
|
||||
"deps/SoftFloat-3e/source/f128M_mul.c",
|
||||
"deps/SoftFloat-3e/source/f128M_mulAdd.c",
|
||||
"deps/SoftFloat-3e/source/f128M_rem.c",
|
||||
"deps/SoftFloat-3e/source/f128M_roundToInt.c",
|
||||
"deps/SoftFloat-3e/source/f128M_sqrt.c",
|
||||
"deps/SoftFloat-3e/source/f128M_sub.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_f16.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_f32.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_f64.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_extF80M.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_i32.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_i32_r_minMag.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_i64.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_i64_r_minMag.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_ui32.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_ui64.c",
|
||||
"deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_add.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_div.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_eq.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_le.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_lt.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_mul.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_rem.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_roundToInt.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_sqrt.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_sub.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_to_f16.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_to_f32.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_to_f64.c",
|
||||
"deps/SoftFloat-3e/source/extF80M_to_f128M.c",
|
||||
"deps/SoftFloat-3e/source/f16_add.c",
|
||||
"deps/SoftFloat-3e/source/f16_div.c",
|
||||
"deps/SoftFloat-3e/source/f16_eq.c",
|
||||
"deps/SoftFloat-3e/source/f16_isSignalingNaN.c",
|
||||
"deps/SoftFloat-3e/source/f16_lt.c",
|
||||
"deps/SoftFloat-3e/source/f16_mul.c",
|
||||
"deps/SoftFloat-3e/source/f16_mulAdd.c",
|
||||
"deps/SoftFloat-3e/source/f16_rem.c",
|
||||
"deps/SoftFloat-3e/source/f16_roundToInt.c",
|
||||
"deps/SoftFloat-3e/source/f16_sqrt.c",
|
||||
"deps/SoftFloat-3e/source/f16_sub.c",
|
||||
"deps/SoftFloat-3e/source/f16_to_extF80M.c",
|
||||
"deps/SoftFloat-3e/source/f16_to_f128M.c",
|
||||
"deps/SoftFloat-3e/source/f16_to_f64.c",
|
||||
"deps/SoftFloat-3e/source/f32_to_extF80M.c",
|
||||
"deps/SoftFloat-3e/source/f32_to_f128M.c",
|
||||
"deps/SoftFloat-3e/source/f64_to_extF80M.c",
|
||||
"deps/SoftFloat-3e/source/f64_to_f128M.c",
|
||||
"deps/SoftFloat-3e/source/f64_to_f16.c",
|
||||
"deps/SoftFloat-3e/source/i32_to_f128M.c",
|
||||
"deps/SoftFloat-3e/source/s_add256M.c",
|
||||
"deps/SoftFloat-3e/source/s_addCarryM.c",
|
||||
"deps/SoftFloat-3e/source/s_addComplCarryM.c",
|
||||
"deps/SoftFloat-3e/source/s_addF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_addExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/s_addM.c",
|
||||
"deps/SoftFloat-3e/source/s_addMagsF16.c",
|
||||
"deps/SoftFloat-3e/source/s_addMagsF32.c",
|
||||
"deps/SoftFloat-3e/source/s_addMagsF64.c",
|
||||
"deps/SoftFloat-3e/source/s_approxRecip32_1.c",
|
||||
"deps/SoftFloat-3e/source/s_approxRecipSqrt32_1.c",
|
||||
"deps/SoftFloat-3e/source/s_approxRecipSqrt_1Ks.c",
|
||||
"deps/SoftFloat-3e/source/s_approxRecip_1Ks.c",
|
||||
"deps/SoftFloat-3e/source/s_compare128M.c",
|
||||
"deps/SoftFloat-3e/source/s_compare96M.c",
|
||||
"deps/SoftFloat-3e/source/s_compareNonnormExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/s_countLeadingZeros16.c",
|
||||
"deps/SoftFloat-3e/source/s_countLeadingZeros32.c",
|
||||
"deps/SoftFloat-3e/source/s_countLeadingZeros64.c",
|
||||
"deps/SoftFloat-3e/source/s_countLeadingZeros8.c",
|
||||
"deps/SoftFloat-3e/source/s_eq128.c",
|
||||
"deps/SoftFloat-3e/source/s_invalidF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_invalidExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/s_isNaNF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_le128.c",
|
||||
"deps/SoftFloat-3e/source/s_lt128.c",
|
||||
"deps/SoftFloat-3e/source/s_mul128MTo256M.c",
|
||||
"deps/SoftFloat-3e/source/s_mul64To128M.c",
|
||||
"deps/SoftFloat-3e/source/s_mulAddF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_mulAddF16.c",
|
||||
"deps/SoftFloat-3e/source/s_mulAddF32.c",
|
||||
"deps/SoftFloat-3e/source/s_mulAddF64.c",
|
||||
"deps/SoftFloat-3e/source/s_negXM.c",
|
||||
"deps/SoftFloat-3e/source/s_normExtF80SigM.c",
|
||||
"deps/SoftFloat-3e/source/s_normRoundPackMToF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_normRoundPackMToExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/s_normRoundPackToF16.c",
|
||||
"deps/SoftFloat-3e/source/s_normRoundPackToF32.c",
|
||||
"deps/SoftFloat-3e/source/s_normRoundPackToF64.c",
|
||||
"deps/SoftFloat-3e/source/s_normSubnormalF128SigM.c",
|
||||
"deps/SoftFloat-3e/source/s_normSubnormalF16Sig.c",
|
||||
"deps/SoftFloat-3e/source/s_normSubnormalF32Sig.c",
|
||||
"deps/SoftFloat-3e/source/s_normSubnormalF64Sig.c",
|
||||
"deps/SoftFloat-3e/source/s_remStepMBy32.c",
|
||||
"deps/SoftFloat-3e/source/s_roundMToI64.c",
|
||||
"deps/SoftFloat-3e/source/s_roundMToUI64.c",
|
||||
"deps/SoftFloat-3e/source/s_roundPackMToExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/s_roundPackMToF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_roundPackToF16.c",
|
||||
"deps/SoftFloat-3e/source/s_roundPackToF32.c",
|
||||
"deps/SoftFloat-3e/source/s_roundPackToF64.c",
|
||||
"deps/SoftFloat-3e/source/s_roundToI32.c",
|
||||
"deps/SoftFloat-3e/source/s_roundToI64.c",
|
||||
"deps/SoftFloat-3e/source/s_roundToUI32.c",
|
||||
"deps/SoftFloat-3e/source/s_roundToUI64.c",
|
||||
"deps/SoftFloat-3e/source/s_shiftLeftM.c",
|
||||
"deps/SoftFloat-3e/source/s_shiftNormSigF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_shiftRightJam256M.c",
|
||||
"deps/SoftFloat-3e/source/s_shiftRightJam32.c",
|
||||
"deps/SoftFloat-3e/source/s_shiftRightJam64.c",
|
||||
"deps/SoftFloat-3e/source/s_shiftRightJamM.c",
|
||||
"deps/SoftFloat-3e/source/s_shiftRightM.c",
|
||||
"deps/SoftFloat-3e/source/s_shortShiftLeft64To96M.c",
|
||||
"deps/SoftFloat-3e/source/s_shortShiftLeftM.c",
|
||||
"deps/SoftFloat-3e/source/s_shortShiftRightExtendM.c",
|
||||
"deps/SoftFloat-3e/source/s_shortShiftRightJam64.c",
|
||||
"deps/SoftFloat-3e/source/s_shortShiftRightJamM.c",
|
||||
"deps/SoftFloat-3e/source/s_shortShiftRightM.c",
|
||||
"deps/SoftFloat-3e/source/s_sub1XM.c",
|
||||
"deps/SoftFloat-3e/source/s_sub256M.c",
|
||||
"deps/SoftFloat-3e/source/s_subM.c",
|
||||
"deps/SoftFloat-3e/source/s_subMagsF16.c",
|
||||
"deps/SoftFloat-3e/source/s_subMagsF32.c",
|
||||
"deps/SoftFloat-3e/source/s_subMagsF64.c",
|
||||
"deps/SoftFloat-3e/source/s_tryPropagateNaNF128M.c",
|
||||
"deps/SoftFloat-3e/source/s_tryPropagateNaNExtF80M.c",
|
||||
"deps/SoftFloat-3e/source/softfloat_state.c",
|
||||
"deps/SoftFloat-3e/source/ui32_to_f128M.c",
|
||||
"deps/SoftFloat-3e/source/ui64_to_f128M.c",
|
||||
"deps/SoftFloat-3e/source/ui32_to_extF80M.c",
|
||||
"deps/SoftFloat-3e/source/ui64_to_extF80M.c",
|
||||
};
|
||||
|
||||
const zig_cpp_sources = [_][]const u8{
|
||||
// These are planned to stay even when we are self-hosted.
|
||||
"src/zig_llvm.cpp",
|
||||
|
||||
@ -871,6 +871,13 @@ pub fn main() void {
|
||||
However, it is possible to embed non-UTF-8 bytes into a string literal using <code>\xNN</code> notation.
|
||||
</p>
|
||||
<p>
|
||||
Indexing into a string containing non-ASCII bytes will return individual bytes, whether valid
|
||||
UTF-8 or not.
|
||||
The {#link|Zig Standard Library#} provides routines for checking the validity of UTF-8 encoded
|
||||
strings, accessing their code points and other encoding/decoding related tasks in
|
||||
{#syntax#}std.unicode{#endsyntax#}.
|
||||
</p>
|
||||
<p>
|
||||
Unicode code point literals have type {#syntax#}comptime_int{#endsyntax#}, the same as
|
||||
{#link|Integer Literals#}. All {#link|Escape Sequences#} are valid in both string literals
|
||||
and Unicode code point literals.
|
||||
@ -894,9 +901,12 @@ pub fn main() void {
|
||||
print("{}\n", .{'e' == '\x65'}); // true
|
||||
print("{d}\n", .{'\u{1f4a9}'}); // 128169
|
||||
print("{d}\n", .{'💯'}); // 128175
|
||||
print("{}\n", .{mem.eql(u8, "hello", "h\x65llo")}); // true
|
||||
print("0x{x}\n", .{"\xff"[0]}); // non-UTF-8 strings are possible with \xNN notation.
|
||||
print("{u}\n", .{'⚡'});
|
||||
print("{}\n", .{mem.eql(u8, "hello", "h\x65llo")}); // true
|
||||
print("{}\n", .{mem.eql(u8, "💯", "\xf0\x9f\x92\xaf")}); // also true
|
||||
const invalid_utf8 = "\xff\xfe"; // non-UTF-8 strings are possible with \xNN notation.
|
||||
print("0x{x}\n", .{invalid_utf8[1]}); // indexing them returns individual bytes...
|
||||
print("0x{x}\n", .{"💯"[1]}); // ...as does indexing part-way through non-ASCII characters
|
||||
}
|
||||
{#code_end#}
|
||||
{#see_also|Arrays|Source Encoding#}
|
||||
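A small self-contained illustration (added here, not part of the original changeset) of the byte-indexing and validation behaviour described above; it assumes only the documented `std.unicode` API.

```zig
const std = @import("std");

// Indexing a string yields raw bytes; std.unicode can check whether those
// bytes form valid UTF-8.
pub fn main() void {
    const emoji = "💯"; // stored as four UTF-8 bytes: 0xf0 0x9f 0x92 0xaf
    std.debug.print("len = {d}, second byte = 0x{x}\n", .{ emoji.len, emoji[1] });
    std.debug.print("valid utf8: {}\n", .{std.unicode.utf8ValidateSlice(emoji)});
    std.debug.print("valid utf8: {}\n", .{std.unicode.utf8ValidateSlice("\xff\xfe")});
}
```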
@@ -8799,6 +8809,15 @@ pub const PrefetchOptions = struct {
       {#link|Optional Pointers#} are allowed. Casting an optional pointer which is {#link|null#}
       to a non-optional pointer invokes safety-checked {#link|Undefined Behavior#}.
       </p>
+      <p>
+      {#syntax#}@ptrCast{#endsyntax#} cannot be used for:
+      </p>
+      <ul>
+        <li>Removing {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier, use {#link|@qualCast#}.</li>
+        <li>Changing pointer address space, use {#link|@addrSpaceCast#}.</li>
+        <li>Increasing pointer alignment, use {#link|@alignCast#}.</li>
+        <li>Casting a non-slice pointer to a slice, use slicing syntax {#syntax#}ptr[start..end]{#endsyntax#}.</li>
+      </ul>
       {#header_close#}
 
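A short sketch (added for illustration, not from the changeset) of the last bullet above: obtaining a slice from a non-slice pointer with slicing syntax rather than `@ptrCast`.

```zig
const std = @import("std");

test "slice from a many-item pointer via slicing syntax" {
    var buf = [_]u8{ 1, 2, 3, 4 };
    const many: [*]u8 = &buf; // *[4]u8 coerces to a many-item pointer
    const slice: []u8 = many[0..buf.len]; // slicing syntax, no @ptrCast needed
    try std.testing.expectEqual(@as(u8, 3), slice[2]);
}
```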
       {#header_open|@ptrToInt#}
@@ -8811,6 +8830,13 @@ pub const PrefetchOptions = struct {
 
       {#header_close#}
 
+      {#header_open|@qualCast#}
+      <pre>{#syntax#}@qualCast(comptime DestType: type, value: anytype) DestType{#endsyntax#}</pre>
+      <p>
+      Remove {#syntax#}const{#endsyntax#} or {#syntax#}volatile{#endsyntax#} qualifier from a pointer.
+      </p>
+      {#header_close#}
+
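A brief sketch of the builtin documented above, following the signature given in this changeset (illustrative only; the surrounding test is hypothetical):

```zig
const std = @import("std");

test "@qualCast removes a const qualifier" {
    const x: u32 = 1234;
    const const_ptr: *const u32 = &x;
    // DestType first, value second, per the documented signature.
    const mut_ptr: *u32 = @qualCast(*u32, const_ptr);
    // Reading through the result is fine; writing through it would still be
    // illegal here, since the underlying memory remains const.
    try std.testing.expectEqual(@as(u32, 1234), mut_ptr.*);
}
```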
{#header_open|@rem#}
|
||||
<pre>{#syntax#}@rem(numerator: T, denominator: T) T{#endsyntax#}</pre>
|
||||
<p>
|
||||
@@ -9180,8 +9206,7 @@ fn doTheTest() !void {
       when available.
       </p>
       <p>
-      Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
-      <a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
+      Supports {#link|Floats#} and {#link|Vectors#} of floats.
       </p>
       {#header_close#}
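A small test (added for illustration, not part of the changeset) exercising the element-wise vector support stated above:

```zig
const std = @import("std");

test "@sqrt applies element-wise to a float vector" {
    const v: @Vector(4, f32) = .{ 1.0, 4.0, 9.0, 16.0 };
    const roots = @sqrt(v);
    try std.testing.expectEqual(@as(f32, 3.0), roots[2]);
}
```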
{#header_open|@sin#}
|
||||
@ -9191,8 +9216,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
|
||||
@ -9203,8 +9227,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
|
||||
@ -9215,8 +9238,7 @@ fn doTheTest() !void {
|
||||
Uses a dedicated hardware instruction when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
|
||||
@ -9227,8 +9249,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@exp2#}
|
||||
@ -9238,8 +9259,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@log#}
|
||||
@ -9249,8 +9269,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@log2#}
|
||||
@ -9260,8 +9279,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@log10#}
|
||||
@ -9271,8 +9289,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@fabs#}
|
||||
@ -9282,8 +9299,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@floor#}
|
||||
@ -9293,8 +9309,7 @@ fn doTheTest() !void {
|
||||
Uses a dedicated hardware instruction when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@ceil#}
|
||||
@ -9304,8 +9319,7 @@ fn doTheTest() !void {
|
||||
Uses a dedicated hardware instruction when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@trunc#}
|
||||
@ -9315,8 +9329,7 @@ fn doTheTest() !void {
|
||||
Uses a dedicated hardware instruction when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
{#header_open|@round#}
|
||||
@ -9326,8 +9339,7 @@ fn doTheTest() !void {
|
||||
when available.
|
||||
</p>
|
||||
<p>
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats, with the caveat that
|
||||
<a href="https://github.com/ziglang/zig/issues/4026">some float operations are not yet implemented for all float types</a>.
|
||||
Supports {#link|Floats#} and {#link|Vectors#} of floats.
|
||||
</p>
|
||||
{#header_close#}
|
||||
|
||||
@ -9528,11 +9540,15 @@ fn foo(comptime T: type, ptr: *T) T {
|
||||
To add standard build options to a <code class="file">build.zig</code> file:
|
||||
</p>
|
||||
{#code_begin|syntax|build#}
|
||||
const Builder = @import("std").build.Builder;
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *Builder) void {
|
||||
const exe = b.addExecutable("example", "example.zig");
|
||||
exe.setBuildMode(b.standardReleaseOptions());
|
||||
pub fn build(b: *std.Build) void {
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "example",
|
||||
.root_source_file = .{ .path = "example.zig" },
|
||||
.optimize = optimize,
|
||||
});
|
||||
b.default_step.dependOn(&exe.step);
|
||||
}
|
||||
{#code_end#}
|
||||
@ -9588,7 +9604,7 @@ pub fn build(b: *Builder) void {
|
||||
{#header_close#}
|
||||
|
||||
{#header_open|Single Threaded Builds#}
|
||||
<p>Zig has a compile option <kbd>--single-threaded</kbd> which has the following effects:</p>
|
||||
<p>Zig has a compile option <kbd>-fsingle-threaded</kbd> which has the following effects:</p>
|
||||
<ul>
|
||||
<li>All {#link|Thread Local Variables#} are treated as regular {#link|Container Level Variables#}.</li>
|
||||
<li>The overhead of {#link|Async Functions#} becomes equivalent to function call overhead.</li>
|
||||
@ -10547,22 +10563,26 @@ const separator = if (builtin.os.tag == .windows) '\\' else '/';
|
||||
<p>This <code class="file">build.zig</code> file is automatically generated
|
||||
by <kbd>zig init-exe</kbd>.</p>
|
||||
{#code_begin|syntax|build_executable#}
|
||||
const Builder = @import("std").build.Builder;
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *Builder) void {
|
||||
pub fn build(b: *std.Build) void {
|
||||
// Standard target options allows the person running `zig build` to choose
|
||||
// what target to build for. Here we do not override the defaults, which
|
||||
// means any target is allowed, and the default is native. Other options
|
||||
// for restricting supported target set are available.
|
||||
const target = b.standardTargetOptions(.{});
|
||||
|
||||
// Standard release options allow the person running `zig build` to select
|
||||
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
|
||||
const mode = b.standardReleaseOptions();
|
||||
// Standard optimization options allow the person running `zig build` to select
|
||||
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
|
||||
// set a preferred release mode, allowing the user to decide how to optimize.
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
|
||||
const exe = b.addExecutable("example", "src/main.zig");
|
||||
exe.setTarget(target);
|
||||
exe.setBuildMode(mode);
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "example",
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
exe.install();
|
||||
|
||||
const run_cmd = exe.run();
|
||||
@ -10581,16 +10601,21 @@ pub fn build(b: *Builder) void {
|
||||
<p>This <code class="file">build.zig</code> file is automatically generated
|
||||
by <kbd>zig init-lib</kbd>.</p>
|
||||
{#code_begin|syntax|build_library#}
|
||||
const Builder = @import("std").build.Builder;
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *Builder) void {
|
||||
const mode = b.standardReleaseOptions();
|
||||
const lib = b.addStaticLibrary("example", "src/main.zig");
|
||||
lib.setBuildMode(mode);
|
||||
pub fn build(b: *std.Build) void {
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
const lib = b.addStaticLibrary(.{
|
||||
.name = "example",
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.optimize = optimize,
|
||||
});
|
||||
lib.install();
|
||||
|
||||
var main_tests = b.addTest("src/main.zig");
|
||||
main_tests.setBuildMode(mode);
|
||||
const main_tests = b.addTest(.{
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
const test_step = b.step("test", "Run library tests");
|
||||
test_step.dependOn(&main_tests.step);
|
||||
@ -10949,12 +10974,17 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
{#end_syntax_block#}
|
||||
{#code_begin|syntax|build_c#}
|
||||
const Builder = @import("std").build.Builder;
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *Builder) void {
|
||||
const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
|
||||
|
||||
const exe = b.addExecutable("test", null);
|
||||
pub fn build(b: *std.Build) void {
|
||||
const lib = b.addSharedLibrary(.{
|
||||
.name = "mathtest",
|
||||
.root_source_file = .{ .path = "mathtest.zig" },
|
||||
.version = .{ .major = 1, .minor = 0, .patch = 0 },
|
||||
});
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "test",
|
||||
});
|
||||
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
|
||||
exe.linkLibrary(lib);
|
||||
exe.linkSystemLibrary("c");
|
||||
@ -11011,12 +11041,17 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
{#end_syntax_block#}
|
||||
{#code_begin|syntax|build_object#}
|
||||
const Builder = @import("std").build.Builder;
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *Builder) void {
|
||||
const obj = b.addObject("base64", "base64.zig");
|
||||
pub fn build(b: *std.Build) void {
|
||||
const obj = b.addObject(.{
|
||||
.name = "base64",
|
||||
.root_source_file = .{ .path = "base64.zig" },
|
||||
});
|
||||
|
||||
const exe = b.addExecutable("test", null);
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "test",
|
||||
});
|
||||
exe.addCSourceFile("test.c", &[_][]const u8{"-std=c99"});
|
||||
exe.addObject(obj);
|
||||
exe.linkSystemLibrary("c");
|
||||
|
||||
@ -3,7 +3,6 @@ const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const io = std.io;
|
||||
const fmt = std.fmt;
|
||||
const Builder = std.build.Builder;
|
||||
const mem = std.mem;
|
||||
const process = std.process;
|
||||
const ArrayList = std.ArrayList;
|
||||
@ -42,12 +41,15 @@ pub fn main() !void {
|
||||
return error.InvalidArgs;
|
||||
};
|
||||
|
||||
const builder = try Builder.create(
|
||||
const host = try std.zig.system.NativeTargetInfo.detect(.{});
|
||||
|
||||
const builder = try std.Build.create(
|
||||
allocator,
|
||||
zig_exe,
|
||||
build_root,
|
||||
cache_root,
|
||||
global_cache_root,
|
||||
host,
|
||||
);
|
||||
defer builder.destroy();
|
||||
|
||||
@ -58,7 +60,7 @@ pub fn main() !void {
|
||||
const stdout_stream = io.getStdOut().writer();
|
||||
|
||||
var install_prefix: ?[]const u8 = null;
|
||||
var dir_list = Builder.DirList{};
|
||||
var dir_list = std.Build.DirList{};
|
||||
|
||||
// before arg parsing, check for the NO_COLOR environment variable
|
||||
// if it exists, default the color setting to .off
|
||||
@ -230,7 +232,7 @@ pub fn main() !void {
|
||||
};
|
||||
}
|
||||
|
||||
fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void {
|
||||
fn usage(builder: *std.Build, already_ran_build: bool, out_stream: anytype) !void {
|
||||
// run the build script to collect the options
|
||||
if (!already_ran_build) {
|
||||
builder.resolveInstallPrefix(null, .{});
|
||||
@ -330,7 +332,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: anytype) !void
|
||||
);
|
||||
}
|
||||
|
||||
fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: anytype) void {
|
||||
fn usageAndErr(builder: *std.Build, already_ran_build: bool, out_stream: anytype) void {
|
||||
usage(builder, already_ran_build, out_stream) catch {};
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@ -354,7 +354,7 @@ fn clone() callconv(.Naked) void {
|
||||
\\ ecall
|
||||
);
|
||||
},
|
||||
.mips, .mipsel => {
|
||||
.mips, .mipsel, .mips64, .mips64el => {
|
||||
// __clone(func, stack, flags, arg, ptid, tls, ctid)
|
||||
// 3, 4, 5, 6, 7, 8, 9
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@ -1354,6 +1354,10 @@ const NAV_MODES = {
|
||||
payloadHtml += "ptrCast";
|
||||
break;
|
||||
}
|
||||
case "qual_cast": {
|
||||
payloadHtml += "qualCast";
|
||||
break;
|
||||
}
|
||||
case "truncate": {
|
||||
payloadHtml += "truncate";
|
||||
break;
|
||||
@ -3158,7 +3162,6 @@ const NAV_MODES = {
|
||||
canonTypeDecls = new Array(zigAnalysis.types.length);
|
||||
|
||||
for (let pkgI = 0; pkgI < zigAnalysis.packages.length; pkgI += 1) {
|
||||
if (pkgI === zigAnalysis.rootPkg && rootIsStd) continue;
|
||||
let pkg = zigAnalysis.packages[pkgI];
|
||||
let pkgNames = canonPkgPaths[pkgI];
|
||||
if (pkgNames === undefined) continue;
|
||||
|
||||
@ -1,34 +1,67 @@
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *std.build.Builder) void {
|
||||
// Although this function looks imperative, note that its job is to
|
||||
// declaratively construct a build graph that will be executed by an external
|
||||
// runner.
|
||||
pub fn build(b: *std.Build) void {
|
||||
// Standard target options allows the person running `zig build` to choose
|
||||
// what target to build for. Here we do not override the defaults, which
|
||||
// means any target is allowed, and the default is native. Other options
|
||||
// for restricting supported target set are available.
|
||||
const target = b.standardTargetOptions(.{});
|
||||
|
||||
// Standard release options allow the person running `zig build` to select
|
||||
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
|
||||
const mode = b.standardReleaseOptions();
|
||||
// Standard optimization options allow the person running `zig build` to select
|
||||
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
|
||||
// set a preferred release mode, allowing the user to decide how to optimize.
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
|
||||
const exe = b.addExecutable("$", "src/main.zig");
|
||||
exe.setTarget(target);
|
||||
exe.setBuildMode(mode);
|
||||
const exe = b.addExecutable(.{
|
||||
.name = "$",
|
||||
// In this case the main source file is merely a path, however, in more
|
||||
// complicated build scripts, this could be a generated file.
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
// This declares intent for the executable to be installed into the
|
||||
// standard location when the user invokes the "install" step (the default
|
||||
// step when running `zig build`).
|
||||
exe.install();
|
||||
|
||||
// This *creates* a RunStep in the build graph, to be executed when another
|
||||
// step is evaluated that depends on it. The next line below will establish
|
||||
// such a dependency.
|
||||
const run_cmd = exe.run();
|
||||
|
||||
// By making the run step depend on the install step, it will be run from the
|
||||
// installation directory rather than directly from within the cache directory.
|
||||
// This is not necessary, however, if the application depends on other installed
|
||||
// files, this ensures they will be present and in the expected location.
|
||||
run_cmd.step.dependOn(b.getInstallStep());
|
||||
|
||||
// This allows the user to pass arguments to the application in the build
|
||||
// command itself, like this: `zig build run -- arg1 arg2 etc`
|
||||
if (b.args) |args| {
|
||||
run_cmd.addArgs(args);
|
||||
}
|
||||
|
||||
// This creates a build step. It will be visible in the `zig build --help` menu,
|
||||
// and can be selected like this: `zig build run`
|
||||
// This will evaluate the `run` step rather than the default, which is "install".
|
||||
const run_step = b.step("run", "Run the app");
|
||||
run_step.dependOn(&run_cmd.step);
|
||||
|
||||
const exe_tests = b.addTest("src/main.zig");
|
||||
exe_tests.setTarget(target);
|
||||
exe_tests.setBuildMode(mode);
|
||||
// Creates a step for unit testing.
|
||||
const exe_tests = b.addTest(.{
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
// Similar to creating the run step earlier, this exposes a `test` step to
|
||||
// the `zig build --help` menu, providing a way for the user to request
|
||||
// running the unit tests.
|
||||
const test_step = b.step("test", "Run unit tests");
|
||||
test_step.dependOn(&exe_tests.step);
|
||||
}
|
||||
|
||||
@ -1,17 +1,44 @@
|
||||
const std = @import("std");
|
||||
|
||||
pub fn build(b: *std.build.Builder) void {
|
||||
// Standard release options allow the person running `zig build` to select
|
||||
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall.
|
||||
const mode = b.standardReleaseOptions();
|
||||
// Although this function looks imperative, note that its job is to
|
||||
// declaratively construct a build graph that will be executed by an external
|
||||
// runner.
|
||||
pub fn build(b: *std.Build) void {
|
||||
// Standard target options allows the person running `zig build` to choose
|
||||
// what target to build for. Here we do not override the defaults, which
|
||||
// means any target is allowed, and the default is native. Other options
|
||||
// for restricting supported target set are available.
|
||||
const target = b.standardTargetOptions(.{});
|
||||
|
||||
const lib = b.addStaticLibrary("$", "src/main.zig");
|
||||
lib.setBuildMode(mode);
|
||||
// Standard optimization options allow the person running `zig build` to select
|
||||
// between Debug, ReleaseSafe, ReleaseFast, and ReleaseSmall. Here we do not
|
||||
// set a preferred release mode, allowing the user to decide how to optimize.
|
||||
const optimize = b.standardOptimizeOption(.{});
|
||||
|
||||
const lib = b.addStaticLibrary(.{
|
||||
.name = "$",
|
||||
// In this case the main source file is merely a path, however, in more
|
||||
// complicated build scripts, this could be a generated file.
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
// This declares intent for the library to be installed into the standard
|
||||
// location when the user invokes the "install" step (the default step when
|
||||
// running `zig build`).
|
||||
lib.install();
|
||||
|
||||
const main_tests = b.addTest("src/main.zig");
|
||||
main_tests.setBuildMode(mode);
|
||||
// Creates a step for unit testing.
|
||||
const main_tests = b.addTest(.{
|
||||
.root_source_file = .{ .path = "src/main.zig" },
|
||||
.target = target,
|
||||
.optimize = optimize,
|
||||
});
|
||||
|
||||
// This creates a build step. It will be visible in the `zig build --help` menu,
|
||||
// and can be selected like this: `zig build test`
|
||||
// This will evaluate the `test` step rather than the default, which is "install".
|
||||
const test_step = b.step("test", "Run library tests");
|
||||
test_step.dependOn(&main_tests.step);
|
||||
}
|
||||
|
||||
lib/libc/mingw/misc/strtoimax.c (vendored, 5 lines changed)
@ -31,10 +31,7 @@
|
||||
#define valid(n, b) ((n) >= 0 && (n) < (b))
|
||||
|
||||
intmax_t
|
||||
strtoimax(nptr, endptr, base)
|
||||
register const char * __restrict__ nptr;
|
||||
char ** __restrict__ endptr;
|
||||
register int base;
|
||||
strtoimax(const char * __restrict__ nptr, char ** __restrict__ endptr, int base)
|
||||
{
|
||||
register uintmax_t accum; /* accumulates converted value */
|
||||
register int n; /* numeral from digit character */
|
||||
|
||||
lib/libc/mingw/misc/strtoumax.c (vendored, 5 lines changed)
@ -31,10 +31,7 @@
|
||||
#define valid(n, b) ((n) >= 0 && (n) < (b))
|
||||
|
||||
uintmax_t
|
||||
strtoumax(nptr, endptr, base)
|
||||
register const char * __restrict__ nptr;
|
||||
char ** __restrict__ endptr;
|
||||
register int base;
|
||||
strtoumax(const char * __restrict__ nptr, char ** __restrict__ endptr, int base)
|
||||
{
|
||||
register uintmax_t accum; /* accumulates converted value */
|
||||
register uintmax_t next; /* for computing next value of accum */
|
||||
|
||||
lib/libc/mingw/misc/wcstoimax.c (vendored, 5 lines changed)
@ -33,10 +33,7 @@
|
||||
#define valid(n, b) ((n) >= 0 && (n) < (b))
|
||||
|
||||
intmax_t
|
||||
wcstoimax(nptr, endptr, base)
|
||||
register const wchar_t * __restrict__ nptr;
|
||||
wchar_t ** __restrict__ endptr;
|
||||
register int base;
|
||||
wcstoimax(const wchar_t * __restrict__ nptr, wchar_t ** __restrict__ endptr, int base)
|
||||
{
|
||||
register uintmax_t accum; /* accumulates converted value */
|
||||
register int n; /* numeral from digit character */
|
||||
|
||||
lib/libc/mingw/misc/wcstoumax.c (vendored, 5 lines changed)
@ -33,10 +33,7 @@
|
||||
#define valid(n, b) ((n) >= 0 && (n) < (b))
|
||||
|
||||
uintmax_t
|
||||
wcstoumax(nptr, endptr, base)
|
||||
register const wchar_t * __restrict__ nptr;
|
||||
wchar_t ** __restrict__ endptr;
|
||||
register int base;
|
||||
wcstoumax(const wchar_t * __restrict__ nptr, wchar_t ** __restrict__ endptr, int base)
|
||||
{
|
||||
register uintmax_t accum; /* accumulates converted value */
|
||||
register uintmax_t next; /* for computing next value of accum */
|
||||
|
||||
lib/std/Build.zig (new file, 1774 lines)
File diff suppressed because it is too large
@ -1,7 +1,5 @@
|
||||
const std = @import("../std.zig");
|
||||
const build = std.build;
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const Step = std.Build.Step;
|
||||
const fs = std.fs;
|
||||
const mem = std.mem;
|
||||
|
||||
@ -10,17 +8,17 @@ const CheckFileStep = @This();
|
||||
pub const base_id = .check_file;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
expected_matches: []const []const u8,
|
||||
source: build.FileSource,
|
||||
source: std.Build.FileSource,
|
||||
max_bytes: usize = 20 * 1024 * 1024,
|
||||
|
||||
pub fn create(
|
||||
builder: *Builder,
|
||||
source: build.FileSource,
|
||||
builder: *std.Build,
|
||||
source: std.Build.FileSource,
|
||||
expected_matches: []const []const u8,
|
||||
) *CheckFileStep {
|
||||
const self = builder.allocator.create(CheckFileStep) catch unreachable;
|
||||
const self = builder.allocator.create(CheckFileStep) catch @panic("OOM");
|
||||
self.* = CheckFileStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(.check_file, "CheckFile", builder.allocator, make),
|
||||
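A sketch of wiring this step into a build graph via the `create()` signature shown above; the file path, the expected phrase, and the `std.Build.CheckFileStep` import path are assumptions made for illustration.

```zig
const std = @import("std");
const CheckFileStep = std.Build.CheckFileStep; // assumed exposure point

pub fn build(b: *std.Build) void {
    // create(builder, source, expected_matches) per the signature above.
    const check = CheckFileStep.create(
        b,
        .{ .path = "zig-out/docs/index.html" },
        &.{"<title>"},
    );
    const check_step = b.step("check-file", "Verify the generated file's contents");
    check_step.dependOn(&check.step);
}
```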
@ -1,6 +1,5 @@
|
||||
const std = @import("../std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const build = std.build;
|
||||
const fs = std.fs;
|
||||
const macho = std.macho;
|
||||
const math = std.math;
|
||||
@ -10,23 +9,22 @@ const testing = std.testing;
|
||||
const CheckObjectStep = @This();
|
||||
|
||||
const Allocator = mem.Allocator;
|
||||
const Builder = build.Builder;
|
||||
const Step = build.Step;
|
||||
const EmulatableRunStep = build.EmulatableRunStep;
|
||||
const Step = std.Build.Step;
|
||||
const EmulatableRunStep = std.Build.EmulatableRunStep;
|
||||
|
||||
pub const base_id = .check_object;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
source: build.FileSource,
|
||||
builder: *std.Build,
|
||||
source: std.Build.FileSource,
|
||||
max_bytes: usize = 20 * 1024 * 1024,
|
||||
checks: std.ArrayList(Check),
|
||||
dump_symtab: bool = false,
|
||||
obj_format: std.Target.ObjectFormat,
|
||||
|
||||
pub fn create(builder: *Builder, source: build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
|
||||
pub fn create(builder: *std.Build, source: std.Build.FileSource, obj_format: std.Target.ObjectFormat) *CheckObjectStep {
|
||||
const gpa = builder.allocator;
|
||||
const self = gpa.create(CheckObjectStep) catch unreachable;
|
||||
const self = gpa.create(CheckObjectStep) catch @panic("OOM");
|
||||
self.* = .{
|
||||
.builder = builder,
|
||||
.step = Step.init(.check_file, "CheckObject", gpa, make),
|
||||
@ -44,7 +42,7 @@ pub fn runAndCompare(self: *CheckObjectStep) *EmulatableRunStep {
|
||||
const dependencies_len = self.step.dependencies.items.len;
|
||||
assert(dependencies_len > 0);
|
||||
const exe_step = self.step.dependencies.items[dependencies_len - 1];
|
||||
const exe = exe_step.cast(std.build.LibExeObjStep).?;
|
||||
const exe = exe_step.cast(std.Build.CompileStep).?;
|
||||
const emulatable_step = EmulatableRunStep.create(self.builder, "EmulatableRun", exe);
|
||||
emulatable_step.step.dependOn(&self.step);
|
||||
return emulatable_step;
|
||||
@ -216,10 +214,10 @@ const ComputeCompareExpected = struct {
|
||||
};
|
||||
|
||||
const Check = struct {
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
actions: std.ArrayList(Action),
|
||||
|
||||
fn create(b: *Builder) Check {
|
||||
fn create(b: *std.Build) Check {
|
||||
return .{
|
||||
.builder = b,
|
||||
.actions = std.ArrayList(Action).init(b.allocator),
|
||||
@ -230,14 +228,14 @@ const Check = struct {
|
||||
self.actions.append(.{
|
||||
.tag = .match,
|
||||
.phrase = self.builder.dupe(phrase),
|
||||
}) catch unreachable;
|
||||
}) catch @panic("OOM");
|
||||
}
|
||||
|
||||
fn notPresent(self: *Check, phrase: []const u8) void {
|
||||
self.actions.append(.{
|
||||
.tag = .not_present,
|
||||
.phrase = self.builder.dupe(phrase),
|
||||
}) catch unreachable;
|
||||
}) catch @panic("OOM");
|
||||
}
|
||||
|
||||
fn computeCmp(self: *Check, phrase: []const u8, expected: ComputeCompareExpected) void {
|
||||
@ -245,7 +243,7 @@ const Check = struct {
|
||||
.tag = .compute_cmp,
|
||||
.phrase = self.builder.dupe(phrase),
|
||||
.expected = expected,
|
||||
}) catch unreachable;
|
||||
}) catch @panic("OOM");
|
||||
}
|
||||
};
|
||||
|
||||
@ -253,7 +251,7 @@ const Check = struct {
|
||||
pub fn checkStart(self: *CheckObjectStep, phrase: []const u8) void {
|
||||
var new_check = Check.create(self.builder);
|
||||
new_check.match(phrase);
|
||||
self.checks.append(new_check) catch unreachable;
|
||||
self.checks.append(new_check) catch @panic("OOM");
|
||||
}
|
||||
|
||||
/// Adds another searched phrase to the latest created Check with `CheckObjectStep.checkStart(...)`.
|
||||
@ -295,7 +293,7 @@ pub fn checkComputeCompare(
|
||||
) void {
|
||||
var new_check = Check.create(self.builder);
|
||||
new_check.computeCmp(program, expected);
|
||||
self.checks.append(new_check) catch unreachable;
|
||||
self.checks.append(new_check) catch @panic("OOM");
|
||||
}
|
||||
|
||||
fn make(step: *Step) !void {
|
||||
File diff suppressed because it is too large
@ -1,17 +1,25 @@
|
||||
const std = @import("../std.zig");
|
||||
const ConfigHeaderStep = @This();
|
||||
const Step = std.build.Step;
|
||||
const Builder = std.build.Builder;
|
||||
const Step = std.Build.Step;
|
||||
|
||||
pub const base_id: Step.Id = .config_header;
|
||||
|
||||
pub const Style = enum {
|
||||
pub const Style = union(enum) {
|
||||
/// The configure format supported by autotools. It uses `#undef foo` to
|
||||
/// mark lines that can be substituted with different values.
|
||||
autoconf,
|
||||
autoconf: std.Build.FileSource,
|
||||
/// The configure format supported by CMake. It uses `@@FOO@@` and
|
||||
/// `#cmakedefine` for template substitution.
|
||||
cmake,
|
||||
cmake: std.Build.FileSource,
|
||||
/// Instead of starting with an input file, start with nothing.
|
||||
blank,
|
||||
|
||||
pub fn getFileSource(style: Style) ?std.Build.FileSource {
|
||||
switch (style) {
|
||||
.autoconf, .cmake => |s| return s,
|
||||
.blank => return null,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Value = union(enum) {
|
||||
@ -24,35 +32,51 @@ pub const Value = union(enum) {
};

step: Step,
builder: *Builder,
source: std.build.FileSource,
style: Style,
values: std.StringHashMap(Value),
max_bytes: usize = 2 * 1024 * 1024,
output_dir: []const u8,
output_basename: []const u8,
builder: *std.Build,
values: std.StringArrayHashMap(Value),
output_file: std.Build.GeneratedFile,

pub fn create(builder: *Builder, source: std.build.FileSource, style: Style) *ConfigHeaderStep {
style: Style,
max_bytes: usize,
include_path: []const u8,

pub const Options = struct {
style: Style = .blank,
max_bytes: usize = 2 * 1024 * 1024,
include_path: ?[]const u8 = null,
};

pub fn create(builder: *std.Build, options: Options) *ConfigHeaderStep {
const self = builder.allocator.create(ConfigHeaderStep) catch @panic("OOM");
const name = builder.fmt("configure header {s}", .{source.getDisplayName()});
const name = if (options.style.getFileSource()) |s|
builder.fmt("configure {s} header {s}", .{ @tagName(options.style), s.getDisplayName() })
else
builder.fmt("configure {s} header", .{@tagName(options.style)});
self.* = .{
.builder = builder,
.step = Step.init(base_id, name, builder.allocator, make),
.source = source,
.style = style,
.values = std.StringHashMap(Value).init(builder.allocator),
.output_dir = undefined,
.output_basename = "config.h",
.style = options.style,
.values = std.StringArrayHashMap(Value).init(builder.allocator),

.max_bytes = options.max_bytes,
.include_path = "config.h",
.output_file = .{ .step = &self.step },
};
switch (source) {

if (options.style.getFileSource()) |s| switch (s) {
.path => |p| {
const basename = std.fs.path.basename(p);
if (std.mem.endsWith(u8, basename, ".h.in")) {
self.output_basename = basename[0 .. basename.len - 3];
self.include_path = basename[0 .. basename.len - 3];
}
},
else => {},
};

if (options.include_path) |include_path| {
self.include_path = include_path;
}

return self;
}

@ -62,47 +86,57 @@ pub fn addValues(self: *ConfigHeaderStep, values: anytype) void {

fn addValuesInner(self: *ConfigHeaderStep, values: anytype) !void {
inline for (@typeInfo(@TypeOf(values)).Struct.fields) |field| {
switch (@typeInfo(field.type)) {
.Null => {
try self.values.put(field.name, .undef);
},
.Void => {
try self.values.put(field.name, .defined);
},
.Bool => {
try self.values.put(field.name, .{ .boolean = @field(values, field.name) });
},
.ComptimeInt => {
try self.values.put(field.name, .{ .int = @field(values, field.name) });
},
.EnumLiteral => {
try self.values.put(field.name, .{ .ident = @tagName(@field(values, field.name)) });
},
.Pointer => |ptr| {
switch (@typeInfo(ptr.child)) {
.Array => |array| {
if (ptr.size == .One and array.child == u8) {
try self.values.put(field.name, .{ .string = @field(values, field.name) });
continue;
}
},
else => {},
}
try putValue(self, field.name, field.type, @field(values, field.name));
}
}

@compileError("unsupported ConfigHeaderStep value type: " ++
@typeName(field.type));
},
else => @compileError("unsupported ConfigHeaderStep value type: " ++
@typeName(field.type)),
}
fn putValue(self: *ConfigHeaderStep, field_name: []const u8, comptime T: type, v: T) !void {
switch (@typeInfo(T)) {
.Null => {
try self.values.put(field_name, .undef);
},
.Void => {
try self.values.put(field_name, .defined);
},
.Bool => {
try self.values.put(field_name, .{ .boolean = v });
},
.Int => {
try self.values.put(field_name, .{ .int = v });
},
.ComptimeInt => {
try self.values.put(field_name, .{ .int = v });
},
.EnumLiteral => {
try self.values.put(field_name, .{ .ident = @tagName(v) });
},
.Optional => {
if (v) |x| {
return putValue(self, field_name, @TypeOf(x), x);
} else {
try self.values.put(field_name, .undef);
}
},
.Pointer => |ptr| {
switch (@typeInfo(ptr.child)) {
.Array => |array| {
if (ptr.size == .One and array.child == u8) {
try self.values.put(field_name, .{ .string = v });
return;
}
},
else => {},
}

@compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T));
},
else => @compileError("unsupported ConfigHeaderStep value type: " ++ @typeName(T)),
}
}

fn make(step: *Step) !void {
const self = @fieldParentPtr(ConfigHeaderStep, "step", step);
const gpa = self.builder.allocator;
const src_path = self.source.getPath(self.builder);
const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);

// The cache is used here not really as a way to speed things up - because writing
// the data to a file would probably be very fast - but as a way to find a canonical
@ -119,9 +153,30 @@ fn make(step: *Step) !void {
// Random bytes to make ConfigHeaderStep unique. Refresh this with new
// random bytes when ConfigHeaderStep implementation is modified in a
// non-backwards-compatible way.
var hash = Hasher.init("X1pQzdDt91Zlh7Eh");
hash.update(self.source.getDisplayName());
hash.update(contents);
var hash = Hasher.init("PGuDTpidxyMqnkGM");

var output = std.ArrayList(u8).init(gpa);
defer output.deinit();

try output.appendSlice("/* This file was generated by ConfigHeaderStep using the Zig Build System. */\n");

switch (self.style) {
.autoconf => |file_source| {
const src_path = file_source.getPath(self.builder);
const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
try render_autoconf(contents, &output, self.values, src_path);
},
.cmake => |file_source| {
const src_path = file_source.getPath(self.builder);
const contents = try std.fs.cwd().readFileAlloc(gpa, src_path, self.max_bytes);
try render_cmake(contents, &output, self.values, src_path);
},
.blank => {
try render_blank(&output, self.values, self.include_path);
},
}

hash.update(output.items);

var digest: [16]u8 = undefined;
hash.final(&digest);
@ -132,38 +187,42 @@ fn make(step: *Step) !void {
.{std.fmt.fmtSliceHexLower(&digest)},
) catch unreachable;

self.output_dir = try std.fs.path.join(gpa, &[_][]const u8{
const output_dir = try std.fs.path.join(gpa, &[_][]const u8{
self.builder.cache_root, "o", &hash_basename,
});
var dir = std.fs.cwd().makeOpenPath(self.output_dir, .{}) catch |err| {
std.debug.print("unable to make path {s}: {s}\n", .{ self.output_dir, @errorName(err) });

// If output_path has directory parts, deal with them. Example:
// output_dir is zig-cache/o/HASH
// output_path is libavutil/avconfig.h
// We want to open directory zig-cache/o/HASH/libavutil/
// but keep output_dir as zig-cache/o/HASH for -I include
const sub_dir_path = if (std.fs.path.dirname(self.include_path)) |d|
try std.fs.path.join(gpa, &.{ output_dir, d })
else
output_dir;

var dir = std.fs.cwd().makeOpenPath(sub_dir_path, .{}) catch |err| {
std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
return err;
};
defer dir.close();

var values_copy = try self.values.clone();
defer values_copy.deinit();
try dir.writeFile(std.fs.path.basename(self.include_path), output.items);

var output = std.ArrayList(u8).init(gpa);
defer output.deinit();
try output.ensureTotalCapacity(contents.len);

try output.appendSlice("/* This file was generated by ConfigHeaderStep using the Zig Build System. */\n");

switch (self.style) {
.autoconf => try render_autoconf(contents, &output, &values_copy, src_path),
.cmake => try render_cmake(contents, &output, &values_copy, src_path),
}

try dir.writeFile(self.output_basename, output.items);
self.output_file.path = try std.fs.path.join(self.builder.allocator, &.{
output_dir, self.include_path,
});
}

fn render_autoconf(
contents: []const u8,
output: *std.ArrayList(u8),
values_copy: *std.StringHashMap(Value),
values: std.StringArrayHashMap(Value),
src_path: []const u8,
) !void {
var values_copy = try values.clone();
defer values_copy.deinit();

var any_errors = false;
var line_index: u32 = 0;
var line_it = std.mem.split(u8, contents, "\n");
@ -181,7 +240,7 @@ fn render_autoconf(
continue;
}
const name = it.rest();
const kv = values_copy.fetchRemove(name) orelse {
const kv = values_copy.fetchSwapRemove(name) orelse {
std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
src_path, line_index + 1, name,
});
@ -191,12 +250,8 @@ fn render_autoconf(
try renderValue(output, name, kv.value);
}

{
var it = values_copy.iterator();
while (it.next()) |entry| {
const name = entry.key_ptr.*;
std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
}
for (values_copy.keys()) |name| {
std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
}

if (any_errors) {
@ -207,9 +262,12 @@ fn render_autoconf(
fn render_cmake(
contents: []const u8,
output: *std.ArrayList(u8),
values_copy: *std.StringHashMap(Value),
values: std.StringArrayHashMap(Value),
src_path: []const u8,
) !void {
var values_copy = try values.clone();
defer values_copy.deinit();

var any_errors = false;
var line_index: u32 = 0;
var line_it = std.mem.split(u8, contents, "\n");
@ -233,7 +291,7 @@ fn render_cmake(
any_errors = true;
continue;
};
const kv = values_copy.fetchRemove(name) orelse {
const kv = values_copy.fetchSwapRemove(name) orelse {
std.debug.print("{s}:{d}: error: unspecified config header value: '{s}'\n", .{
src_path, line_index + 1, name,
});
@ -243,12 +301,8 @@ fn render_cmake(
try renderValue(output, name, kv.value);
}

{
var it = values_copy.iterator();
while (it.next()) |entry| {
const name = entry.key_ptr.*;
std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
}
for (values_copy.keys()) |name| {
std.debug.print("{s}: error: config header value unused: '{s}'\n", .{ src_path, name });
}

if (any_errors) {
@ -256,6 +310,36 @@ fn render_cmake(
}
}

fn render_blank(
output: *std.ArrayList(u8),
defines: std.StringArrayHashMap(Value),
include_path: []const u8,
) !void {
const include_guard_name = try output.allocator.dupe(u8, include_path);
for (include_guard_name) |*byte| {
switch (byte.*) {
'a'...'z' => byte.* = byte.* - 'a' + 'A',
'A'...'Z', '0'...'9' => continue,
else => byte.* = '_',
}
}

try output.appendSlice("#ifndef ");
try output.appendSlice(include_guard_name);
try output.appendSlice("\n#define ");
try output.appendSlice(include_guard_name);
try output.appendSlice("\n");

const values = defines.values();
for (defines.keys()) |name, i| {
try renderValue(output, name, values[i]);
}

try output.appendSlice("#endif /* ");
try output.appendSlice(include_guard_name);
try output.appendSlice(" */\n");
}

fn renderValue(output: *std.ArrayList(u8), name: []const u8, value: Value) !void {
switch (value) {
.undef => {
@ -5,11 +5,9 @@
|
||||
//! without having to verify if it's possible to be ran against.
|
||||
|
||||
const std = @import("../std.zig");
|
||||
const build = std.build;
|
||||
const Step = std.build.Step;
|
||||
const Builder = std.build.Builder;
|
||||
const LibExeObjStep = std.build.LibExeObjStep;
|
||||
const RunStep = std.build.RunStep;
|
||||
const Step = std.Build.Step;
|
||||
const CompileStep = std.Build.CompileStep;
|
||||
const RunStep = std.Build.RunStep;
|
||||
|
||||
const fs = std.fs;
|
||||
const process = std.process;
|
||||
@ -22,10 +20,10 @@ pub const base_id = .emulatable_run;
|
||||
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
|
||||
/// The artifact (executable) to be run by this step
|
||||
exe: *LibExeObjStep,
|
||||
exe: *CompileStep,
|
||||
|
||||
/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
|
||||
expected_exit_code: ?u8 = 0,
|
||||
@ -47,9 +45,9 @@ hide_foreign_binaries_warning: bool,
|
||||
/// binary through emulation when any of the emulation options such as `enable_rosetta` are set to true.
|
||||
/// When set to false, and the binary is foreign, running the executable is skipped.
|
||||
/// Asserts given artifact is an executable.
|
||||
pub fn create(builder: *Builder, name: []const u8, artifact: *LibExeObjStep) *EmulatableRunStep {
|
||||
pub fn create(builder: *std.Build, name: []const u8, artifact: *CompileStep) *EmulatableRunStep {
|
||||
std.debug.assert(artifact.kind == .exe or artifact.kind == .test_exe);
|
||||
const self = builder.allocator.create(EmulatableRunStep) catch unreachable;
|
||||
const self = builder.allocator.create(EmulatableRunStep) catch @panic("OOM");
|
||||
|
||||
const option_name = "hide-foreign-warnings";
|
||||
const hide_warnings = if (builder.available_options_map.get(option_name) == null) warn: {
|
||||
@ -156,9 +154,9 @@ fn warnAboutForeignBinaries(step: *EmulatableRunStep) void {
|
||||
const builder = step.builder;
|
||||
const artifact = step.exe;
|
||||
|
||||
const host_name = builder.host.target.zigTriple(builder.allocator) catch unreachable;
|
||||
const foreign_name = artifact.target.zigTriple(builder.allocator) catch unreachable;
|
||||
const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch unreachable;
|
||||
const host_name = builder.host.target.zigTriple(builder.allocator) catch @panic("unhandled error");
|
||||
const foreign_name = artifact.target.zigTriple(builder.allocator) catch @panic("unhandled error");
|
||||
const target_info = std.zig.system.NativeTargetInfo.detect(artifact.target) catch @panic("unhandled error");
|
||||
const need_cross_glibc = artifact.target.isGnuLibC() and artifact.is_linking_libc;
|
||||
switch (builder.host.getExternalExecutor(target_info, .{
|
||||
.qemu_fixes_dl = need_cross_glibc and builder.glibc_runtimes_dir != null,
|
||||
@ -1,25 +1,20 @@
|
||||
const std = @import("../std.zig");
|
||||
const build = @import("../build.zig");
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const BufMap = std.BufMap;
|
||||
const mem = std.mem;
|
||||
|
||||
const Step = std.Build.Step;
|
||||
const FmtStep = @This();
|
||||
|
||||
pub const base_id = .fmt;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
argv: [][]const u8,
|
||||
|
||||
pub fn create(builder: *Builder, paths: []const []const u8) *FmtStep {
|
||||
const self = builder.allocator.create(FmtStep) catch unreachable;
|
||||
pub fn create(builder: *std.Build, paths: []const []const u8) *FmtStep {
|
||||
const self = builder.allocator.create(FmtStep) catch @panic("OOM");
|
||||
const name = "zig fmt";
|
||||
self.* = FmtStep{
|
||||
.step = Step.init(.fmt, name, builder.allocator, make),
|
||||
.builder = builder,
|
||||
.argv = builder.allocator.alloc([]u8, paths.len + 2) catch unreachable,
|
||||
.argv = builder.allocator.alloc([]u8, paths.len + 2) catch @panic("OOM"),
|
||||
};
|
||||
|
||||
self.argv[0] = builder.zig_exe;
|
||||
@ -1,32 +1,29 @@
|
||||
const std = @import("../std.zig");
|
||||
const build = @import("../build.zig");
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const LibExeObjStep = std.build.LibExeObjStep;
|
||||
const InstallDir = std.build.InstallDir;
|
||||
const Step = std.Build.Step;
|
||||
const CompileStep = std.Build.CompileStep;
|
||||
const InstallDir = std.Build.InstallDir;
|
||||
const InstallArtifactStep = @This();
|
||||
|
||||
pub const base_id = .install_artifact;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
artifact: *LibExeObjStep,
|
||||
builder: *std.Build,
|
||||
artifact: *CompileStep,
|
||||
dest_dir: InstallDir,
|
||||
pdb_dir: ?InstallDir,
|
||||
h_dir: ?InstallDir,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
|
||||
pub fn create(builder: *std.Build, artifact: *CompileStep) *InstallArtifactStep {
|
||||
if (artifact.install_step) |s| return s;
|
||||
|
||||
const self = builder.allocator.create(Self) catch unreachable;
|
||||
self.* = Self{
|
||||
const self = builder.allocator.create(InstallArtifactStep) catch @panic("OOM");
|
||||
self.* = InstallArtifactStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(.install_artifact, builder.fmt("install {s}", .{artifact.step.name}), builder.allocator, make),
|
||||
.artifact = artifact,
|
||||
.dest_dir = artifact.override_dest_dir orelse switch (artifact.kind) {
|
||||
.obj => @panic("Cannot install a .obj build artifact."),
|
||||
.@"test" => @panic("Cannot install a test build artifact, use addTestExe instead."),
|
||||
.@"test" => @panic("Cannot install a .test build artifact, use .test_exe instead."),
|
||||
.exe, .test_exe => InstallDir{ .bin = {} },
|
||||
.lib => InstallDir{ .lib = {} },
|
||||
},
|
||||
@ -64,13 +61,13 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
|
||||
}
|
||||
|
||||
fn make(step: *Step) !void {
|
||||
const self = @fieldParentPtr(Self, "step", step);
|
||||
const self = @fieldParentPtr(InstallArtifactStep, "step", step);
|
||||
const builder = self.builder;
|
||||
|
||||
const full_dest_path = builder.getInstallPath(self.dest_dir, self.artifact.out_filename);
|
||||
try builder.updateFile(self.artifact.getOutputSource().getPath(builder), full_dest_path);
|
||||
if (self.artifact.isDynamicLibrary() and self.artifact.version != null and self.artifact.target.wantSharedLibSymLinks()) {
|
||||
try LibExeObjStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
|
||||
try CompileStep.doAtomicSymLinks(builder.allocator, full_dest_path, self.artifact.major_only_filename.?, self.artifact.name_only_filename.?);
|
||||
}
|
||||
if (self.artifact.isDynamicLibrary() and self.artifact.target.isWindows() and self.artifact.emit_implib != .no_emit) {
|
||||
const full_implib_path = builder.getInstallPath(self.dest_dir, self.artifact.out_lib_filename);
|
||||
@ -1,19 +1,17 @@
|
||||
const std = @import("../std.zig");
|
||||
const mem = std.mem;
|
||||
const fs = std.fs;
|
||||
const build = @import("../build.zig");
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const InstallDir = std.build.InstallDir;
|
||||
const Step = std.Build.Step;
|
||||
const InstallDir = std.Build.InstallDir;
|
||||
const InstallDirStep = @This();
|
||||
const log = std.log;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
options: Options,
|
||||
/// This is used by the build system when a file being installed comes from one
|
||||
/// package but is being installed by another.
|
||||
override_source_builder: ?*Builder = null,
|
||||
override_source_builder: ?*std.Build = null,
|
||||
|
||||
pub const base_id = .install_dir;
|
||||
|
||||
@ -31,7 +29,7 @@ pub const Options = struct {
|
||||
/// `@import("test.zig")` would be a compile error.
|
||||
blank_extensions: []const []const u8 = &.{},
|
||||
|
||||
fn dupe(self: Options, b: *Builder) Options {
|
||||
fn dupe(self: Options, b: *std.Build) Options {
|
||||
return .{
|
||||
.source_dir = b.dupe(self.source_dir),
|
||||
.install_dir = self.install_dir.dupe(b),
|
||||
@ -43,7 +41,7 @@ pub const Options = struct {
|
||||
};
|
||||
|
||||
pub fn init(
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
options: Options,
|
||||
) InstallDirStep {
|
||||
builder.pushInstalledFile(options.install_dir, options.install_subdir);
|
||||
@ -1,24 +1,22 @@
|
||||
const std = @import("../std.zig");
|
||||
const build = @import("../build.zig");
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const FileSource = std.build.FileSource;
|
||||
const InstallDir = std.build.InstallDir;
|
||||
const Step = std.Build.Step;
|
||||
const FileSource = std.Build.FileSource;
|
||||
const InstallDir = std.Build.InstallDir;
|
||||
const InstallFileStep = @This();
|
||||
|
||||
pub const base_id = .install_file;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
source: FileSource,
|
||||
dir: InstallDir,
|
||||
dest_rel_path: []const u8,
|
||||
/// This is used by the build system when a file being installed comes from one
|
||||
/// package but is being installed by another.
|
||||
override_source_builder: ?*Builder = null,
|
||||
override_source_builder: ?*std.Build = null,
|
||||
|
||||
pub fn init(
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
source: FileSource,
|
||||
dir: InstallDir,
|
||||
dest_rel_path: []const u8,
|
||||
@ -7,11 +7,10 @@ const InstallRawStep = @This();
|
||||
const Allocator = std.mem.Allocator;
|
||||
const ArenaAllocator = std.heap.ArenaAllocator;
|
||||
const ArrayListUnmanaged = std.ArrayListUnmanaged;
|
||||
const Builder = std.build.Builder;
|
||||
const File = std.fs.File;
|
||||
const InstallDir = std.build.InstallDir;
|
||||
const LibExeObjStep = std.build.LibExeObjStep;
|
||||
const Step = std.build.Step;
|
||||
const InstallDir = std.Build.InstallDir;
|
||||
const CompileStep = std.Build.CompileStep;
|
||||
const Step = std.Build.Step;
|
||||
const elf = std.elf;
|
||||
const fs = std.fs;
|
||||
const io = std.io;
|
||||
@ -25,12 +24,12 @@ pub const RawFormat = enum {
|
||||
};
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
artifact: *LibExeObjStep,
|
||||
builder: *std.Build,
|
||||
artifact: *CompileStep,
|
||||
dest_dir: InstallDir,
|
||||
dest_filename: []const u8,
|
||||
options: CreateOptions,
|
||||
output_file: std.build.GeneratedFile,
|
||||
output_file: std.Build.GeneratedFile,
|
||||
|
||||
pub const CreateOptions = struct {
|
||||
format: ?RawFormat = null,
|
||||
@ -39,8 +38,13 @@ pub const CreateOptions = struct {
|
||||
pad_to: ?u64 = null,
|
||||
};
|
||||
|
||||
pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8, options: CreateOptions) *InstallRawStep {
|
||||
const self = builder.allocator.create(InstallRawStep) catch unreachable;
|
||||
pub fn create(
|
||||
builder: *std.Build,
|
||||
artifact: *CompileStep,
|
||||
dest_filename: []const u8,
|
||||
options: CreateOptions,
|
||||
) *InstallRawStep {
|
||||
const self = builder.allocator.create(InstallRawStep) catch @panic("OOM");
|
||||
self.* = InstallRawStep{
|
||||
.step = Step.init(.install_raw, builder.fmt("install raw binary {s}", .{artifact.step.name}), builder.allocator, make),
|
||||
.builder = builder,
|
||||
@ -53,7 +57,7 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []cons
|
||||
},
|
||||
.dest_filename = dest_filename,
|
||||
.options = options,
|
||||
.output_file = std.build.GeneratedFile{ .step = &self.step },
|
||||
.output_file = std.Build.GeneratedFile{ .step = &self.step },
|
||||
};
|
||||
self.step.dependOn(&artifact.step);
|
||||
|
||||
@ -61,8 +65,8 @@ pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: []cons
|
||||
return self;
|
||||
}
|
||||
|
||||
pub fn getOutputSource(self: *const InstallRawStep) std.build.FileSource {
|
||||
return std.build.FileSource{ .generated = &self.output_file };
|
||||
pub fn getOutputSource(self: *const InstallRawStep) std.Build.FileSource {
|
||||
return std.Build.FileSource{ .generated = &self.output_file };
|
||||
}
|
||||
|
||||
fn make(step: *Step) !void {
|
||||
@ -78,7 +82,7 @@ fn make(step: *Step) !void {
|
||||
const full_dest_path = b.getInstallPath(self.dest_dir, self.dest_filename);
|
||||
self.output_file.path = full_dest_path;
|
||||
|
||||
fs.cwd().makePath(b.getInstallPath(self.dest_dir, "")) catch unreachable;
|
||||
try fs.cwd().makePath(b.getInstallPath(self.dest_dir, ""));
|
||||
|
||||
var argv_list = std.ArrayList([]const u8).init(b.allocator);
|
||||
try argv_list.appendSlice(&.{ b.zig_exe, "objcopy" });
|
||||
@ -1,17 +1,15 @@
|
||||
const std = @import("../std.zig");
|
||||
const log = std.log;
|
||||
const build = @import("../build.zig");
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const Step = std.Build.Step;
|
||||
const LogStep = @This();
|
||||
|
||||
pub const base_id = .log;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
data: []const u8,
|
||||
|
||||
pub fn init(builder: *Builder, data: []const u8) LogStep {
|
||||
pub fn init(builder: *std.Build, data: []const u8) LogStep {
|
||||
return LogStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(.log, builder.fmt("log {s}", .{data}), builder.allocator, make),
|
||||
@ -1,12 +1,10 @@
|
||||
const std = @import("../std.zig");
|
||||
const builtin = @import("builtin");
|
||||
const build = std.build;
|
||||
const fs = std.fs;
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const GeneratedFile = build.GeneratedFile;
|
||||
const LibExeObjStep = build.LibExeObjStep;
|
||||
const FileSource = build.FileSource;
|
||||
const Step = std.Build.Step;
|
||||
const GeneratedFile = std.Build.GeneratedFile;
|
||||
const CompileStep = std.Build.CompileStep;
|
||||
const FileSource = std.Build.FileSource;
|
||||
|
||||
const OptionsStep = @This();
|
||||
|
||||
@ -14,14 +12,14 @@ pub const base_id = .options;
|
||||
|
||||
step: Step,
|
||||
generated_file: GeneratedFile,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
|
||||
contents: std.ArrayList(u8),
|
||||
artifact_args: std.ArrayList(OptionArtifactArg),
|
||||
file_source_args: std.ArrayList(OptionFileSourceArg),
|
||||
|
||||
pub fn create(builder: *Builder) *OptionsStep {
|
||||
const self = builder.allocator.create(OptionsStep) catch unreachable;
|
||||
pub fn create(builder: *std.Build) *OptionsStep {
|
||||
const self = builder.allocator.create(OptionsStep) catch @panic("OOM");
|
||||
self.* = .{
|
||||
.builder = builder,
|
||||
.step = Step.init(.options, "options", builder.allocator, make),
|
||||
@ -36,44 +34,48 @@ pub fn create(builder: *Builder) *OptionsStep {
|
||||
}
|
||||
|
||||
pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value: T) void {
|
||||
return addOptionFallible(self, T, name, value) catch @panic("unhandled error");
|
||||
}
|
||||
|
||||
fn addOptionFallible(self: *OptionsStep, comptime T: type, name: []const u8, value: T) !void {
|
||||
const out = self.contents.writer();
|
||||
switch (T) {
|
||||
[]const []const u8 => {
|
||||
out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{std.zig.fmtId(name)}) catch unreachable;
|
||||
try out.print("pub const {}: []const []const u8 = &[_][]const u8{{\n", .{std.zig.fmtId(name)});
|
||||
for (value) |slice| {
|
||||
out.print(" \"{}\",\n", .{std.zig.fmtEscapes(slice)}) catch unreachable;
|
||||
try out.print(" \"{}\",\n", .{std.zig.fmtEscapes(slice)});
|
||||
}
|
||||
out.writeAll("};\n") catch unreachable;
|
||||
try out.writeAll("};\n");
|
||||
return;
|
||||
},
|
||||
[:0]const u8 => {
|
||||
out.print("pub const {}: [:0]const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) }) catch unreachable;
|
||||
try out.print("pub const {}: [:0]const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) });
|
||||
return;
|
||||
},
|
||||
[]const u8 => {
|
||||
out.print("pub const {}: []const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) }) catch unreachable;
|
||||
try out.print("pub const {}: []const u8 = \"{}\";\n", .{ std.zig.fmtId(name), std.zig.fmtEscapes(value) });
|
||||
return;
|
||||
},
|
||||
?[:0]const u8 => {
|
||||
out.print("pub const {}: ?[:0]const u8 = ", .{std.zig.fmtId(name)}) catch unreachable;
|
||||
try out.print("pub const {}: ?[:0]const u8 = ", .{std.zig.fmtId(name)});
|
||||
if (value) |payload| {
|
||||
out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)}) catch unreachable;
|
||||
try out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)});
|
||||
} else {
|
||||
out.writeAll("null;\n") catch unreachable;
|
||||
try out.writeAll("null;\n");
|
||||
}
|
||||
return;
|
||||
},
|
||||
?[]const u8 => {
|
||||
out.print("pub const {}: ?[]const u8 = ", .{std.zig.fmtId(name)}) catch unreachable;
|
||||
try out.print("pub const {}: ?[]const u8 = ", .{std.zig.fmtId(name)});
|
||||
if (value) |payload| {
|
||||
out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)}) catch unreachable;
|
||||
try out.print("\"{}\";\n", .{std.zig.fmtEscapes(payload)});
|
||||
} else {
|
||||
out.writeAll("null;\n") catch unreachable;
|
||||
try out.writeAll("null;\n");
|
||||
}
|
||||
return;
|
||||
},
|
||||
std.builtin.Version => {
|
||||
out.print(
|
||||
try out.print(
|
||||
\\pub const {}: @import("std").builtin.Version = .{{
|
||||
\\ .major = {d},
|
||||
\\ .minor = {d},
|
||||
@ -86,11 +88,11 @@ pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value:
|
||||
value.major,
|
||||
value.minor,
|
||||
value.patch,
|
||||
}) catch unreachable;
|
||||
});
|
||||
return;
|
||||
},
|
||||
std.SemanticVersion => {
|
||||
out.print(
|
||||
try out.print(
|
||||
\\pub const {}: @import("std").SemanticVersion = .{{
|
||||
\\ .major = {d},
|
||||
\\ .minor = {d},
|
||||
@ -102,38 +104,38 @@ pub fn addOption(self: *OptionsStep, comptime T: type, name: []const u8, value:
|
||||
value.major,
|
||||
value.minor,
|
||||
value.patch,
|
||||
}) catch unreachable;
|
||||
});
|
||||
if (value.pre) |some| {
|
||||
out.print(" .pre = \"{}\",\n", .{std.zig.fmtEscapes(some)}) catch unreachable;
|
||||
try out.print(" .pre = \"{}\",\n", .{std.zig.fmtEscapes(some)});
|
||||
}
|
||||
if (value.build) |some| {
|
||||
out.print(" .build = \"{}\",\n", .{std.zig.fmtEscapes(some)}) catch unreachable;
|
||||
try out.print(" .build = \"{}\",\n", .{std.zig.fmtEscapes(some)});
|
||||
}
|
||||
out.writeAll("};\n") catch unreachable;
|
||||
try out.writeAll("};\n");
|
||||
return;
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
switch (@typeInfo(T)) {
|
||||
.Enum => |enum_info| {
|
||||
out.print("pub const {} = enum {{\n", .{std.zig.fmtId(@typeName(T))}) catch unreachable;
|
||||
try out.print("pub const {} = enum {{\n", .{std.zig.fmtId(@typeName(T))});
|
||||
inline for (enum_info.fields) |field| {
|
||||
out.print(" {},\n", .{std.zig.fmtId(field.name)}) catch unreachable;
|
||||
try out.print(" {},\n", .{std.zig.fmtId(field.name)});
|
||||
}
|
||||
out.writeAll("};\n") catch unreachable;
|
||||
out.print("pub const {}: {s} = {s}.{s};\n", .{
|
||||
try out.writeAll("};\n");
|
||||
try out.print("pub const {}: {s} = {s}.{s};\n", .{
|
||||
std.zig.fmtId(name),
|
||||
std.zig.fmtId(@typeName(T)),
|
||||
std.zig.fmtId(@typeName(T)),
|
||||
std.zig.fmtId(@tagName(value)),
|
||||
}) catch unreachable;
|
||||
});
|
||||
return;
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
out.print("pub const {}: {s} = ", .{ std.zig.fmtId(name), @typeName(T) }) catch unreachable;
|
||||
printLiteral(out, value, 0) catch unreachable;
|
||||
out.writeAll(";\n") catch unreachable;
|
||||
try out.print("pub const {}: {s} = ", .{ std.zig.fmtId(name), @typeName(T) });
|
||||
try printLiteral(out, value, 0);
|
||||
try out.writeAll(";\n");
|
||||
}
|
||||
|
||||
// TODO: non-recursive?
|
||||
@ -191,19 +193,22 @@ pub fn addOptionFileSource(
|
||||
self.file_source_args.append(.{
|
||||
.name = name,
|
||||
.source = source.dupe(self.builder),
|
||||
}) catch unreachable;
|
||||
}) catch @panic("OOM");
|
||||
source.addStepDependencies(&self.step);
|
||||
}
|
||||
|
||||
/// The value is the path in the cache dir.
|
||||
/// Adds a dependency automatically.
|
||||
pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *LibExeObjStep) void {
|
||||
self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch unreachable;
|
||||
pub fn addOptionArtifact(self: *OptionsStep, name: []const u8, artifact: *CompileStep) void {
|
||||
self.artifact_args.append(.{ .name = self.builder.dupe(name), .artifact = artifact }) catch @panic("OOM");
|
||||
self.step.dependOn(&artifact.step);
|
||||
}
|
||||
|
||||
pub fn getPackage(self: *OptionsStep, package_name: []const u8) build.Pkg {
|
||||
return .{ .name = package_name, .source = self.getSource() };
|
||||
pub fn createModule(self: *OptionsStep) *std.Build.Module {
|
||||
return self.builder.createModule(.{
|
||||
.source_file = self.getSource(),
|
||||
.dependencies = &.{},
|
||||
});
|
||||
}
|
||||
|
||||
pub fn getSource(self: *OptionsStep) FileSource {
|
||||
@ -268,7 +273,7 @@ fn hashContentsToFileName(self: *OptionsStep) [64]u8 {
|
||||
|
||||
const OptionArtifactArg = struct {
|
||||
name: []const u8,
|
||||
artifact: *LibExeObjStep,
|
||||
artifact: *CompileStep,
|
||||
};
|
||||
|
||||
const OptionFileSourceArg = struct {
|
||||
@ -281,12 +286,16 @@ test "OptionsStep" {
|
||||
|
||||
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
|
||||
defer arena.deinit();
|
||||
var builder = try Builder.create(
|
||||
|
||||
const host = try std.zig.system.NativeTargetInfo.detect(.{});
|
||||
|
||||
var builder = try std.Build.create(
|
||||
arena.allocator(),
|
||||
"test",
|
||||
"test",
|
||||
"test",
|
||||
"test",
|
||||
host,
|
||||
);
|
||||
defer builder.destroy();
|
||||
|
||||
@ -361,5 +370,5 @@ test "OptionsStep" {
|
||||
\\
|
||||
, options.contents.items);
|
||||
|
||||
_ = try std.zig.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0));
|
||||
_ = try std.zig.Ast.parse(arena.allocator(), try options.contents.toOwnedSliceSentinel(0), .zig);
|
||||
}
|
||||
@ -1,18 +1,16 @@
|
||||
const std = @import("../std.zig");
|
||||
const log = std.log;
|
||||
const fs = std.fs;
|
||||
const build = @import("../build.zig");
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const Step = std.Build.Step;
|
||||
const RemoveDirStep = @This();
|
||||
|
||||
pub const base_id = .remove_dir;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
dir_path: []const u8,
|
||||
|
||||
pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep {
|
||||
pub fn init(builder: *std.Build, dir_path: []const u8) RemoveDirStep {
|
||||
return RemoveDirStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(.remove_dir, builder.fmt("RemoveDir {s}", .{dir_path}), builder.allocator, make),
|
||||
@ -1,17 +1,15 @@
|
||||
const std = @import("../std.zig");
|
||||
const builtin = @import("builtin");
|
||||
const build = std.build;
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const LibExeObjStep = build.LibExeObjStep;
|
||||
const WriteFileStep = build.WriteFileStep;
|
||||
const Step = std.Build.Step;
|
||||
const CompileStep = std.Build.CompileStep;
|
||||
const WriteFileStep = std.Build.WriteFileStep;
|
||||
const fs = std.fs;
|
||||
const mem = std.mem;
|
||||
const process = std.process;
|
||||
const ArrayList = std.ArrayList;
|
||||
const EnvMap = process.EnvMap;
|
||||
const Allocator = mem.Allocator;
|
||||
const ExecError = build.Builder.ExecError;
|
||||
const ExecError = std.Build.ExecError;
|
||||
|
||||
const max_stdout_size = 1 * 1024 * 1024; // 1 MiB
|
||||
|
||||
@ -20,7 +18,7 @@ const RunStep = @This();
|
||||
pub const base_id: Step.Id = .run;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
|
||||
/// See also addArg and addArgs to modifying this directly
|
||||
argv: ArrayList(Arg),
|
||||
@ -50,13 +48,13 @@ pub const StdIoAction = union(enum) {
|
||||
};
|
||||
|
||||
pub const Arg = union(enum) {
|
||||
artifact: *LibExeObjStep,
|
||||
file_source: build.FileSource,
|
||||
artifact: *CompileStep,
|
||||
file_source: std.Build.FileSource,
|
||||
bytes: []u8,
|
||||
};
|
||||
|
||||
pub fn create(builder: *Builder, name: []const u8) *RunStep {
|
||||
const self = builder.allocator.create(RunStep) catch unreachable;
|
||||
pub fn create(builder: *std.Build, name: []const u8) *RunStep {
|
||||
const self = builder.allocator.create(RunStep) catch @panic("OOM");
|
||||
self.* = RunStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(base_id, name, builder.allocator, make),
|
||||
@ -68,20 +66,20 @@ pub fn create(builder: *Builder, name: []const u8) *RunStep {
|
||||
return self;
|
||||
}
|
||||
|
||||
pub fn addArtifactArg(self: *RunStep, artifact: *LibExeObjStep) void {
|
||||
self.argv.append(Arg{ .artifact = artifact }) catch unreachable;
|
||||
pub fn addArtifactArg(self: *RunStep, artifact: *CompileStep) void {
|
||||
self.argv.append(Arg{ .artifact = artifact }) catch @panic("OOM");
|
||||
self.step.dependOn(&artifact.step);
|
||||
}
|
||||
|
||||
pub fn addFileSourceArg(self: *RunStep, file_source: build.FileSource) void {
|
||||
pub fn addFileSourceArg(self: *RunStep, file_source: std.Build.FileSource) void {
|
||||
self.argv.append(Arg{
|
||||
.file_source = file_source.dupe(self.builder),
|
||||
}) catch unreachable;
|
||||
}) catch @panic("OOM");
|
||||
file_source.addStepDependencies(&self.step);
|
||||
}
|
||||
|
||||
pub fn addArg(self: *RunStep, arg: []const u8) void {
|
||||
self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch unreachable;
|
||||
self.argv.append(Arg{ .bytes = self.builder.dupe(arg) }) catch @panic("OOM");
|
||||
}
|
||||
|
||||
pub fn addArgs(self: *RunStep, args: []const []const u8) void {
|
||||
@ -91,7 +89,7 @@ pub fn addArgs(self: *RunStep, args: []const []const u8) void {
|
||||
}
|
||||
|
||||
pub fn clearEnvironment(self: *RunStep) void {
|
||||
const new_env_map = self.builder.allocator.create(EnvMap) catch unreachable;
|
||||
const new_env_map = self.builder.allocator.create(EnvMap) catch @panic("OOM");
|
||||
new_env_map.* = EnvMap.init(self.builder.allocator);
|
||||
self.env_map = new_env_map;
|
||||
}
|
||||
@ -101,7 +99,7 @@ pub fn addPathDir(self: *RunStep, search_path: []const u8) void {
|
||||
}
|
||||
|
||||
/// For internal use only, users of `RunStep` should use `addPathDir` directly.
|
||||
pub fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u8) void {
|
||||
pub fn addPathDirInternal(step: *Step, builder: *std.Build, search_path: []const u8) void {
|
||||
const env_map = getEnvMapInternal(step, builder.allocator);
|
||||
|
||||
const key = "PATH";
|
||||
@ -109,9 +107,9 @@ pub fn addPathDirInternal(step: *Step, builder: *Builder, search_path: []const u
|
||||
|
||||
if (prev_path) |pp| {
|
||||
const new_path = builder.fmt("{s}" ++ [1]u8{fs.path.delimiter} ++ "{s}", .{ pp, search_path });
|
||||
env_map.put(key, new_path) catch unreachable;
|
||||
env_map.put(key, new_path) catch @panic("OOM");
|
||||
} else {
|
||||
env_map.put(key, builder.dupePath(search_path)) catch unreachable;
|
||||
env_map.put(key, builder.dupePath(search_path)) catch @panic("OOM");
|
||||
}
|
||||
}
|
||||
|
||||
@ -122,12 +120,12 @@ pub fn getEnvMap(self: *RunStep) *EnvMap {
|
||||
fn getEnvMapInternal(step: *Step, allocator: Allocator) *EnvMap {
|
||||
const maybe_env_map = switch (step.id) {
|
||||
.run => step.cast(RunStep).?.env_map,
|
||||
.emulatable_run => step.cast(build.EmulatableRunStep).?.env_map,
|
||||
.emulatable_run => step.cast(std.Build.EmulatableRunStep).?.env_map,
|
||||
else => unreachable,
|
||||
};
|
||||
return maybe_env_map orelse {
|
||||
const env_map = allocator.create(EnvMap) catch unreachable;
|
||||
env_map.* = process.getEnvMap(allocator) catch unreachable;
|
||||
const env_map = allocator.create(EnvMap) catch @panic("OOM");
|
||||
env_map.* = process.getEnvMap(allocator) catch @panic("unhandled error");
|
||||
switch (step.id) {
|
||||
.run => step.cast(RunStep).?.env_map = env_map,
|
||||
.emulatable_run => step.cast(RunStep).?.env_map = env_map,
|
||||
@ -142,7 +140,7 @@ pub fn setEnvironmentVariable(self: *RunStep, key: []const u8, value: []const u8
|
||||
env_map.put(
|
||||
self.builder.dupe(key),
|
||||
self.builder.dupe(value),
|
||||
) catch unreachable;
|
||||
) catch @panic("unhandled error");
|
||||
}
|
||||
|
||||
pub fn expectStdErrEqual(self: *RunStep, bytes: []const u8) void {
|
||||
@ -195,7 +193,7 @@ fn make(step: *Step) !void {
|
||||
|
||||
pub fn runCommand(
|
||||
argv: []const []const u8,
|
||||
builder: *Builder,
|
||||
builder: *std.Build,
|
||||
expected_exit_code: ?u8,
|
||||
stdout_action: StdIoAction,
|
||||
stderr_action: StdIoAction,
|
||||
@ -236,7 +234,7 @@ pub fn runCommand(
|
||||
|
||||
switch (stdout_action) {
|
||||
.expect_exact, .expect_matches => {
|
||||
stdout = child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size) catch unreachable;
|
||||
stdout = try child.stdout.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
|
||||
},
|
||||
.inherit, .ignore => {},
|
||||
}
|
||||
@ -246,7 +244,7 @@ pub fn runCommand(
|
||||
|
||||
switch (stderr_action) {
|
||||
.expect_exact, .expect_matches => {
|
||||
stderr = child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size) catch unreachable;
|
||||
stderr = try child.stderr.?.reader().readAllAlloc(builder.allocator, max_stdout_size);
|
||||
},
|
||||
.inherit, .ignore => {},
|
||||
}
|
||||
@ -357,13 +355,13 @@ fn printCmd(cwd: ?[]const u8, argv: []const []const u8) void {
|
||||
std.debug.print("\n", .{});
|
||||
}
|
||||
|
||||
fn addPathForDynLibs(self: *RunStep, artifact: *LibExeObjStep) void {
|
||||
fn addPathForDynLibs(self: *RunStep, artifact: *CompileStep) void {
|
||||
addPathForDynLibsInternal(&self.step, self.builder, artifact);
|
||||
}
|
||||
|
||||
/// This should only be used for internal usage, this is called automatically
|
||||
/// for the user.
|
||||
pub fn addPathForDynLibsInternal(step: *Step, builder: *Builder, artifact: *LibExeObjStep) void {
|
||||
pub fn addPathForDynLibsInternal(step: *Step, builder: *std.Build, artifact: *CompileStep) void {
|
||||
for (artifact.link_objects.items) |link_object| {
|
||||
switch (link_object) {
|
||||
.other_step => |other| {
|
||||
lib/std/Build/Step.zig | 97 (new file)
@ -0,0 +1,97 @@
id: Id,
name: []const u8,
makeFn: *const fn (self: *Step) anyerror!void,
dependencies: std.ArrayList(*Step),
loop_flag: bool,
done_flag: bool,

pub const Id = enum {
top_level,
compile,
install_artifact,
install_file,
install_dir,
log,
remove_dir,
fmt,
translate_c,
write_file,
run,
emulatable_run,
check_file,
check_object,
config_header,
install_raw,
options,
custom,

pub fn Type(comptime id: Id) type {
return switch (id) {
.top_level => Build.TopLevelStep,
.compile => Build.CompileStep,
.install_artifact => Build.InstallArtifactStep,
.install_file => Build.InstallFileStep,
.install_dir => Build.InstallDirStep,
.log => Build.LogStep,
.remove_dir => Build.RemoveDirStep,
.fmt => Build.FmtStep,
.translate_c => Build.TranslateCStep,
.write_file => Build.WriteFileStep,
.run => Build.RunStep,
.emulatable_run => Build.EmulatableRunStep,
.check_file => Build.CheckFileStep,
.check_object => Build.CheckObjectStep,
.config_header => Build.ConfigHeaderStep,
.install_raw => Build.InstallRawStep,
.options => Build.OptionsStep,
.custom => @compileError("no type available for custom step"),
};
}
};

pub fn init(
id: Id,
name: []const u8,
allocator: Allocator,
makeFn: *const fn (self: *Step) anyerror!void,
) Step {
return Step{
.id = id,
.name = allocator.dupe(u8, name) catch @panic("OOM"),
.makeFn = makeFn,
.dependencies = std.ArrayList(*Step).init(allocator),
.loop_flag = false,
.done_flag = false,
};
}

pub fn initNoOp(id: Id, name: []const u8, allocator: Allocator) Step {
return init(id, name, allocator, makeNoOp);
}

pub fn make(self: *Step) !void {
if (self.done_flag) return;

try self.makeFn(self);
self.done_flag = true;
}

pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch @panic("OOM");
}

fn makeNoOp(self: *Step) anyerror!void {
_ = self;
}

pub fn cast(step: *Step, comptime T: type) ?*T {
if (step.id == T.base_id) {
return @fieldParentPtr(T, "step", step);
}
return null;
}

const Step = @This();
const std = @import("../std.zig");
const Build = std.Build;
const Allocator = std.mem.Allocator;
@ -1,9 +1,7 @@
|
||||
const std = @import("../std.zig");
|
||||
const build = std.build;
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const LibExeObjStep = build.LibExeObjStep;
|
||||
const CheckFileStep = build.CheckFileStep;
|
||||
const Step = std.Build.Step;
|
||||
const CompileStep = std.Build.CompileStep;
|
||||
const CheckFileStep = std.Build.CheckFileStep;
|
||||
const fs = std.fs;
|
||||
const mem = std.mem;
|
||||
const CrossTarget = std.zig.CrossTarget;
|
||||
@ -13,42 +11,61 @@ const TranslateCStep = @This();
|
||||
pub const base_id = .translate_c;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
source: build.FileSource,
|
||||
builder: *std.Build,
|
||||
source: std.Build.FileSource,
|
||||
include_dirs: std.ArrayList([]const u8),
|
||||
c_macros: std.ArrayList([]const u8),
|
||||
output_dir: ?[]const u8,
|
||||
out_basename: []const u8,
|
||||
target: CrossTarget = CrossTarget{},
|
||||
output_file: build.GeneratedFile,
|
||||
target: CrossTarget,
|
||||
optimize: std.builtin.OptimizeMode,
|
||||
output_file: std.Build.GeneratedFile,
|
||||
|
||||
pub fn create(builder: *Builder, source: build.FileSource) *TranslateCStep {
|
||||
const self = builder.allocator.create(TranslateCStep) catch unreachable;
|
||||
pub const Options = struct {
|
||||
source_file: std.Build.FileSource,
|
||||
target: CrossTarget,
|
||||
optimize: std.builtin.OptimizeMode,
|
||||
};
|
||||
|
||||
pub fn create(builder: *std.Build, options: Options) *TranslateCStep {
|
||||
const self = builder.allocator.create(TranslateCStep) catch @panic("OOM");
|
||||
const source = options.source_file.dupe(builder);
|
||||
self.* = TranslateCStep{
|
||||
.step = Step.init(.translate_c, "translate-c", builder.allocator, make),
|
||||
.builder = builder,
|
||||
.source = source,
|
||||
.include_dirs = std.ArrayList([]const u8).init(builder.allocator),
|
||||
.c_macros = std.ArrayList([]const u8).init(builder.allocator),
|
||||
.output_dir = null,
|
||||
.out_basename = undefined,
|
||||
.output_file = build.GeneratedFile{ .step = &self.step },
|
||||
.target = options.target,
|
||||
.optimize = options.optimize,
|
||||
.output_file = std.Build.GeneratedFile{ .step = &self.step },
|
||||
};
|
||||
source.addStepDependencies(&self.step);
|
||||
return self;
|
||||
}
|
||||
|
||||
pub fn setTarget(self: *TranslateCStep, target: CrossTarget) void {
|
||||
self.target = target;
|
||||
}
|
||||
pub const AddExecutableOptions = struct {
|
||||
name: ?[]const u8 = null,
|
||||
version: ?std.builtin.Version = null,
|
||||
target: ?CrossTarget = null,
|
||||
optimize: ?std.builtin.Mode = null,
|
||||
linkage: ?CompileStep.Linkage = null,
|
||||
};
|
||||
|
||||
/// Creates a step to build an executable from the translated source.
|
||||
pub fn addExecutable(self: *TranslateCStep) *LibExeObjStep {
|
||||
return self.builder.addExecutableSource("translated_c", build.FileSource{ .generated = &self.output_file });
|
||||
pub fn addExecutable(self: *TranslateCStep, options: AddExecutableOptions) *CompileStep {
|
||||
return self.builder.addExecutable(.{
|
||||
.root_source_file = .{ .generated = &self.output_file },
|
||||
.name = options.name orelse "translated_c",
|
||||
.version = options.version,
|
||||
.target = options.target orelse self.target,
|
||||
.optimize = options.optimize orelse self.optimize,
|
||||
.linkage = options.linkage,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn addIncludeDir(self: *TranslateCStep, include_dir: []const u8) void {
|
||||
self.include_dirs.append(self.builder.dupePath(include_dir)) catch unreachable;
|
||||
self.include_dirs.append(self.builder.dupePath(include_dir)) catch @panic("OOM");
|
||||
}
|
||||
|
||||
pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8) *CheckFileStep {
|
||||
@ -58,13 +75,13 @@ pub fn addCheckFile(self: *TranslateCStep, expected_matches: []const []const u8)
|
||||
/// If the value is omitted, it is set to 1.
|
||||
/// `name` and `value` need not live longer than the function call.
|
||||
pub fn defineCMacro(self: *TranslateCStep, name: []const u8, value: ?[]const u8) void {
|
||||
const macro = build.constructCMacro(self.builder.allocator, name, value);
|
||||
self.c_macros.append(macro) catch unreachable;
|
||||
const macro = std.Build.constructCMacro(self.builder.allocator, name, value);
|
||||
self.c_macros.append(macro) catch @panic("OOM");
|
||||
}
|
||||
|
||||
/// name_and_value looks like [name]=[value]. If the value is omitted, it is set to 1.
|
||||
pub fn defineCMacroRaw(self: *TranslateCStep, name_and_value: []const u8) void {
|
||||
self.c_macros.append(self.builder.dupe(name_and_value)) catch unreachable;
|
||||
self.c_macros.append(self.builder.dupe(name_and_value)) catch @panic("OOM");
|
||||
}
|
||||
|
||||
fn make(step: *Step) !void {
|
||||
@ -82,6 +99,11 @@ fn make(step: *Step) !void {
|
||||
try argv_list.append(try self.target.zigTriple(self.builder.allocator));
|
||||
}
|
||||
|
||||
switch (self.optimize) {
|
||||
.Debug => {}, // Skip since it's the default.
|
||||
else => try argv_list.append(self.builder.fmt("-O{s}", .{@tagName(self.optimize)})),
|
||||
}
|
||||
|
||||
for (self.include_dirs.items) |include_dir| {
|
||||
try argv_list.append("-I");
|
||||
try argv_list.append(include_dir);
|
||||
@ -98,15 +120,10 @@ fn make(step: *Step) !void {
|
||||
const output_path = mem.trimRight(u8, output_path_nl, "\r\n");
|
||||
|
||||
self.out_basename = fs.path.basename(output_path);
|
||||
if (self.output_dir) |output_dir| {
|
||||
const full_dest = try fs.path.join(self.builder.allocator, &[_][]const u8{ output_dir, self.out_basename });
|
||||
try self.builder.updateFile(output_path, full_dest);
|
||||
} else {
|
||||
self.output_dir = fs.path.dirname(output_path).?;
|
||||
}
|
||||
const output_dir = fs.path.dirname(output_path).?;
|
||||
|
||||
self.output_file.path = fs.path.join(
|
||||
self.output_file.path = try fs.path.join(
|
||||
self.builder.allocator,
|
||||
&[_][]const u8{ self.output_dir.?, self.out_basename },
|
||||
) catch unreachable;
|
||||
&[_][]const u8{ output_dir, self.out_basename },
|
||||
);
|
||||
}
|
||||
@ -1,7 +1,5 @@
|
||||
const std = @import("../std.zig");
|
||||
const build = @import("../build.zig");
|
||||
const Step = build.Step;
|
||||
const Builder = build.Builder;
|
||||
const Step = std.Build.Step;
|
||||
const fs = std.fs;
|
||||
const ArrayList = std.ArrayList;
|
||||
|
||||
@ -10,30 +8,28 @@ const WriteFileStep = @This();
|
||||
pub const base_id = .write_file;
|
||||
|
||||
step: Step,
|
||||
builder: *Builder,
|
||||
output_dir: []const u8,
|
||||
builder: *std.Build,
|
||||
files: std.TailQueue(File),
|
||||
|
||||
pub const File = struct {
|
||||
source: build.GeneratedFile,
|
||||
source: std.Build.GeneratedFile,
|
||||
basename: []const u8,
|
||||
bytes: []const u8,
|
||||
};
|
||||
|
||||
pub fn init(builder: *Builder) WriteFileStep {
|
||||
pub fn init(builder: *std.Build) WriteFileStep {
|
||||
return WriteFileStep{
|
||||
.builder = builder,
|
||||
.step = Step.init(.write_file, "writefile", builder.allocator, make),
|
||||
.files = .{},
|
||||
.output_dir = undefined,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void {
|
||||
const node = self.builder.allocator.create(std.TailQueue(File).Node) catch unreachable;
|
||||
const node = self.builder.allocator.create(std.TailQueue(File).Node) catch @panic("unhandled error");
|
||||
node.* = .{
|
||||
.data = .{
|
||||
.source = build.GeneratedFile{ .step = &self.step },
|
||||
.source = std.Build.GeneratedFile{ .step = &self.step },
|
||||
.basename = self.builder.dupePath(basename),
|
||||
.bytes = self.builder.dupe(bytes),
|
||||
},
|
||||
@ -43,11 +39,11 @@ pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void {
|
||||
}
|
||||
|
||||
/// Gets a file source for the given basename. If the file does not exist, returns `null`.
|
||||
pub fn getFileSource(step: *WriteFileStep, basename: []const u8) ?build.FileSource {
|
||||
pub fn getFileSource(step: *WriteFileStep, basename: []const u8) ?std.Build.FileSource {
|
||||
var it = step.files.first;
|
||||
while (it) |node| : (it = node.next) {
|
||||
if (std.mem.eql(u8, node.data.basename, basename))
|
||||
return build.FileSource{ .generated = &node.data.source };
|
||||
return std.Build.FileSource{ .generated = &node.data.source };
|
||||
}
|
||||
return null;
|
||||
}
|
||||
@ -89,11 +85,11 @@ fn make(step: *Step) !void {
|
||||
.{std.fmt.fmtSliceHexLower(&digest)},
|
||||
) catch unreachable;
|
||||
|
||||
self.output_dir = try fs.path.join(self.builder.allocator, &[_][]const u8{
|
||||
const output_dir = try fs.path.join(self.builder.allocator, &[_][]const u8{
|
||||
self.builder.cache_root, "o", &hash_basename,
|
||||
});
|
||||
var dir = fs.cwd().makeOpenPath(self.output_dir, .{}) catch |err| {
|
||||
std.debug.print("unable to make path {s}: {s}\n", .{ self.output_dir, @errorName(err) });
|
||||
var dir = fs.cwd().makeOpenPath(output_dir, .{}) catch |err| {
|
||||
std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) });
|
||||
return err;
|
||||
};
|
||||
defer dir.close();
|
||||
@ -103,15 +99,15 @@ fn make(step: *Step) !void {
|
||||
dir.writeFile(node.data.basename, node.data.bytes) catch |err| {
|
||||
std.debug.print("unable to write {s} into {s}: {s}\n", .{
|
||||
node.data.basename,
|
||||
self.output_dir,
|
||||
output_dir,
|
||||
@errorName(err),
|
||||
});
|
||||
return err;
|
||||
};
|
||||
node.data.source.path = fs.path.join(
|
||||
node.data.source.path = try fs.path.join(
|
||||
self.builder.allocator,
|
||||
&[_][]const u8{ self.output_dir, node.data.basename },
|
||||
) catch unreachable;
|
||||
&[_][]const u8{ output_dir, node.data.basename },
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1145,7 +1145,8 @@ pub fn ArrayHashMapUnmanaged(
|
||||
}
|
||||
|
||||
/// Create a copy of the hash map which can be modified separately.
|
||||
/// The copy uses the same context and allocator as this instance.
|
||||
/// The copy uses the same context as this instance, but is allocated
|
||||
/// with the provided allocator.
|
||||
pub fn clone(self: Self, allocator: Allocator) !Self {
|
||||
if (@sizeOf(ByIndexContext) != 0)
|
||||
@compileError("Cannot infer context " ++ @typeName(Context) ++ ", call cloneContext instead.");
|
||||
|
||||
@ -482,14 +482,14 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
|
||||
|
||||
/// Return the last element from the list.
|
||||
/// Asserts the list has at least one item.
|
||||
pub fn getLast(self: *Self) T {
|
||||
pub fn getLast(self: Self) T {
|
||||
const val = self.items[self.items.len - 1];
|
||||
return val;
|
||||
}
|
||||
|
||||
/// Return the last element from the list, or
|
||||
/// return `null` if list is empty.
|
||||
pub fn getLastOrNull(self: *Self) ?T {
|
||||
pub fn getLastOrNull(self: Self) ?T {
|
||||
if (self.items.len == 0) return null;
|
||||
return self.getLast();
|
||||
}
|
||||
@ -961,14 +961,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
|
||||
|
||||
/// Return the last element from the list.
|
||||
/// Asserts the list has at least one item.
|
||||
pub fn getLast(self: *Self) T {
|
||||
pub fn getLast(self: Self) T {
|
||||
const val = self.items[self.items.len - 1];
|
||||
return val;
|
||||
}
|
||||
|
||||
/// Return the last element from the list, or
|
||||
/// return `null` if list is empty.
|
||||
pub fn getLastOrNull(self: *Self) ?T {
|
||||
pub fn getLastOrNull(self: Self) ?T {
|
||||
if (self.items.len == 0) return null;
|
||||
return self.getLast();
|
||||
}
|
||||
@ -1719,3 +1719,27 @@ test "std.ArrayList(?u32).popOrNull()" {
try testing.expect(list.popOrNull().? == null);
try testing.expect(list.popOrNull() == null);
}

test "std.ArrayList(u32).getLast()" {
const a = testing.allocator;

var list = ArrayList(u32).init(a);
defer list.deinit();

try list.append(2);
const const_list = list;
try testing.expectEqual(const_list.getLast(), 2);
}

test "std.ArrayList(u32).getLastOrNull()" {
const a = testing.allocator;

var list = ArrayList(u32).init(a);
defer list.deinit();

try testing.expectEqual(list.getLastOrNull(), null);

try list.append(2);
const const_list = list;
try testing.expectEqual(const_list.getLastOrNull().?, 2);
}

lib/std/build.zig (1781) — file diff suppressed because it is too large
@ -131,13 +131,16 @@ pub const CodeModel = enum {

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Mode = enum {
pub const OptimizeMode = enum {
Debug,
ReleaseSafe,
ReleaseFast,
ReleaseSmall,
};

/// Deprecated; use OptimizeMode.
pub const Mode = OptimizeMode;

/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallingConvention = enum {
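A small sketch of what the deprecated alias above implies: code that still spells out std.builtin.Mode keeps compiling because both names refer to the same enum type.

const std = @import("std");

comptime {
    // `Mode` is now just another name for `OptimizeMode`.
    std.debug.assert(std.builtin.Mode == std.builtin.OptimizeMode);
}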
@ -1164,7 +1164,7 @@ fn windowsCreateProcessPathExt(
|
||||
var app_name_unicode_string = windows.UNICODE_STRING{
|
||||
.Length = app_name_len_bytes,
|
||||
.MaximumLength = app_name_len_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(app_name_wildcard.ptr)),
|
||||
.Buffer = @qualCast([*:0]u16, app_name_wildcard.ptr),
|
||||
};
|
||||
const rc = windows.ntdll.NtQueryDirectoryFile(
|
||||
dir.fd,
|
||||
@ -1261,7 +1261,7 @@ fn windowsCreateProcessPathExt(
|
||||
var app_name_unicode_string = windows.UNICODE_STRING{
|
||||
.Length = app_name_len_bytes,
|
||||
.MaximumLength = app_name_len_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(app_name_appended.ptr)),
|
||||
.Buffer = @qualCast([*:0]u16, app_name_appended.ptr),
|
||||
};
|
||||
|
||||
// Re-use the directory handle but this time we call with the appended app name
|
||||
|
||||
@ -1,11 +1,12 @@
|
||||
const std = @import("std.zig");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
const io = std.io;
|
||||
const math = std.math;
|
||||
const assert = std.debug.assert;
|
||||
const mem = std.mem;
|
||||
const unicode = std.unicode;
|
||||
const meta = std.meta;
|
||||
const builtin = @import("builtin");
|
||||
const errol = @import("fmt/errol.zig");
|
||||
const lossyCast = std.math.lossyCast;
|
||||
const expectFmt = std.testing.expectFmt;
|
||||
@ -190,7 +191,7 @@ pub fn format(
|
||||
.precision = precision,
|
||||
},
|
||||
writer,
|
||||
default_max_depth,
|
||||
std.options.fmt_max_depth,
|
||||
);
|
||||
}
|
||||
|
||||
@ -2140,15 +2141,15 @@ test "buffer" {
|
||||
{
|
||||
var buf1: [32]u8 = undefined;
|
||||
var fbs = std.io.fixedBufferStream(&buf1);
|
||||
try formatType(1234, "", FormatOptions{}, fbs.writer(), default_max_depth);
|
||||
try formatType(1234, "", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
|
||||
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1234"));
|
||||
|
||||
fbs.reset();
|
||||
try formatType('a', "c", FormatOptions{}, fbs.writer(), default_max_depth);
|
||||
try formatType('a', "c", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
|
||||
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "a"));
|
||||
|
||||
fbs.reset();
|
||||
try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), default_max_depth);
|
||||
try formatType(0b1100, "b", FormatOptions{}, fbs.writer(), std.options.fmt_max_depth);
|
||||
try std.testing.expect(mem.eql(u8, fbs.getWritten(), "1100"));
|
||||
}
|
||||
}
|
||||
|
||||
@ -1763,7 +1763,7 @@ pub const Dir = struct {
|
||||
var nt_name = w.UNICODE_STRING{
|
||||
.Length = path_len_bytes,
|
||||
.MaximumLength = path_len_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
|
||||
.Buffer = @qualCast([*:0]u16, sub_path_w),
|
||||
};
|
||||
var attr = w.OBJECT_ATTRIBUTES{
|
||||
.Length = @sizeOf(w.OBJECT_ATTRIBUTES),
|
||||
|
||||
@ -179,7 +179,7 @@ pub const File = struct {
|
||||
lock_nonblocking: bool = false,
|
||||
|
||||
/// For POSIX systems this is the file system mode the file will
|
||||
/// be created with.
|
||||
/// be created with. On other systems this is always 0.
|
||||
mode: Mode = default_mode,
|
||||
|
||||
/// Setting this to `.blocking` prevents `O.NONBLOCK` from being passed even
|
||||
@ -307,6 +307,7 @@ pub const File = struct {
|
||||
/// is unique to each filesystem.
|
||||
inode: INode,
|
||||
size: u64,
|
||||
/// This is available on POSIX systems and is always 0 otherwise.
|
||||
mode: Mode,
|
||||
kind: Kind,
|
||||
|
||||
|
||||
@ -13,7 +13,6 @@ pub const Log2Limb = std.math.Log2Int(Limb);
|
||||
|
||||
comptime {
|
||||
assert(std.math.floorPowerOfTwo(usize, limb_info.bits) == limb_info.bits);
|
||||
assert(limb_info.bits <= 64); // u128 set is unsupported
|
||||
assert(limb_info.signedness == .unsigned);
|
||||
}
|
||||
|
||||
|
||||
@ -30,7 +30,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
}

const w_value = std.math.absCast(scalar);
return @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1;
return @intCast(usize, @divFloor(@intCast(Limb, math.log2(w_value)), limb_bits) + 1);
}

pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize {
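A worked instance of the corrected limb-count formula above, assuming 64-bit limbs; the scalar is hypothetical.

const std = @import("std");

test "calcLimbLen arithmetic, assuming 64-bit limbs" {
    // A scalar with floor(log2) == 100 (e.g. 1 << 100) needs
    // @divFloor(100, 64) + 1 == 2 limbs.
    const limb_bits: usize = 64;
    try std.testing.expectEqual(@as(usize, 2), @divFloor(@as(usize, 100), limb_bits) + 1);
}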
@ -238,10 +238,7 @@ pub const Mutable = struct {
|
||||
var i: usize = 0;
|
||||
while (true) : (i += 1) {
|
||||
self.limbs[i] = @truncate(Limb, w_value);
|
||||
|
||||
// TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
|
||||
w_value >>= limb_bits / 2;
|
||||
w_value >>= limb_bits / 2;
|
||||
w_value >>= limb_bits;
|
||||
|
||||
if (w_value == 0) break;
|
||||
}
|
||||
@ -258,9 +255,7 @@ pub const Mutable = struct {
|
||||
comptime var i = 0;
|
||||
inline while (true) : (i += 1) {
|
||||
self.limbs[i] = w_value & mask;
|
||||
|
||||
w_value >>= limb_bits / 2;
|
||||
w_value >>= limb_bits / 2;
|
||||
w_value >>= limb_bits;
|
||||
|
||||
if (w_value == 0) break;
|
||||
}
|
||||
|
||||
@ -332,7 +332,7 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
@compileError("Unable to derive a sentinel pointer type from " ++ @typeName(T));
}

const assumeSentinel = @compileError("This function has been removed, consider using std.mem.sliceTo() or if needed a @ptrCast()");
pub const assumeSentinel = @compileError("This function has been removed, consider using std.mem.sliceTo() or if needed a @ptrCast()");

pub fn containerLayout(comptime T: type) Type.ContainerLayout {
return switch (@typeInfo(T)) {
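The compile error above points callers at std.mem.sliceTo; a hedged migration sketch (the buffer here is hypothetical):

const std = @import("std");

test "std.mem.sliceTo in place of the removed assumeSentinel" {
    // sliceTo recovers the portion before the first 0 byte without any
    // unchecked sentinel assumption.
    const buf = "hello\x00world";
    try std.testing.expectEqualStrings("hello", std.mem.sliceTo(buf, 0));
}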
@ -4513,7 +4513,7 @@ pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32
|
||||
var nt_name = windows.UNICODE_STRING{
|
||||
.Length = path_len_bytes,
|
||||
.MaximumLength = path_len_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
|
||||
.Buffer = @qualCast([*:0]u16, sub_path_w),
|
||||
};
|
||||
var attr = windows.OBJECT_ATTRIBUTES{
|
||||
.Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
|
||||
|
||||
@ -40,6 +40,7 @@ const arch_bits = switch (native_arch) {
|
||||
.riscv64 => @import("linux/riscv64.zig"),
|
||||
.sparc64 => @import("linux/sparc64.zig"),
|
||||
.mips, .mipsel => @import("linux/mips.zig"),
|
||||
.mips64, .mips64el => @import("linux/mips64.zig"),
|
||||
.powerpc => @import("linux/powerpc.zig"),
|
||||
.powerpc64, .powerpc64le => @import("linux/powerpc64.zig"),
|
||||
else => struct {},
|
||||
@ -101,6 +102,7 @@ pub const SYS = switch (@import("builtin").cpu.arch) {
|
||||
.riscv64 => syscalls.RiscV64,
|
||||
.sparc64 => syscalls.Sparc64,
|
||||
.mips, .mipsel => syscalls.Mips,
|
||||
.mips64, .mips64el => syscalls.Mips64,
|
||||
.powerpc => syscalls.PowerPC,
|
||||
.powerpc64, .powerpc64le => syscalls.PowerPC64,
|
||||
else => @compileError("The Zig Standard Library is missing syscall definitions for the target CPU architecture"),
|
||||
|
||||
lib/std/os/linux/mips64.zig — new file (413 lines)
@ -0,0 +1,413 @@
|
||||
const std = @import("../../std.zig");
|
||||
const maxInt = std.math.maxInt;
|
||||
const linux = std.os.linux;
|
||||
const SYS = linux.SYS;
|
||||
const socklen_t = linux.socklen_t;
|
||||
const iovec = std.os.iovec;
|
||||
const iovec_const = std.os.iovec_const;
|
||||
const uid_t = linux.uid_t;
|
||||
const gid_t = linux.gid_t;
|
||||
const pid_t = linux.pid_t;
|
||||
const sockaddr = linux.sockaddr;
|
||||
const timespec = linux.timespec;
|
||||
|
||||
pub fn syscall0(number: SYS) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
: "$1", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn syscall_pipe(fd: *[2]i32) usize {
|
||||
return asm volatile (
|
||||
\\ .set noat
|
||||
\\ .set noreorder
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ nop
|
||||
\\ b 2f
|
||||
\\ subu $2, $0, $2
|
||||
\\ 1:
|
||||
\\ sw $2, 0($4)
|
||||
\\ sw $3, 4($4)
|
||||
\\ 2:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(SYS.pipe)),
|
||||
[fd] "{$4}" (fd),
|
||||
: "$1", "$3", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn syscall1(number: SYS, arg1: usize) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
[arg1] "{$4}" (arg1),
|
||||
: "$1", "$3", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn syscall2(number: SYS, arg1: usize, arg2: usize) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
[arg1] "{$4}" (arg1),
|
||||
[arg2] "{$5}" (arg2),
|
||||
: "$1", "$3", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn syscall3(number: SYS, arg1: usize, arg2: usize, arg3: usize) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
[arg1] "{$4}" (arg1),
|
||||
[arg2] "{$5}" (arg2),
|
||||
[arg3] "{$6}" (arg3),
|
||||
: "$1", "$3", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn syscall4(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
[arg1] "{$4}" (arg1),
|
||||
[arg2] "{$5}" (arg2),
|
||||
[arg3] "{$6}" (arg3),
|
||||
[arg4] "{$7}" (arg4),
|
||||
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn syscall5(number: SYS, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
[arg1] "{$4}" (arg1),
|
||||
[arg2] "{$5}" (arg2),
|
||||
[arg3] "{$6}" (arg3),
|
||||
[arg4] "{$7}" (arg4),
|
||||
[arg5] "{$8}" (arg5),
|
||||
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
// NOTE: The o32 calling convention requires the callee to reserve 16 bytes for
|
||||
// the first four arguments even though they're passed in $a0-$a3.
|
||||
|
||||
pub fn syscall6(
|
||||
number: SYS,
|
||||
arg1: usize,
|
||||
arg2: usize,
|
||||
arg3: usize,
|
||||
arg4: usize,
|
||||
arg5: usize,
|
||||
arg6: usize,
|
||||
) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
[arg1] "{$4}" (arg1),
|
||||
[arg2] "{$5}" (arg2),
|
||||
[arg3] "{$6}" (arg3),
|
||||
[arg4] "{$7}" (arg4),
|
||||
[arg5] "{$8}" (arg5),
|
||||
[arg6] "{$9}" (arg6),
|
||||
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn syscall7(
|
||||
number: SYS,
|
||||
arg1: usize,
|
||||
arg2: usize,
|
||||
arg3: usize,
|
||||
arg4: usize,
|
||||
arg5: usize,
|
||||
arg6: usize,
|
||||
arg7: usize,
|
||||
) usize {
|
||||
return asm volatile (
|
||||
\\ syscall
|
||||
\\ blez $7, 1f
|
||||
\\ dsubu $2, $0, $2
|
||||
\\ 1:
|
||||
: [ret] "={$2}" (-> usize),
|
||||
: [number] "{$2}" (@enumToInt(number)),
|
||||
[arg1] "{$4}" (arg1),
|
||||
[arg2] "{$5}" (arg2),
|
||||
[arg3] "{$6}" (arg3),
|
||||
[arg4] "{$7}" (arg4),
|
||||
[arg5] "{$8}" (arg5),
|
||||
[arg6] "{$9}" (arg6),
|
||||
[arg7] "{$10}" (arg7),
|
||||
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
const CloneFn = *const fn (arg: usize) callconv(.C) u8;
|
||||
|
||||
/// This matches the libc clone function.
|
||||
pub extern fn clone(func: CloneFn, stack: usize, flags: u32, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
|
||||
|
||||
pub fn restore() callconv(.Naked) void {
|
||||
return asm volatile ("syscall"
|
||||
:
|
||||
: [number] "{$2}" (@enumToInt(SYS.rt_sigreturn)),
|
||||
: "$1", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn restore_rt() callconv(.Naked) void {
|
||||
return asm volatile ("syscall"
|
||||
:
|
||||
: [number] "{$2}" (@enumToInt(SYS.rt_sigreturn)),
|
||||
: "$1", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
|
||||
);
|
||||
}
|
||||
|
||||
pub const O = struct {
|
||||
pub const CREAT = 0o0400;
|
||||
pub const EXCL = 0o02000;
|
||||
pub const NOCTTY = 0o04000;
|
||||
pub const TRUNC = 0o01000;
|
||||
pub const APPEND = 0o0010;
|
||||
pub const NONBLOCK = 0o0200;
|
||||
pub const DSYNC = 0o0020;
|
||||
pub const SYNC = 0o040020;
|
||||
pub const RSYNC = 0o040020;
|
||||
pub const DIRECTORY = 0o0200000;
|
||||
pub const NOFOLLOW = 0o0400000;
|
||||
pub const CLOEXEC = 0o02000000;
|
||||
|
||||
pub const ASYNC = 0o010000;
|
||||
pub const DIRECT = 0o0100000;
|
||||
pub const LARGEFILE = 0o020000;
|
||||
pub const NOATIME = 0o01000000;
|
||||
pub const PATH = 0o010000000;
|
||||
pub const TMPFILE = 0o020200000;
|
||||
pub const NDELAY = NONBLOCK;
|
||||
};
|
||||
|
||||
pub const F = struct {
|
||||
pub const DUPFD = 0;
|
||||
pub const GETFD = 1;
|
||||
pub const SETFD = 2;
|
||||
pub const GETFL = 3;
|
||||
pub const SETFL = 4;
|
||||
|
||||
pub const SETOWN = 24;
|
||||
pub const GETOWN = 23;
|
||||
pub const SETSIG = 10;
|
||||
pub const GETSIG = 11;
|
||||
|
||||
pub const GETLK = 33;
|
||||
pub const SETLK = 34;
|
||||
pub const SETLKW = 35;
|
||||
|
||||
pub const RDLCK = 0;
|
||||
pub const WRLCK = 1;
|
||||
pub const UNLCK = 2;
|
||||
|
||||
pub const SETOWN_EX = 15;
|
||||
pub const GETOWN_EX = 16;
|
||||
|
||||
pub const GETOWNER_UIDS = 17;
|
||||
};
|
||||
|
||||
pub const LOCK = struct {
|
||||
pub const SH = 1;
|
||||
pub const EX = 2;
|
||||
pub const UN = 8;
|
||||
pub const NB = 4;
|
||||
};
|
||||
|
||||
pub const MMAP2_UNIT = 4096;
|
||||
|
||||
pub const MAP = struct {
|
||||
pub const NORESERVE = 0x0400;
|
||||
pub const GROWSDOWN = 0x1000;
|
||||
pub const DENYWRITE = 0x2000;
|
||||
pub const EXECUTABLE = 0x4000;
|
||||
pub const LOCKED = 0x8000;
|
||||
pub const @"32BIT" = 0x40;
|
||||
};
|
||||
|
||||
pub const VDSO = struct {
|
||||
pub const CGT_SYM = "__kernel_clock_gettime";
|
||||
pub const CGT_VER = "LINUX_2.6.39";
|
||||
};
|
||||
|
||||
pub const Flock = extern struct {
|
||||
type: i16,
|
||||
whence: i16,
|
||||
__pad0: [4]u8,
|
||||
start: off_t,
|
||||
len: off_t,
|
||||
pid: pid_t,
|
||||
__unused: [4]u8,
|
||||
};
|
||||
|
||||
pub const msghdr = extern struct {
|
||||
name: ?*sockaddr,
|
||||
namelen: socklen_t,
|
||||
iov: [*]iovec,
|
||||
iovlen: i32,
|
||||
control: ?*anyopaque,
|
||||
controllen: socklen_t,
|
||||
flags: i32,
|
||||
};
|
||||
|
||||
pub const msghdr_const = extern struct {
|
||||
name: ?*const sockaddr,
|
||||
namelen: socklen_t,
|
||||
iov: [*]const iovec_const,
|
||||
iovlen: i32,
|
||||
control: ?*const anyopaque,
|
||||
controllen: socklen_t,
|
||||
flags: i32,
|
||||
};
|
||||
|
||||
pub const blksize_t = i32;
|
||||
pub const nlink_t = u32;
|
||||
pub const time_t = i32;
|
||||
pub const mode_t = u32;
|
||||
pub const off_t = i64;
|
||||
pub const ino_t = u64;
|
||||
pub const dev_t = u64;
|
||||
pub const blkcnt_t = i64;
|
||||
|
||||
// The `stat` definition used by the Linux kernel.
|
||||
pub const Stat = extern struct {
|
||||
dev: u32,
|
||||
__pad0: [3]u32, // Reserved for st_dev expansion
|
||||
ino: ino_t,
|
||||
mode: mode_t,
|
||||
nlink: nlink_t,
|
||||
uid: uid_t,
|
||||
gid: gid_t,
|
||||
rdev: u32,
|
||||
__pad1: [3]u32,
|
||||
size: off_t,
|
||||
atim: timespec,
|
||||
mtim: timespec,
|
||||
ctim: timespec,
|
||||
blksize: blksize_t,
|
||||
__pad3: u32,
|
||||
blocks: blkcnt_t,
|
||||
__pad4: [14]usize,
|
||||
|
||||
pub fn atime(self: @This()) timespec {
|
||||
return self.atim;
|
||||
}
|
||||
|
||||
pub fn mtime(self: @This()) timespec {
|
||||
return self.mtim;
|
||||
}
|
||||
|
||||
pub fn ctime(self: @This()) timespec {
|
||||
return self.ctim;
|
||||
}
|
||||
};
|
||||
|
||||
pub const timeval = extern struct {
|
||||
tv_sec: isize,
|
||||
tv_usec: isize,
|
||||
};
|
||||
|
||||
pub const timezone = extern struct {
|
||||
tz_minuteswest: i32,
|
||||
tz_dsttime: i32,
|
||||
};
|
||||
|
||||
pub const Elf_Symndx = u32;
|
||||
|
||||
pub const rlimit_resource = enum(c_int) {
|
||||
/// Per-process CPU limit, in seconds.
|
||||
CPU,
|
||||
|
||||
/// Largest file that can be created, in bytes.
|
||||
FSIZE,
|
||||
|
||||
/// Maximum size of data segment, in bytes.
|
||||
DATA,
|
||||
|
||||
/// Maximum size of stack segment, in bytes.
|
||||
STACK,
|
||||
|
||||
/// Largest core file that can be created, in bytes.
|
||||
CORE,
|
||||
|
||||
/// Number of open files.
|
||||
NOFILE,
|
||||
|
||||
/// Address space limit.
|
||||
AS,
|
||||
|
||||
/// Largest resident set size, in bytes.
|
||||
/// This affects swapping; processes that are exceeding their
|
||||
/// resident set size will be more likely to have physical memory
|
||||
/// taken from them.
|
||||
RSS,
|
||||
|
||||
/// Number of processes.
|
||||
NPROC,
|
||||
|
||||
/// Locked-in-memory address space.
|
||||
MEMLOCK,
|
||||
|
||||
/// Maximum number of file locks.
|
||||
LOCKS,
|
||||
|
||||
/// Maximum number of pending signals.
|
||||
SIGPENDING,
|
||||
|
||||
/// Maximum bytes in POSIX message queues.
|
||||
MSGQUEUE,
|
||||
|
||||
/// Maximum nice priority allowed to raise to.
|
||||
/// Nice levels 19 .. -20 correspond to 0 .. 39
|
||||
/// values of this resource limit.
|
||||
NICE,
|
||||
|
||||
/// Maximum realtime priority allowed for non-privileged
|
||||
/// processes.
|
||||
RTPRIO,
|
||||
|
||||
/// Maximum CPU time in µs that a process scheduled under a real-time
|
||||
/// scheduling policy may consume without making a blocking system
|
||||
/// call before being forcibly descheduled.
|
||||
RTTIME,
|
||||
|
||||
_,
|
||||
};
|
||||
@ -2032,6 +2032,365 @@ pub const Mips = enum(usize) {
|
||||
set_mempolicy_home_node = Linux + 450,
|
||||
};
|
||||
|
||||
pub const Mips64 = enum(usize) {
|
||||
pub const Linux = 5000;
|
||||
|
||||
read = Linux + 0,
|
||||
write = Linux + 1,
|
||||
open = Linux + 2,
|
||||
close = Linux + 3,
|
||||
stat = Linux + 4,
|
||||
fstat = Linux + 5,
|
||||
lstat = Linux + 6,
|
||||
poll = Linux + 7,
|
||||
lseek = Linux + 8,
|
||||
mmap = Linux + 9,
|
||||
mprotect = Linux + 10,
|
||||
munmap = Linux + 11,
|
||||
brk = Linux + 12,
|
||||
rt_sigaction = Linux + 13,
|
||||
rt_sigprocmask = Linux + 14,
|
||||
ioctl = Linux + 15,
|
||||
pread64 = Linux + 16,
|
||||
pwrite64 = Linux + 17,
|
||||
readv = Linux + 18,
|
||||
writev = Linux + 19,
|
||||
access = Linux + 20,
|
||||
pipe = Linux + 21,
|
||||
_newselect = Linux + 22,
|
||||
sched_yield = Linux + 23,
|
||||
mremap = Linux + 24,
|
||||
msync = Linux + 25,
|
||||
mincore = Linux + 26,
|
||||
madvise = Linux + 27,
|
||||
shmget = Linux + 28,
|
||||
shmat = Linux + 29,
|
||||
shmctl = Linux + 30,
|
||||
dup = Linux + 31,
|
||||
dup2 = Linux + 32,
|
||||
pause = Linux + 33,
|
||||
nanosleep = Linux + 34,
|
||||
getitimer = Linux + 35,
|
||||
setitimer = Linux + 36,
|
||||
alarm = Linux + 37,
|
||||
getpid = Linux + 38,
|
||||
sendfile = Linux + 39,
|
||||
socket = Linux + 40,
|
||||
connect = Linux + 41,
|
||||
accept = Linux + 42,
|
||||
sendto = Linux + 43,
|
||||
recvfrom = Linux + 44,
|
||||
sendmsg = Linux + 45,
|
||||
recvmsg = Linux + 46,
|
||||
shutdown = Linux + 47,
|
||||
bind = Linux + 48,
|
||||
listen = Linux + 49,
|
||||
getsockname = Linux + 50,
|
||||
getpeername = Linux + 51,
|
||||
socketpair = Linux + 52,
|
||||
setsockopt = Linux + 53,
|
||||
getsockopt = Linux + 54,
|
||||
clone = Linux + 55,
|
||||
fork = Linux + 56,
|
||||
execve = Linux + 57,
|
||||
exit = Linux + 58,
|
||||
wait4 = Linux + 59,
|
||||
kill = Linux + 60,
|
||||
uname = Linux + 61,
|
||||
semget = Linux + 62,
|
||||
semop = Linux + 63,
|
||||
semctl = Linux + 64,
|
||||
shmdt = Linux + 65,
|
||||
msgget = Linux + 66,
|
||||
msgsnd = Linux + 67,
|
||||
msgrcv = Linux + 68,
|
||||
msgctl = Linux + 69,
|
||||
fcntl = Linux + 70,
|
||||
flock = Linux + 71,
|
||||
fsync = Linux + 72,
|
||||
fdatasync = Linux + 73,
|
||||
truncate = Linux + 74,
|
||||
ftruncate = Linux + 75,
|
||||
getdents = Linux + 76,
|
||||
getcwd = Linux + 77,
|
||||
chdir = Linux + 78,
|
||||
fchdir = Linux + 79,
|
||||
rename = Linux + 80,
|
||||
mkdir = Linux + 81,
|
||||
rmdir = Linux + 82,
|
||||
creat = Linux + 83,
|
||||
link = Linux + 84,
|
||||
unlink = Linux + 85,
|
||||
symlink = Linux + 86,
|
||||
readlink = Linux + 87,
|
||||
chmod = Linux + 88,
|
||||
fchmod = Linux + 89,
|
||||
chown = Linux + 90,
|
||||
fchown = Linux + 91,
|
||||
lchown = Linux + 92,
|
||||
umask = Linux + 93,
|
||||
gettimeofday = Linux + 94,
|
||||
getrlimit = Linux + 95,
|
||||
getrusage = Linux + 96,
|
||||
sysinfo = Linux + 97,
|
||||
times = Linux + 98,
|
||||
ptrace = Linux + 99,
|
||||
getuid = Linux + 100,
|
||||
syslog = Linux + 101,
|
||||
getgid = Linux + 102,
|
||||
setuid = Linux + 103,
|
||||
setgid = Linux + 104,
|
||||
geteuid = Linux + 105,
|
||||
getegid = Linux + 106,
|
||||
setpgid = Linux + 107,
|
||||
getppid = Linux + 108,
|
||||
getpgrp = Linux + 109,
|
||||
setsid = Linux + 110,
|
||||
setreuid = Linux + 111,
|
||||
setregid = Linux + 112,
|
||||
getgroups = Linux + 113,
|
||||
setgroups = Linux + 114,
|
||||
setresuid = Linux + 115,
|
||||
getresuid = Linux + 116,
|
||||
setresgid = Linux + 117,
|
||||
getresgid = Linux + 118,
|
||||
getpgid = Linux + 119,
|
||||
setfsuid = Linux + 120,
|
||||
setfsgid = Linux + 121,
|
||||
getsid = Linux + 122,
|
||||
capget = Linux + 123,
|
||||
capset = Linux + 124,
|
||||
rt_sigpending = Linux + 125,
|
||||
rt_sigtimedwait = Linux + 126,
|
||||
rt_sigqueueinfo = Linux + 127,
|
||||
rt_sigsuspend = Linux + 128,
|
||||
sigaltstack = Linux + 129,
|
||||
utime = Linux + 130,
|
||||
mknod = Linux + 131,
|
||||
personality = Linux + 132,
|
||||
ustat = Linux + 133,
|
||||
statfs = Linux + 134,
|
||||
fstatfs = Linux + 135,
|
||||
sysfs = Linux + 136,
|
||||
getpriority = Linux + 137,
|
||||
setpriority = Linux + 138,
|
||||
sched_setparam = Linux + 139,
|
||||
sched_getparam = Linux + 140,
|
||||
sched_setscheduler = Linux + 141,
|
||||
sched_getscheduler = Linux + 142,
|
||||
sched_get_priority_max = Linux + 143,
|
||||
sched_get_priority_min = Linux + 144,
|
||||
sched_rr_get_interval = Linux + 145,
|
||||
mlock = Linux + 146,
|
||||
munlock = Linux + 147,
|
||||
mlockall = Linux + 148,
|
||||
munlockall = Linux + 149,
|
||||
vhangup = Linux + 150,
|
||||
pivot_root = Linux + 151,
|
||||
_sysctl = Linux + 152,
|
||||
prctl = Linux + 153,
|
||||
adjtimex = Linux + 154,
|
||||
setrlimit = Linux + 155,
|
||||
chroot = Linux + 156,
|
||||
sync = Linux + 157,
|
||||
acct = Linux + 158,
|
||||
settimeofday = Linux + 159,
|
||||
mount = Linux + 160,
|
||||
umount2 = Linux + 161,
|
||||
swapon = Linux + 162,
|
||||
swapoff = Linux + 163,
|
||||
reboot = Linux + 164,
|
||||
sethostname = Linux + 165,
|
||||
setdomainname = Linux + 166,
|
||||
create_module = Linux + 167,
|
||||
init_module = Linux + 168,
|
||||
delete_module = Linux + 169,
|
||||
get_kernel_syms = Linux + 170,
|
||||
query_module = Linux + 171,
|
||||
quotactl = Linux + 172,
|
||||
nfsservctl = Linux + 173,
|
||||
getpmsg = Linux + 174,
|
||||
putpmsg = Linux + 175,
|
||||
afs_syscall = Linux + 176,
|
||||
reserved177 = Linux + 177,
|
||||
gettid = Linux + 178,
|
||||
readahead = Linux + 179,
|
||||
setxattr = Linux + 180,
|
||||
lsetxattr = Linux + 181,
|
||||
fsetxattr = Linux + 182,
|
||||
getxattr = Linux + 183,
|
||||
lgetxattr = Linux + 184,
|
||||
fgetxattr = Linux + 185,
|
||||
listxattr = Linux + 186,
|
||||
llistxattr = Linux + 187,
|
||||
flistxattr = Linux + 188,
|
||||
removexattr = Linux + 189,
|
||||
lremovexattr = Linux + 190,
|
||||
fremovexattr = Linux + 191,
|
||||
tkill = Linux + 192,
|
||||
reserved193 = Linux + 193,
|
||||
futex = Linux + 194,
|
||||
sched_setaffinity = Linux + 195,
|
||||
sched_getaffinity = Linux + 196,
|
||||
cacheflush = Linux + 197,
|
||||
cachectl = Linux + 198,
|
||||
sysmips = Linux + 199,
|
||||
io_setup = Linux + 200,
|
||||
io_destroy = Linux + 201,
|
||||
io_getevents = Linux + 202,
|
||||
io_submit = Linux + 203,
|
||||
io_cancel = Linux + 204,
|
||||
exit_group = Linux + 205,
|
||||
lookup_dcookie = Linux + 206,
|
||||
epoll_create = Linux + 207,
|
||||
epoll_ctl = Linux + 208,
|
||||
epoll_wait = Linux + 209,
|
||||
remap_file_pages = Linux + 210,
|
||||
rt_sigreturn = Linux + 211,
|
||||
set_tid_address = Linux + 212,
|
||||
restart_syscall = Linux + 213,
|
||||
semtimedop = Linux + 214,
|
||||
fadvise64 = Linux + 215,
|
||||
timer_create = Linux + 216,
|
||||
timer_settime = Linux + 217,
|
||||
timer_gettime = Linux + 218,
|
||||
timer_getoverrun = Linux + 219,
|
||||
timer_delete = Linux + 220,
|
||||
clock_settime = Linux + 221,
|
||||
clock_gettime = Linux + 222,
|
||||
clock_getres = Linux + 223,
|
||||
clock_nanosleep = Linux + 224,
|
||||
tgkill = Linux + 225,
|
||||
utimes = Linux + 226,
|
||||
mbind = Linux + 227,
|
||||
get_mempolicy = Linux + 228,
|
||||
set_mempolicy = Linux + 229,
|
||||
mq_open = Linux + 230,
|
||||
mq_unlink = Linux + 231,
|
||||
mq_timedsend = Linux + 232,
|
||||
mq_timedreceive = Linux + 233,
|
||||
mq_notify = Linux + 234,
|
||||
mq_getsetattr = Linux + 235,
|
||||
vserver = Linux + 236,
|
||||
waitid = Linux + 237,
|
||||
add_key = Linux + 239,
|
||||
request_key = Linux + 240,
|
||||
keyctl = Linux + 241,
|
||||
set_thread_area = Linux + 242,
|
||||
inotify_init = Linux + 243,
|
||||
inotify_add_watch = Linux + 244,
|
||||
inotify_rm_watch = Linux + 245,
|
||||
migrate_pages = Linux + 246,
|
||||
openat = Linux + 247,
|
||||
mkdirat = Linux + 248,
|
||||
mknodat = Linux + 249,
|
||||
fchownat = Linux + 250,
|
||||
futimesat = Linux + 251,
|
||||
fstatat64 = Linux + 252,
|
||||
unlinkat = Linux + 253,
|
||||
renameat = Linux + 254,
|
||||
linkat = Linux + 255,
|
||||
symlinkat = Linux + 256,
|
||||
readlinkat = Linux + 257,
|
||||
fchmodat = Linux + 258,
|
||||
faccessat = Linux + 259,
|
||||
pselect6 = Linux + 260,
|
||||
ppoll = Linux + 261,
|
||||
unshare = Linux + 262,
|
||||
splice = Linux + 263,
|
||||
sync_file_range = Linux + 264,
|
||||
tee = Linux + 265,
|
||||
vmsplice = Linux + 266,
|
||||
move_pages = Linux + 267,
|
||||
set_robust_list = Linux + 268,
|
||||
get_robust_list = Linux + 269,
|
||||
kexec_load = Linux + 270,
|
||||
getcpu = Linux + 271,
|
||||
epoll_pwait = Linux + 272,
|
||||
ioprio_set = Linux + 273,
|
||||
ioprio_get = Linux + 274,
|
||||
utimensat = Linux + 275,
|
||||
signalfd = Linux + 276,
|
||||
timerfd = Linux + 277,
|
||||
eventfd = Linux + 278,
|
||||
fallocate = Linux + 279,
|
||||
timerfd_create = Linux + 280,
|
||||
timerfd_gettime = Linux + 281,
|
||||
timerfd_settime = Linux + 282,
|
||||
signalfd4 = Linux + 283,
|
||||
eventfd2 = Linux + 284,
|
||||
epoll_create1 = Linux + 285,
|
||||
dup3 = Linux + 286,
|
||||
pipe2 = Linux + 287,
|
||||
inotify_init1 = Linux + 288,
|
||||
preadv = Linux + 289,
|
||||
pwritev = Linux + 290,
|
||||
rt_tgsigqueueinfo = Linux + 291,
|
||||
perf_event_open = Linux + 292,
|
||||
accept4 = Linux + 293,
|
||||
recvmmsg = Linux + 294,
|
||||
fanotify_init = Linux + 295,
|
||||
fanotify_mark = Linux + 296,
|
||||
prlimit64 = Linux + 297,
|
||||
name_to_handle_at = Linux + 298,
|
||||
open_by_handle_at = Linux + 299,
|
||||
clock_adjtime = Linux + 300,
|
||||
syncfs = Linux + 301,
|
||||
sendmmsg = Linux + 302,
|
||||
setns = Linux + 303,
|
||||
process_vm_readv = Linux + 304,
|
||||
process_vm_writev = Linux + 305,
|
||||
kcmp = Linux + 306,
|
||||
finit_module = Linux + 307,
|
||||
getdents64 = Linux + 308,
|
||||
sched_setattr = Linux + 309,
|
||||
sched_getattr = Linux + 310,
|
||||
renameat2 = Linux + 311,
|
||||
seccomp = Linux + 312,
|
||||
getrandom = Linux + 313,
|
||||
memfd_create = Linux + 314,
|
||||
bpf = Linux + 315,
|
||||
execveat = Linux + 316,
|
||||
userfaultfd = Linux + 317,
|
||||
membarrier = Linux + 318,
|
||||
mlock2 = Linux + 319,
|
||||
copy_file_range = Linux + 320,
|
||||
preadv2 = Linux + 321,
|
||||
pwritev2 = Linux + 322,
|
||||
pkey_mprotect = Linux + 323,
|
||||
pkey_alloc = Linux + 324,
|
||||
pkey_free = Linux + 325,
|
||||
statx = Linux + 326,
|
||||
rseq = Linux + 327,
|
||||
io_pgetevents = Linux + 328,
|
||||
pidfd_send_signal = Linux + 424,
|
||||
io_uring_setup = Linux + 425,
|
||||
io_uring_enter = Linux + 426,
|
||||
io_uring_register = Linux + 427,
|
||||
open_tree = Linux + 428,
|
||||
move_mount = Linux + 429,
|
||||
fsopen = Linux + 430,
|
||||
fsconfig = Linux + 431,
|
||||
fsmount = Linux + 432,
|
||||
fspick = Linux + 433,
|
||||
pidfd_open = Linux + 434,
|
||||
clone3 = Linux + 435,
|
||||
close_range = Linux + 436,
|
||||
openat2 = Linux + 437,
|
||||
pidfd_getfd = Linux + 438,
|
||||
faccessat2 = Linux + 439,
|
||||
process_madvise = Linux + 440,
|
||||
epoll_pwait2 = Linux + 441,
|
||||
mount_setattr = Linux + 442,
|
||||
quotactl_fd = Linux + 443,
|
||||
landlock_create_ruleset = Linux + 444,
|
||||
landlock_add_rule = Linux + 445,
|
||||
landlock_restrict_self = Linux + 446,
|
||||
process_mrelease = Linux + 448,
|
||||
futex_waitv = Linux + 449,
|
||||
set_mempolicy_home_node = Linux + 450,
|
||||
};
|
||||
|
||||
pub const PowerPC = enum(usize) {
|
||||
restart_syscall = 0,
|
||||
exit = 1,
|
||||
|
||||
@ -48,7 +48,7 @@ const TLSVariant = enum {
|
||||
};
|
||||
|
||||
const tls_variant = switch (native_arch) {
|
||||
.arm, .armeb, .thumb, .aarch64, .aarch64_be, .riscv32, .riscv64, .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => TLSVariant.VariantI,
|
||||
.arm, .armeb, .thumb, .aarch64, .aarch64_be, .riscv32, .riscv64, .mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => TLSVariant.VariantI,
|
||||
.x86_64, .x86, .sparc64 => TLSVariant.VariantII,
|
||||
else => @compileError("undefined tls_variant for this architecture"),
|
||||
};
|
||||
@ -64,7 +64,7 @@ const tls_tcb_size = switch (native_arch) {
|
||||
|
||||
// Controls if the TP points to the end of the TCB instead of its beginning
|
||||
const tls_tp_points_past_tcb = switch (native_arch) {
|
||||
.riscv32, .riscv64, .mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => true,
|
||||
.riscv32, .riscv64, .mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => true,
|
||||
else => false,
|
||||
};
|
||||
|
||||
@ -72,12 +72,12 @@ const tls_tp_points_past_tcb = switch (native_arch) {
|
||||
// make the generated code more efficient
|
||||
|
||||
const tls_tp_offset = switch (native_arch) {
|
||||
.mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => 0x7000,
|
||||
.mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => 0x7000,
|
||||
else => 0,
|
||||
};
|
||||
|
||||
const tls_dtv_offset = switch (native_arch) {
|
||||
.mips, .mipsel, .powerpc, .powerpc64, .powerpc64le => 0x8000,
|
||||
.mips, .mipsel, .mips64, .mips64el, .powerpc, .powerpc64, .powerpc64le => 0x8000,
|
||||
.riscv32, .riscv64 => 0x800,
|
||||
else => 0,
|
||||
};
|
||||
@ -156,7 +156,7 @@ pub fn setThreadPointer(addr: usize) void {
|
||||
: [addr] "r" (addr),
|
||||
);
|
||||
},
|
||||
.mips, .mipsel => {
|
||||
.mips, .mipsel, .mips64, .mips64el => {
|
||||
const rc = std.os.linux.syscall1(.set_thread_area, addr);
|
||||
assert(rc == 0);
|
||||
},
|
||||
|
||||
@ -85,7 +85,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
|
||||
var nt_name = UNICODE_STRING{
|
||||
.Length = path_len_bytes,
|
||||
.MaximumLength = path_len_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
|
||||
.Buffer = @qualCast([*]u16, sub_path_w.ptr),
|
||||
};
|
||||
var attr = OBJECT_ATTRIBUTES{
|
||||
.Length = @sizeOf(OBJECT_ATTRIBUTES),
|
||||
@ -634,7 +634,7 @@ pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void
|
||||
var nt_name = UNICODE_STRING{
|
||||
.Length = path_len_bytes,
|
||||
.MaximumLength = path_len_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(path_name.ptr)),
|
||||
.Buffer = @qualCast([*]u16, path_name.ptr),
|
||||
};
|
||||
|
||||
const rc = ntdll.RtlSetCurrentDirectory_U(&nt_name);
|
||||
@ -766,7 +766,7 @@ pub fn ReadLink(dir: ?HANDLE, sub_path_w: []const u16, out_buffer: []u8) ReadLin
|
||||
var nt_name = UNICODE_STRING{
|
||||
.Length = path_len_bytes,
|
||||
.MaximumLength = path_len_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
|
||||
.Buffer = @qualCast([*]u16, sub_path_w.ptr),
|
||||
};
|
||||
var attr = OBJECT_ATTRIBUTES{
|
||||
.Length = @sizeOf(OBJECT_ATTRIBUTES),
|
||||
@ -876,7 +876,7 @@ pub fn DeleteFile(sub_path_w: []const u16, options: DeleteFileOptions) DeleteFil
|
||||
.Length = path_len_bytes,
|
||||
.MaximumLength = path_len_bytes,
|
||||
// The Windows API makes this mutable, but it will not mutate here.
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w.ptr)),
|
||||
.Buffer = @qualCast([*]u16, sub_path_w.ptr),
|
||||
};
|
||||
|
||||
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
|
||||
@ -1414,7 +1414,7 @@ pub fn sendmsg(
|
||||
}
|
||||
|
||||
pub fn sendto(s: ws2_32.SOCKET, buf: [*]const u8, len: usize, flags: u32, to: ?*const ws2_32.sockaddr, to_len: ws2_32.socklen_t) i32 {
|
||||
var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @intToPtr([*]u8, @ptrToInt(buf)) };
|
||||
var buffer = ws2_32.WSABUF{ .len = @truncate(u31, len), .buf = @qualCast([*]u8, buf) };
|
||||
var bytes_send: DWORD = undefined;
|
||||
if (ws2_32.WSASendTo(s, @ptrCast([*]ws2_32.WSABUF, &buffer), 1, &bytes_send, flags, to, @intCast(i32, to_len), null, null) == ws2_32.SOCKET_ERROR) {
|
||||
return ws2_32.SOCKET_ERROR;
|
||||
@ -1876,13 +1876,13 @@ pub fn eqlIgnoreCaseWTF16(a: []const u16, b: []const u16) bool {
|
||||
const a_string = UNICODE_STRING{
|
||||
.Length = a_bytes,
|
||||
.MaximumLength = a_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(a.ptr)),
|
||||
.Buffer = @qualCast([*]u16, a.ptr),
|
||||
};
|
||||
const b_bytes = @intCast(u16, b.len * 2);
|
||||
const b_string = UNICODE_STRING{
|
||||
.Length = b_bytes,
|
||||
.MaximumLength = b_bytes,
|
||||
.Buffer = @intToPtr([*]u16, @ptrToInt(b.ptr)),
|
||||
.Buffer = @qualCast([*]u16, b.ptr),
|
||||
};
|
||||
return ntdll.RtlEqualUnicodeString(&a_string, &b_string, TRUE) == TRUE;
|
||||
}
|
||||
|
||||
@ -327,7 +327,7 @@ fn _start() callconv(.Naked) noreturn {
|
||||
: [argc] "={sp}" (-> [*]usize),
|
||||
);
|
||||
},
|
||||
.mips, .mipsel => {
|
||||
.mips, .mipsel, .mips64, .mips64el => {
|
||||
// The lr is already zeroed on entry, as specified by the ABI.
|
||||
argc_argv_ptr = asm volatile (
|
||||
\\ move $fp, $0
|
||||
|
||||
@ -9,6 +9,7 @@ pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
|
||||
pub const AutoHashMap = hash_map.AutoHashMap;
|
||||
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
|
||||
pub const BoundedArray = @import("bounded_array.zig").BoundedArray;
|
||||
pub const Build = @import("Build.zig");
|
||||
pub const BufMap = @import("buf_map.zig").BufMap;
|
||||
pub const BufSet = @import("buf_set.zig").BufSet;
|
||||
pub const ChildProcess = @import("child_process.zig").ChildProcess;
|
||||
@ -49,7 +50,6 @@ pub const array_hash_map = @import("array_hash_map.zig");
|
||||
pub const atomic = @import("atomic.zig");
|
||||
pub const base64 = @import("base64.zig");
|
||||
pub const bit_set = @import("bit_set.zig");
|
||||
pub const build = @import("build.zig");
|
||||
pub const builtin = @import("builtin.zig");
|
||||
pub const c = @import("c.zig");
|
||||
pub const coff = @import("coff.zig");
|
||||
@ -96,6 +96,9 @@ pub const wasm = @import("wasm.zig");
pub const zig = @import("zig.zig");
pub const start = @import("start.zig");

/// deprecated: use `Build`.
pub const build = Build;

const root = @import("root");
const options_override = if (@hasDecl(root, "std_options")) root.std_options else struct {};

@ -150,6 +153,11 @@ pub const options = struct {
else
log.defaultLog;

pub const fmt_max_depth = if (@hasDecl(options_override, "fmt_max_depth"))
options_override.fmt_max_depth
else
fmt.default_max_depth;

pub const cryptoRandomSeed: fn (buffer: []u8) void = if (@hasDecl(options_override, "cryptoRandomSeed"))
options_override.cryptoRandomSeed
else
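Given the options_override lookup above, a program opts in by declaring std_options in its root source file; a minimal sketch (the field value here is chosen arbitrarily):

const std = @import("std");

// Picked up through @hasDecl(root, "std_options"); omitted declarations fall
// back to the defaults such as fmt.default_max_depth.
pub const std_options = struct {
    pub const fmt_max_depth = 4;
};

pub fn main() void {
    std.debug.print("fmt_max_depth = {}\n", .{std.options.fmt_max_depth});
}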
@ -1,6 +1,18 @@
pub const Options = struct {
/// Number of directory levels to skip when extracting files.
strip_components: u32 = 0,
/// How to handle the "mode" property of files from within the tar file.
mode_mode: ModeMode = .executable_bit_only,

const ModeMode = enum {
/// The mode from the tar file is completely ignored. Files are created
/// with the default mode when creating files.
ignore,
/// The mode from the tar file is inspected for the owner executable bit
/// only. This bit is copied to the group and other executable bits.
/// Other bits of the mode are left as the default when creating files.
executable_bit_only,
};
};

pub const Header = struct {
@ -72,6 +84,17 @@ pub const Header = struct {
};

pub fn pipeToFileSystem(dir: std.fs.Dir, reader: anytype, options: Options) !void {
switch (options.mode_mode) {
.ignore => {},
.executable_bit_only => {
// This code does not look at the mode bits yet. To implement this feature,
// the implementation must be adjusted to look at the mode, and check the
// user executable bit, then call fchmod on newly created files when
// the executable bit is supposed to be set.
// It also needs to properly deal with ACLs on Windows.
@panic("TODO: unimplemented: tar ModeMode.executable_bit_only");
},
}
var file_name_buffer: [255]u8 = undefined;
var buffer: [512 * 8]u8 = undefined;
var start: usize = 0;
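A hedged usage sketch of pipeToFileSystem based on the signature and Options above; the archive name is hypothetical, and mode_mode is set to .ignore because .executable_bit_only still panics as unimplemented.

const std = @import("std");

pub fn extractExample(out_dir: std.fs.Dir) !void {
    // Hypothetical archive path; any std.io reader can be piped in.
    var file = try std.fs.cwd().openFile("example.tar", .{});
    defer file.close();

    try std.tar.pipeToFileSystem(out_dir, file.reader(), .{
        // Drop one leading path component, e.g. a "pkg-1.0/" prefix.
        .strip_components = 1,
        // .executable_bit_only is still the TODO panic shown above.
        .mode_mode = .ignore,
    });
}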
@ -1880,6 +1880,559 @@ pub const Target = struct {
=> 16,
};
}

pub const CType = enum {
short,
ushort,
int,
uint,
long,
ulong,
longlong,
ulonglong,
float,
double,
longdouble,
};

pub fn c_type_byte_size(t: Target, c_type: CType) u16 {
return switch (c_type) {
.short,
.ushort,
.int,
.uint,
.long,
.ulong,
.longlong,
.ulonglong,
=> @divExact(c_type_bit_size(t, c_type), 8),

.float => 4,
.double => 8,

.longdouble => switch (c_type_bit_size(t, c_type)) {
16 => 2,
32 => 4,
64 => 8,
80 => @intCast(u16, mem.alignForward(10, c_type_alignment(t, .longdouble))),
128 => 16,
else => unreachable,
},
};
}
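A worked instance of the 80-bit case above, assuming a target such as x86_64-linux-gnu where long double is 80 bits wide and 16-byte aligned: the storage size comes out as alignForward(10, 16) == 16.

const std = @import("std");

test "byte size of an 80-bit long double, assuming 16-byte alignment" {
    // 80 bits occupy 10 bytes; rounding up to the assumed 16-byte
    // alignment gives the 16-byte storage size.
    try std.testing.expectEqual(@as(usize, 16), std.mem.alignForward(10, 16));
}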
pub fn c_type_bit_size(target: Target, c_type: CType) u16 {
|
||||
switch (target.os.tag) {
|
||||
.freestanding, .other => switch (target.cpu.arch) {
|
||||
.msp430 => switch (c_type) {
|
||||
.short, .ushort, .int, .uint => return 16,
|
||||
.float, .long, .ulong => return 32,
|
||||
.longlong, .ulonglong, .double, .longdouble => return 64,
|
||||
},
|
||||
.avr => switch (c_type) {
|
||||
.short, .ushort, .int, .uint => return 16,
|
||||
.long, .ulong, .float, .double, .longdouble => return 32,
|
||||
.longlong, .ulonglong => return 64,
|
||||
},
|
||||
.tce, .tcele => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
|
||||
.float, .double, .longdouble => return 32,
|
||||
},
|
||||
.mips64, .mips64el => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => return 128,
|
||||
},
|
||||
.x86_64 => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => switch (target.abi) {
|
||||
.gnux32, .muslx32 => return 32,
|
||||
else => return 64,
|
||||
},
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => return 80,
|
||||
},
|
||||
else => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => switch (target.cpu.arch) {
|
||||
.x86 => switch (target.abi) {
|
||||
.android => return 64,
|
||||
else => return 80,
|
||||
},
|
||||
|
||||
.powerpc,
|
||||
.powerpcle,
|
||||
.powerpc64,
|
||||
.powerpc64le,
|
||||
=> switch (target.abi) {
|
||||
.musl,
|
||||
.musleabi,
|
||||
.musleabihf,
|
||||
.muslx32,
|
||||
=> return 64,
|
||||
else => return 128,
|
||||
},
|
||||
|
||||
.riscv32,
|
||||
.riscv64,
|
||||
.aarch64,
|
||||
.aarch64_be,
|
||||
.aarch64_32,
|
||||
.s390x,
|
||||
.sparc,
|
||||
.sparc64,
|
||||
.sparcel,
|
||||
.wasm32,
|
||||
.wasm64,
|
||||
=> return 128,
|
||||
|
||||
else => return 64,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
.linux,
|
||||
.freebsd,
|
||||
.netbsd,
|
||||
.dragonfly,
|
||||
.openbsd,
|
||||
.wasi,
|
||||
.emscripten,
|
||||
.plan9,
|
||||
.solaris,
|
||||
.haiku,
|
||||
.ananas,
|
||||
.fuchsia,
|
||||
.minix,
|
||||
=> switch (target.cpu.arch) {
|
||||
.msp430 => switch (c_type) {
|
||||
.short, .ushort, .int, .uint => return 16,
|
||||
.long, .ulong, .float => return 32,
|
||||
.longlong, .ulonglong, .double, .longdouble => return 64,
|
||||
},
|
||||
.avr => switch (c_type) {
|
||||
.short, .ushort, .int, .uint => return 16,
|
||||
.long, .ulong, .float, .double, .longdouble => return 32,
|
||||
.longlong, .ulonglong => return 64,
|
||||
},
|
||||
.tce, .tcele => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .long, .ulong, .longlong, .ulonglong => return 32,
|
||||
.float, .double, .longdouble => return 32,
|
||||
},
|
||||
.mips64, .mips64el => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => return if (target.abi != .gnuabin32) 64 else 32,
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => if (target.os.tag == .freebsd) return 64 else return 128,
|
||||
},
|
||||
.x86_64 => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => switch (target.abi) {
|
||||
.gnux32, .muslx32 => return 32,
|
||||
else => return 64,
|
||||
},
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => return 80,
|
||||
},
|
||||
else => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => return target.cpu.arch.ptrBitWidth(),
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => switch (target.cpu.arch) {
|
||||
.x86 => switch (target.abi) {
|
||||
.android => return 64,
|
||||
else => return 80,
|
||||
},
|
||||
|
||||
.powerpc,
|
||||
.powerpcle,
|
||||
=> switch (target.abi) {
|
||||
.musl,
|
||||
.musleabi,
|
||||
.musleabihf,
|
||||
.muslx32,
|
||||
=> return 64,
|
||||
else => switch (target.os.tag) {
|
||||
.freebsd, .netbsd, .openbsd => return 64,
|
||||
else => return 128,
|
||||
},
|
||||
},
|
||||
|
||||
.powerpc64,
|
||||
.powerpc64le,
|
||||
=> switch (target.abi) {
|
||||
.musl,
|
||||
.musleabi,
|
||||
.musleabihf,
|
||||
.muslx32,
|
||||
=> return 64,
|
||||
else => switch (target.os.tag) {
|
||||
.freebsd, .openbsd => return 64,
|
||||
else => return 128,
|
||||
},
|
||||
},
|
||||
|
||||
.riscv32,
|
||||
.riscv64,
|
||||
.aarch64,
|
||||
.aarch64_be,
|
||||
.aarch64_32,
|
||||
.s390x,
|
||||
.mips64,
|
||||
.mips64el,
|
||||
.sparc,
|
||||
.sparc64,
|
||||
.sparcel,
|
||||
.wasm32,
|
||||
.wasm64,
|
||||
=> return 128,
|
||||
|
||||
else => return 64,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
.windows, .uefi => switch (target.cpu.arch) {
|
||||
.x86 => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => return 32,
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => switch (target.abi) {
|
||||
.gnu, .gnuilp32, .cygnus => return 80,
|
||||
else => return 64,
|
||||
},
|
||||
},
|
||||
.x86_64 => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => switch (target.abi) {
|
||||
.cygnus => return 64,
|
||||
else => return 32,
|
||||
},
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => switch (target.abi) {
|
||||
.gnu, .gnuilp32, .cygnus => return 80,
|
||||
else => return 64,
|
||||
},
|
||||
},
|
||||
else => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => return 32,
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => return 64,
|
||||
},
|
||||
},
|
||||
|
||||
.macos, .ios, .tvos, .watchos => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => switch (target.cpu.arch) {
|
||||
.x86, .arm, .aarch64_32 => return 32,
|
||||
.x86_64 => switch (target.abi) {
|
||||
.gnux32, .muslx32 => return 32,
|
||||
else => return 64,
|
||||
},
|
||||
else => return 64,
|
||||
},
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => switch (target.cpu.arch) {
|
||||
.x86 => switch (target.abi) {
|
||||
.android => return 64,
|
||||
else => return 80,
|
||||
},
|
||||
.x86_64 => return 80,
|
||||
else => return 64,
|
||||
},
|
||||
},
|
||||
|
||||
.nvcl, .cuda => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong => switch (target.cpu.arch) {
|
||||
.nvptx => return 32,
|
||||
.nvptx64 => return 64,
|
||||
else => return 64,
|
||||
},
|
||||
.longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => return 64,
|
||||
},
|
||||
|
||||
.amdhsa, .amdpal => switch (c_type) {
|
||||
.short, .ushort => return 16,
|
||||
.int, .uint, .float => return 32,
|
||||
.long, .ulong, .longlong, .ulonglong, .double => return 64,
|
||||
.longdouble => return 128,
|
||||
},
|
||||
|
||||
.cloudabi,
|
||||
.kfreebsd,
|
||||
.lv2,
|
||||
.zos,
|
||||
.rtems,
|
||||
.nacl,
|
||||
.aix,
|
||||
.ps4,
|
||||
.ps5,
|
||||
.elfiamcu,
|
||||
.mesa3d,
|
||||
.contiki,
|
||||
.hermit,
|
||||
.hurd,
|
||||
.opencl,
|
||||
.glsl450,
|
||||
.vulkan,
|
||||
.driverkit,
|
||||
.shadermodel,
|
||||
=> @panic("TODO specify the C integer and float type sizes for this OS"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn c_type_alignment(target: Target, c_type: CType) u16 {
|
||||
// Overrides for unusual alignments
|
||||
switch (target.cpu.arch) {
|
||||
.avr => switch (c_type) {
|
||||
.short, .ushort => return 2,
|
||||
else => return 1,
|
||||
},
|
||||
.x86 => switch (target.os.tag) {
|
||||
.windows, .uefi => switch (c_type) {
|
||||
.longlong, .ulonglong, .double => return 8,
|
||||
.longdouble => switch (target.abi) {
|
||||
.gnu, .gnuilp32, .cygnus => return 4,
|
||||
else => return 8,
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
// Next-power-of-two-aligned, up to a maximum.
|
||||
return @min(
|
||||
std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
|
||||
switch (target.cpu.arch) {
|
||||
.arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
|
||||
.netbsd => switch (target.abi) {
|
||||
.gnueabi,
|
||||
.gnueabihf,
|
||||
.eabi,
|
||||
.eabihf,
|
||||
.android,
|
||||
.musleabi,
|
||||
.musleabihf,
|
||||
=> 8,
|
||||
|
||||
else => @as(u16, 4),
|
||||
},
|
||||
.ios, .tvos, .watchos => 4,
|
||||
else => 8,
|
||||
},
|
||||
|
||||
.msp430,
|
||||
.avr,
|
||||
=> 2,
|
||||
|
||||
.arc,
|
||||
.csky,
|
||||
.x86,
|
||||
.xcore,
|
||||
.dxil,
|
||||
.loongarch32,
|
||||
.tce,
|
||||
.tcele,
|
||||
.le32,
|
||||
.amdil,
|
||||
.hsail,
|
||||
.spir,
|
||||
.spirv32,
|
||||
.kalimba,
|
||||
.shave,
|
||||
.renderscript32,
|
||||
.ve,
|
||||
.spu_2,
|
||||
=> 4,
|
||||
|
||||
.aarch64_32,
|
||||
.amdgcn,
|
||||
.amdil64,
|
||||
.bpfel,
|
||||
.bpfeb,
|
||||
.hexagon,
|
||||
.hsail64,
|
||||
.loongarch64,
|
||||
.m68k,
|
||||
.mips,
|
||||
.mipsel,
|
||||
.sparc,
|
||||
.sparcel,
|
||||
.sparc64,
|
||||
.lanai,
|
||||
.le64,
|
||||
.nvptx,
|
||||
.nvptx64,
|
||||
.r600,
|
||||
.s390x,
|
||||
.spir64,
|
||||
.spirv64,
|
||||
.renderscript64,
|
||||
=> 8,
|
||||
|
||||
.aarch64,
|
||||
.aarch64_be,
|
||||
.mips64,
|
||||
.mips64el,
|
||||
.powerpc,
|
||||
.powerpcle,
|
||||
.powerpc64,
|
||||
.powerpc64le,
|
||||
.riscv32,
|
||||
.riscv64,
|
||||
.x86_64,
|
||||
.wasm32,
|
||||
.wasm64,
|
||||
=> 16,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
pub fn c_type_preferred_alignment(target: Target, c_type: CType) u16 {
|
||||
// Overrides for unusual alignments
|
||||
switch (target.cpu.arch) {
|
||||
.arm, .armeb, .thumb, .thumbeb => switch (target.os.tag) {
|
||||
.netbsd => switch (target.abi) {
|
||||
.gnueabi,
|
||||
.gnueabihf,
|
||||
.eabi,
|
||||
.eabihf,
|
||||
.android,
|
||||
.musleabi,
|
||||
.musleabihf,
|
||||
=> {},
|
||||
|
||||
else => switch (c_type) {
|
||||
.longdouble => return 4,
|
||||
else => {},
|
||||
},
|
||||
},
|
||||
.ios, .tvos, .watchos => switch (c_type) {
|
||||
.longdouble => return 4,
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
.arc => switch (c_type) {
|
||||
.longdouble => return 4,
|
||||
else => {},
|
||||
},
|
||||
.avr => switch (c_type) {
|
||||
.int, .uint, .long, .ulong, .float, .longdouble => return 1,
|
||||
.short, .ushort => return 2,
|
||||
.double => return 4,
|
||||
.longlong, .ulonglong => return 8,
|
||||
},
|
||||
.x86 => switch (target.os.tag) {
|
||||
.windows, .uefi => switch (c_type) {
|
||||
.longdouble => switch (target.abi) {
|
||||
.gnu, .gnuilp32, .cygnus => return 4,
|
||||
else => return 8,
|
||||
},
|
||||
else => {},
|
||||
},
|
||||
else => switch (c_type) {
|
||||
.longdouble => return 4,
|
||||
else => {},
|
||||
},
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
// Next-power-of-two-aligned, up to a maximum.
|
||||
return @min(
|
||||
std.math.ceilPowerOfTwoAssert(u16, (c_type_bit_size(target, c_type) + 7) / 8),
|
||||
switch (target.cpu.arch) {
|
||||
.msp430 => @as(u16, 2),
|
||||
|
||||
.csky,
|
||||
.xcore,
|
||||
.dxil,
|
||||
.loongarch32,
|
||||
.tce,
|
||||
.tcele,
|
||||
.le32,
|
||||
.amdil,
|
||||
.hsail,
|
||||
.spir,
|
||||
.spirv32,
|
||||
.kalimba,
|
||||
.shave,
|
||||
.renderscript32,
|
||||
.ve,
|
||||
.spu_2,
|
||||
=> 4,
|
||||
|
||||
.arc,
|
||||
.arm,
|
||||
.armeb,
|
||||
.avr,
|
||||
.thumb,
|
||||
.thumbeb,
|
||||
.aarch64_32,
|
||||
.amdgcn,
|
||||
.amdil64,
|
||||
.bpfel,
|
||||
.bpfeb,
|
||||
.hexagon,
|
||||
.hsail64,
|
||||
.x86,
|
||||
.loongarch64,
|
||||
.m68k,
|
||||
.mips,
|
||||
.mipsel,
|
||||
.sparc,
|
||||
.sparcel,
|
||||
.sparc64,
|
||||
.lanai,
|
||||
.le64,
|
||||
.nvptx,
|
||||
.nvptx64,
|
||||
.r600,
|
||||
.s390x,
|
||||
.spir64,
|
||||
.spirv64,
|
||||
.renderscript64,
|
||||
=> 8,
|
||||
|
||||
.aarch64,
|
||||
.aarch64_be,
|
||||
.mips64,
|
||||
.mips64el,
|
||||
.powerpc,
|
||||
.powerpcle,
|
||||
.powerpc64,
|
||||
.powerpc64le,
|
||||
.riscv32,
|
||||
.riscv64,
|
||||
.x86_64,
|
||||
.wasm32,
|
||||
.wasm64,
|
||||
=> 16,
|
||||
},
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
test {
|
||||
|
||||
@ -8,7 +8,6 @@ pub const Tokenizer = tokenizer.Tokenizer;
|
||||
pub const fmtId = fmt.fmtId;
|
||||
pub const fmtEscapes = fmt.fmtEscapes;
|
||||
pub const isValidId = fmt.isValidId;
|
||||
pub const parse = @import("zig/parse.zig").parse;
|
||||
pub const string_literal = @import("zig/string_literal.zig");
|
||||
pub const number_literal = @import("zig/number_literal.zig");
|
||||
pub const primitives = @import("zig/primitives.zig");
|
||||
|
||||
@ -1,4 +1,8 @@
|
||||
//! Abstract Syntax Tree for Zig source code.
|
||||
//! For Zig syntax, the root node is at nodes[0] and contains the list of
|
||||
//! sub-nodes.
|
||||
//! For Zon syntax, the root node is at nodes[0] and contains lhs as the node
|
||||
//! index of the main expression.
|
||||
|
||||
/// Reference to externally-owned data.
|
||||
source: [:0]const u8,
|
||||
@ -11,13 +15,6 @@ extra_data: []Node.Index,
|
||||
|
||||
errors: []const Error,
|
||||
|
||||
const std = @import("../std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
const Token = std.zig.Token;
|
||||
const Ast = @This();
|
||||
|
||||
pub const TokenIndex = u32;
|
||||
pub const ByteOffset = u32;
|
||||
|
||||
@@ -34,7 +31,7 @@ pub const Location = struct {
line_end: usize,
};

pub fn deinit(tree: *Ast, gpa: mem.Allocator) void {
pub fn deinit(tree: *Ast, gpa: Allocator) void {
tree.tokens.deinit(gpa);
tree.nodes.deinit(gpa);
gpa.free(tree.extra_data);
@@ -48,11 +45,69 @@ pub const RenderError = error{
OutOfMemory,
};

pub const Mode = enum { zig, zon };

/// Result should be freed with tree.deinit() when there are
/// no more references to any of the tokens or nodes.
pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!Ast {
var tokens = Ast.TokenList{};
defer tokens.deinit(gpa);

// Empirically, the zig std lib has an 8:1 ratio of source bytes to token count.
const estimated_token_count = source.len / 8;
try tokens.ensureTotalCapacity(gpa, estimated_token_count);

var tokenizer = std.zig.Tokenizer.init(source);
while (true) {
const token = tokenizer.next();
try tokens.append(gpa, .{
.tag = token.tag,
.start = @intCast(u32, token.loc.start),
});
if (token.tag == .eof) break;
}

var parser: Parse = .{
.source = source,
.gpa = gpa,
.token_tags = tokens.items(.tag),
.token_starts = tokens.items(.start),
.errors = .{},
.nodes = .{},
.extra_data = .{},
.scratch = .{},
.tok_i = 0,
};
defer parser.errors.deinit(gpa);
defer parser.nodes.deinit(gpa);
defer parser.extra_data.deinit(gpa);
defer parser.scratch.deinit(gpa);

// Empirically, Zig source code has a 2:1 ratio of tokens to AST nodes.
// Make sure at least 1 so we can use appendAssumeCapacity on the root node below.
const estimated_node_count = (tokens.len + 2) / 2;
try parser.nodes.ensureTotalCapacity(gpa, estimated_node_count);

switch (mode) {
.zig => try parser.parseRoot(),
.zon => try parser.parseZon(),
}

// TODO experiment with compacting the MultiArrayList slices here
return Ast{
.source = source,
.tokens = tokens.toOwnedSlice(),
.nodes = parser.nodes.toOwnedSlice(),
.extra_data = try parser.extra_data.toOwnedSlice(gpa),
.errors = try parser.errors.toOwnedSlice(gpa),
};
}
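
// Illustrative usage sketch, not part of this change. It assumes only the
// declarations above (parse, Mode, deinit) and the std.testing allocator, and
// shows the new mode parameter: for .zon input the root expression is reached
// through nodes[0].lhs, as documented at the top of this file.
test "parse a ZON expression" {
    const gpa = std.testing.allocator;
    var tree = try parse(gpa, ".{ .name = \"foo\" }", .zon);
    defer tree.deinit(gpa);
    try std.testing.expect(tree.errors.len == 0);
    const root_expr = tree.nodes.items(.data)[0].lhs;
    try std.testing.expect(root_expr != 0);
}
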
/// `gpa` is used for allocating the resulting formatted source code, as well as
/// for allocating extra stack memory if needed, because this function utilizes recursion.
/// Note: that's not actually true yet, see https://github.com/ziglang/zig/issues/1006.
/// Caller owns the returned slice of bytes, allocated with `gpa`.
pub fn render(tree: Ast, gpa: mem.Allocator) RenderError![]u8 {
pub fn render(tree: Ast, gpa: Allocator) RenderError![]u8 {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();

@@ -3347,3 +3402,12 @@ pub const Node = struct {
rparen: TokenIndex,
};
};

const std = @import("../std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const Token = std.zig.Token;
const Ast = @This();
const Allocator = std.mem.Allocator;
const Parse = @import("Parse.zig");

lib/std/zig/Parse.zig: new file, 3825 lines (diff suppressed because it is too large)
@@ -75,7 +75,7 @@ fn castPtr(comptime DestType: type, target: anytype) DestType {
const source = ptrInfo(@TypeOf(target));

if (source.is_const and !dest.is_const or source.is_volatile and !dest.is_volatile)
return @intToPtr(DestType, @ptrToInt(target))
return @qualCast(DestType, target)
else if (@typeInfo(dest.child) == .Opaque)
// dest.alignment would error out
return @ptrCast(DestType, target)

File diff suppressed because it is too large
@@ -6073,7 +6073,7 @@ var fixed_buffer_mem: [100 * 1024]u8 = undefined;
fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 {
const stderr = io.getStdErr().writer();

var tree = try std.zig.parse(allocator, source);
var tree = try std.zig.Ast.parse(allocator, source, .zig);
defer tree.deinit(allocator);

for (tree.errors) |parse_error| {
@@ -6124,7 +6124,7 @@ fn testCanonical(source: [:0]const u8) !void {
const Error = std.zig.Ast.Error.Tag;

fn testError(source: [:0]const u8, expected_errors: []const Error) !void {
var tree = try std.zig.parse(std.testing.allocator, source);
var tree = try std.zig.Ast.parse(std.testing.allocator, source, .zig);
defer tree.deinit(std.testing.allocator);

std.testing.expectEqual(expected_errors.len, tree.errors.len) catch |err| {

@@ -1,7 +1,6 @@
const std = @import("std");
const mem = std.mem;
const Tokenizer = std.zig.Tokenizer;
const Parser = std.zig.Parser;
const io = std.io;
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;

@@ -34,6 +33,6 @@ pub fn main() !void {
fn testOnce() usize {
var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var allocator = fixed_buf_alloc.allocator();
_ = std.zig.parse(allocator, source) catch @panic("parse failure");
_ = std.zig.Ast.parse(allocator, source, .zig) catch @panic("parse failure");
return fixed_buf_alloc.end_index;
}

@ -2530,6 +2530,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
|
||||
.bit_size_of,
|
||||
.typeof_log2_int_type,
|
||||
.ptr_to_int,
|
||||
.qual_cast,
|
||||
.align_of,
|
||||
.bool_to_int,
|
||||
.embed_file,
|
||||
@ -4278,7 +4279,34 @@ fn testDecl(
|
||||
var num_namespaces_out: u32 = 0;
|
||||
var capturing_namespace: ?*Scope.Namespace = null;
|
||||
while (true) switch (s.tag) {
|
||||
.local_val, .local_ptr => unreachable, // a test cannot be in a local scope
|
||||
.local_val => {
|
||||
const local_val = s.cast(Scope.LocalVal).?;
|
||||
if (local_val.name == name_str_index) {
|
||||
local_val.used = test_name_token;
|
||||
return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
|
||||
@tagName(local_val.id_cat),
|
||||
}, &[_]u32{
|
||||
try astgen.errNoteTok(local_val.token_src, "{s} declared here", .{
|
||||
@tagName(local_val.id_cat),
|
||||
}),
|
||||
});
|
||||
}
|
||||
s = local_val.parent;
|
||||
},
|
||||
.local_ptr => {
|
||||
const local_ptr = s.cast(Scope.LocalPtr).?;
|
||||
if (local_ptr.name == name_str_index) {
|
||||
local_ptr.used = test_name_token;
|
||||
return astgen.failTokNotes(test_name_token, "cannot test a {s}", .{
|
||||
@tagName(local_ptr.id_cat),
|
||||
}, &[_]u32{
|
||||
try astgen.errNoteTok(local_ptr.token_src, "{s} declared here", .{
|
||||
@tagName(local_ptr.id_cat),
|
||||
}),
|
||||
});
|
||||
}
|
||||
s = local_ptr.parent;
|
||||
},
|
||||
.gen_zir => s = s.cast(GenZir).?.parent,
|
||||
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
|
||||
.namespace, .enum_namespace => {
|
||||
@ -8010,6 +8038,7 @@ fn builtinCall(
|
||||
.float_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .float_cast),
|
||||
.int_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .int_cast),
|
||||
.ptr_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .ptr_cast),
|
||||
.qual_cast => return typeCast(gz, scope, ri, node, params[0], params[1], .qual_cast),
|
||||
.truncate => return typeCast(gz, scope, ri, node, params[0], params[1], .truncate),
|
||||
// zig fmt: on
|
||||
|
||||
@ -8692,6 +8721,7 @@ fn callExpr(
|
||||
defer arg_block.unstack();
|
||||
|
||||
// `call_inst` is reused to provide the param type.
|
||||
arg_block.rl_ty_inst = call_inst;
|
||||
const arg_ref = try expr(&arg_block, &arg_block.base, .{ .rl = .{ .coerced_ty = call_inst }, .ctx = .fn_arg }, param_node);
|
||||
_ = try arg_block.addBreak(.break_inline, call_index, arg_ref);
|
||||
|
||||
@ -10840,7 +10870,12 @@ const GenZir = struct {
|
||||
// we emit ZIR for the block break instructions to have the result values,
|
||||
// and then rvalue() on that to pass the value to the result location.
|
||||
switch (parent_ri.rl) {
|
||||
.ty, .coerced_ty => |ty_inst| {
|
||||
.coerced_ty => |ty_inst| {
|
||||
// Type coercion needs to happen before breaks.
|
||||
gz.rl_ty_inst = ty_inst;
|
||||
gz.break_result_info = .{ .rl = .{ .ty = ty_inst } };
|
||||
},
|
||||
.ty => |ty_inst| {
|
||||
gz.rl_ty_inst = ty_inst;
|
||||
gz.break_result_info = parent_ri;
|
||||
},
|
||||
|
||||
@ -1400,6 +1400,7 @@ fn walkInstruction(
|
||||
.float_cast,
|
||||
.int_cast,
|
||||
.ptr_cast,
|
||||
.qual_cast,
|
||||
.truncate,
|
||||
.align_cast,
|
||||
.has_decl,
|
||||
@ -2200,17 +2201,10 @@ fn walkInstruction(
|
||||
false,
|
||||
);
|
||||
|
||||
_ = operand;
|
||||
|
||||
// WIP
|
||||
|
||||
printWithContext(
|
||||
file,
|
||||
inst_index,
|
||||
"TODO: implement `{s}` for walkInstruction\n\n",
|
||||
.{@tagName(tags[inst_index])},
|
||||
);
|
||||
return self.cteTodo(@tagName(tags[inst_index]));
|
||||
return DocData.WalkResult{
|
||||
.typeRef = operand.expr,
|
||||
.expr = .{ .@"struct" = &.{} },
|
||||
};
|
||||
},
|
||||
.struct_init_anon => {
|
||||
const pl_node = data[inst_index].pl_node;
|
||||
@ -2537,6 +2531,7 @@ fn walkInstruction(
|
||||
const var_init_ref = @intToEnum(Ref, file.zir.extra[extra_index]);
|
||||
const var_init = try self.walkRef(file, parent_scope, parent_src, var_init_ref, need_type);
|
||||
value.expr = var_init.expr;
|
||||
value.typeRef = var_init.typeRef;
|
||||
}
|
||||
|
||||
return value;
|
||||
|
||||
@@ -75,6 +75,7 @@ pub const Tag = enum {
prefetch,
ptr_cast,
ptr_to_int,
qual_cast,
rem,
return_address,
select,
@@ -674,6 +675,13 @@ pub const list = list: {
.param_count = 1,
},
},
.{
"@qualCast",
.{
.tag = .qual_cast,
.param_count = 2,
},
},
.{
"@rem",
.{
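
// Illustrative sketch, not part of this change: @qualCast as registered above
// takes two arguments and, per the translate-c update in this commit, casts
// away const/volatile qualifiers between otherwise compatible pointer types,
// for example:
//
//     var byte: u8 = 0;
//     const p: *const u8 = &byte;
//     const q = @qualCast(*u8, p); // same pointee type, const dropped
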
@@ -385,7 +385,7 @@ pub const AllErrors = struct {
count: u32 = 1,
/// Does not include the trailing newline.
source_line: ?[]const u8,
notes: []Message = &.{},
notes: []const Message = &.{},
reference_trace: []Message = &.{},

/// Splits the error message up into lines to properly indent them
@@ -3299,7 +3299,7 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const gpa = comp.gpa;
const module = comp.bin_file.options.module.?;
const decl = module.declPtr(decl_index);
comp.bin_file.updateDeclLineNumber(module, decl) catch |err| {
comp.bin_file.updateDeclLineNumber(module, decl_index) catch |err| {
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,

src/Manifest.zig: new file, 499 lines
@ -0,0 +1,499 @@
|
||||
pub const basename = "build.zig.zon";
|
||||
pub const Hash = std.crypto.hash.sha2.Sha256;
|
||||
|
||||
pub const Dependency = struct {
|
||||
url: []const u8,
|
||||
url_tok: Ast.TokenIndex,
|
||||
hash: ?[]const u8,
|
||||
hash_tok: Ast.TokenIndex,
|
||||
};
|
||||
|
||||
pub const ErrorMessage = struct {
|
||||
msg: []const u8,
|
||||
tok: Ast.TokenIndex,
|
||||
off: u32,
|
||||
};
|
||||
|
||||
pub const MultihashFunction = enum(u16) {
|
||||
identity = 0x00,
|
||||
sha1 = 0x11,
|
||||
@"sha2-256" = 0x12,
|
||||
@"sha2-512" = 0x13,
|
||||
@"sha3-512" = 0x14,
|
||||
@"sha3-384" = 0x15,
|
||||
@"sha3-256" = 0x16,
|
||||
@"sha3-224" = 0x17,
|
||||
@"sha2-384" = 0x20,
|
||||
@"sha2-256-trunc254-padded" = 0x1012,
|
||||
@"sha2-224" = 0x1013,
|
||||
@"sha2-512-224" = 0x1014,
|
||||
@"sha2-512-256" = 0x1015,
|
||||
@"blake2b-256" = 0xb220,
|
||||
_,
|
||||
};
|
||||
|
||||
pub const multihash_function: MultihashFunction = switch (Hash) {
|
||||
std.crypto.hash.sha2.Sha256 => .@"sha2-256",
|
||||
else => @compileError("unreachable"),
|
||||
};
|
||||
comptime {
|
||||
// We avoid unnecessary uleb128 code in hexDigest by asserting here the
|
||||
// values are small enough to be contained in the one-byte encoding.
|
||||
assert(@enumToInt(multihash_function) < 127);
|
||||
assert(Hash.digest_length < 127);
|
||||
}
|
||||
pub const multihash_len = 1 + 1 + Hash.digest_length;
|
||||
|
||||
name: []const u8,
|
||||
version: std.SemanticVersion,
|
||||
dependencies: std.StringArrayHashMapUnmanaged(Dependency),
|
||||
|
||||
errors: []ErrorMessage,
|
||||
arena_state: std.heap.ArenaAllocator.State,
|
||||
|
||||
pub const Error = Allocator.Error;
|
||||
|
||||
pub fn parse(gpa: Allocator, ast: std.zig.Ast) Error!Manifest {
|
||||
const node_tags = ast.nodes.items(.tag);
|
||||
const node_datas = ast.nodes.items(.data);
|
||||
assert(node_tags[0] == .root);
|
||||
const main_node_index = node_datas[0].lhs;
|
||||
|
||||
var arena_instance = std.heap.ArenaAllocator.init(gpa);
|
||||
errdefer arena_instance.deinit();
|
||||
|
||||
var p: Parse = .{
|
||||
.gpa = gpa,
|
||||
.ast = ast,
|
||||
.arena = arena_instance.allocator(),
|
||||
.errors = .{},
|
||||
|
||||
.name = undefined,
|
||||
.version = undefined,
|
||||
.dependencies = .{},
|
||||
.buf = .{},
|
||||
};
|
||||
defer p.buf.deinit(gpa);
|
||||
defer p.errors.deinit(gpa);
|
||||
defer p.dependencies.deinit(gpa);
|
||||
|
||||
p.parseRoot(main_node_index) catch |err| switch (err) {
|
||||
error.ParseFailure => assert(p.errors.items.len > 0),
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
return .{
|
||||
.name = p.name,
|
||||
.version = p.version,
|
||||
.dependencies = try p.dependencies.clone(p.arena),
|
||||
.errors = try p.arena.dupe(ErrorMessage, p.errors.items),
|
||||
.arena_state = arena_instance.state,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deinit(man: *Manifest, gpa: Allocator) void {
|
||||
man.arena_state.promote(gpa).deinit();
|
||||
man.* = undefined;
|
||||
}
|
||||
|
||||
const hex_charset = "0123456789abcdef";
|
||||
|
||||
pub fn hex64(x: u64) [16]u8 {
|
||||
var result: [16]u8 = undefined;
|
||||
var i: usize = 0;
|
||||
while (i < 8) : (i += 1) {
|
||||
const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
|
||||
result[i * 2 + 0] = hex_charset[byte >> 4];
|
||||
result[i * 2 + 1] = hex_charset[byte & 15];
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
test hex64 {
|
||||
const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
|
||||
try std.testing.expectEqualStrings("[00efcdab78563412]", s);
|
||||
}
|
||||
|
||||
pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
|
||||
var result: [multihash_len * 2]u8 = undefined;
|
||||
|
||||
result[0] = hex_charset[@enumToInt(multihash_function) >> 4];
|
||||
result[1] = hex_charset[@enumToInt(multihash_function) & 15];
|
||||
|
||||
result[2] = hex_charset[Hash.digest_length >> 4];
|
||||
result[3] = hex_charset[Hash.digest_length & 15];
|
||||
|
||||
for (digest) |byte, i| {
|
||||
result[4 + i * 2] = hex_charset[byte >> 4];
|
||||
result[5 + i * 2] = hex_charset[byte & 15];
|
||||
}
|
||||
return result;
|
||||
}
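
// Illustrative sketch, not part of this change: with sha2-256 the multihash
// header is the function id (0x12) followed by the digest length (0x20), so
// every hash string in build.zig.zon starts with "1220" and is
// 2 * multihash_len (68) hex characters long.
test "hexDigest multihash prefix" {
    const zeros = [_]u8{0} ** Hash.digest_length;
    const hex = hexDigest(zeros);
    try std.testing.expectEqual(@as(usize, 2 * multihash_len), hex.len);
    try std.testing.expectEqualStrings("1220", hex[0..4]);
}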
|
||||
|
||||
const Parse = struct {
|
||||
gpa: Allocator,
|
||||
ast: std.zig.Ast,
|
||||
arena: Allocator,
|
||||
buf: std.ArrayListUnmanaged(u8),
|
||||
errors: std.ArrayListUnmanaged(ErrorMessage),
|
||||
|
||||
name: []const u8,
|
||||
version: std.SemanticVersion,
|
||||
dependencies: std.StringArrayHashMapUnmanaged(Dependency),
|
||||
|
||||
const InnerError = error{ ParseFailure, OutOfMemory };
|
||||
|
||||
fn parseRoot(p: *Parse, node: Ast.Node.Index) !void {
|
||||
const ast = p.ast;
|
||||
const main_tokens = ast.nodes.items(.main_token);
|
||||
const main_token = main_tokens[node];
|
||||
|
||||
var buf: [2]Ast.Node.Index = undefined;
|
||||
const struct_init = ast.fullStructInit(&buf, node) orelse {
|
||||
return fail(p, main_token, "expected top level expression to be a struct", .{});
|
||||
};
|
||||
|
||||
var have_name = false;
|
||||
var have_version = false;
|
||||
|
||||
for (struct_init.ast.fields) |field_init| {
|
||||
const name_token = ast.firstToken(field_init) - 2;
|
||||
const field_name = try identifierTokenString(p, name_token);
|
||||
// We could get fancy with reflection and comptime logic here but doing
|
||||
// things manually provides an opportunity to do any additional verification
|
||||
// that is desirable on a per-field basis.
|
||||
if (mem.eql(u8, field_name, "dependencies")) {
|
||||
try parseDependencies(p, field_init);
|
||||
} else if (mem.eql(u8, field_name, "name")) {
|
||||
p.name = try parseString(p, field_init);
|
||||
have_name = true;
|
||||
} else if (mem.eql(u8, field_name, "version")) {
|
||||
const version_text = try parseString(p, field_init);
|
||||
p.version = std.SemanticVersion.parse(version_text) catch |err| v: {
|
||||
try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)});
|
||||
break :v undefined;
|
||||
};
|
||||
have_version = true;
|
||||
} else {
|
||||
// Ignore unknown fields so that we can add fields in future zig
|
||||
// versions without breaking older zig versions.
|
||||
}
|
||||
}
|
||||
|
||||
if (!have_name) {
|
||||
try appendError(p, main_token, "missing top-level 'name' field", .{});
|
||||
}
|
||||
|
||||
if (!have_version) {
|
||||
try appendError(p, main_token, "missing top-level 'version' field", .{});
|
||||
}
|
||||
}
|
||||
|
||||
fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void {
|
||||
const ast = p.ast;
|
||||
const main_tokens = ast.nodes.items(.main_token);
|
||||
|
||||
var buf: [2]Ast.Node.Index = undefined;
|
||||
const struct_init = ast.fullStructInit(&buf, node) orelse {
|
||||
const tok = main_tokens[node];
|
||||
return fail(p, tok, "expected dependencies expression to be a struct", .{});
|
||||
};
|
||||
|
||||
for (struct_init.ast.fields) |field_init| {
|
||||
const name_token = ast.firstToken(field_init) - 2;
|
||||
const dep_name = try identifierTokenString(p, name_token);
|
||||
const dep = try parseDependency(p, field_init);
|
||||
try p.dependencies.put(p.gpa, dep_name, dep);
|
||||
}
|
||||
}
|
||||
|
||||
fn parseDependency(p: *Parse, node: Ast.Node.Index) !Dependency {
|
||||
const ast = p.ast;
|
||||
const main_tokens = ast.nodes.items(.main_token);
|
||||
|
||||
var buf: [2]Ast.Node.Index = undefined;
|
||||
const struct_init = ast.fullStructInit(&buf, node) orelse {
|
||||
const tok = main_tokens[node];
|
||||
return fail(p, tok, "expected dependency expression to be a struct", .{});
|
||||
};
|
||||
|
||||
var dep: Dependency = .{
|
||||
.url = undefined,
|
||||
.url_tok = undefined,
|
||||
.hash = null,
|
||||
.hash_tok = undefined,
|
||||
};
|
||||
var have_url = false;
|
||||
|
||||
for (struct_init.ast.fields) |field_init| {
|
||||
const name_token = ast.firstToken(field_init) - 2;
|
||||
const field_name = try identifierTokenString(p, name_token);
|
||||
// We could get fancy with reflection and comptime logic here but doing
|
||||
// things manually provides an opportunity to do any additional verification
|
||||
// that is desirable on a per-field basis.
|
||||
if (mem.eql(u8, field_name, "url")) {
|
||||
dep.url = parseString(p, field_init) catch |err| switch (err) {
|
||||
error.ParseFailure => continue,
|
||||
else => |e| return e,
|
||||
};
|
||||
dep.url_tok = main_tokens[field_init];
|
||||
have_url = true;
|
||||
} else if (mem.eql(u8, field_name, "hash")) {
|
||||
dep.hash = parseHash(p, field_init) catch |err| switch (err) {
|
||||
error.ParseFailure => continue,
|
||||
else => |e| return e,
|
||||
};
|
||||
dep.hash_tok = main_tokens[field_init];
|
||||
} else {
|
||||
// Ignore unknown fields so that we can add fields in future zig
|
||||
// versions without breaking older zig versions.
|
||||
}
|
||||
}
|
||||
|
||||
if (!have_url) {
|
||||
try appendError(p, main_tokens[node], "dependency is missing 'url' field", .{});
|
||||
}
|
||||
|
||||
return dep;
|
||||
}
|
||||
|
||||
fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 {
|
||||
const ast = p.ast;
|
||||
const node_tags = ast.nodes.items(.tag);
|
||||
const main_tokens = ast.nodes.items(.main_token);
|
||||
if (node_tags[node] != .string_literal) {
|
||||
return fail(p, main_tokens[node], "expected string literal", .{});
|
||||
}
|
||||
const str_lit_token = main_tokens[node];
|
||||
const token_bytes = ast.tokenSlice(str_lit_token);
|
||||
p.buf.clearRetainingCapacity();
|
||||
try parseStrLit(p, str_lit_token, &p.buf, token_bytes, 0);
|
||||
const duped = try p.arena.dupe(u8, p.buf.items);
|
||||
return duped;
|
||||
}
|
||||
|
||||
fn parseHash(p: *Parse, node: Ast.Node.Index) ![]const u8 {
|
||||
const ast = p.ast;
|
||||
const main_tokens = ast.nodes.items(.main_token);
|
||||
const tok = main_tokens[node];
|
||||
const h = try parseString(p, node);
|
||||
|
||||
if (h.len >= 2) {
|
||||
const their_multihash_func = std.fmt.parseInt(u8, h[0..2], 16) catch |err| {
|
||||
return fail(p, tok, "invalid multihash value: unable to parse hash function: {s}", .{
|
||||
@errorName(err),
|
||||
});
|
||||
};
|
||||
if (@intToEnum(MultihashFunction, their_multihash_func) != multihash_function) {
|
||||
return fail(p, tok, "unsupported hash function: only sha2-256 is supported", .{});
|
||||
}
|
||||
}
|
||||
|
||||
const hex_multihash_len = 2 * Manifest.multihash_len;
|
||||
if (h.len != hex_multihash_len) {
|
||||
return fail(p, tok, "wrong hash size. expected: {d}, found: {d}", .{
|
||||
hex_multihash_len, h.len,
|
||||
});
|
||||
}
|
||||
|
||||
return h;
|
||||
}
|
||||
|
||||
/// TODO: try to DRY this with AstGen.identifierTokenString
|
||||
fn identifierTokenString(p: *Parse, token: Ast.TokenIndex) InnerError![]const u8 {
|
||||
const ast = p.ast;
|
||||
const token_tags = ast.tokens.items(.tag);
|
||||
assert(token_tags[token] == .identifier);
|
||||
const ident_name = ast.tokenSlice(token);
|
||||
if (!mem.startsWith(u8, ident_name, "@")) {
|
||||
return ident_name;
|
||||
}
|
||||
p.buf.clearRetainingCapacity();
|
||||
try parseStrLit(p, token, &p.buf, ident_name, 1);
|
||||
const duped = try p.arena.dupe(u8, p.buf.items);
|
||||
return duped;
|
||||
}
|
||||
|
||||
/// TODO: try to DRY this with AstGen.parseStrLit
|
||||
fn parseStrLit(
|
||||
p: *Parse,
|
||||
token: Ast.TokenIndex,
|
||||
buf: *std.ArrayListUnmanaged(u8),
|
||||
bytes: []const u8,
|
||||
offset: u32,
|
||||
) InnerError!void {
|
||||
const raw_string = bytes[offset..];
|
||||
var buf_managed = buf.toManaged(p.gpa);
|
||||
const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string);
|
||||
buf.* = buf_managed.moveToUnmanaged();
|
||||
switch (try result) {
|
||||
.success => {},
|
||||
.failure => |err| try p.appendStrLitError(err, token, bytes, offset),
|
||||
}
|
||||
}
|
||||
|
||||
/// TODO: try to DRY this with AstGen.failWithStrLitError
|
||||
fn appendStrLitError(
|
||||
p: *Parse,
|
||||
err: std.zig.string_literal.Error,
|
||||
token: Ast.TokenIndex,
|
||||
bytes: []const u8,
|
||||
offset: u32,
|
||||
) Allocator.Error!void {
|
||||
const raw_string = bytes[offset..];
|
||||
switch (err) {
|
||||
.invalid_escape_character => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"invalid escape character: '{c}'",
|
||||
.{raw_string[bad_index]},
|
||||
);
|
||||
},
|
||||
.expected_hex_digit => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"expected hex digit, found '{c}'",
|
||||
.{raw_string[bad_index]},
|
||||
);
|
||||
},
|
||||
.empty_unicode_escape_sequence => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"empty unicode escape sequence",
|
||||
.{},
|
||||
);
|
||||
},
|
||||
.expected_hex_digit_or_rbrace => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"expected hex digit or '}}', found '{c}'",
|
||||
.{raw_string[bad_index]},
|
||||
);
|
||||
},
|
||||
.invalid_unicode_codepoint => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"unicode escape does not correspond to a valid codepoint",
|
||||
.{},
|
||||
);
|
||||
},
|
||||
.expected_lbrace => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"expected '{{', found '{c}",
|
||||
.{raw_string[bad_index]},
|
||||
);
|
||||
},
|
||||
.expected_rbrace => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"expected '}}', found '{c}",
|
||||
.{raw_string[bad_index]},
|
||||
);
|
||||
},
|
||||
.expected_single_quote => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"expected single quote ('), found '{c}",
|
||||
.{raw_string[bad_index]},
|
||||
);
|
||||
},
|
||||
.invalid_character => |bad_index| {
|
||||
try p.appendErrorOff(
|
||||
token,
|
||||
offset + @intCast(u32, bad_index),
|
||||
"invalid byte in string or character literal: '{c}'",
|
||||
.{raw_string[bad_index]},
|
||||
);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn fail(
|
||||
p: *Parse,
|
||||
tok: Ast.TokenIndex,
|
||||
comptime fmt: []const u8,
|
||||
args: anytype,
|
||||
) InnerError {
|
||||
try appendError(p, tok, fmt, args);
|
||||
return error.ParseFailure;
|
||||
}
|
||||
|
||||
fn appendError(p: *Parse, tok: Ast.TokenIndex, comptime fmt: []const u8, args: anytype) !void {
|
||||
return appendErrorOff(p, tok, 0, fmt, args);
|
||||
}
|
||||
|
||||
fn appendErrorOff(
|
||||
p: *Parse,
|
||||
tok: Ast.TokenIndex,
|
||||
byte_offset: u32,
|
||||
comptime fmt: []const u8,
|
||||
args: anytype,
|
||||
) Allocator.Error!void {
|
||||
try p.errors.append(p.gpa, .{
|
||||
.msg = try std.fmt.allocPrint(p.arena, fmt, args),
|
||||
.tok = tok,
|
||||
.off = byte_offset,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
const Manifest = @This();
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const Ast = std.zig.Ast;
|
||||
const testing = std.testing;
|
||||
|
||||
test "basic" {
|
||||
const gpa = testing.allocator;
|
||||
|
||||
const example =
|
||||
\\.{
|
||||
\\ .name = "foo",
|
||||
\\ .version = "3.2.1",
|
||||
\\ .dependencies = .{
|
||||
\\ .bar = .{
|
||||
\\ .url = "https://example.com/baz.tar.gz",
|
||||
\\ .hash = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
|
||||
\\ },
|
||||
\\ },
|
||||
\\}
|
||||
;
|
||||
|
||||
var ast = try std.zig.Ast.parse(gpa, example, .zon);
|
||||
defer ast.deinit(gpa);
|
||||
|
||||
try testing.expect(ast.errors.len == 0);
|
||||
|
||||
var manifest = try Manifest.parse(gpa, ast);
|
||||
defer manifest.deinit(gpa);
|
||||
|
||||
try testing.expectEqualStrings("foo", manifest.name);
|
||||
|
||||
try testing.expectEqual(@as(std.SemanticVersion, .{
|
||||
.major = 3,
|
||||
.minor = 2,
|
||||
.patch = 1,
|
||||
}), manifest.version);
|
||||
|
||||
try testing.expect(manifest.dependencies.count() == 1);
|
||||
try testing.expectEqualStrings("bar", manifest.dependencies.keys()[0]);
|
||||
try testing.expectEqualStrings(
|
||||
"https://example.com/baz.tar.gz",
|
||||
manifest.dependencies.values()[0].url,
|
||||
);
|
||||
try testing.expectEqualStrings(
|
||||
"1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f",
|
||||
manifest.dependencies.values()[0].hash orelse return error.TestFailed,
|
||||
);
|
||||
}
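
// Illustrative sketch, not part of this change: the dependencies field is
// optional in parseRoot above, so the smallest manifest this parser accepts
// is just
//
//     .{
//         .name = "foo",
//         .version = "0.0.1",
//     }
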
|
||||
src/Module.zig (145 changed lines)
@ -328,8 +328,6 @@ pub const ErrorInt = u32;
|
||||
pub const Export = struct {
|
||||
options: std.builtin.ExportOptions,
|
||||
src: LazySrcLoc,
|
||||
/// Represents the position of the export, if any, in the output file.
|
||||
link: link.File.Export,
|
||||
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
|
||||
owner_decl: Decl.Index,
|
||||
/// The Decl containing the export statement. Inline function calls
|
||||
@ -533,17 +531,6 @@ pub const Decl = struct {
|
||||
/// What kind of a declaration is this.
|
||||
kind: Kind,
|
||||
|
||||
/// Represents the position of the code in the output file.
|
||||
/// This is populated regardless of semantic analysis and code generation.
|
||||
link: link.File.LinkBlock,
|
||||
|
||||
/// Represents the function in the linked output file, if the `Decl` is a function.
|
||||
/// This is stored here and not in `Fn` because `Decl` survives across updates but
|
||||
/// `Fn` does not.
|
||||
/// TODO Look into making `Fn` a longer lived structure and moving this field there
|
||||
/// to save on memory usage.
|
||||
fn_link: link.File.LinkFn,
|
||||
|
||||
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
|
||||
/// typed_value is modified.
|
||||
dependants: DepsTable = .{},
|
||||
@ -2067,7 +2054,7 @@ pub const File = struct {
|
||||
if (file.tree_loaded) return &file.tree;
|
||||
|
||||
const source = try file.getSource(gpa);
|
||||
file.tree = try std.zig.parse(gpa, source.bytes);
|
||||
file.tree = try Ast.parse(gpa, source.bytes, .zig);
|
||||
file.tree_loaded = true;
|
||||
return &file.tree;
|
||||
}
|
||||
@ -3672,7 +3659,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
|
||||
file.source = source;
|
||||
file.source_loaded = true;
|
||||
|
||||
file.tree = try std.zig.parse(gpa, source);
|
||||
file.tree = try Ast.parse(gpa, source, .zig);
|
||||
defer if (!file.tree_loaded) file.tree.deinit(gpa);
|
||||
|
||||
if (file.tree.errors.len != 0) {
|
||||
@ -3987,7 +3974,7 @@ pub fn populateBuiltinFile(mod: *Module) !void {
|
||||
else => |e| return e,
|
||||
}
|
||||
|
||||
file.tree = try std.zig.parse(gpa, file.source);
|
||||
file.tree = try Ast.parse(gpa, file.source, .zig);
|
||||
file.tree_loaded = true;
|
||||
assert(file.tree.errors.len == 0); // builtin.zig must parse
|
||||
|
||||
@ -4098,7 +4085,7 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl_index: Decl.Index) SemaError!void {
|
||||
|
||||
// The exports this Decl performs will be re-discovered, so we remove them here
|
||||
// prior to re-analysis.
|
||||
mod.deleteDeclExports(decl_index);
|
||||
try mod.deleteDeclExports(decl_index);
|
||||
|
||||
// Similarly, `@setAlignStack` invocations will be re-discovered.
|
||||
if (decl.getFunction()) |func| {
|
||||
@ -4878,14 +4865,31 @@ pub fn importFile(
|
||||
};
|
||||
}
|
||||
|
||||
pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*EmbedFile {
|
||||
pub fn embedFile(mod: *Module, cur_file: *File, import_string: []const u8) !*EmbedFile {
|
||||
const gpa = mod.gpa;
|
||||
|
||||
// The resolved path is used as the key in the table, to detect if
|
||||
// a file refers to the same as another, despite different relative paths.
|
||||
if (cur_file.pkg.table.get(import_string)) |pkg| {
|
||||
const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
|
||||
pkg.root_src_directory.path orelse ".", pkg.root_src_path,
|
||||
});
|
||||
var keep_resolved_path = false;
|
||||
defer if (!keep_resolved_path) gpa.free(resolved_path);
|
||||
|
||||
const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
|
||||
errdefer assert(mod.embed_table.remove(resolved_path));
|
||||
if (gop.found_existing) return gop.value_ptr.*;
|
||||
|
||||
const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
|
||||
errdefer gpa.free(sub_file_path);
|
||||
|
||||
return newEmbedFile(mod, pkg, sub_file_path, resolved_path, &keep_resolved_path, gop);
|
||||
}
|
||||
|
||||
// The resolved path is used as the key in the table, to detect if a file
|
||||
// refers to the same as another, despite different relative paths.
|
||||
const cur_pkg_dir_path = cur_file.pkg.root_src_directory.path orelse ".";
|
||||
const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
|
||||
cur_pkg_dir_path, cur_file.sub_file_path, "..", rel_file_path,
|
||||
cur_pkg_dir_path, cur_file.sub_file_path, "..", import_string,
|
||||
});
|
||||
var keep_resolved_path = false;
|
||||
defer if (!keep_resolved_path) gpa.free(resolved_path);
|
||||
@ -4894,9 +4898,6 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
|
||||
errdefer assert(mod.embed_table.remove(resolved_path));
|
||||
if (gop.found_existing) return gop.value_ptr.*;
|
||||
|
||||
const new_file = try gpa.create(EmbedFile);
|
||||
errdefer gpa.destroy(new_file);
|
||||
|
||||
const resolved_root_path = try std.fs.path.resolve(gpa, &[_][]const u8{cur_pkg_dir_path});
|
||||
defer gpa.free(resolved_root_path);
|
||||
|
||||
@ -4915,7 +4916,23 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
|
||||
};
|
||||
errdefer gpa.free(sub_file_path);
|
||||
|
||||
var file = try cur_file.pkg.root_src_directory.handle.openFile(sub_file_path, .{});
|
||||
return newEmbedFile(mod, cur_file.pkg, sub_file_path, resolved_path, &keep_resolved_path, gop);
|
||||
}
|
||||
|
||||
fn newEmbedFile(
|
||||
mod: *Module,
|
||||
pkg: *Package,
|
||||
sub_file_path: []const u8,
|
||||
resolved_path: []const u8,
|
||||
keep_resolved_path: *bool,
|
||||
gop: std.StringHashMapUnmanaged(*EmbedFile).GetOrPutResult,
|
||||
) !*EmbedFile {
|
||||
const gpa = mod.gpa;
|
||||
|
||||
const new_file = try gpa.create(EmbedFile);
|
||||
errdefer gpa.destroy(new_file);
|
||||
|
||||
var file = try pkg.root_src_directory.handle.openFile(sub_file_path, .{});
|
||||
defer file.close();
|
||||
|
||||
const actual_stat = try file.stat();
|
||||
@ -4928,10 +4945,6 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
|
||||
const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
|
||||
errdefer gpa.free(bytes);
|
||||
|
||||
log.debug("new embedFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, rel_file_path={s}", .{
|
||||
resolved_root_path, resolved_path, sub_file_path, rel_file_path,
|
||||
});
|
||||
|
||||
if (mod.comp.whole_cache_manifest) |whole_cache_manifest| {
|
||||
const copied_resolved_path = try gpa.dupe(u8, resolved_path);
|
||||
errdefer gpa.free(copied_resolved_path);
|
||||
@ -4940,13 +4953,13 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
|
||||
try whole_cache_manifest.addFilePostContents(copied_resolved_path, bytes, stat);
|
||||
}
|
||||
|
||||
keep_resolved_path = true; // It's now owned by embed_table.
|
||||
keep_resolved_path.* = true; // It's now owned by embed_table.
|
||||
gop.value_ptr.* = new_file;
|
||||
new_file.* = .{
|
||||
.sub_file_path = sub_file_path,
|
||||
.bytes = bytes,
|
||||
.stat = stat,
|
||||
.pkg = cur_file.pkg,
|
||||
.pkg = pkg,
|
||||
.owner_decl = undefined, // Set by Sema immediately after this function returns.
|
||||
};
|
||||
return new_file;
|
||||
@ -5183,20 +5196,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
|
||||
decl.zir_decl_index = @intCast(u32, decl_sub_index);
|
||||
if (decl.getFunction()) |_| {
|
||||
switch (comp.bin_file.tag) {
|
||||
.coff => {
|
||||
// TODO Implement for COFF
|
||||
},
|
||||
.elf => if (decl.fn_link.elf.len != 0) {
|
||||
// TODO Look into detecting when this would be unnecessary by storing enough state
|
||||
// in `Decl` to notice that the line number did not change.
|
||||
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
|
||||
},
|
||||
.macho => if (decl.fn_link.macho.len != 0) {
|
||||
// TODO Look into detecting when this would be unnecessary by storing enough state
|
||||
// in `Decl` to notice that the line number did not change.
|
||||
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
|
||||
},
|
||||
.plan9 => {
|
||||
.coff, .elf, .macho, .plan9 => {
|
||||
// TODO Look into detecting when this would be unnecessary by storing enough state
|
||||
// in `Decl` to notice that the line number did not change.
|
||||
comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl_index });
|
||||
@ -5265,34 +5265,11 @@ pub fn clearDecl(
|
||||
assert(emit_h.decl_table.swapRemove(decl_index));
|
||||
}
|
||||
_ = mod.compile_log_decls.swapRemove(decl_index);
|
||||
mod.deleteDeclExports(decl_index);
|
||||
try mod.deleteDeclExports(decl_index);
|
||||
|
||||
if (decl.has_tv) {
|
||||
if (decl.ty.isFnOrHasRuntimeBits()) {
|
||||
mod.comp.bin_file.freeDecl(decl_index);
|
||||
|
||||
// TODO instead of a union, put this memory trailing Decl objects,
|
||||
// and allow it to be variably sized.
|
||||
decl.link = switch (mod.comp.bin_file.tag) {
|
||||
.coff => .{ .coff = link.File.Coff.Atom.empty },
|
||||
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
|
||||
.macho => .{ .macho = link.File.MachO.Atom.empty },
|
||||
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
|
||||
.c => .{ .c = {} },
|
||||
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
|
||||
.spirv => .{ .spirv = {} },
|
||||
.nvptx => .{ .nvptx = {} },
|
||||
};
|
||||
decl.fn_link = switch (mod.comp.bin_file.tag) {
|
||||
.coff => .{ .coff = {} },
|
||||
.elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
|
||||
.macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
|
||||
.plan9 => .{ .plan9 = {} },
|
||||
.c => .{ .c = {} },
|
||||
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
|
||||
.spirv => .{ .spirv = .{} },
|
||||
.nvptx => .{ .nvptx = {} },
|
||||
};
|
||||
}
|
||||
if (decl.getInnerNamespace()) |namespace| {
|
||||
try namespace.deleteAllDecls(mod, outdated_decls);
|
||||
@ -5358,7 +5335,7 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
|
||||
|
||||
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
|
||||
/// this Decl will cause them to be re-created (or not).
|
||||
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
|
||||
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) Allocator.Error!void {
|
||||
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
|
||||
|
||||
for (export_owners.items) |exp| {
|
||||
@ -5381,16 +5358,16 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
|
||||
}
|
||||
}
|
||||
if (mod.comp.bin_file.cast(link.File.Elf)) |elf| {
|
||||
elf.deleteExport(exp.link.elf);
|
||||
elf.deleteDeclExport(decl_index, exp.options.name);
|
||||
}
|
||||
if (mod.comp.bin_file.cast(link.File.MachO)) |macho| {
|
||||
macho.deleteExport(exp.link.macho);
|
||||
try macho.deleteDeclExport(decl_index, exp.options.name);
|
||||
}
|
||||
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
|
||||
wasm.deleteExport(exp.link.wasm);
|
||||
wasm.deleteDeclExport(decl_index);
|
||||
}
|
||||
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
|
||||
coff.deleteExport(exp.link.coff);
|
||||
coff.deleteDeclExport(decl_index, exp.options.name);
|
||||
}
|
||||
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
|
||||
failed_kv.value.destroy(mod.gpa);
|
||||
@ -5693,26 +5670,6 @@ pub fn allocateNewDecl(
|
||||
.deletion_flag = false,
|
||||
.zir_decl_index = 0,
|
||||
.src_scope = src_scope,
|
||||
.link = switch (mod.comp.bin_file.tag) {
|
||||
.coff => .{ .coff = link.File.Coff.Atom.empty },
|
||||
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
|
||||
.macho => .{ .macho = link.File.MachO.Atom.empty },
|
||||
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
|
||||
.c => .{ .c = {} },
|
||||
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
|
||||
.spirv => .{ .spirv = {} },
|
||||
.nvptx => .{ .nvptx = {} },
|
||||
},
|
||||
.fn_link = switch (mod.comp.bin_file.tag) {
|
||||
.coff => .{ .coff = {} },
|
||||
.elf => .{ .elf = link.File.Dwarf.SrcFn.empty },
|
||||
.macho => .{ .macho = link.File.Dwarf.SrcFn.empty },
|
||||
.plan9 => .{ .plan9 = {} },
|
||||
.c => .{ .c = {} },
|
||||
.wasm => .{ .wasm = link.File.Wasm.FnData.empty },
|
||||
.spirv => .{ .spirv = .{} },
|
||||
.nvptx => .{ .nvptx = {} },
|
||||
},
|
||||
.generation = 0,
|
||||
.is_pub = false,
|
||||
.is_exported = false,
|
||||
|
||||
src/Package.zig (309 changed lines)
@ -1,12 +1,13 @@
|
||||
const Package = @This();
|
||||
|
||||
const builtin = @import("builtin");
|
||||
const std = @import("std");
|
||||
const fs = std.fs;
|
||||
const mem = std.mem;
|
||||
const Allocator = mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const Hash = std.crypto.hash.sha2.Sha256;
|
||||
const log = std.log.scoped(.package);
|
||||
const main = @import("main.zig");
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Module = @import("Module.zig");
|
||||
@ -14,6 +15,7 @@ const ThreadPool = @import("ThreadPool.zig");
|
||||
const WaitGroup = @import("WaitGroup.zig");
|
||||
const Cache = @import("Cache.zig");
|
||||
const build_options = @import("build_options");
|
||||
const Manifest = @import("Manifest.zig");
|
||||
|
||||
pub const Table = std.StringHashMapUnmanaged(*Package);
|
||||
|
||||
@ -140,10 +142,10 @@ pub fn addAndAdopt(parent: *Package, gpa: Allocator, child: *Package) !void {
|
||||
}
|
||||
|
||||
pub const build_zig_basename = "build.zig";
|
||||
pub const ini_basename = build_zig_basename ++ ".ini";
|
||||
|
||||
pub fn fetchAndAddDependencies(
|
||||
pkg: *Package,
|
||||
arena: Allocator,
|
||||
thread_pool: *ThreadPool,
|
||||
http_client: *std.http.Client,
|
||||
directory: Compilation.Directory,
|
||||
@ -152,89 +154,77 @@ pub fn fetchAndAddDependencies(
|
||||
dependencies_source: *std.ArrayList(u8),
|
||||
build_roots_source: *std.ArrayList(u8),
|
||||
name_prefix: []const u8,
|
||||
color: main.Color,
|
||||
) !void {
|
||||
const max_bytes = 10 * 1024 * 1024;
|
||||
const gpa = thread_pool.allocator;
|
||||
const build_zig_ini = directory.handle.readFileAlloc(gpa, ini_basename, max_bytes) catch |err| switch (err) {
|
||||
const build_zig_zon_bytes = directory.handle.readFileAllocOptions(
|
||||
arena,
|
||||
Manifest.basename,
|
||||
max_bytes,
|
||||
null,
|
||||
1,
|
||||
0,
|
||||
) catch |err| switch (err) {
|
||||
error.FileNotFound => {
|
||||
// Handle the same as no dependencies.
|
||||
return;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
defer gpa.free(build_zig_ini);
|
||||
|
||||
const ini: std.Ini = .{ .bytes = build_zig_ini };
|
||||
var any_error = false;
|
||||
var it = ini.iterateSection("\n[dependency]\n");
|
||||
while (it.next()) |dep| {
|
||||
var line_it = mem.split(u8, dep, "\n");
|
||||
var opt_name: ?[]const u8 = null;
|
||||
var opt_url: ?[]const u8 = null;
|
||||
var expected_hash: ?[]const u8 = null;
|
||||
while (line_it.next()) |kv| {
|
||||
const eq_pos = mem.indexOfScalar(u8, kv, '=') orelse continue;
|
||||
const key = kv[0..eq_pos];
|
||||
const value = kv[eq_pos + 1 ..];
|
||||
if (mem.eql(u8, key, "name")) {
|
||||
opt_name = value;
|
||||
} else if (mem.eql(u8, key, "url")) {
|
||||
opt_url = value;
|
||||
} else if (mem.eql(u8, key, "hash")) {
|
||||
expected_hash = value;
|
||||
} else {
|
||||
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(key.ptr) - @ptrToInt(ini.bytes.ptr));
|
||||
std.log.warn("{s}/{s}:{d}:{d} unrecognized key: '{s}'", .{
|
||||
directory.path orelse ".",
|
||||
"build.zig.ini",
|
||||
loc.line,
|
||||
loc.column,
|
||||
key,
|
||||
});
|
||||
}
|
||||
var ast = try std.zig.Ast.parse(gpa, build_zig_zon_bytes, .zon);
|
||||
defer ast.deinit(gpa);
|
||||
|
||||
if (ast.errors.len > 0) {
|
||||
const file_path = try directory.join(arena, &.{Manifest.basename});
|
||||
try main.printErrsMsgToStdErr(gpa, arena, ast, file_path, color);
|
||||
return error.PackageFetchFailed;
|
||||
}
|
||||
|
||||
var manifest = try Manifest.parse(gpa, ast);
|
||||
defer manifest.deinit(gpa);
|
||||
|
||||
if (manifest.errors.len > 0) {
|
||||
const ttyconf: std.debug.TTY.Config = switch (color) {
|
||||
.auto => std.debug.detectTTYConfig(std.io.getStdErr()),
|
||||
.on => .escape_codes,
|
||||
.off => .no_color,
|
||||
};
|
||||
const file_path = try directory.join(arena, &.{Manifest.basename});
|
||||
for (manifest.errors) |msg| {
|
||||
Report.renderErrorMessage(ast, file_path, ttyconf, msg, &.{});
|
||||
}
|
||||
return error.PackageFetchFailed;
|
||||
}
|
||||
|
||||
const name = opt_name orelse {
|
||||
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
|
||||
std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
|
||||
directory.path orelse ".",
|
||||
"build.zig.ini",
|
||||
loc.line,
|
||||
loc.column,
|
||||
});
|
||||
any_error = true;
|
||||
continue;
|
||||
};
|
||||
const report: Report = .{
|
||||
.ast = &ast,
|
||||
.directory = directory,
|
||||
.color = color,
|
||||
.arena = arena,
|
||||
};
|
||||
|
||||
const url = opt_url orelse {
|
||||
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(dep.ptr) - @ptrToInt(ini.bytes.ptr));
|
||||
std.log.err("{s}/{s}:{d}:{d} missing key: 'name'", .{
|
||||
directory.path orelse ".",
|
||||
"build.zig.ini",
|
||||
loc.line,
|
||||
loc.column,
|
||||
});
|
||||
any_error = true;
|
||||
continue;
|
||||
};
|
||||
var any_error = false;
|
||||
const deps_list = manifest.dependencies.values();
|
||||
for (manifest.dependencies.keys()) |name, i| {
|
||||
const dep = deps_list[i];
|
||||
|
||||
const sub_prefix = try std.fmt.allocPrint(gpa, "{s}{s}.", .{ name_prefix, name });
|
||||
defer gpa.free(sub_prefix);
|
||||
const sub_prefix = try std.fmt.allocPrint(arena, "{s}{s}.", .{ name_prefix, name });
|
||||
const fqn = sub_prefix[0 .. sub_prefix.len - 1];
|
||||
|
||||
const sub_pkg = try fetchAndUnpack(
|
||||
thread_pool,
|
||||
http_client,
|
||||
global_cache_directory,
|
||||
url,
|
||||
expected_hash,
|
||||
ini,
|
||||
directory,
|
||||
dep,
|
||||
report,
|
||||
build_roots_source,
|
||||
fqn,
|
||||
);
|
||||
|
||||
try pkg.fetchAndAddDependencies(
|
||||
arena,
|
||||
thread_pool,
|
||||
http_client,
|
||||
sub_pkg.root_src_directory,
|
||||
@ -243,6 +233,7 @@ pub fn fetchAndAddDependencies(
|
||||
dependencies_source,
|
||||
build_roots_source,
|
||||
sub_prefix,
|
||||
color,
|
||||
);
|
||||
|
||||
try addAndAdopt(pkg, gpa, sub_pkg);
|
||||
@ -252,7 +243,7 @@ pub fn fetchAndAddDependencies(
|
||||
});
|
||||
}
|
||||
|
||||
if (any_error) return error.InvalidBuildZigIniFile;
|
||||
if (any_error) return error.InvalidBuildManifestFile;
|
||||
}
|
||||
|
||||
pub fn createFilePkg(
|
||||
@ -263,7 +254,7 @@ pub fn createFilePkg(
|
||||
contents: []const u8,
|
||||
) !*Package {
|
||||
const rand_int = std.crypto.random.int(u64);
|
||||
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ hex64(rand_int);
|
||||
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ Manifest.hex64(rand_int);
|
||||
{
|
||||
var tmp_dir = try cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
|
||||
defer tmp_dir.close();
|
||||
@ -281,14 +272,73 @@ pub fn createFilePkg(
|
||||
return createWithDir(gpa, name, cache_directory, o_dir_sub_path, basename);
|
||||
}
|
||||
|
||||
const Report = struct {
|
||||
ast: *const std.zig.Ast,
|
||||
directory: Compilation.Directory,
|
||||
color: main.Color,
|
||||
arena: Allocator,
|
||||
|
||||
fn fail(
|
||||
report: Report,
|
||||
tok: std.zig.Ast.TokenIndex,
|
||||
comptime fmt_string: []const u8,
|
||||
fmt_args: anytype,
|
||||
) error{ PackageFetchFailed, OutOfMemory } {
|
||||
return failWithNotes(report, &.{}, tok, fmt_string, fmt_args);
|
||||
}
|
||||
|
||||
fn failWithNotes(
|
||||
report: Report,
|
||||
notes: []const Compilation.AllErrors.Message,
|
||||
tok: std.zig.Ast.TokenIndex,
|
||||
comptime fmt_string: []const u8,
|
||||
fmt_args: anytype,
|
||||
) error{ PackageFetchFailed, OutOfMemory } {
|
||||
const ttyconf: std.debug.TTY.Config = switch (report.color) {
|
||||
.auto => std.debug.detectTTYConfig(std.io.getStdErr()),
|
||||
.on => .escape_codes,
|
||||
.off => .no_color,
|
||||
};
|
||||
const file_path = try report.directory.join(report.arena, &.{Manifest.basename});
|
||||
renderErrorMessage(report.ast.*, file_path, ttyconf, .{
|
||||
.tok = tok,
|
||||
.off = 0,
|
||||
.msg = try std.fmt.allocPrint(report.arena, fmt_string, fmt_args),
|
||||
}, notes);
|
||||
return error.PackageFetchFailed;
|
||||
}
|
||||
|
||||
fn renderErrorMessage(
|
||||
ast: std.zig.Ast,
|
||||
file_path: []const u8,
|
||||
ttyconf: std.debug.TTY.Config,
|
||||
msg: Manifest.ErrorMessage,
|
||||
notes: []const Compilation.AllErrors.Message,
|
||||
) void {
|
||||
const token_starts = ast.tokens.items(.start);
|
||||
const start_loc = ast.tokenLocation(0, msg.tok);
|
||||
Compilation.AllErrors.Message.renderToStdErr(.{ .src = .{
|
||||
.msg = msg.msg,
|
||||
.src_path = file_path,
|
||||
.line = @intCast(u32, start_loc.line),
|
||||
.column = @intCast(u32, start_loc.column),
|
||||
.span = .{
|
||||
.start = token_starts[msg.tok],
|
||||
.end = @intCast(u32, token_starts[msg.tok] + ast.tokenSlice(msg.tok).len),
|
||||
.main = token_starts[msg.tok] + msg.off,
|
||||
},
|
||||
.source_line = ast.source[start_loc.line_start..start_loc.line_end],
|
||||
.notes = notes,
|
||||
} }, ttyconf);
|
||||
}
|
||||
};
|
||||
|
||||
fn fetchAndUnpack(
|
||||
thread_pool: *ThreadPool,
|
||||
http_client: *std.http.Client,
|
||||
global_cache_directory: Compilation.Directory,
|
||||
url: []const u8,
|
||||
expected_hash: ?[]const u8,
|
||||
ini: std.Ini,
|
||||
comp_directory: Compilation.Directory,
|
||||
dep: Manifest.Dependency,
|
||||
report: Report,
|
||||
build_roots_source: *std.ArrayList(u8),
|
||||
fqn: []const u8,
|
||||
) !*Package {
|
||||
@ -297,17 +347,9 @@ fn fetchAndUnpack(
|
||||
|
||||
// Check if the expected_hash is already present in the global package
|
||||
// cache, and thereby avoid both fetching and unpacking.
|
||||
if (expected_hash) |h| cached: {
|
||||
if (h.len != 2 * Hash.digest_length) {
|
||||
return reportError(
|
||||
ini,
|
||||
comp_directory,
|
||||
h.ptr,
|
||||
"wrong hash size. expected: {d}, found: {d}",
|
||||
.{ Hash.digest_length, h.len },
|
||||
);
|
||||
}
|
||||
const hex_digest = h[0 .. 2 * Hash.digest_length];
|
||||
if (dep.hash) |h| cached: {
|
||||
const hex_multihash_len = 2 * Manifest.multihash_len;
|
||||
const hex_digest = h[0..hex_multihash_len];
|
||||
const pkg_dir_sub_path = "p" ++ s ++ hex_digest;
|
||||
var pkg_dir = global_cache_directory.handle.openDir(pkg_dir_sub_path, .{}) catch |err| switch (err) {
|
||||
error.FileNotFound => break :cached,
|
||||
@ -344,10 +386,10 @@ fn fetchAndUnpack(
|
||||
return ptr;
|
||||
}
|
||||
|
||||
const uri = try std.Uri.parse(url);
|
||||
const uri = try std.Uri.parse(dep.url);
|
||||
|
||||
const rand_int = std.crypto.random.int(u64);
|
||||
const tmp_dir_sub_path = "tmp" ++ s ++ hex64(rand_int);
|
||||
const tmp_dir_sub_path = "tmp" ++ s ++ Manifest.hex64(rand_int);
|
||||
|
||||
const actual_hash = a: {
|
||||
var tmp_directory: Compilation.Directory = d: {
|
||||
@ -376,13 +418,9 @@ fn fetchAndUnpack(
|
||||
// by default, so the same logic applies for buffering the reader as for gzip.
|
||||
try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz);
|
||||
} else {
|
||||
return reportError(
|
||||
ini,
|
||||
comp_directory,
|
||||
uri.path.ptr,
|
||||
"unknown file extension for path '{s}'",
|
||||
.{uri.path},
|
||||
);
|
||||
return report.fail(dep.url_tok, "unknown file extension for path '{s}'", .{
|
||||
uri.path,
|
||||
});
|
||||
}
|
||||
|
||||
// TODO: delete files not included in the package prior to computing the package hash.
|
||||
@ -393,28 +431,21 @@ fn fetchAndUnpack(
|
||||
break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle });
|
||||
};
|
||||
|
||||
const pkg_dir_sub_path = "p" ++ s ++ hexDigest(actual_hash);
|
||||
const pkg_dir_sub_path = "p" ++ s ++ Manifest.hexDigest(actual_hash);
|
||||
try renameTmpIntoCache(global_cache_directory.handle, tmp_dir_sub_path, pkg_dir_sub_path);
|
||||
|
||||
if (expected_hash) |h| {
|
||||
const actual_hex = hexDigest(actual_hash);
|
||||
const actual_hex = Manifest.hexDigest(actual_hash);
|
||||
if (dep.hash) |h| {
|
||||
if (!mem.eql(u8, h, &actual_hex)) {
|
||||
return reportError(
|
||||
ini,
|
||||
comp_directory,
|
||||
h.ptr,
|
||||
"hash mismatch: expected: {s}, found: {s}",
|
||||
.{ h, actual_hex },
|
||||
);
|
||||
return report.fail(dep.hash_tok, "hash mismatch: expected: {s}, found: {s}", .{
|
||||
h, actual_hex,
|
||||
});
|
||||
}
|
||||
} else {
|
||||
return reportError(
|
||||
ini,
|
||||
comp_directory,
|
||||
url.ptr,
|
||||
"url field is missing corresponding hash field: hash={s}",
|
||||
.{std.fmt.fmtSliceHexLower(&actual_hash)},
|
||||
);
|
||||
const notes: [1]Compilation.AllErrors.Message = .{.{ .plain = .{
|
||||
.msg = try std.fmt.allocPrint(report.arena, "expected .hash = \"{s}\",", .{&actual_hex}),
|
||||
} }};
|
||||
return report.failWithNotes(¬es, dep.url_tok, "url field is missing corresponding hash field", .{});
|
||||
}
|
||||
|
||||
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
|
||||
@ -440,35 +471,21 @@ fn unpackTarball(
|
||||
|
||||
try std.tar.pipeToFileSystem(out_dir, decompress.reader(), .{
|
||||
.strip_components = 1,
|
||||
// TODO: we would like to set this to executable_bit_only, but two
|
||||
// things need to happen before that:
|
||||
// 1. the tar implementation needs to support it
|
||||
// 2. the hashing algorithm here needs to support detecting the is_executable
|
||||
// bit on Windows from the ACLs (see the isExecutable function).
|
||||
.mode_mode = .ignore,
|
||||
});
|
||||
}
|
||||
|
||||
fn reportError(
|
||||
ini: std.Ini,
|
||||
comp_directory: Compilation.Directory,
|
||||
src_ptr: [*]const u8,
|
||||
comptime fmt_string: []const u8,
|
||||
fmt_args: anytype,
|
||||
) error{PackageFetchFailed} {
|
||||
const loc = std.zig.findLineColumn(ini.bytes, @ptrToInt(src_ptr) - @ptrToInt(ini.bytes.ptr));
|
||||
if (comp_directory.path) |p| {
|
||||
std.debug.print("{s}{c}{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
|
||||
p, fs.path.sep, ini_basename, loc.line + 1, loc.column + 1,
|
||||
} ++ fmt_args);
|
||||
} else {
|
||||
std.debug.print("{s}:{d}:{d}: error: " ++ fmt_string ++ "\n", .{
|
||||
ini_basename, loc.line + 1, loc.column + 1,
|
||||
} ++ fmt_args);
|
||||
}
|
||||
return error.PackageFetchFailed;
|
||||
}
|
||||
|
||||
const HashedFile = struct {
|
||||
path: []const u8,
|
||||
hash: [Hash.digest_length]u8,
|
||||
hash: [Manifest.Hash.digest_length]u8,
|
||||
failure: Error!void,
|
||||
|
||||
const Error = fs.File.OpenError || fs.File.ReadError;
|
||||
const Error = fs.File.OpenError || fs.File.ReadError || fs.File.StatError;
|
||||
|
||||
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
|
||||
_ = context;
|
||||
@ -479,7 +496,7 @@ const HashedFile = struct {
|
||||
fn computePackageHash(
|
||||
thread_pool: *ThreadPool,
|
||||
pkg_dir: fs.IterableDir,
|
||||
) ![Hash.digest_length]u8 {
|
||||
) ![Manifest.Hash.digest_length]u8 {
|
||||
const gpa = thread_pool.allocator;
|
||||
|
||||
// We'll use an arena allocator for the path name strings since they all
|
||||
@ -522,7 +539,7 @@ fn computePackageHash(
|
||||
|
||||
std.sort.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);
|
||||
|
||||
var hasher = Hash.init(.{});
|
||||
var hasher = Manifest.Hash.init(.{});
|
||||
var any_failures = false;
|
||||
for (all_files.items) |hashed_file| {
|
||||
hashed_file.failure catch |err| {
|
||||
@ -543,7 +560,9 @@ fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
|
||||
fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
|
||||
var buf: [8000]u8 = undefined;
|
||||
var file = try dir.openFile(hashed_file.path, .{});
|
||||
var hasher = Hash.init(.{});
|
||||
var hasher = Manifest.Hash.init(.{});
|
||||
hasher.update(hashed_file.path);
|
||||
hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
|
||||
while (true) {
|
||||
const bytes_read = try file.read(&buf);
|
||||
if (bytes_read == 0) break;
|
||||
@ -552,31 +571,17 @@ fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void
|
||||
hasher.final(&hashed_file.hash);
|
||||
}
|
||||
|
||||
const hex_charset = "0123456789abcdef";
|
||||
|
||||
fn hex64(x: u64) [16]u8 {
|
||||
var result: [16]u8 = undefined;
|
||||
var i: usize = 0;
|
||||
while (i < 8) : (i += 1) {
|
||||
const byte = @truncate(u8, x >> @intCast(u6, 8 * i));
|
||||
result[i * 2 + 0] = hex_charset[byte >> 4];
|
||||
result[i * 2 + 1] = hex_charset[byte & 15];
|
||||
fn isExecutable(file: fs.File) !bool {
|
||||
if (builtin.os.tag == .windows) {
|
||||
// TODO check the ACL on Windows.
|
||||
// Until this is implemented, this could be a false negative on
|
||||
// Windows, which is why we do not yet set executable_bit_only above
|
||||
// when unpacking the tarball.
|
||||
return false;
|
||||
} else {
|
||||
const stat = try file.stat();
|
||||
return (stat.mode & std.os.S.IXUSR) != 0;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
test hex64 {
|
||||
const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]";
|
||||
try std.testing.expectEqualStrings("[00efcdab78563412]", s);
|
||||
}
|
||||
|
||||
fn hexDigest(digest: [Hash.digest_length]u8) [Hash.digest_length * 2]u8 {
|
||||
var result: [Hash.digest_length * 2]u8 = undefined;
|
||||
for (digest) |byte, i| {
|
||||
result[i * 2 + 0] = hex_charset[byte >> 4];
|
||||
result[i * 2 + 1] = hex_charset[byte & 15];
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
fn renameTmpIntoCache(
|
||||
|
||||
src/Sema.zig (113 changed lines)
@ -1015,6 +1015,7 @@ fn analyzeBodyInner(
.float_cast => try sema.zirFloatCast(block, inst),
.int_cast => try sema.zirIntCast(block, inst),
.ptr_cast => try sema.zirPtrCast(block, inst),
.qual_cast => try sema.zirQualCast(block, inst),
.truncate => try sema.zirTruncate(block, inst),
.align_cast => try sema.zirAlignCast(block, inst),
.has_decl => try sema.zirHasDecl(block, inst),
@ -3294,7 +3295,7 @@ fn ensureResultUsed(
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is ignored", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{});
try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@ -3325,7 +3326,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const msg = msg: {
const msg = try sema.errMsg(block, src, "error is discarded", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, src, msg, "consider using `try`, `catch`, or `if`", .{});
try sema.errNote(block, src, msg, "consider using 'try', 'catch', or 'if'", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
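The reworded notes point at the three usual ways to consume an error union. A small, self-contained illustration of what each suggestion looks like at a call site; the `parse` helper is made up for the example.

const std = @import("std");

fn parse(s: []const u8) !u32 {
    return std.fmt.parseInt(u32, s, 10);
}

test "consume an error union with try, catch, or if" {
    // `try` propagates the error to the caller.
    const a = try parse("42");

    // `catch` substitutes a fallback value.
    const b = parse("oops") catch 0;

    // `if` branches on success and failure.
    if (parse("7")) |c| {
        try std.testing.expectEqual(@as(u32, 49), a + b + c);
    } else |err| {
        return err;
    }
}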
@ -5564,16 +5565,6 @@ pub fn analyzeExport(
|
||||
.visibility = borrowed_options.visibility,
|
||||
},
|
||||
.src = src,
|
||||
.link = switch (mod.comp.bin_file.tag) {
|
||||
.coff => .{ .coff = .{} },
|
||||
.elf => .{ .elf = .{} },
|
||||
.macho => .{ .macho = .{} },
|
||||
.plan9 => .{ .plan9 = null },
|
||||
.c => .{ .c = {} },
|
||||
.wasm => .{ .wasm = .{} },
|
||||
.spirv => .{ .spirv = {} },
|
||||
.nvptx => .{ .nvptx = {} },
|
||||
},
|
||||
.owner_decl = sema.owner_decl_index,
|
||||
.src_decl = block.src_decl,
|
||||
.exported_decl = exported_decl_index,
|
||||
@ -6884,6 +6875,8 @@ fn analyzeInlineCallArg(
|
||||
if (err == error.AnalysisFail and param_block.comptime_reason != null) try param_block.comptime_reason.?.explain(sema, sema.err);
|
||||
return err;
|
||||
};
|
||||
} else if (!is_comptime_call and zir_tags[inst] == .param_comptime) {
|
||||
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
|
||||
}
|
||||
const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{
|
||||
.func_inst = func_inst,
|
||||
@ -6957,6 +6950,9 @@ fn analyzeInlineCallArg(
|
||||
.val = arg_val,
|
||||
};
|
||||
} else {
|
||||
if (zir_tags[inst] == .param_anytype_comptime) {
|
||||
_ = try sema.resolveConstMaybeUndefVal(arg_block, arg_src, uncasted_arg, "parameter is comptime");
|
||||
}
|
||||
sema.inst_map.putAssumeCapacityNoClobber(inst, uncasted_arg);
|
||||
}
|
||||
|
||||
@ -8477,7 +8473,7 @@ fn handleExternLibName(
|
||||
return sema.fail(
|
||||
block,
|
||||
src_loc,
|
||||
"dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by `-l{s}` or `-fPIC`.",
|
||||
"dependency on dynamic library '{s}' requires enabling Position Independent Code. Fixed by '-l{s}' or '-fPIC'.",
|
||||
.{ lib_name, lib_name },
|
||||
);
|
||||
}
|
||||
@ -9014,7 +9010,18 @@ fn zirParam(
|
||||
if (is_comptime and sema.preallocated_new_func != null) {
|
||||
// We have a comptime value for this parameter so it should be elided from the
|
||||
// function type of the function instruction in this block.
|
||||
const coerced_arg = try sema.coerce(block, param_ty, arg, src);
|
||||
const coerced_arg = sema.coerce(block, param_ty, arg, .unneeded) catch |err| switch (err) {
|
||||
error.NeededSourceLocation => {
|
||||
// We are instantiating a generic function and a comptime arg
|
||||
// cannot be coerced to the param type, but since we don't
|
||||
// have the callee source location return `GenericPoison`
|
||||
// so that the instantiation is failed and the coercion
|
||||
// is handled by comptime call logic instead.
|
||||
assert(sema.is_generic_instantiation);
|
||||
return error.GenericPoison;
|
||||
},
|
||||
else => return err,
|
||||
};
|
||||
sema.inst_map.putAssumeCapacity(inst, coerced_arg);
|
||||
return;
|
||||
}
|
||||
@ -19529,13 +19536,34 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
|
||||
const operand_info = operand_ty.ptrInfo().data;
|
||||
const dest_info = dest_ty.ptrInfo().data;
|
||||
if (!operand_info.mutable and dest_info.mutable) {
|
||||
return sema.fail(block, src, "cast discards const qualifier", .{});
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(block, src, "cast discards const qualifier", .{});
|
||||
errdefer msg.destroy(sema.gpa);
|
||||
|
||||
try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(msg);
|
||||
}
|
||||
if (operand_info.@"volatile" and !dest_info.@"volatile") {
|
||||
return sema.fail(block, src, "cast discards volatile qualifier", .{});
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(block, src, "cast discards volatile qualifier", .{});
|
||||
errdefer msg.destroy(sema.gpa);
|
||||
|
||||
try sema.errNote(block, src, msg, "consider using '@qualCast'", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(msg);
|
||||
}
|
||||
if (operand_info.@"addrspace" != dest_info.@"addrspace") {
|
||||
return sema.fail(block, src, "cast changes pointer address space", .{});
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(block, src, "cast changes pointer address space", .{});
|
||||
errdefer msg.destroy(sema.gpa);
|
||||
|
||||
try sema.errNote(block, src, msg, "consider using '@addrSpaceCast'", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(msg);
|
||||
}
|
||||
|
||||
const dest_is_slice = dest_ty.isSlice();
|
||||
@ -19590,6 +19618,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
|
||||
try sema.errNote(block, dest_ty_src, msg, "'{}' has alignment '{d}'", .{
|
||||
dest_ty.fmt(sema.mod), dest_align,
|
||||
});
|
||||
|
||||
try sema.errNote(block, src, msg, "consider using '@alignCast'", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(msg);
|
||||
@ -19625,6 +19655,49 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
|
||||
return block.addBitCast(aligned_dest_ty, ptr);
|
||||
}

fn zirQualCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
const operand = try sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);

try sema.checkPtrType(block, dest_ty_src, dest_ty);
try sema.checkPtrOperand(block, operand_src, operand_ty);

var operand_payload = operand_ty.ptrInfo();
var dest_info = dest_ty.ptrInfo();

operand_payload.data.mutable = dest_info.data.mutable;
operand_payload.data.@"volatile" = dest_info.data.@"volatile";

const altered_operand_ty = Type.initPayload(&operand_payload.base);
if (!altered_operand_ty.eql(dest_ty, sema.mod)) {
const msg = msg: {
const msg = try sema.errMsg(block, src, "'@qualCast' can only modify 'const' and 'volatile' qualifiers", .{});
errdefer msg.destroy(sema.gpa);

dest_info.data.mutable = !operand_ty.isConstPtr();
dest_info.data.@"volatile" = operand_ty.isVolatilePtr();
const altered_dest_ty = Type.initPayload(&dest_info.base);
try sema.errNote(block, src, msg, "expected type '{}'", .{altered_dest_ty.fmt(sema.mod)});
try sema.errNote(block, src, msg, "got type '{}'", .{operand_ty.fmt(sema.mod)});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}

if (try sema.resolveMaybeUndefVal(operand)) |operand_val| {
return sema.addConstant(dest_ty, operand_val);
}

try sema.requireRuntimeBlock(block, src, null);
return block.addBitCast(dest_ty, operand);
}
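For context, a sketch of how the new builtin is meant to be invoked at the language level. This only compiles with a compiler that includes this change, and the call-site shape (destination type first, operand second) is inferred from the ZIR payload documented later in this diff.

test "@qualCast strips const and volatile only" {
    const std = @import("std");
    var x: u32 = 123;
    const p: *const volatile u32 = &x;
    // Drop the const and volatile qualifiers; element type, alignment and
    // address space must stay the same, otherwise Sema reports
    // "'@qualCast' can only modify 'const' and 'volatile' qualifiers".
    const q = @qualCast(*u32, p);
    q.* += 1;
    try std.testing.expectEqual(@as(u32, 124), x);
}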
|
||||
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
|
||||
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
|
||||
const src = inst_data.src();
|
||||
@ -25141,7 +25214,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.errorUnionPayload(), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert error union to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using `try`, `catch`, or `if`", .{});
try sema.errNote(block, inst_src, msg, "consider using 'try', 'catch', or 'if'", .{});
}

// ?T to T
@ -25150,7 +25223,7 @@ fn coerceExtra(
(try sema.coerceInMemoryAllowed(block, inst_ty.optionalChild(&buf), dest_ty, false, target, dest_ty_src, inst_src)) == .ok)
{
try sema.errNote(block, inst_src, msg, "cannot convert optional to payload type", .{});
try sema.errNote(block, inst_src, msg, "consider using `.?`, `orelse`, or `if`", .{});
try sema.errNote(block, inst_src, msg, "consider using '.?', 'orelse', or 'if'", .{});
}

try in_memory_result.report(sema, block, inst_src, msg);
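Likewise for optionals, the reworded note lists the three idioms for unwrapping a `?T`. A minimal standalone example:

const std = @import("std");

test "unwrap an optional with .?, orelse, or if" {
    const maybe: ?u32 = 10;

    // `.?` asserts the value is non-null.
    const a = maybe.?;

    // `orelse` supplies a default for the null case.
    const b = @as(?u32, null) orelse 5;

    // `if` unwraps only when a value is present.
    if (maybe) |c| {
        try std.testing.expectEqual(@as(u32, 25), a + b + c);
    }
}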
@ -26076,7 +26149,7 @@ fn coerceVarArgParam(
.Array => return sema.fail(block, inst_src, "arrays must be passed by reference to variadic function", .{}),
.Float => float: {
const target = sema.mod.getTarget();
const double_bits = @import("type.zig").CType.sizeInBits(.double, target);
const double_bits = target.c_type_bit_size(.double);
const inst_bits = uncasted_ty.floatBits(sema.mod.getTarget());
if (inst_bits >= double_bits) break :float inst;
switch (double_bits) {

@ -176,7 +176,9 @@ pub fn print(

var i: u32 = 0;
while (i < max_len) : (i += 1) {
buf[i] = std.math.cast(u8, val.fieldValue(ty, i).toUnsignedInt(target)) orelse break :str;
const elem = val.fieldValue(ty, i);
if (elem.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str;
}

const truncated = if (len > max_string_len) " (truncated)" else "";
@ -390,6 +392,7 @@ pub fn print(
while (i < max_len) : (i += 1) {
var elem_buf: Value.ElemValueBuffer = undefined;
const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
if (elem_val.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
}

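The `orelse break :str` pattern above relies on `std.math.cast` returning null when the value does not fit the destination type. A tiny standalone demonstration:

const std = @import("std");

test "std.math.cast returns null on overflow" {
    const small: ?u8 = std.math.cast(u8, @as(u32, 200));
    const too_big: ?u8 = std.math.cast(u8, @as(u32, 300));
    try std.testing.expectEqual(@as(?u8, 200), small);
    try std.testing.expectEqual(@as(?u8, null), too_big);
}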
@ -857,6 +857,9 @@ pub const Inst = struct {
|
||||
/// Implements the `@ptrCast` builtin.
|
||||
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
|
||||
ptr_cast,
|
||||
/// Implements the `@qualCast` builtin.
|
||||
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
|
||||
qual_cast,
|
||||
/// Implements the `@truncate` builtin.
|
||||
/// Uses `pl_node` with payload `Bin`. `lhs` is dest type, `rhs` is operand.
|
||||
truncate,
|
||||
@ -1195,6 +1198,7 @@ pub const Inst = struct {
|
||||
.float_cast,
|
||||
.int_cast,
|
||||
.ptr_cast,
|
||||
.qual_cast,
|
||||
.truncate,
|
||||
.align_cast,
|
||||
.has_field,
|
||||
@ -1484,6 +1488,7 @@ pub const Inst = struct {
|
||||
.float_cast,
|
||||
.int_cast,
|
||||
.ptr_cast,
|
||||
.qual_cast,
|
||||
.truncate,
|
||||
.align_cast,
|
||||
.has_field,
|
||||
@ -1755,6 +1760,7 @@ pub const Inst = struct {
|
||||
.float_cast = .pl_node,
|
||||
.int_cast = .pl_node,
|
||||
.ptr_cast = .pl_node,
|
||||
.qual_cast = .pl_node,
|
||||
.truncate = .pl_node,
|
||||
.align_cast = .pl_node,
|
||||
.typeof_builtin = .pl_node,
|
||||
|
||||
@ -203,13 +203,7 @@ const DbgInfoReloc = struct {
|
||||
else => unreachable, // not a possible argument
|
||||
|
||||
};
|
||||
try dw.genArgDbgInfo(
|
||||
reloc.name,
|
||||
reloc.ty,
|
||||
function.bin_file.tag,
|
||||
function.mod_fn.owner_decl,
|
||||
loc,
|
||||
);
|
||||
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -255,14 +249,7 @@ const DbgInfoReloc = struct {
|
||||
break :blk .nop;
|
||||
},
|
||||
};
|
||||
try dw.genVarDbgInfo(
|
||||
reloc.name,
|
||||
reloc.ty,
|
||||
function.bin_file.tag,
|
||||
function.mod_fn.owner_decl,
|
||||
is_ptr,
|
||||
loc,
|
||||
);
|
||||
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -4019,11 +4006,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
|
||||
.direct => .load_memory_ptr_direct,
|
||||
.import => unreachable,
|
||||
};
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
|
||||
const atom_index = switch (self.bin_file.tag) {
|
||||
.macho => owner_decl.link.macho.getSymbolIndex().?,
|
||||
.coff => owner_decl.link.coff.getSymbolIndex().?,
|
||||
.macho => blk: {
|
||||
const macho_file = self.bin_file.cast(link.File.MachO).?;
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
.coff => blk: {
|
||||
const coff_file = self.bin_file.cast(link.File.Coff).?;
|
||||
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
else => unreachable, // unsupported target format
|
||||
};
|
||||
_ = try self.addInst(.{
|
||||
@ -4301,34 +4294,37 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
if (self.air.value(callee)) |func_value| {
|
||||
if (func_value.castTag(.function)) |func_payload| {
|
||||
const func = func_payload.data;
|
||||
const fn_owner_decl = mod.declPtr(func.owner_decl);
|
||||
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
|
||||
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
|
||||
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
try fn_owner_decl.link.macho.ensureInitialized(macho_file);
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
try self.genSetReg(Type.initTag(.u64), .x30, .{
|
||||
.linker_load = .{
|
||||
.type = .got,
|
||||
.sym_index = fn_owner_decl.link.macho.getSymbolIndex().?,
|
||||
.sym_index = sym_index,
|
||||
},
|
||||
});
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
|
||||
try fn_owner_decl.link.coff.ensureInitialized(coff_file);
|
||||
const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
|
||||
try self.genSetReg(Type.initTag(.u64), .x30, .{
|
||||
.linker_load = .{
|
||||
.type = .got,
|
||||
.sym_index = fn_owner_decl.link.coff.getSymbolIndex().?,
|
||||
.sym_index = sym_index,
|
||||
},
|
||||
});
|
||||
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
|
||||
try p9.seeDecl(func.owner_decl);
|
||||
const decl_block_index = try p9.seeDecl(func.owner_decl);
|
||||
const decl_block = p9.getDeclBlock(decl_block_index);
|
||||
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
|
||||
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
|
||||
const got_addr = p9.bases.data;
|
||||
const got_index = fn_owner_decl.link.plan9.got_index.?;
|
||||
const got_index = decl_block.got_index.?;
|
||||
const fn_got_addr = got_addr + got_index * ptr_bytes;
|
||||
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
|
||||
} else unreachable;
|
||||
@ -4349,11 +4345,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
|
||||
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
_ = try self.addInst(.{
|
||||
.tag = .call_extern,
|
||||
.data = .{
|
||||
.relocation = .{
|
||||
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
|
||||
.atom_index = atom_index,
|
||||
.sym_index = sym_index,
|
||||
},
|
||||
},
|
||||
@ -5488,11 +5486,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
|
||||
.direct => .load_memory_ptr_direct,
|
||||
.import => unreachable,
|
||||
};
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
|
||||
const atom_index = switch (self.bin_file.tag) {
|
||||
.macho => owner_decl.link.macho.getSymbolIndex().?,
|
||||
.coff => owner_decl.link.coff.getSymbolIndex().?,
|
||||
.macho => blk: {
|
||||
const macho_file = self.bin_file.cast(link.File.MachO).?;
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
.coff => blk: {
|
||||
const coff_file = self.bin_file.cast(link.File.Coff).?;
|
||||
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
else => unreachable, // unsupported target format
|
||||
};
|
||||
_ = try self.addInst(.{
|
||||
@ -5602,11 +5606,17 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
|
||||
.direct => .load_memory_direct,
|
||||
.import => .load_memory_import,
|
||||
};
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
|
||||
const atom_index = switch (self.bin_file.tag) {
|
||||
.macho => owner_decl.link.macho.getSymbolIndex().?,
|
||||
.coff => owner_decl.link.coff.getSymbolIndex().?,
|
||||
.macho => blk: {
|
||||
const macho_file = self.bin_file.cast(link.File.MachO).?;
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
.coff => blk: {
|
||||
const coff_file = self.bin_file.cast(link.File.Coff).?;
|
||||
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
else => unreachable, // unsupported target format
|
||||
};
|
||||
_ = try self.addInst(.{
|
||||
@ -5796,11 +5806,17 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
|
||||
.direct => .load_memory_ptr_direct,
|
||||
.import => unreachable,
|
||||
};
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
|
||||
const atom_index = switch (self.bin_file.tag) {
|
||||
.macho => owner_decl.link.macho.getSymbolIndex().?,
|
||||
.coff => owner_decl.link.coff.getSymbolIndex().?,
|
||||
.macho => blk: {
|
||||
const macho_file = self.bin_file.cast(link.File.MachO).?;
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
.coff => blk: {
|
||||
const coff_file = self.bin_file.cast(link.File.Coff).?;
|
||||
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
|
||||
},
|
||||
else => unreachable, // unsupported target format
|
||||
};
|
||||
_ = try self.addInst(.{
|
||||
@ -6119,23 +6135,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
|
||||
mod.markDeclAlive(decl);
|
||||
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try decl.link.elf.ensureInitialized(elf_file);
|
||||
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
try decl.link.macho.ensureInitialized(macho_file);
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(decl_index);
|
||||
const sym_index = macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
return MCValue{ .linker_load = .{
|
||||
.type = .got,
|
||||
.sym_index = decl.link.macho.getSymbolIndex().?,
|
||||
.sym_index = sym_index,
|
||||
} };
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
|
||||
try decl.link.coff.ensureInitialized(coff_file);
|
||||
const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
|
||||
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
|
||||
return MCValue{ .linker_load = .{
|
||||
.type = .got,
|
||||
.sym_index = decl.link.coff.getSymbolIndex().?,
|
||||
.sym_index = sym_index,
|
||||
} };
|
||||
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
|
||||
try p9.seeDecl(decl_index);
|
||||
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
|
||||
const decl_block_index = try p9.seeDecl(decl_index);
|
||||
const decl_block = p9.getDeclBlock(decl_block_index);
|
||||
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
|
||||
return MCValue{ .memory = got_addr };
|
||||
} else {
|
||||
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
|
||||
@ -6148,8 +6168,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
|
||||
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
|
||||
};
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
|
||||
return MCValue{ .memory = vaddr };
|
||||
return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |_| {
|
||||
return MCValue{ .linker_load = .{
|
||||
.type = .direct,
|
||||
|
||||
@ -670,9 +670,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
|
||||
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
// Add relocation to the decl.
|
||||
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
const target = macho_file.getGlobalByIndex(relocation.sym_index);
|
||||
try atom.addRelocation(macho_file, .{
|
||||
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
|
||||
.type = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
|
||||
.target = target,
|
||||
.offset = offset,
|
||||
@ -883,10 +883,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
}
|
||||
|
||||
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
|
||||
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
|
||||
// TODO this causes segfault in stage1
|
||||
// try atom.addRelocations(macho_file, 2, .{
|
||||
try atom.addRelocation(macho_file, .{
|
||||
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
|
||||
.target = .{ .sym_index = data.sym_index, .file = null },
|
||||
.offset = offset,
|
||||
.addend = 0,
|
||||
@ -902,7 +902,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
else => unreachable,
|
||||
},
|
||||
});
|
||||
try atom.addRelocation(macho_file, .{
|
||||
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
|
||||
.target = .{ .sym_index = data.sym_index, .file = null },
|
||||
.offset = offset + 4,
|
||||
.addend = 0,
|
||||
@ -919,7 +919,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
},
|
||||
});
|
||||
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
|
||||
const atom = coff_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
|
||||
const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
|
||||
const target = switch (tag) {
|
||||
.load_memory_got,
|
||||
.load_memory_ptr_got,
|
||||
@ -929,7 +929,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
.load_memory_import => coff_file.getGlobalByIndex(data.sym_index),
|
||||
else => unreachable,
|
||||
};
|
||||
try atom.addRelocation(coff_file, .{
|
||||
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
|
||||
.target = target,
|
||||
.offset = offset,
|
||||
.addend = 0,
|
||||
@ -946,7 +946,7 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
else => unreachable,
|
||||
},
|
||||
});
|
||||
try atom.addRelocation(coff_file, .{
|
||||
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
|
||||
.target = target,
|
||||
.offset = offset + 4,
|
||||
.addend = 0,
|
||||
|
||||
@ -282,13 +282,7 @@ const DbgInfoReloc = struct {
|
||||
else => unreachable, // not a possible argument
|
||||
};
|
||||
|
||||
try dw.genArgDbgInfo(
|
||||
reloc.name,
|
||||
reloc.ty,
|
||||
function.bin_file.tag,
|
||||
function.mod_fn.owner_decl,
|
||||
loc,
|
||||
);
|
||||
try dw.genArgDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, loc);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -331,14 +325,7 @@ const DbgInfoReloc = struct {
|
||||
break :blk .nop;
|
||||
},
|
||||
};
|
||||
try dw.genVarDbgInfo(
|
||||
reloc.name,
|
||||
reloc.ty,
|
||||
function.bin_file.tag,
|
||||
function.mod_fn.owner_decl,
|
||||
is_ptr,
|
||||
loc,
|
||||
);
|
||||
try dw.genVarDbgInfo(reloc.name, reloc.ty, function.mod_fn.owner_decl, is_ptr, loc);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -4256,12 +4243,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
if (self.air.value(callee)) |func_value| {
|
||||
if (func_value.castTag(.function)) |func_payload| {
|
||||
const func = func_payload.data;
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const fn_owner_decl = mod.declPtr(func.owner_decl);
|
||||
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
|
||||
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
|
||||
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |_| {
|
||||
unreachable; // unsupported architecture for MachO
|
||||
@ -6084,15 +6070,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
|
||||
mod.markDeclAlive(decl);
|
||||
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try decl.link.elf.ensureInitialized(elf_file);
|
||||
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |_| {
|
||||
unreachable; // unsupported architecture for MachO
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |_| {
|
||||
return self.fail("TODO codegen COFF const Decl pointer", .{});
|
||||
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
|
||||
try p9.seeDecl(decl_index);
|
||||
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
|
||||
const decl_block_index = try p9.seeDecl(decl_index);
|
||||
const decl_block = p9.getDeclBlock(decl_block_index);
|
||||
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
|
||||
return MCValue{ .memory = got_addr };
|
||||
} else {
|
||||
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
|
||||
@ -6106,8 +6094,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
|
||||
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
|
||||
};
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
|
||||
return MCValue{ .memory = vaddr };
|
||||
return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |_| {
|
||||
unreachable;
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |_| {
|
||||
|
||||
@ -1615,13 +1615,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
|
||||
|
||||
switch (self.debug_output) {
|
||||
.dwarf => |dw| switch (mcv) {
|
||||
.register => |reg| try dw.genArgDbgInfo(
|
||||
name,
|
||||
ty,
|
||||
self.bin_file.tag,
|
||||
self.mod_fn.owner_decl,
|
||||
.{ .register = reg.dwarfLocOp() },
|
||||
),
|
||||
.register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
|
||||
.register = reg.dwarfLocOp(),
|
||||
}),
|
||||
.stack_offset => {},
|
||||
else => {},
|
||||
},
|
||||
@ -1721,12 +1717,9 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
if (self.air.value(callee)) |func_value| {
|
||||
if (func_value.castTag(.function)) |func_payload| {
|
||||
const func = func_payload.data;
|
||||
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const fn_owner_decl = mod.declPtr(func.owner_decl);
|
||||
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
|
||||
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
|
||||
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
|
||||
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
|
||||
_ = try self.addInst(.{
|
||||
.tag = .jalr,
|
||||
@ -2553,17 +2546,17 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
|
||||
const decl = mod.declPtr(decl_index);
|
||||
mod.markDeclAlive(decl);
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try decl.link.elf.ensureInitialized(elf_file);
|
||||
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |_| {
|
||||
// TODO I'm hacking my way through here by repurposing .memory for storing
|
||||
// index to the GOT target symbol index.
|
||||
return MCValue{ .memory = decl.link.macho.sym_index };
|
||||
unreachable;
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |_| {
|
||||
return self.fail("TODO codegen COFF const Decl pointer", .{});
|
||||
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
|
||||
try p9.seeDecl(decl_index);
|
||||
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
|
||||
const decl_block_index = try p9.seeDecl(decl_index);
|
||||
const decl_block = p9.getDeclBlock(decl_block_index);
|
||||
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
|
||||
return MCValue{ .memory = got_addr };
|
||||
} else {
|
||||
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
|
||||
|
||||
@ -1216,11 +1216,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
if (self.bin_file.tag == link.File.Elf.base_tag) {
|
||||
if (func_value.castTag(.function)) |func_payload| {
|
||||
const func = func_payload.data;
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const fn_owner_decl = mod.declPtr(func.owner_decl);
|
||||
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
|
||||
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
|
||||
break :blk @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
break :blk @intCast(u32, atom.getOffsetTableAddress(elf_file));
|
||||
} else unreachable;
|
||||
|
||||
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
|
||||
@ -3413,13 +3412,9 @@ fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, mcv: MCValue) !void {
|
||||
|
||||
switch (self.debug_output) {
|
||||
.dwarf => |dw| switch (mcv) {
|
||||
.register => |reg| try dw.genArgDbgInfo(
|
||||
name,
|
||||
ty,
|
||||
self.bin_file.tag,
|
||||
self.mod_fn.owner_decl,
|
||||
.{ .register = reg.dwarfLocOp() },
|
||||
),
|
||||
.register => |reg| try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, .{
|
||||
.register = reg.dwarfLocOp(),
|
||||
}),
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
@ -4205,8 +4200,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
|
||||
|
||||
mod.markDeclAlive(decl);
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try decl.link.elf.ensureInitialized(elf_file);
|
||||
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
|
||||
} else {
|
||||
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
|
||||
}
|
||||
|
||||
@ -1194,7 +1194,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
|
||||
const fn_info = func.decl.ty.fnInfo();
|
||||
var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
|
||||
defer func_type.deinit(func.gpa);
|
||||
func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
|
||||
_ = try func.bin_file.storeDeclType(func.decl_index, func_type);
|
||||
|
||||
var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
|
||||
defer cc_result.deinit(func.gpa);
|
||||
@ -1269,10 +1269,10 @@ fn genFunc(func: *CodeGen) InnerError!void {
|
||||
|
||||
var emit: Emit = .{
|
||||
.mir = mir,
|
||||
.bin_file = &func.bin_file.base,
|
||||
.bin_file = func.bin_file,
|
||||
.code = func.code,
|
||||
.locals = func.locals.items,
|
||||
.decl = func.decl,
|
||||
.decl_index = func.decl_index,
|
||||
.dbg_output = func.debug_output,
|
||||
.prev_di_line = 0,
|
||||
.prev_di_column = 0,
|
||||
@ -2117,33 +2117,31 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
|
||||
const fn_info = fn_ty.fnInfo();
|
||||
const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
|
||||
|
||||
const callee: ?*Decl = blk: {
|
||||
const callee: ?Decl.Index = blk: {
|
||||
const func_val = func.air.value(pl_op.operand) orelse break :blk null;
|
||||
const module = func.bin_file.base.options.module.?;
|
||||
|
||||
if (func_val.castTag(.function)) |function| {
|
||||
const decl = module.declPtr(function.data.owner_decl);
|
||||
try decl.link.wasm.ensureInitialized(func.bin_file);
|
||||
break :blk decl;
|
||||
_ = try func.bin_file.getOrCreateAtomForDecl(function.data.owner_decl);
|
||||
break :blk function.data.owner_decl;
|
||||
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
|
||||
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
|
||||
const ext_info = ext_decl.ty.fnInfo();
|
||||
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
|
||||
defer func_type.deinit(func.gpa);
|
||||
const atom = &ext_decl.link.wasm;
|
||||
try atom.ensureInitialized(func.bin_file);
|
||||
ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
|
||||
const atom_index = try func.bin_file.getOrCreateAtomForDecl(extern_fn.data.owner_decl);
|
||||
const atom = func.bin_file.getAtomPtr(atom_index);
|
||||
const type_index = try func.bin_file.storeDeclType(extern_fn.data.owner_decl, func_type);
|
||||
try func.bin_file.addOrUpdateImport(
|
||||
mem.sliceTo(ext_decl.name, 0),
|
||||
atom.getSymbolIndex().?,
|
||||
ext_decl.getExternFn().?.lib_name,
|
||||
ext_decl.fn_link.wasm.type_index,
|
||||
type_index,
|
||||
);
|
||||
break :blk ext_decl;
|
||||
break :blk extern_fn.data.owner_decl;
|
||||
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
|
||||
const decl = module.declPtr(decl_ref.data);
|
||||
try decl.link.wasm.ensureInitialized(func.bin_file);
|
||||
break :blk decl;
|
||||
_ = try func.bin_file.getOrCreateAtomForDecl(decl_ref.data);
|
||||
break :blk decl_ref.data;
|
||||
}
|
||||
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
|
||||
};
|
||||
@ -2164,7 +2162,8 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
|
||||
}
|
||||
|
||||
if (callee) |direct| {
|
||||
try func.addLabel(.call, direct.link.wasm.sym_index);
|
||||
const atom_index = func.bin_file.decls.get(direct).?;
|
||||
try func.addLabel(.call, func.bin_file.getAtom(atom_index).sym_index);
|
||||
} else {
|
||||
// in this case we call a function pointer
|
||||
// so load its value onto the stack
|
||||
@ -2477,7 +2476,7 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
.dwarf => |dwarf| {
|
||||
const src_index = func.air.instructions.items(.data)[inst].arg.src_index;
|
||||
const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, src_index);
|
||||
try dwarf.genArgDbgInfo(name, arg_ty, .wasm, func.mod_fn.owner_decl, .{
|
||||
try dwarf.genArgDbgInfo(name, arg_ty, func.mod_fn.owner_decl, .{
|
||||
.wasm_local = arg.local.value,
|
||||
});
|
||||
},
|
||||
@ -2760,9 +2759,10 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
|
||||
}
|
||||
|
||||
module.markDeclAlive(decl);
|
||||
try decl.link.wasm.ensureInitialized(func.bin_file);
|
||||
const atom_index = try func.bin_file.getOrCreateAtomForDecl(decl_index);
|
||||
const atom = func.bin_file.getAtom(atom_index);
|
||||
|
||||
const target_sym_index = decl.link.wasm.sym_index;
|
||||
const target_sym_index = atom.sym_index;
|
||||
if (decl.ty.zigTypeTag() == .Fn) {
|
||||
try func.bin_file.addTableFunction(target_sym_index);
|
||||
return WValue{ .function_index = target_sym_index };
|
||||
@ -5547,7 +5547,7 @@ fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
|
||||
break :blk .nop;
|
||||
},
|
||||
};
|
||||
try func.debug_output.dwarf.genVarDbgInfo(name, ty, .wasm, func.mod_fn.owner_decl, is_ptr, loc);
|
||||
try func.debug_output.dwarf.genVarDbgInfo(name, ty, func.mod_fn.owner_decl, is_ptr, loc);
|
||||
|
||||
func.finishAir(inst, .none, &.{});
|
||||
}
|
||||
|
||||
@ -11,8 +11,8 @@ const leb128 = std.leb;
|
||||
|
||||
/// Contains our list of instructions
|
||||
mir: Mir,
|
||||
/// Reference to the file handler
|
||||
bin_file: *link.File,
|
||||
/// Reference to the Wasm module linker
|
||||
bin_file: *link.File.Wasm,
|
||||
/// Possible error message. When set, the value is allocated and
|
||||
/// must be freed manually.
|
||||
error_msg: ?*Module.ErrorMsg = null,
|
||||
@ -21,7 +21,7 @@ code: *std.ArrayList(u8),
|
||||
/// List of allocated locals.
|
||||
locals: []const u8,
|
||||
/// The declaration that code is being generated for.
|
||||
decl: *Module.Decl,
|
||||
decl_index: Module.Decl.Index,
|
||||
|
||||
// Debug information
|
||||
/// Holds the debug information for this emission
|
||||
@ -252,8 +252,8 @@ fn offset(self: Emit) u32 {
|
||||
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
|
||||
@setCold(true);
|
||||
std.debug.assert(emit.error_msg == null);
|
||||
// TODO: Determine the source location.
|
||||
emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.allocator, emit.decl.srcLoc(), format, args);
|
||||
const mod = emit.bin_file.base.options.module.?;
|
||||
emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
|
||||
return error.EmitFail;
|
||||
}
|
||||
|
||||
@ -304,8 +304,9 @@ fn emitGlobal(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) !void {
|
||||
const global_offset = emit.offset();
|
||||
try emit.code.appendSlice(&buf);
|
||||
|
||||
// globals can have index 0 as it represents the stack pointer
|
||||
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
|
||||
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
|
||||
const atom = emit.bin_file.getAtomPtr(atom_index);
|
||||
try atom.relocs.append(emit.bin_file.base.allocator, .{
|
||||
.index = label,
|
||||
.offset = global_offset,
|
||||
.relocation_type = .R_WASM_GLOBAL_INDEX_LEB,
|
||||
@ -361,7 +362,9 @@ fn emitCall(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
try emit.code.appendSlice(&buf);
|
||||
|
||||
if (label != 0) {
|
||||
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
|
||||
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
|
||||
const atom = emit.bin_file.getAtomPtr(atom_index);
|
||||
try atom.relocs.append(emit.bin_file.base.allocator, .{
|
||||
.offset = call_offset,
|
||||
.index = label,
|
||||
.relocation_type = .R_WASM_FUNCTION_INDEX_LEB,
|
||||
@ -387,7 +390,9 @@ fn emitFunctionIndex(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
try emit.code.appendSlice(&buf);
|
||||
|
||||
if (symbol_index != 0) {
|
||||
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
|
||||
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
|
||||
const atom = emit.bin_file.getAtomPtr(atom_index);
|
||||
try atom.relocs.append(emit.bin_file.base.allocator, .{
|
||||
.offset = index_offset,
|
||||
.index = symbol_index,
|
||||
.relocation_type = .R_WASM_TABLE_INDEX_SLEB,
|
||||
@ -399,7 +404,7 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
|
||||
const mem = emit.mir.extraData(Mir.Memory, extra_index).data;
|
||||
const mem_offset = emit.offset() + 1;
|
||||
const is_wasm32 = emit.bin_file.options.target.cpu.arch == .wasm32;
|
||||
const is_wasm32 = emit.bin_file.base.options.target.cpu.arch == .wasm32;
|
||||
if (is_wasm32) {
|
||||
try emit.code.append(std.wasm.opcode(.i32_const));
|
||||
var buf: [5]u8 = undefined;
|
||||
@ -413,7 +418,9 @@ fn emitMemAddress(emit: *Emit, inst: Mir.Inst.Index) !void {
|
||||
}
|
||||
|
||||
if (mem.pointer != 0) {
|
||||
try emit.decl.link.wasm.relocs.append(emit.bin_file.allocator, .{
|
||||
const atom_index = emit.bin_file.decls.get(emit.decl_index).?;
|
||||
const atom = emit.bin_file.getAtomPtr(atom_index);
|
||||
try atom.relocs.append(emit.bin_file.base.allocator, .{
|
||||
.offset = mem_offset,
|
||||
.index = mem.pointer,
|
||||
.relocation_type = if (is_wasm32) .R_WASM_MEMORY_ADDR_LEB else .R_WASM_MEMORY_ADDR_LEB64,
|
||||
|
||||
@ -2668,12 +2668,13 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
|
||||
switch (ptr) {
|
||||
.linker_load => |load_struct| {
|
||||
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
|
||||
const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
|
||||
fn_owner_decl.link.macho.getSymbolIndex().?
|
||||
else
|
||||
fn_owner_decl.link.coff.getSymbolIndex().?;
|
||||
const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: {
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: {
|
||||
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
break :blk coff_file.getAtom(atom).getSymbolIndex().?;
|
||||
} else unreachable;
|
||||
const flags: u2 = switch (load_struct.type) {
|
||||
.got => 0b00,
|
||||
.direct => 0b01,
|
||||
@ -3835,7 +3836,7 @@ fn genArgDbgInfo(self: Self, ty: Type, name: [:0]const u8, mcv: MCValue) !void {
|
||||
},
|
||||
else => unreachable, // not a valid function parameter
|
||||
};
|
||||
try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
|
||||
try dw.genArgDbgInfo(name, ty, self.mod_fn.owner_decl, loc);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -3875,7 +3876,7 @@ fn genVarDbgInfo(
|
||||
break :blk .nop;
|
||||
},
|
||||
};
|
||||
try dw.genVarDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, is_ptr, loc);
|
||||
try dw.genVarDbgInfo(name, ty, self.mod_fn.owner_decl, is_ptr, loc);
|
||||
},
|
||||
.plan9 => {},
|
||||
.none => {},
|
||||
@ -3995,19 +3996,19 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
if (self.air.value(callee)) |func_value| {
|
||||
if (func_value.castTag(.function)) |func_payload| {
|
||||
const func = func_payload.data;
|
||||
const fn_owner_decl = mod.declPtr(func.owner_decl);
|
||||
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
|
||||
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
|
||||
_ = try self.addInst(.{
|
||||
.tag = .call,
|
||||
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
|
||||
.data = .{ .imm = got_addr },
|
||||
});
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
|
||||
try fn_owner_decl.link.coff.ensureInitialized(coff_file);
|
||||
const sym_index = fn_owner_decl.link.coff.getSymbolIndex().?;
|
||||
const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
|
||||
try self.genSetReg(Type.initTag(.usize), .rax, .{
|
||||
.linker_load = .{
|
||||
.type = .got,
|
||||
@ -4023,8 +4024,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
.data = undefined,
|
||||
});
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
try fn_owner_decl.link.macho.ensureInitialized(macho_file);
|
||||
const sym_index = fn_owner_decl.link.macho.getSymbolIndex().?;
|
||||
const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
|
||||
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
|
||||
try self.genSetReg(Type.initTag(.usize), .rax, .{
|
||||
.linker_load = .{
|
||||
.type = .got,
|
||||
@ -4040,11 +4041,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
.data = undefined,
|
||||
});
|
||||
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
|
||||
try p9.seeDecl(func.owner_decl);
|
||||
const decl_block_index = try p9.seeDecl(func.owner_decl);
|
||||
const decl_block = p9.getDeclBlock(decl_block_index);
|
||||
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
|
||||
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
|
||||
const got_addr = p9.bases.data;
|
||||
const got_index = fn_owner_decl.link.plan9.got_index.?;
|
||||
const got_index = decl_block.got_index.?;
|
||||
const fn_got_addr = got_addr + got_index * ptr_bytes;
|
||||
_ = try self.addInst(.{
|
||||
.tag = .call,
|
||||
@ -4080,15 +4082,15 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
|
||||
});
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
|
||||
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
|
||||
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
|
||||
_ = try self.addInst(.{
|
||||
.tag = .call_extern,
|
||||
.ops = undefined,
|
||||
.data = .{
|
||||
.relocation = .{
|
||||
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
|
||||
.sym_index = sym_index,
|
||||
},
|
||||
},
|
||||
.data = .{ .relocation = .{
|
||||
.atom_index = atom_index,
|
||||
.sym_index = sym_index,
|
||||
} },
|
||||
});
|
||||
} else {
|
||||
return self.fail("TODO implement calling extern functions", .{});
|
||||
@ -6719,23 +6721,27 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
|
||||
module.markDeclAlive(decl);
|
||||
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
try decl.link.elf.ensureInitialized(elf_file);
|
||||
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
|
||||
const atom_index = try elf_file.getOrCreateAtomForDecl(decl_index);
|
||||
const atom = elf_file.getAtom(atom_index);
|
||||
return MCValue{ .memory = atom.getOffsetTableAddress(elf_file) };
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
try decl.link.macho.ensureInitialized(macho_file);
|
||||
const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
|
||||
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
|
||||
return MCValue{ .linker_load = .{
|
||||
.type = .got,
|
||||
.sym_index = decl.link.macho.getSymbolIndex().?,
|
||||
.sym_index = sym_index,
|
||||
} };
|
||||
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
|
||||
try decl.link.coff.ensureInitialized(coff_file);
|
||||
const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index);
|
||||
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
|
||||
return MCValue{ .linker_load = .{
|
||||
.type = .got,
|
||||
.sym_index = decl.link.coff.getSymbolIndex().?,
|
||||
.sym_index = sym_index,
|
||||
} };
|
||||
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
|
||||
try p9.seeDecl(decl_index);
|
||||
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
|
||||
const decl_block_index = try p9.seeDecl(decl_index);
|
||||
const decl_block = p9.getDeclBlock(decl_block_index);
|
||||
const got_addr = p9.bases.data + decl_block.got_index.? * ptr_bytes;
|
||||
return MCValue{ .memory = got_addr };
|
||||
} else {
|
||||
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
|
||||
@ -6748,8 +6754,7 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue {
|
||||
return self.fail("lowering unnamed constant failed: {s}", .{@errorName(err)});
|
||||
};
|
||||
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
const vaddr = elf_file.local_symbols.items[local_sym_index].st_value;
|
||||
return MCValue{ .memory = vaddr };
|
||||
return MCValue{ .memory = elf_file.getSymbol(local_sym_index).st_value };
|
||||
} else if (self.bin_file.cast(link.File.MachO)) |_| {
|
||||
return MCValue{ .linker_load = .{
|
||||
.type = .direct,
|
||||
|
||||
@ -1001,8 +1001,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
|
||||
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
|
||||
else => unreachable,
|
||||
};
|
||||
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
try atom.addRelocation(macho_file, .{
|
||||
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
|
||||
.type = reloc_type,
|
||||
.target = .{ .sym_index = relocation.sym_index, .file = null },
|
||||
.offset = @intCast(u32, end_offset - 4),
|
||||
@ -1011,8 +1011,8 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
|
||||
.length = 2,
|
||||
});
|
||||
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
|
||||
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
try atom.addRelocation(coff_file, .{
|
||||
const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
|
||||
.type = switch (ops.flags) {
|
||||
0b00 => .got,
|
||||
0b01 => .direct,
|
||||
@ -1140,9 +1140,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
|
||||
|
||||
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
|
||||
// Add relocation to the decl.
|
||||
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
const target = macho_file.getGlobalByIndex(relocation.sym_index);
|
||||
try atom.addRelocation(macho_file, .{
|
||||
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
|
||||
.type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
|
||||
.target = target,
|
||||
.offset = offset,
|
||||
@ -1152,9 +1152,9 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
|
||||
});
|
||||
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
|
||||
// Add relocation to the decl.
|
||||
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
|
||||
const target = coff_file.getGlobalByIndex(relocation.sym_index);
|
||||
try atom.addRelocation(coff_file, .{
|
||||
try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
|
||||
.type = .direct,
|
||||
.target = target,
|
||||
.offset = offset,
|
||||
|
||||
@ -16,7 +16,6 @@ const trace = @import("../tracy.zig").trace;
|
||||
const LazySrcLoc = Module.LazySrcLoc;
|
||||
const Air = @import("../Air.zig");
|
||||
const Liveness = @import("../Liveness.zig");
|
||||
const CType = @import("../type.zig").CType;
|
||||
|
||||
const target_util = @import("../target.zig");
|
||||
const libcFloatPrefix = target_util.libcFloatPrefix;

@ -19,7 +19,6 @@ const Liveness = @import("../Liveness.zig");
const Value = @import("../value.zig").Value;
const Type = @import("../type.zig").Type;
const LazySrcLoc = Module.LazySrcLoc;
const CType = @import("../type.zig").CType;
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
@ -11043,8 +11042,8 @@ fn backendSupportsF128(target: std.Target) bool {
fn intrinsicsAllowed(scalar_ty: Type, target: std.Target) bool {
return switch (scalar_ty.tag()) {
.f16 => backendSupportsF16(target),
.f80 => (CType.longdouble.sizeInBits(target) == 80) and backendSupportsF80(target),
.f128 => (CType.longdouble.sizeInBits(target) == 128) and backendSupportsF128(target),
.f80 => (target.c_type_bit_size(.longdouble) == 80) and backendSupportsF80(target),
.f128 => (target.c_type_bit_size(.longdouble) == 128) and backendSupportsF128(target),
else => true,
};
}
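The switch from `CType.longdouble.sizeInBits(target)` to `target.c_type_bit_size(.longdouble)` moves the query onto `std.Target`. A small sketch of the same query against the native target; the exact sizes depend on the host C ABI, so the assertions are deliberately loose.

const std = @import("std");
const builtin = @import("builtin");

test "query C type sizes for the native target" {
    const target = builtin.target;
    const double_bits = target.c_type_bit_size(.double);
    const longdouble_bits = target.c_type_bit_size(.longdouble);
    // `double` is 32 or 64 bits depending on the C ABI; `long double` is at
    // least as wide (64, 80 or 128 bits on common targets).
    try std.testing.expect(double_bits == 32 or double_bits == 64);
    try std.testing.expect(longdouble_bits >= double_bits);
}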
@ -49,7 +49,7 @@ pub const DeclGen = struct {
|
||||
spv: *SpvModule,
|
||||
|
||||
/// The decl we are currently generating code for.
|
||||
decl: *Decl,
|
||||
decl_index: Decl.Index,
|
||||
|
||||
/// The intermediate code of the declaration we are currently generating. Note: If
|
||||
/// the declaration is not a function, this value will be undefined!
|
||||
@ -59,6 +59,8 @@ pub const DeclGen = struct {
|
||||
/// Note: If the declaration is not a function, this value will be undefined!
|
||||
liveness: Liveness,
|
||||
|
||||
ids: *const std.AutoHashMap(Decl.Index, IdResult),
|
||||
|
||||
/// An array of function argument result-ids. Each index corresponds with the
|
||||
/// function argument of the same index.
|

@ -133,14 +135,20 @@ pub const DeclGen = struct {

/// Initialize the common resources of a DeclGen. Some fields are left uninitialized,
/// only set when `gen` is called.
pub fn init(allocator: Allocator, module: *Module, spv: *SpvModule) DeclGen {
pub fn init(
allocator: Allocator,
module: *Module,
spv: *SpvModule,
ids: *const std.AutoHashMap(Decl.Index, IdResult),
) DeclGen {
return .{
.gpa = allocator,
.module = module,
.spv = spv,
.decl = undefined,
.decl_index = undefined,
.air = undefined,
.liveness = undefined,
.ids = ids,
.next_arg_index = undefined,
.current_block_label_id = undefined,
.error_msg = undefined,
@ -150,9 +158,9 @@ pub const DeclGen = struct {
|
||||
/// Generate the code for `decl`. If a reportable error occurred during code generation,
|
||||
/// a message is returned by this function. Callee owns the memory. If this function
|
||||
/// returns such a reportable error, it is valid to be called again for a different decl.
|
||||
pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
|
||||
pub fn gen(self: *DeclGen, decl_index: Decl.Index, air: Air, liveness: Liveness) !?*Module.ErrorMsg {
|
||||
// Reset internal resources, we don't want to re-allocate these.
|
||||
self.decl = decl;
|
||||
self.decl_index = decl_index;
|
||||
self.air = air;
|
||||
self.liveness = liveness;
|
||||
self.args.items.len = 0;
|
||||
@ -194,7 +202,7 @@ pub const DeclGen = struct {
|
||||
pub fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
|
||||
@setCold(true);
|
||||
const src = LazySrcLoc.nodeOffset(0);
|
||||
const src_loc = src.toSrcLoc(self.decl);
|
||||
const src_loc = src.toSrcLoc(self.module.declPtr(self.decl_index));
|
||||
assert(self.error_msg == null);
|
||||
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, format, args);
|
||||
return error.CodegenFail;
|
||||
@ -332,7 +340,7 @@ pub const DeclGen = struct {
|
||||
};
|
||||
const decl = self.module.declPtr(fn_decl_index);
|
||||
self.module.markDeclAlive(decl);
|
||||
return decl.fn_link.spirv.id.toRef();
|
||||
return self.ids.get(fn_decl_index).?.toRef();
|
||||
}
|
||||
|
||||
const target = self.getTarget();
|
||||
@ -553,8 +561,8 @@ pub const DeclGen = struct {
|
||||
}
|
||||
|
||||
fn genDecl(self: *DeclGen) !void {
|
||||
const decl = self.decl;
|
||||
const result_id = decl.fn_link.spirv.id;
|
||||
const result_id = self.ids.get(self.decl_index).?;
|
||||
const decl = self.module.declPtr(self.decl_index);
|
||||
|
||||
if (decl.val.castTag(.function)) |_| {
|
||||
assert(decl.ty.zigTypeTag() == .Fn);
|
||||
@ -945,7 +953,7 @@ pub const DeclGen = struct {
|
||||
|
||||
fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
|
||||
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
|
||||
const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
|
||||
const src_fname_id = try self.spv.resolveSourceFileName(self.module.declPtr(self.decl_index));
|
||||
try self.func.body.emit(self.spv.gpa, .OpLine, .{
|
||||
.file = src_fname_id,
|
||||
.line = dbg_stmt.line,
|
||||
@ -1106,7 +1114,7 @@ pub const DeclGen = struct {
|
||||
assert(as.errors.items.len != 0);
|
||||
assert(self.error_msg == null);
|
||||
const loc = LazySrcLoc.nodeOffset(0);
|
||||
const src_loc = loc.toSrcLoc(self.decl);
|
||||
const src_loc = loc.toSrcLoc(self.module.declPtr(self.decl_index));
|
||||
self.error_msg = try Module.ErrorMsg.create(self.module.gpa, src_loc, "failed to assemble SPIR-V inline assembly", .{});
|
||||
const notes = try self.module.gpa.alloc(Module.ErrorMsg, as.errors.items.len);
|
||||
|
||||
|
||||
50
src/link.zig
50
src/link.zig
@ -261,39 +261,6 @@ pub const File = struct {
/// of this linking operation.
lock: ?Cache.Lock = null,

pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.Atom,
macho: MachO.Atom,
plan9: Plan9.DeclBlock,
c: void,
wasm: Wasm.DeclBlock,
spirv: void,
nvptx: void,
};

pub const LinkFn = union {
elf: Dwarf.SrcFn,
coff: Coff.SrcFn,
macho: Dwarf.SrcFn,
plan9: void,
c: void,
wasm: Wasm.FnData,
spirv: SpirV.FnData,
nvptx: void,
};

pub const Export = union {
elf: Elf.Export,
coff: Coff.Export,
macho: MachO.Export,
plan9: Plan9.Export,
c: void,
wasm: Wasm.Export,
spirv: void,
nvptx: void,
};

/// Attempts incremental linking, if the file already exists. If
/// incremental linking fails, falls back to truncating the file and
/// rewriting it. A malicious file is detected as incremental link failure
@ -580,22 +547,23 @@ pub const File = struct {
}
}

pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1,
});
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl);
return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index);
}
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl),
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl_index),
.c => return @fieldParentPtr(C, "base", base).updateDeclLineNumber(module, decl_index),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateDeclLineNumber(module, decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateDeclLineNumber(module, decl_index),
.spirv, .nvptx => {},
}
}

@ -219,12 +219,12 @@ pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !voi
code.shrinkAndFree(module.gpa, code.items.len);
}

pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void {
pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
// The C backend does not have the ability to fix line numbers without re-generating
// the entire Decl.
_ = self;
_ = module;
_ = decl;
_ = decl_index;
}

pub fn flush(self: *C, comp: *Compilation, prog_node: *std.Progress.Node) !void {

@ -79,13 +79,13 @@ entry_addr: ?u32 = null,
/// We store them here so that we can properly dispose of any allocated
/// memory within the atom in the incremental linker.
/// TODO consolidate this.
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, ?u16) = .{},
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},

/// List of atoms that are either synthetic or map directly to the Zig source program.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},
atoms: std.ArrayListUnmanaged(Atom) = .{},

/// Table of atoms indexed by the symbol index.
atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},

/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
@ -124,9 +124,9 @@ const Entry = struct {
sym_index: u32,
};

const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));
const BaseRelocationTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(u32));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(*Atom));
const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Relocation));
const BaseRelocationTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(u32));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));

const default_file_alignment: u16 = 0x200;
const default_size_of_stack_reserve: u32 = 0x1000000;
@ -137,7 +137,7 @@ const default_size_of_heap_commit: u32 = 0x1000;
const Section = struct {
header: coff.SectionHeader,

last_atom: ?*Atom = null,
last_atom_index: ?Atom.Index = null,

/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
@ -154,7 +154,34 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
free_list: std.ArrayListUnmanaged(*Atom) = .{},
free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};

const DeclMetadata = struct {
atom: Atom.Index,
section: u16,
/// A list of all exports aliases of this Decl.
exports: std.ArrayListUnmanaged(u32) = .{},

fn getExport(m: DeclMetadata, coff_file: *const Coff, name: []const u8) ?u32 {
for (m.exports.items) |exp| {
if (mem.eql(u8, name, coff_file.getSymbolName(.{
.sym_index = exp,
.file = null,
}))) return exp;
}
return null;
}

fn getExportPtr(m: *DeclMetadata, coff_file: *Coff, name: []const u8) ?*u32 {
for (m.exports.items) |*exp| {
if (mem.eql(u8, name, coff_file.getSymbolName(.{
.sym_index = exp.*,
.file = null,
}))) return exp;
}
return null;
}
};

pub const PtrWidth = enum {
@ -168,11 +195,6 @@ pub const PtrWidth = enum {
};
}
};
pub const SrcFn = void;

pub const Export = struct {
sym_index: ?u32 = null,
};

pub const SymbolWithLoc = struct {
|
||||
// Index into the respective symbol table.
|
||||
@ -271,11 +293,7 @@ pub fn deinit(self: *Coff) void {
|
||||
}
|
||||
self.sections.deinit(gpa);
|
||||
|
||||
for (self.managed_atoms.items) |atom| {
|
||||
gpa.destroy(atom);
|
||||
}
|
||||
self.managed_atoms.deinit(gpa);
|
||||
|
||||
self.atoms.deinit(gpa);
|
||||
self.locals.deinit(gpa);
|
||||
self.globals.deinit(gpa);
|
||||
|
||||
@ -297,7 +315,15 @@ pub fn deinit(self: *Coff) void {
|
||||
self.imports.deinit(gpa);
|
||||
self.imports_free_list.deinit(gpa);
|
||||
self.imports_table.deinit(gpa);
|
||||
self.decls.deinit(gpa);
|
||||
|
||||
{
|
||||
var it = self.decls.iterator();
|
||||
while (it.next()) |entry| {
|
||||
entry.value_ptr.exports.deinit(gpa);
|
||||
}
|
||||
self.decls.deinit(gpa);
|
||||
}
|
||||
|
||||
self.atom_by_index_table.deinit(gpa);
|
||||
|
||||
{
|
||||
@ -461,17 +487,18 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
|
||||
// TODO: enforce order by increasing VM addresses in self.sections container.
|
||||
// This is required by the loader anyhow as far as I can tell.
|
||||
for (self.sections.items(.header)[sect_id + 1 ..]) |*next_header, next_sect_id| {
|
||||
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id + 1 + next_sect_id];
|
||||
const maybe_last_atom_index = self.sections.items(.last_atom_index)[sect_id + 1 + next_sect_id];
|
||||
next_header.virtual_address += diff;
|
||||
|
||||
if (maybe_last_atom.*) |last_atom| {
|
||||
var atom = last_atom;
|
||||
if (maybe_last_atom_index) |last_atom_index| {
|
||||
var atom_index = last_atom_index;
|
||||
while (true) {
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sym = atom.getSymbolPtr(self);
|
||||
sym.value += diff;
|
||||
|
||||
if (atom.prev) |prev| {
|
||||
atom = prev;
|
||||
if (atom.prev_index) |prev_index| {
|
||||
atom_index = prev_index;
|
||||
} else break;
|
||||
}
|
||||
}
|
||||
@ -480,14 +507,15 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
|
||||
header.virtual_size = increased_size;
|
||||
}
|
||||
|
||||
fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
|
||||
fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sect_id = @enumToInt(atom.getSymbol(self).section_number) - 1;
|
||||
const header = &self.sections.items(.header)[sect_id];
|
||||
const free_list = &self.sections.items(.free_list)[sect_id];
|
||||
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
|
||||
const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
|
||||
const new_atom_ideal_capacity = if (header.isCode()) padToIdeal(new_atom_size) else new_atom_size;
|
||||
|
||||
// We use these to indicate our intention to update metadata, placing the new atom,
|
||||
@ -495,7 +523,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
|
||||
// It would be simpler to do it inside the for loop below, but that would cause a
|
||||
// problem if an error was returned later in the function. So this action
|
||||
// is actually carried out at the end of the function, when errors are no longer possible.
|
||||
var atom_placement: ?*Atom = null;
|
||||
var atom_placement: ?Atom.Index = null;
|
||||
var free_list_removal: ?usize = null;
|
||||
|
||||
// First we look for an appropriately sized free list node.
|
||||
@ -503,7 +531,8 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
|
||||
var vaddr = blk: {
|
||||
var i: usize = 0;
|
||||
while (i < free_list.items.len) {
|
||||
const big_atom = free_list.items[i];
|
||||
const big_atom_index = free_list.items[i];
|
||||
const big_atom = self.getAtom(big_atom_index);
|
||||
// We now have a pointer to a live atom that has too much capacity.
|
||||
// Is it enough that we could fit this new atom?
|
||||
const sym = big_atom.getSymbol(self);
|
||||
@ -531,34 +560,43 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
|
||||
const keep_free_list_node = remaining_capacity >= min_text_capacity;
|
||||
|
||||
// Set up the metadata to be updated, after errors are no longer possible.
|
||||
atom_placement = big_atom;
|
||||
atom_placement = big_atom_index;
|
||||
if (!keep_free_list_node) {
|
||||
free_list_removal = i;
|
||||
}
|
||||
break :blk new_start_vaddr;
|
||||
} else if (maybe_last_atom.*) |last| {
|
||||
} else if (maybe_last_atom_index.*) |last_index| {
|
||||
const last = self.getAtom(last_index);
|
||||
const last_symbol = last.getSymbol(self);
|
||||
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
|
||||
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
|
||||
const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
|
||||
atom_placement = last;
|
||||
atom_placement = last_index;
|
||||
break :blk new_start_vaddr;
|
||||
} else {
|
||||
break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
|
||||
}
|
||||
};
|
||||
|
||||
const expand_section = atom_placement == null or atom_placement.?.next == null;
|
||||
const expand_section = if (atom_placement) |placement_index|
|
||||
self.getAtom(placement_index).next_index == null
|
||||
else
|
||||
true;
|
||||
if (expand_section) {
|
||||
const sect_capacity = self.allocatedSize(header.pointer_to_raw_data);
|
||||
const needed_size: u32 = (vaddr + new_atom_size) - header.virtual_address;
|
||||
if (needed_size > sect_capacity) {
|
||||
const new_offset = self.findFreeSpace(needed_size, default_file_alignment);
|
||||
const current_size = if (maybe_last_atom.*) |last_atom| blk: {
|
||||
const current_size = if (maybe_last_atom_index.*) |last_atom_index| blk: {
|
||||
const last_atom = self.getAtom(last_atom_index);
|
||||
const sym = last_atom.getSymbol(self);
|
||||
break :blk (sym.value + last_atom.size) - header.virtual_address;
|
||||
} else 0;
|
||||
log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getSectionName(header), header.pointer_to_raw_data, new_offset });
|
||||
log.debug("moving {s} from 0x{x} to 0x{x}", .{
|
||||
self.getSectionName(header),
|
||||
header.pointer_to_raw_data,
|
||||
new_offset,
|
||||
});
|
||||
const amt = try self.base.file.?.copyRangeAll(
|
||||
header.pointer_to_raw_data,
|
||||
self.base.file.?,
|
||||
@ -577,26 +615,34 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
|
||||
|
||||
header.virtual_size = @max(header.virtual_size, needed_size);
|
||||
header.size_of_raw_data = needed_size;
|
||||
maybe_last_atom.* = atom;
|
||||
maybe_last_atom_index.* = atom_index;
|
||||
}
|
||||
|
||||
atom.size = new_atom_size;
|
||||
atom.alignment = alignment;
|
||||
|
||||
if (atom.prev) |prev| {
|
||||
prev.next = atom.next;
|
||||
}
|
||||
if (atom.next) |next| {
|
||||
next.prev = atom.prev;
|
||||
{
|
||||
const atom_ptr = self.getAtomPtr(atom_index);
|
||||
atom_ptr.size = new_atom_size;
|
||||
atom_ptr.alignment = alignment;
|
||||
}
|
||||
|
||||
if (atom_placement) |big_atom| {
|
||||
atom.prev = big_atom;
|
||||
atom.next = big_atom.next;
|
||||
big_atom.next = atom;
|
||||
if (atom.prev_index) |prev_index| {
|
||||
const prev = self.getAtomPtr(prev_index);
|
||||
prev.next_index = atom.next_index;
|
||||
}
|
||||
if (atom.next_index) |next_index| {
|
||||
const next = self.getAtomPtr(next_index);
|
||||
next.prev_index = atom.prev_index;
|
||||
}
|
||||
|
||||
if (atom_placement) |big_atom_index| {
|
||||
const big_atom = self.getAtomPtr(big_atom_index);
|
||||
const atom_ptr = self.getAtomPtr(atom_index);
|
||||
atom_ptr.prev_index = big_atom_index;
|
||||
atom_ptr.next_index = big_atom.next_index;
|
||||
big_atom.next_index = atom_index;
|
||||
} else {
|
||||
atom.prev = null;
|
||||
atom.next = null;
|
||||
const atom_ptr = self.getAtomPtr(atom_index);
|
||||
atom_ptr.prev_index = null;
|
||||
atom_ptr.next_index = null;
|
||||
}
|
||||
if (free_list_removal) |i| {
|
||||
_ = free_list.swapRemove(i);
|
||||
@ -701,24 +747,37 @@ pub fn allocateImportEntry(self: *Coff, target: SymbolWithLoc) !u32 {
|
||||
return index;
|
||||
}
|
||||
|
||||
fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
|
||||
pub fn createAtom(self: *Coff) !Atom.Index {
|
||||
const gpa = self.base.allocator;
|
||||
const atom = try gpa.create(Atom);
|
||||
errdefer gpa.destroy(atom);
|
||||
atom.* = Atom.empty;
|
||||
try atom.ensureInitialized(self);
|
||||
const atom_index = @intCast(Atom.Index, self.atoms.items.len);
|
||||
const atom = try self.atoms.addOne(gpa);
|
||||
const sym_index = try self.allocateSymbol();
|
||||
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom_index);
|
||||
atom.* = .{
|
||||
.sym_index = sym_index,
|
||||
.file = null,
|
||||
.size = 0,
|
||||
.alignment = 0,
|
||||
.prev_index = null,
|
||||
.next_index = null,
|
||||
};
|
||||
log.debug("creating ATOM(%{d}) at index {d}", .{ sym_index, atom_index });
|
||||
return atom_index;
|
||||
}
|
||||
|
||||
fn createGotAtom(self: *Coff, target: SymbolWithLoc) !Atom.Index {
|
||||
const atom_index = try self.createAtom();
|
||||
const atom = self.getAtomPtr(atom_index);
|
||||
atom.size = @sizeOf(u64);
|
||||
atom.alignment = @alignOf(u64);
|
||||
|
||||
try self.managed_atoms.append(gpa, atom);
|
||||
|
||||
const sym = atom.getSymbolPtr(self);
|
||||
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
|
||||
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
|
||||
sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
|
||||
|
||||
log.debug("allocated GOT atom at 0x{x}", .{sym.value});
|
||||
|
||||
try atom.addRelocation(self, .{
|
||||
try Atom.addRelocation(self, atom_index, .{
|
||||
.type = .direct,
|
||||
.target = target,
|
||||
.offset = 0,
|
||||
@ -732,49 +791,46 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
|
||||
.UNDEFINED => @panic("TODO generate a binding for undefined GOT target"),
|
||||
.ABSOLUTE => {},
|
||||
.DEBUG => unreachable, // not possible
|
||||
else => try atom.addBaseRelocation(self, 0),
|
||||
else => try Atom.addBaseRelocation(self, atom_index, 0),
|
||||
}
|
||||
|
||||
return atom;
|
||||
return atom_index;
|
||||
}
|
||||
|
||||
fn createImportAtom(self: *Coff) !*Atom {
|
||||
const gpa = self.base.allocator;
|
||||
const atom = try gpa.create(Atom);
|
||||
errdefer gpa.destroy(atom);
|
||||
atom.* = Atom.empty;
|
||||
try atom.ensureInitialized(self);
|
||||
fn createImportAtom(self: *Coff) !Atom.Index {
|
||||
const atom_index = try self.createAtom();
|
||||
const atom = self.getAtomPtr(atom_index);
|
||||
atom.size = @sizeOf(u64);
|
||||
atom.alignment = @alignOf(u64);
|
||||
|
||||
try self.managed_atoms.append(gpa, atom);
|
||||
|
||||
const sym = atom.getSymbolPtr(self);
|
||||
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
|
||||
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
|
||||
sym.value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
|
||||
|
||||
log.debug("allocated import atom at 0x{x}", .{sym.value});
|
||||
|
||||
return atom;
|
||||
return atom_index;
|
||||
}
|
||||
|
||||
fn growAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
|
||||
fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sym = atom.getSymbol(self);
|
||||
const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
|
||||
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
|
||||
if (!need_realloc) return sym.value;
|
||||
return self.allocateAtom(atom, new_atom_size, alignment);
|
||||
return self.allocateAtom(atom_index, new_atom_size, alignment);
|
||||
}
|
||||
|
||||
fn shrinkAtom(self: *Coff, atom: *Atom, new_block_size: u32) void {
|
||||
fn shrinkAtom(self: *Coff, atom_index: Atom.Index, new_block_size: u32) void {
|
||||
_ = self;
|
||||
_ = atom;
|
||||
_ = atom_index;
|
||||
_ = new_block_size;
|
||||
// TODO check the new capacity, and if it crosses the size threshold into a big enough
|
||||
// capacity, insert a free list node for it.
|
||||
}
|
||||
|
||||
fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
|
||||
fn writeAtom(self: *Coff, atom_index: Atom.Index, code: []const u8) !void {
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sym = atom.getSymbol(self);
|
||||
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
|
||||
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
|
||||
@ -784,18 +840,18 @@ fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
|
||||
file_offset + code.len,
|
||||
});
|
||||
try self.base.file.?.pwriteAll(code, file_offset);
|
||||
try self.resolveRelocs(atom);
|
||||
try self.resolveRelocs(atom_index);
|
||||
}
|
||||
|
||||
fn writePtrWidthAtom(self: *Coff, atom: *Atom) !void {
|
||||
fn writePtrWidthAtom(self: *Coff, atom_index: Atom.Index) !void {
|
||||
switch (self.ptr_width) {
|
||||
.p32 => {
|
||||
var buffer: [@sizeOf(u32)]u8 = [_]u8{0} ** @sizeOf(u32);
|
||||
try self.writeAtom(atom, &buffer);
|
||||
try self.writeAtom(atom_index, &buffer);
|
||||
},
|
||||
.p64 => {
|
||||
var buffer: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64);
|
||||
try self.writeAtom(atom, &buffer);
|
||||
try self.writeAtom(atom_index, &buffer);
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -815,7 +871,8 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
|
||||
var it = self.relocs.valueIterator();
|
||||
while (it.next()) |relocs| {
|
||||
for (relocs.items) |*reloc| {
|
||||
const target_atom = reloc.getTargetAtom(self) orelse continue;
|
||||
const target_atom_index = reloc.getTargetAtomIndex(self) orelse continue;
|
||||
const target_atom = self.getAtom(target_atom_index);
|
||||
const target_sym = target_atom.getSymbol(self);
|
||||
if (target_sym.value < addr) continue;
|
||||
reloc.dirty = true;
|
||||
@ -823,24 +880,26 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
|
||||
}
|
||||
}
|
||||
|
||||
fn resolveRelocs(self: *Coff, atom: *Atom) !void {
|
||||
const relocs = self.relocs.get(atom) orelse return;
|
||||
fn resolveRelocs(self: *Coff, atom_index: Atom.Index) !void {
|
||||
const relocs = self.relocs.get(atom_index) orelse return;
|
||||
|
||||
log.debug("relocating '{s}'", .{atom.getName(self)});
|
||||
log.debug("relocating '{s}'", .{self.getAtom(atom_index).getName(self)});
|
||||
|
||||
for (relocs.items) |*reloc| {
|
||||
if (!reloc.dirty) continue;
|
||||
try reloc.resolve(atom, self);
|
||||
try reloc.resolve(atom_index, self);
|
||||
}
|
||||
}
|
||||
|
||||
fn freeAtom(self: *Coff, atom: *Atom) void {
|
||||
log.debug("freeAtom {*}", .{atom});
|
||||
|
||||
// Remove any relocs and base relocs associated with this Atom
|
||||
self.freeRelocationsForAtom(atom);
|
||||
fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
|
||||
log.debug("freeAtom {d}", .{atom_index});
|
||||
|
||||
const gpa = self.base.allocator;
|
||||
|
||||
// Remove any relocs and base relocs associated with this Atom
|
||||
Atom.freeRelocations(self, atom_index);
|
||||
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sym = atom.getSymbol(self);
|
||||
const sect_id = @enumToInt(sym.section_number) - 1;
|
||||
const free_list = &self.sections.items(.free_list)[sect_id];
|
||||
@ -849,45 +908,46 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
|
||||
var i: usize = 0;
|
||||
// TODO turn free_list into a hash map
|
||||
while (i < free_list.items.len) {
|
||||
if (free_list.items[i] == atom) {
|
||||
if (free_list.items[i] == atom_index) {
|
||||
_ = free_list.swapRemove(i);
|
||||
continue;
|
||||
}
|
||||
if (free_list.items[i] == atom.prev) {
|
||||
if (free_list.items[i] == atom.prev_index) {
|
||||
already_have_free_list_node = true;
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
|
||||
const maybe_last_atom = &self.sections.items(.last_atom)[sect_id];
|
||||
if (maybe_last_atom.*) |last_atom| {
|
||||
if (last_atom == atom) {
|
||||
if (atom.prev) |prev| {
|
||||
const maybe_last_atom_index = &self.sections.items(.last_atom_index)[sect_id];
|
||||
if (maybe_last_atom_index.*) |last_atom_index| {
|
||||
if (last_atom_index == atom_index) {
|
||||
if (atom.prev_index) |prev_index| {
|
||||
// TODO shrink the section size here
|
||||
maybe_last_atom.* = prev;
|
||||
maybe_last_atom_index.* = prev_index;
|
||||
} else {
|
||||
maybe_last_atom.* = null;
|
||||
maybe_last_atom_index.* = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (atom.prev) |prev| {
|
||||
prev.next = atom.next;
|
||||
if (atom.prev_index) |prev_index| {
|
||||
const prev = self.getAtomPtr(prev_index);
|
||||
prev.next_index = atom.next_index;
|
||||
|
||||
if (!already_have_free_list_node and prev.freeListEligible(self)) {
|
||||
if (!already_have_free_list_node and prev.*.freeListEligible(self)) {
|
||||
// The free list is heuristics, it doesn't have to be perfect, so we can
|
||||
// ignore the OOM here.
|
||||
free_list.append(gpa, prev) catch {};
|
||||
free_list.append(gpa, prev_index) catch {};
|
||||
}
|
||||
} else {
|
||||
atom.prev = null;
|
||||
self.getAtomPtr(atom_index).prev_index = null;
|
||||
}
|
||||
|
||||
if (atom.next) |next| {
|
||||
next.prev = atom.prev;
|
||||
if (atom.next_index) |next_index| {
|
||||
self.getAtomPtr(next_index).prev_index = atom.prev_index;
|
||||
} else {
|
||||
atom.next = null;
|
||||
self.getAtomPtr(atom_index).next_index = null;
|
||||
}
|
||||
|
||||
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
|
||||
@ -910,7 +970,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
|
||||
self.locals.items[sym_index].section_number = .UNDEFINED;
|
||||
_ = self.atom_by_index_table.remove(sym_index);
|
||||
log.debug(" adding local symbol index {d} to free list", .{sym_index});
|
||||
atom.sym_index = 0;
|
||||
self.getAtomPtr(atom_index).sym_index = 0;
|
||||
}
|
||||
|
||||
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
|
||||
@ -927,15 +987,10 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
|
||||
|
||||
const decl_index = func.owner_decl;
|
||||
const decl = module.declPtr(decl_index);
|
||||
const atom = &decl.link.coff;
|
||||
try atom.ensureInitialized(self);
|
||||
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
|
||||
if (gop.found_existing) {
|
||||
self.freeUnnamedConsts(decl_index);
|
||||
self.freeRelocationsForAtom(&decl.link.coff);
|
||||
} else {
|
||||
gop.value_ptr.* = null;
|
||||
}
|
||||
|
||||
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
|
||||
self.freeUnnamedConsts(decl_index);
|
||||
Atom.freeRelocations(self, atom_index);
|
||||
|
||||
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer code_buffer.deinit();
|
||||
@ -979,11 +1034,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
|
||||
}
|
||||
const unnamed_consts = gop.value_ptr;
|
||||
|
||||
const atom = try gpa.create(Atom);
|
||||
errdefer gpa.destroy(atom);
|
||||
atom.* = Atom.empty;
|
||||
try atom.ensureInitialized(self);
|
||||
try self.managed_atoms.append(gpa, atom);
|
||||
const atom_index = try self.createAtom();
|
||||
|
||||
const sym_name = blk: {
|
||||
const decl_name = try decl.getFullyQualifiedName(mod);
|
||||
@ -993,11 +1044,15 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
|
||||
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
|
||||
};
|
||||
defer gpa.free(sym_name);
|
||||
try self.setSymbolName(atom.getSymbolPtr(self), sym_name);
|
||||
atom.getSymbolPtr(self).section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
|
||||
{
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sym = atom.getSymbolPtr(self);
|
||||
try self.setSymbolName(sym, sym_name);
|
||||
sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
|
||||
}
|
||||
|
||||
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
|
||||
.parent_atom_index = atom.getSymbolIndex().?,
|
||||
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
|
||||
});
|
||||
const code = switch (res) {
|
||||
.ok => code_buffer.items,
|
||||
@ -1010,17 +1065,18 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
|
||||
};
|
||||
|
||||
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
|
||||
const atom = self.getAtomPtr(atom_index);
|
||||
atom.alignment = required_alignment;
|
||||
atom.size = @intCast(u32, code.len);
|
||||
atom.getSymbolPtr(self).value = try self.allocateAtom(atom, atom.size, atom.alignment);
|
||||
errdefer self.freeAtom(atom);
|
||||
atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, atom.alignment);
|
||||
errdefer self.freeAtom(atom_index);
|
||||
|
||||
try unnamed_consts.append(gpa, atom);
|
||||
try unnamed_consts.append(gpa, atom_index);
|
||||
|
||||
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
|
||||
log.debug(" (required alignment 0x{x})", .{required_alignment});
|
||||
|
||||
try self.writeAtom(atom, code);
|
||||
try self.writeAtom(atom_index, code);
|
||||
|
||||
return atom.getSymbolIndex().?;
|
||||
}
|
||||
@ -1047,14 +1103,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
|
||||
}
|
||||
}
|
||||
|
||||
const atom = &decl.link.coff;
|
||||
try atom.ensureInitialized(self);
|
||||
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
|
||||
if (gop.found_existing) {
|
||||
self.freeRelocationsForAtom(atom);
|
||||
} else {
|
||||
gop.value_ptr.* = null;
|
||||
}
|
||||
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
|
||||
Atom.freeRelocations(self, atom_index);
|
||||
const atom = self.getAtom(atom_index);
|
||||
|
||||
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer code_buffer.deinit();
|
||||
@ -1064,7 +1115,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
|
||||
.ty = decl.ty,
|
||||
.val = decl_val,
|
||||
}, &code_buffer, .none, .{
|
||||
.parent_atom_index = decl.link.coff.getSymbolIndex().?,
|
||||
.parent_atom_index = atom.getSymbolIndex().?,
|
||||
});
|
||||
const code = switch (res) {
|
||||
.ok => code_buffer.items,
|
||||
@ -1082,7 +1133,20 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}

fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {
pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{
.atom = try self.createAtom(),
.section = self.getDeclOutputSection(decl_index),
.exports = .{},
};
}
return gop.value_ptr.atom;
}

fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const val = decl.val;
@ -1117,14 +1181,11 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
|
||||
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
|
||||
const required_alignment = decl.getAlignment(self.base.options.target);
|
||||
|
||||
const decl_ptr = self.decls.getPtr(decl_index).?;
|
||||
if (decl_ptr.* == null) {
|
||||
decl_ptr.* = self.getDeclOutputSection(decl);
|
||||
}
|
||||
const sect_index = decl_ptr.*.?;
|
||||
|
||||
const decl_metadata = self.decls.get(decl_index).?;
|
||||
const atom_index = decl_metadata.atom;
|
||||
const atom = self.getAtom(atom_index);
|
||||
const sect_index = decl_metadata.section;
|
||||
const code_len = @intCast(u32, code.len);
|
||||
const atom = &decl.link.coff;
|
||||
|
||||
if (atom.size != 0) {
|
||||
const sym = atom.getSymbolPtr(self);
|
||||
@ -1135,7 +1196,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
|
||||
const capacity = atom.capacity(self);
|
||||
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
|
||||
if (need_realloc) {
|
||||
const vaddr = try self.growAtom(atom, code_len, required_alignment);
|
||||
const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
|
||||
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
|
||||
log.debug(" (required alignment 0x{x}", .{required_alignment});
|
||||
|
||||
@ -1143,49 +1204,43 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
|
||||
sym.value = vaddr;
|
||||
log.debug(" (updating GOT entry)", .{});
|
||||
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
|
||||
const got_atom = self.getGotAtomForSymbol(got_target).?;
|
||||
const got_atom_index = self.getGotAtomIndexForSymbol(got_target).?;
|
||||
self.markRelocsDirtyByTarget(got_target);
|
||||
try self.writePtrWidthAtom(got_atom);
|
||||
try self.writePtrWidthAtom(got_atom_index);
|
||||
}
|
||||
} else if (code_len < atom.size) {
|
||||
self.shrinkAtom(atom, code_len);
|
||||
self.shrinkAtom(atom_index, code_len);
|
||||
}
|
||||
atom.size = code_len;
|
||||
self.getAtomPtr(atom_index).size = code_len;
|
||||
} else {
|
||||
const sym = atom.getSymbolPtr(self);
|
||||
try self.setSymbolName(sym, decl_name);
|
||||
sym.section_number = @intToEnum(coff.SectionNumber, sect_index + 1);
|
||||
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
|
||||
|
||||
const vaddr = try self.allocateAtom(atom, code_len, required_alignment);
|
||||
errdefer self.freeAtom(atom);
|
||||
const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
|
||||
errdefer self.freeAtom(atom_index);
|
||||
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
|
||||
atom.size = code_len;
|
||||
self.getAtomPtr(atom_index).size = code_len;
|
||||
sym.value = vaddr;
|
||||
|
||||
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
|
||||
const got_index = try self.allocateGotEntry(got_target);
|
||||
const got_atom = try self.createGotAtom(got_target);
|
||||
const got_atom_index = try self.createGotAtom(got_target);
|
||||
const got_atom = self.getAtom(got_atom_index);
|
||||
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
|
||||
try self.writePtrWidthAtom(got_atom);
|
||||
try self.writePtrWidthAtom(got_atom_index);
|
||||
}
|
||||
|
||||
self.markRelocsDirtyByTarget(atom.getSymbolWithLoc());
|
||||
try self.writeAtom(atom, code);
|
||||
}
|
||||
|
||||
fn freeRelocationsForAtom(self: *Coff, atom: *Atom) void {
|
||||
var removed_relocs = self.relocs.fetchRemove(atom);
|
||||
if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
|
||||
var removed_base_relocs = self.base_relocs.fetchRemove(atom);
|
||||
if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(self.base.allocator);
|
||||
try self.writeAtom(atom_index, code);
|
||||
}
|
||||
|
||||
fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
|
||||
const gpa = self.base.allocator;
|
||||
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
|
||||
for (unnamed_consts.items) |atom| {
|
||||
self.freeAtom(atom);
|
||||
for (unnamed_consts.items) |atom_index| {
|
||||
self.freeAtom(atom_index);
|
||||
}
|
||||
unnamed_consts.clearAndFree(gpa);
|
||||
}
|
||||
@ -1200,11 +1255,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
|
||||
|
||||
log.debug("freeDecl {*}", .{decl});
|
||||
|
||||
if (self.decls.fetchRemove(decl_index)) |kv| {
|
||||
if (kv.value) |_| {
|
||||
self.freeAtom(&decl.link.coff);
|
||||
self.freeUnnamedConsts(decl_index);
|
||||
}
|
||||
if (self.decls.fetchRemove(decl_index)) |const_kv| {
|
||||
var kv = const_kv;
|
||||
self.freeAtom(kv.value.atom);
|
||||
self.freeUnnamedConsts(decl_index);
|
||||
kv.value.exports.deinit(self.base.allocator);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1257,16 +1312,10 @@ pub fn updateDeclExports(
|
||||
const gpa = self.base.allocator;
|
||||
|
||||
const decl = module.declPtr(decl_index);
|
||||
const atom = &decl.link.coff;
|
||||
|
||||
if (atom.getSymbolIndex() == null) return;
|
||||
|
||||
const gop = try self.decls.getOrPut(gpa, decl_index);
|
||||
if (!gop.found_existing) {
|
||||
gop.value_ptr.* = self.getDeclOutputSection(decl);
|
||||
}
|
||||
|
||||
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
|
||||
const atom = self.getAtom(atom_index);
|
||||
const decl_sym = atom.getSymbol(self);
|
||||
const decl_metadata = self.decls.getPtr(decl_index).?;
|
||||
|
||||
for (exports) |exp| {
|
||||
log.debug("adding new export '{s}'", .{exp.options.name});
|
||||
@ -1301,9 +1350,9 @@ pub fn updateDeclExports(
|
||||
continue;
|
||||
}
|
||||
|
||||
const sym_index = exp.link.coff.sym_index orelse blk: {
|
||||
const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
|
||||
const sym_index = try self.allocateSymbol();
|
||||
exp.link.coff.sym_index = sym_index;
|
||||
try decl_metadata.exports.append(gpa, sym_index);
|
||||
break :blk sym_index;
|
||||
};
|
||||
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
|
||||
@ -1326,16 +1375,15 @@ pub fn updateDeclExports(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deleteExport(self: *Coff, exp: Export) void {
|
||||
pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
|
||||
if (self.llvm_object) |_| return;
|
||||
const sym_index = exp.sym_index orelse return;
|
||||
const metadata = self.decls.getPtr(decl_index) orelse return;
|
||||
const sym_index = metadata.getExportPtr(self, name) orelse return;
|
||||
|
||||
const gpa = self.base.allocator;
|
||||
|
||||
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
|
||||
const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
|
||||
const sym = self.getSymbolPtr(sym_loc);
|
||||
const sym_name = self.getSymbolName(sym_loc);
|
||||
log.debug("deleting export '{s}'", .{sym_name});
|
||||
log.debug("deleting export '{s}'", .{name});
|
||||
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
|
||||
sym.* = .{
|
||||
.name = [_]u8{0} ** 8,
|
||||
@ -1345,9 +1393,9 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
|
||||
.storage_class = .NULL,
|
||||
.number_of_aux_symbols = 0,
|
||||
};
|
||||
self.locals_free_list.append(gpa, sym_index) catch {};
|
||||
self.locals_free_list.append(gpa, sym_index.*) catch {};
|
||||
|
||||
if (self.resolver.fetchRemove(sym_name)) |entry| {
|
||||
if (self.resolver.fetchRemove(name)) |entry| {
|
||||
defer gpa.free(entry.key);
|
||||
self.globals_free_list.append(gpa, entry.value) catch {};
|
||||
self.globals.items[entry.value] = .{
|
||||
@ -1355,6 +1403,8 @@ pub fn deleteExport(self: *Coff, exp: Export) void {
|
||||
.file = null,
|
||||
};
|
||||
}
|
||||
|
||||
sym_index.* = 0;
|
||||
}
|
||||
|
||||
fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void {
|
||||
@ -1419,9 +1469,10 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
|
||||
if (self.imports_table.contains(global)) continue;
|
||||
|
||||
const import_index = try self.allocateImportEntry(global);
|
||||
const import_atom = try self.createImportAtom();
|
||||
const import_atom_index = try self.createImportAtom();
|
||||
const import_atom = self.getAtom(import_atom_index);
|
||||
self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
|
||||
try self.writePtrWidthAtom(import_atom);
|
||||
try self.writePtrWidthAtom(import_atom_index);
|
||||
}
|
||||
|
||||
if (build_options.enable_logging) {
|
||||
@ -1455,22 +1506,14 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
|
||||
}
|
||||
}
|
||||
|
||||
pub fn getDeclVAddr(
|
||||
self: *Coff,
|
||||
decl_index: Module.Decl.Index,
|
||||
reloc_info: link.File.RelocInfo,
|
||||
) !u64 {
|
||||
const mod = self.base.options.module.?;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
|
||||
pub fn getDeclVAddr(self: *Coff, decl_index: Module.Decl.Index, reloc_info: link.File.RelocInfo) !u64 {
|
||||
assert(self.llvm_object == null);
|
||||
|
||||
try decl.link.coff.ensureInitialized(self);
|
||||
const sym_index = decl.link.coff.getSymbolIndex().?;
|
||||
|
||||
const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
|
||||
const this_atom_index = try self.getOrCreateAtomForDecl(decl_index);
|
||||
const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?;
|
||||
const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
|
||||
const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
|
||||
try atom.addRelocation(self, .{
|
||||
try Atom.addRelocation(self, atom_index, .{
|
||||
.type = .direct,
|
||||
.target = target,
|
||||
.offset = @intCast(u32, reloc_info.offset),
|
||||
@ -1478,7 +1521,7 @@ pub fn getDeclVAddr(
|
||||
.pcrel = false,
|
||||
.length = 3,
|
||||
});
|
||||
try atom.addBaseRelocation(self, @intCast(u32, reloc_info.offset));
|
||||
try Atom.addBaseRelocation(self, atom_index, @intCast(u32, reloc_info.offset));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1505,10 +1548,10 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 {
|
||||
return global_index;
|
||||
}
|
||||
|
||||
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
|
||||
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
|
||||
_ = self;
|
||||
_ = module;
|
||||
_ = decl;
|
||||
_ = decl_index;
|
||||
log.debug("TODO implement updateDeclLineNumber", .{});
|
||||
}
|
||||
|
||||
@ -1529,7 +1572,8 @@ fn writeBaseRelocations(self: *Coff) !void {
|
||||
|
||||
var it = self.base_relocs.iterator();
|
||||
while (it.next()) |entry| {
|
||||
const atom = entry.key_ptr.*;
|
||||
const atom_index = entry.key_ptr.*;
|
||||
const atom = self.getAtom(atom_index);
|
||||
const offsets = entry.value_ptr.*;
|
||||
|
||||
for (offsets.items) |offset| {
|
||||
@ -1613,7 +1657,8 @@ fn writeImportTable(self: *Coff) !void {
|
||||
const gpa = self.base.allocator;
|
||||
|
||||
const section = self.sections.get(self.idata_section_index.?);
|
||||
const last_atom = section.last_atom orelse return;
|
||||
const last_atom_index = section.last_atom_index orelse return;
|
||||
const last_atom = self.getAtom(last_atom_index);
|
||||
|
||||
const iat_rva = section.header.virtual_address;
|
||||
const iat_size = last_atom.getSymbol(self).value + last_atom.size * 2 - iat_rva; // account for sentinel zero pointer
|
||||
@ -2051,27 +2096,37 @@ pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult
return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr };
}

pub fn getAtom(self: *const Coff, atom_index: Atom.Index) Atom {
assert(atom_index < self.atoms.items.len);
return self.atoms.items[atom_index];
}

pub fn getAtomPtr(self: *Coff, atom_index: Atom.Index) *Atom {
assert(atom_index < self.atoms.items.len);
return &self.atoms.items[atom_index];
}

/// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor.
/// Returns null on failure.
pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
assert(sym_loc.file == null); // TODO linking with object files
return self.atom_by_index_table.get(sym_loc.sym_index);
}

/// Returns GOT atom that references `sym_loc` if one exists.
/// Returns null otherwise.
pub fn getGotAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
pub fn getGotAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const got_index = self.got_entries_table.get(sym_loc) orelse return null;
const got_entry = self.got_entries.items[got_index];
return self.getAtomForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
return self.getAtomIndexForSymbol(.{ .sym_index = got_entry.sym_index, .file = null });
}

/// Returns import atom that references `sym_loc` if one exists.
/// Returns null otherwise.
pub fn getImportAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom {
pub fn getImportAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.Index {
const imports_index = self.imports_table.get(sym_loc) orelse return null;
const imports_entry = self.imports.items[imports_index];
return self.getAtomForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
return self.getAtomIndexForSymbol(.{ .sym_index = imports_entry.sym_index, .file = null });
}

fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {

@ -27,23 +27,10 @@ alignment: u32,

/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`.
prev: ?*Atom,
next: ?*Atom,
prev_index: ?Index,
next_index: ?Index,

pub const empty = Atom{
.sym_index = 0,
.file = null,
.size = 0,
.alignment = 0,
.prev = null,
.next = null,
};

pub fn ensureInitialized(self: *Atom, coff_file: *Coff) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.sym_index = try coff_file.allocateSymbol();
try coff_file.atom_by_index_table.putNoClobber(coff_file.base.allocator, self.sym_index, self);
}
pub const Index = u32;

pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
@ -85,7 +72,8 @@ pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
/// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file);
if (self.next) |next| {
if (self.next_index) |next_index| {
const next = coff_file.getAtom(next_index);
const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value;
} else {
@ -97,7 +85,8 @@ pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
}

pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom.
const next = self.next orelse return false;
const next_index = self.next_index orelse return false;
const next = coff_file.getAtom(next_index);
const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value;
@ -107,22 +96,33 @@ pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
return surplus >= Coff.min_text_capacity;
}

pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Relocation) !void {
pub fn addRelocation(coff_file: *Coff, atom_index: Index, reloc: Relocation) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.type), reloc.target.sym_index });
const gop = try coff_file.relocs.getOrPut(gpa, self);
const gop = try coff_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}

pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void {
pub fn addBaseRelocation(coff_file: *Coff, atom_index: Index, offset: u32) !void {
const gpa = coff_file.base.allocator;
log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{ offset, self.sym_index });
const gop = try coff_file.base_relocs.getOrPut(gpa, self);
log.debug(" (adding base relocation at offset 0x{x} in %{d})", .{
offset,
coff_file.getAtom(atom_index).getSymbolIndex().?,
});
const gop = try coff_file.base_relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}

pub fn freeRelocations(coff_file: *Coff, atom_index: Index) void {
const gpa = coff_file.base.allocator;
var removed_relocs = coff_file.relocs.fetchRemove(atom_index);
if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
var removed_base_relocs = coff_file.base_relocs.fetchRemove(atom_index);
if (removed_base_relocs) |*base_relocs| base_relocs.value.deinit(gpa);
}

@ -46,33 +46,35 @@ length: u2,
dirty: bool = true,

/// Returns an Atom which is the target node of this relocation edge (if any).
pub fn getTargetAtom(self: Relocation, coff_file: *Coff) ?*Atom {
pub fn getTargetAtomIndex(self: Relocation, coff_file: *const Coff) ?Atom.Index {
switch (self.type) {
.got,
.got_page,
.got_pageoff,
=> return coff_file.getGotAtomForSymbol(self.target),
=> return coff_file.getGotAtomIndexForSymbol(self.target),

.direct,
.page,
.pageoff,
=> return coff_file.getAtomForSymbol(self.target),
=> return coff_file.getAtomIndexForSymbol(self.target),

.import,
.import_page,
.import_pageoff,
=> return coff_file.getImportAtomForSymbol(self.target),
=> return coff_file.getImportAtomIndexForSymbol(self.target),
}
}

pub fn resolve(self: *Relocation, atom: *Atom, coff_file: *Coff) !void {
pub fn resolve(self: *Relocation, atom_index: Atom.Index, coff_file: *Coff) !void {
const atom = coff_file.getAtom(atom_index);
const source_sym = atom.getSymbol(coff_file);
const source_section = coff_file.sections.get(@enumToInt(source_sym.section_number) - 1).header;
const source_vaddr = source_sym.value + self.offset;

const file_offset = source_section.pointer_to_raw_data + source_sym.value - source_section.virtual_address;

const target_atom = self.getTargetAtom(coff_file) orelse return;
const target_atom_index = self.getTargetAtomIndex(coff_file) orelse return;
const target_atom = coff_file.getAtom(target_atom_index);
const target_vaddr = target_atom.getSymbol(coff_file).value;
const target_vaddr_with_addend = target_vaddr + self.addend;

@ -107,7 +109,7 @@ const Context = struct {
image_base: u64,
};

fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
fn resolveAarch64(self: Relocation, ctx: Context, coff_file: *Coff) !void {
var buffer: [@sizeOf(u64)]u8 = undefined;
switch (self.length) {
2 => {
@ -197,7 +199,7 @@ fn resolveAarch64(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
}
}

fn resolveX86(self: *Relocation, ctx: Context, coff_file: *Coff) !void {
fn resolveX86(self: Relocation, ctx: Context, coff_file: *Coff) !void {
switch (self.type) {
.got_page => unreachable,
.got_pageoff => unreachable,

File diff suppressed because it is too large
1204
src/link/Elf.zig
1204
src/link/Elf.zig
File diff suppressed because it is too large
@ -4,7 +4,6 @@ const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;

const Dwarf = @import("../Dwarf.zig");
const Elf = @import("../Elf.zig");

/// Each decl always gets a local symbol with the fully qualified name.
@ -20,44 +19,33 @@ offset_table_index: u32,

/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*Atom,
next: ?*Atom,
prev_index: ?Index,
next_index: ?Index,

dbg_info_atom: Dwarf.Atom,
pub const Index = u32;

pub const empty = Atom{
.local_sym_index = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
.dbg_info_atom = undefined,
pub const Reloc = struct {
target: u32,
offset: u64,
addend: u32,
prev_vaddr: u64,
};

pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.local_sym_index = try elf_file.allocateLocalSymbol();
self.offset_table_index = try elf_file.allocateGotOffset();
try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self);
}

pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.local_sym_index == 0) return null;
return self.local_sym_index;
}

pub fn getSymbol(self: Atom, elf_file: *Elf) elf.Elf64_Sym {
const sym_index = self.getSymbolIndex().?;
return elf_file.local_symbols.items[sym_index];
pub fn getSymbol(self: Atom, elf_file: *const Elf) elf.Elf64_Sym {
return elf_file.getSymbol(self.getSymbolIndex().?);
}

pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
const sym_index = self.getSymbolIndex().?;
return &elf_file.local_symbols.items[sym_index];
return elf_file.getSymbolPtr(self.getSymbolIndex().?);
}

pub fn getName(self: Atom, elf_file: *Elf) []const u8 {
const sym = self.getSymbol();
return elf_file.getString(sym.st_name);
pub fn getName(self: Atom, elf_file: *const Elf) []const u8 {
return elf_file.getSymbolName(self.getSymbolIndex().?);
}

pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
@ -72,9 +60,10 @@ pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *Elf) u64 {
pub fn capacity(self: Atom, elf_file: *const Elf) u64 {
const self_sym = self.getSymbol(elf_file);
if (self.next) |next| {
if (self.next_index) |next_index| {
const next = elf_file.getAtom(next_index);
const next_sym = next.getSymbol(elf_file);
return next_sym.st_value - self_sym.st_value;
} else {
@ -83,9 +72,10 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
}
}

pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
pub fn freeListEligible(self: Atom, elf_file: *const Elf) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const next_index = self.next_index orelse return false;
const next = elf_file.getAtom(next_index);
const self_sym = self.getSymbol(elf_file);
const next_sym = next.getSymbol(elf_file);
const cap = next_sym.st_value - self_sym.st_value;
@ -94,3 +84,17 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
const surplus = cap - ideal_cap;
return surplus >= Elf.min_text_capacity;
}

pub fn addRelocation(elf_file: *Elf, atom_index: Index, reloc: Reloc) !void {
const gpa = elf_file.base.allocator;
const gop = try elf_file.relocs.getOrPut(gpa, atom_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}

pub fn freeRelocations(elf_file: *Elf, atom_index: Index) void {
var removed_relocs = elf_file.relocs.fetchRemove(atom_index);
if (removed_relocs) |*relocs| relocs.value.deinit(elf_file.base.allocator);
}
File diff suppressed because it is too large
@ -13,7 +13,6 @@ const trace = @import("../../tracy.zig").trace;
|
||||
|
||||
const Allocator = mem.Allocator;
|
||||
const Arch = std.Target.Cpu.Arch;
|
||||
const Dwarf = @import("../Dwarf.zig");
|
||||
const MachO = @import("../MachO.zig");
|
||||
const Relocation = @import("Relocation.zig");
|
||||
const SymbolWithLoc = MachO.SymbolWithLoc;
|
||||
@ -39,10 +38,11 @@ size: u64,
|
||||
alignment: u32,
|
||||
|
||||
/// Points to the previous and next neighbours
|
||||
next: ?*Atom,
|
||||
prev: ?*Atom,
|
||||
/// TODO use the same trick as with symbols: reserve index 0 as null atom
|
||||
next_index: ?Index,
|
||||
prev_index: ?Index,
|
||||
|
||||
dbg_info_atom: Dwarf.Atom,
|
||||
pub const Index = u32;
|
||||
|
||||
pub const Binding = struct {
|
||||
target: SymbolWithLoc,
|
||||
@ -54,22 +54,6 @@ pub const SymbolAtOffset = struct {
|
||||
offset: u64,
|
||||
};
|
||||
|
||||
pub const empty = Atom{
|
||||
.sym_index = 0,
|
||||
.file = null,
|
||||
.size = 0,
|
||||
.alignment = 0,
|
||||
.prev = null,
|
||||
.next = null,
|
||||
.dbg_info_atom = undefined,
|
||||
};
|
||||
|
||||
pub fn ensureInitialized(self: *Atom, macho_file: *MachO) !void {
|
||||
if (self.getSymbolIndex() != null) return; // Already initialized
|
||||
self.sym_index = try macho_file.allocateSymbol();
|
||||
try macho_file.atom_by_index_table.putNoClobber(macho_file.base.allocator, self.sym_index, self);
|
||||
}
|
||||
|
||||
pub fn getSymbolIndex(self: Atom) ?u32 {
|
||||
if (self.sym_index == 0) return null;
|
||||
return self.sym_index;
|
||||
@ -108,7 +92,8 @@ pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
|
||||
/// this calculation.
|
||||
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
|
||||
const self_sym = self.getSymbol(macho_file);
|
||||
if (self.next) |next| {
|
||||
if (self.next_index) |next_index| {
|
||||
const next = macho_file.getAtom(next_index);
|
||||
const next_sym = next.getSymbol(macho_file);
|
||||
return next_sym.n_value - self_sym.n_value;
|
||||
} else {
|
||||
@ -120,7 +105,8 @@ pub fn capacity(self: Atom, macho_file: *MachO) u64 {
|
||||
|
||||
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
|
||||
// No need to keep a free list node for the last atom.
|
||||
const next = self.next orelse return false;
|
||||
const next_index = self.next_index orelse return false;
|
||||
const next = macho_file.getAtom(next_index);
|
||||
const self_sym = self.getSymbol(macho_file);
|
||||
const next_sym = next.getSymbol(macho_file);
|
||||
const cap = next_sym.n_value - self_sym.n_value;
|
||||
@ -130,19 +116,19 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
|
||||
return surplus >= MachO.min_text_capacity;
|
||||
}
|
||||
|
||||
pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
|
||||
return self.addRelocations(macho_file, 1, .{reloc});
|
||||
pub fn addRelocation(macho_file: *MachO, atom_index: Index, reloc: Relocation) !void {
|
||||
return addRelocations(macho_file, atom_index, 1, .{reloc});
|
||||
}
|
||||
|
||||
pub fn addRelocations(
|
||||
self: *Atom,
|
||||
macho_file: *MachO,
|
||||
atom_index: Index,
|
||||
comptime count: comptime_int,
|
||||
relocs: [count]Relocation,
|
||||
) !void {
|
||||
const gpa = macho_file.base.allocator;
|
||||
const target = macho_file.base.options.target;
|
||||
const gop = try macho_file.relocs.getOrPut(gpa, self);
|
||||
const gop = try macho_file.relocs.getOrPut(gpa, atom_index);
|
||||
if (!gop.found_existing) {
|
||||
gop.value_ptr.* = .{};
|
||||
}
|
||||
@ -156,56 +142,72 @@ pub fn addRelocations(
|
||||
}
|
||||
}
|
||||
|
||||
pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
|
||||
pub fn addRebase(macho_file: *MachO, atom_index: Index, offset: u32) !void {
|
||||
const gpa = macho_file.base.allocator;
|
||||
log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, self.getSymbolIndex() });
|
||||
const gop = try macho_file.rebases.getOrPut(gpa, self);
|
||||
const atom = macho_file.getAtom(atom_index);
|
||||
log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, atom.getSymbolIndex() });
|
||||
const gop = try macho_file.rebases.getOrPut(gpa, atom_index);
|
||||
if (!gop.found_existing) {
|
||||
gop.value_ptr.* = .{};
|
||||
}
|
||||
try gop.value_ptr.append(gpa, offset);
|
||||
}
|
||||
|
||||
pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
|
||||
pub fn addBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
|
||||
const gpa = macho_file.base.allocator;
|
||||
const atom = macho_file.getAtom(atom_index);
|
||||
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
|
||||
macho_file.getSymbolName(binding.target),
|
||||
binding.offset,
|
||||
self.getSymbolIndex(),
|
||||
atom.getSymbolIndex(),
|
||||
});
|
||||
const gop = try macho_file.bindings.getOrPut(gpa, self);
|
||||
const gop = try macho_file.bindings.getOrPut(gpa, atom_index);
|
||||
if (!gop.found_existing) {
|
||||
gop.value_ptr.* = .{};
|
||||
}
|
||||
try gop.value_ptr.append(gpa, binding);
|
||||
}
|
||||
|
||||
pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
|
||||
pub fn addLazyBinding(macho_file: *MachO, atom_index: Index, binding: Binding) !void {
|
||||
const gpa = macho_file.base.allocator;
|
||||
const atom = macho_file.getAtom(atom_index);
|
||||
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
|
||||
macho_file.getSymbolName(binding.target),
|
||||
binding.offset,
|
||||
self.getSymbolIndex(),
|
||||
atom.getSymbolIndex(),
|
||||
});
|
||||
const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
|
||||
const gop = try macho_file.lazy_bindings.getOrPut(gpa, atom_index);
|
||||
if (!gop.found_existing) {
|
||||
gop.value_ptr.* = .{};
|
||||
}
|
||||
try gop.value_ptr.append(gpa, binding);
|
||||
}
|
||||
|
||||
pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
|
||||
const relocs = macho_file.relocs.get(self) orelse return;
|
||||
const source_sym = self.getSymbol(macho_file);
|
||||
pub fn resolveRelocations(macho_file: *MachO, atom_index: Index) !void {
|
||||
const atom = macho_file.getAtom(atom_index);
|
||||
const relocs = macho_file.relocs.get(atom_index) orelse return;
|
||||
const source_sym = atom.getSymbol(macho_file);
|
||||
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
|
||||
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;
|
||||
|
||||
log.debug("relocating '{s}'", .{self.getName(macho_file)});
|
||||
log.debug("relocating '{s}'", .{atom.getName(macho_file)});
|
||||
|
||||
for (relocs.items) |*reloc| {
|
||||
if (!reloc.dirty) continue;
|
||||
|
||||
try reloc.resolve(self, macho_file, file_offset);
|
||||
try reloc.resolve(macho_file, atom_index, file_offset);
|
||||
reloc.dirty = false;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn freeRelocations(macho_file: *MachO, atom_index: Index) void {
|
||||
const gpa = macho_file.base.allocator;
|
||||
var removed_relocs = macho_file.relocs.fetchOrderedRemove(atom_index);
|
||||
if (removed_relocs) |*relocs| relocs.value.deinit(gpa);
|
||||
var removed_rebases = macho_file.rebases.fetchOrderedRemove(atom_index);
|
||||
if (removed_rebases) |*rebases| rebases.value.deinit(gpa);
|
||||
var removed_bindings = macho_file.bindings.fetchOrderedRemove(atom_index);
|
||||
if (removed_bindings) |*bindings| bindings.value.deinit(gpa);
|
||||
var removed_lazy_bindings = macho_file.lazy_bindings.fetchOrderedRemove(atom_index);
|
||||
if (removed_lazy_bindings) |*lazy_bindings| lazy_bindings.value.deinit(gpa);
|
||||
}
|
||||
|
||||
@ -82,11 +82,11 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
}

if (self.debug_str_section_index == null) {
assert(self.dwarf.strtab.items.len == 0);
try self.dwarf.strtab.append(self.allocator, 0);
assert(self.dwarf.strtab.buffer.items.len == 0);
try self.dwarf.strtab.buffer.append(self.allocator, 0);
self.debug_str_section_index = try self.allocateSection(
"__debug_str",
@intCast(u32, self.dwarf.strtab.items.len),
@intCast(u32, self.dwarf.strtab.buffer.items.len),
0,
);
self.debug_string_table_dirty = true;
@ -291,10 +291,10 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {

{
const sect_index = self.debug_str_section_index.?;
if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
if (self.debug_string_table_dirty or self.dwarf.strtab.buffer.items.len != self.getSection(sect_index).size) {
const needed_size = @intCast(u32, self.dwarf.strtab.buffer.items.len);
try self.growSection(sect_index, needed_size, false);
try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
try self.file.pwriteAll(self.dwarf.strtab.buffer.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}
@ -29,33 +29,35 @@ pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
}
}

pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
pub fn getTargetAtomIndex(self: Relocation, macho_file: *MachO) ?Atom.Index {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.type)) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
=> return macho_file.getGotAtomForSymbol(self.target),
=> return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.type)) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
=> return macho_file.getGotAtomForSymbol(self.target),
=> return macho_file.getGotAtomIndexForSymbol(self.target),
else => {},
},
else => unreachable,
}
if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
return macho_file.getAtomForSymbol(self.target);
if (macho_file.getStubsAtomIndexForSymbol(self.target)) |stubs_atom| return stubs_atom;
return macho_file.getAtomIndexForSymbol(self.target);
}

pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
pub fn resolve(self: Relocation, macho_file: *MachO, atom_index: Atom.Index, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
const atom = macho_file.getAtom(atom_index);
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;

const target_atom = self.getTargetAtom(macho_file) orelse return;
const target_atom_index = self.getTargetAtomIndex(macho_file) orelse return;
const target_atom = macho_file.getAtom(target_atom_index);
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;

log.debug("  ({x}: [() => 0x{x} ({s})) ({s})", .{
@ -3596,7 +3596,8 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
man.hash.addOptionalBytes(options.sysroot);
try man.addOptionalFile(options.entitlements);

// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
// We don't actually care whether it's a cache hit or miss; we just
// need the digest and the lock.
_ = try man.hit();
digest = man.final();

@ -4177,9 +4178,11 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
log.debug("failed to save linking hash digest file: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
};
if (man.have_exclusive_lock) {
man.writeManifest() catch |err| {
log.debug("failed to write cache manifest when linking: {s}", .{@errorName(err)});
};
}
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
macho_file.base.lock = man.toOwnedLock();
@ -21,14 +21,7 @@ const Allocator = std.mem.Allocator;
|
||||
const log = std.log.scoped(.link);
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const FnDeclOutput = struct {
|
||||
/// this code is modified when relocated so it is mutable
|
||||
code: []u8,
|
||||
/// this might have to be modified in the linker, so thats why its mutable
|
||||
lineinfo: []u8,
|
||||
start_line: u32,
|
||||
end_line: u32,
|
||||
};
|
||||
pub const base_tag = .plan9;
|
||||
|
||||
base: link.File,
|
||||
sixtyfour_bit: bool,
|
||||
@ -101,6 +94,9 @@ got_index_free_list: std.ArrayListUnmanaged(usize) = .{},
|
||||
|
||||
syms_index_free_list: std.ArrayListUnmanaged(usize) = .{},
|
||||
|
||||
decl_blocks: std.ArrayListUnmanaged(DeclBlock) = .{},
|
||||
decls: std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata) = .{},
|
||||
|
||||
const Reloc = struct {
|
||||
target: Module.Decl.Index,
|
||||
offset: u64,
|
||||
@ -115,6 +111,42 @@ const Bases = struct {
|
||||
|
||||
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(struct { info: DeclBlock, code: []const u8 }));
|
||||
|
||||
pub const PtrWidth = enum { p32, p64 };
|
||||
|
||||
pub const DeclBlock = struct {
|
||||
type: aout.Sym.Type,
|
||||
/// offset in the text or data sects
|
||||
offset: ?u64,
|
||||
/// offset into syms
|
||||
sym_index: ?usize,
|
||||
/// offset into got
|
||||
got_index: ?usize,
|
||||
|
||||
pub const Index = u32;
|
||||
};
|
||||
|
||||
const DeclMetadata = struct {
|
||||
index: DeclBlock.Index,
|
||||
exports: std.ArrayListUnmanaged(usize) = .{},
|
||||
|
||||
fn getExport(m: DeclMetadata, p9: *const Plan9, name: []const u8) ?usize {
|
||||
for (m.exports.items) |exp| {
|
||||
const sym = p9.syms.items[exp];
|
||||
if (mem.eql(u8, name, sym.name)) return exp;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
const FnDeclOutput = struct {
|
||||
/// this code is modified when relocated so it is mutable
|
||||
code: []u8,
|
||||
/// this might have to be modified in the linker, so thats why its mutable
|
||||
lineinfo: []u8,
|
||||
start_line: u32,
|
||||
end_line: u32,
|
||||
};
|
||||
|
||||
fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 {
|
||||
return addr + switch (t) {
|
||||
.T, .t, .l, .L => self.bases.text,
|
||||
@ -127,22 +159,6 @@ fn getSymAddr(self: Plan9, s: aout.Sym) u64 {
|
||||
return self.getAddr(s.value, s.type);
|
||||
}
|
||||
|
||||
pub const DeclBlock = struct {
|
||||
type: aout.Sym.Type,
|
||||
/// offset in the text or data sects
|
||||
offset: ?u64,
|
||||
/// offset into syms
|
||||
sym_index: ?usize,
|
||||
/// offset into got
|
||||
got_index: ?usize,
|
||||
pub const empty = DeclBlock{
|
||||
.type = .t,
|
||||
.offset = null,
|
||||
.sym_index = null,
|
||||
.got_index = null,
|
||||
};
|
||||
};
|
||||
|
||||
pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
|
||||
return switch (arch) {
|
||||
.x86_64 => .{
|
||||
@ -164,8 +180,6 @@ pub fn defaultBaseAddrs(arch: std.Target.Cpu.Arch) Bases {
|
||||
};
|
||||
}
|
||||
|
||||
pub const PtrWidth = enum { p32, p64 };
|
||||
|
||||
pub fn createEmpty(gpa: Allocator, options: link.Options) !*Plan9 {
|
||||
if (options.use_llvm)
|
||||
return error.LLVMBackendDoesNotSupportPlan9;
|
||||
@ -271,7 +285,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
|
||||
const decl = module.declPtr(decl_index);
|
||||
self.freeUnnamedConsts(decl_index);
|
||||
|
||||
try self.seeDecl(decl_index);
|
||||
_ = try self.seeDecl(decl_index);
|
||||
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
|
||||
|
||||
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
|
||||
@ -313,11 +327,11 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
|
||||
.end_line = end_line,
|
||||
};
|
||||
try self.putFn(decl_index, out);
|
||||
return self.updateFinish(decl);
|
||||
return self.updateFinish(decl_index);
|
||||
}
|
||||
|
||||
pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
|
||||
try self.seeDecl(decl_index);
|
||||
_ = try self.seeDecl(decl_index);
|
||||
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
|
||||
defer code_buffer.deinit();
|
||||
|
||||
@ -387,7 +401,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
|
||||
}
|
||||
}
|
||||
|
||||
try self.seeDecl(decl_index);
|
||||
_ = try self.seeDecl(decl_index);
|
||||
|
||||
log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index });
|
||||
|
||||
@ -414,28 +428,31 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
|
||||
if (self.data_decl_table.fetchPutAssumeCapacity(decl_index, duped_code)) |old_entry| {
|
||||
self.base.allocator.free(old_entry.value);
|
||||
}
|
||||
return self.updateFinish(decl);
|
||||
return self.updateFinish(decl_index);
|
||||
}
|
||||
/// called at the end of update{Decl,Func}
|
||||
fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
|
||||
fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
|
||||
const decl = self.base.options.module.?.declPtr(decl_index);
|
||||
const is_fn = (decl.ty.zigTypeTag() == .Fn);
|
||||
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
|
||||
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
|
||||
|
||||
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
|
||||
// write the internal linker metadata
|
||||
decl.link.plan9.type = sym_t;
|
||||
decl_block.type = sym_t;
|
||||
// write the symbol
|
||||
// we already have the got index
|
||||
const sym: aout.Sym = .{
|
||||
.value = undefined, // the value of stuff gets filled in in flushModule
|
||||
.type = decl.link.plan9.type,
|
||||
.type = decl_block.type,
|
||||
.name = mem.span(decl.name),
|
||||
};
|
||||
|
||||
if (decl.link.plan9.sym_index) |s| {
|
||||
if (decl_block.sym_index) |s| {
|
||||
self.syms.items[s] = sym;
|
||||
} else {
|
||||
const s = try self.allocateSymbolIndex();
|
||||
decl.link.plan9.sym_index = s;
|
||||
decl_block.sym_index = s;
|
||||
self.syms.items[s] = sym;
|
||||
}
|
||||
}
|
||||
@ -550,6 +567,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
|
||||
while (it.next()) |entry| {
|
||||
const decl_index = entry.key_ptr.*;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
|
||||
const out = entry.value_ptr.*;
|
||||
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
|
||||
{
|
||||
@ -568,16 +586,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
|
||||
iovecs_i += 1;
|
||||
const off = self.getAddr(text_i, .t);
|
||||
text_i += out.code.len;
|
||||
decl.link.plan9.offset = off;
|
||||
decl_block.offset = off;
|
||||
if (!self.sixtyfour_bit) {
|
||||
mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off));
|
||||
mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
|
||||
mem.writeIntNative(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off));
|
||||
mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
|
||||
} else {
|
||||
mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
|
||||
mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
|
||||
}
|
||||
self.syms.items[decl.link.plan9.sym_index.?].value = off;
|
||||
self.syms.items[decl_block.sym_index.?].value = off;
|
||||
if (mod.decl_exports.get(decl_index)) |exports| {
|
||||
try self.addDeclExports(mod, decl, exports.items);
|
||||
try self.addDeclExports(mod, decl_index, exports.items);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -598,6 +616,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
|
||||
while (it.next()) |entry| {
|
||||
const decl_index = entry.key_ptr.*;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
|
||||
const code = entry.value_ptr.*;
|
||||
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
|
||||
|
||||
@ -606,15 +625,15 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
|
||||
iovecs_i += 1;
|
||||
const off = self.getAddr(data_i, .d);
|
||||
data_i += code.len;
|
||||
decl.link.plan9.offset = off;
|
||||
decl_block.offset = off;
|
||||
if (!self.sixtyfour_bit) {
|
||||
mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
|
||||
mem.writeInt(u32, got_table[decl_block.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian());
|
||||
} else {
|
||||
mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
|
||||
mem.writeInt(u64, got_table[decl_block.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian());
|
||||
}
|
||||
self.syms.items[decl.link.plan9.sym_index.?].value = off;
|
||||
self.syms.items[decl_block.sym_index.?].value = off;
|
||||
if (mod.decl_exports.get(decl_index)) |exports| {
|
||||
try self.addDeclExports(mod, decl, exports.items);
|
||||
try self.addDeclExports(mod, decl_index, exports.items);
|
||||
}
|
||||
}
|
||||
// write the unnamed constants after the other data decls
|
||||
@ -676,7 +695,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
|
||||
for (kv.value_ptr.items) |reloc| {
|
||||
const target_decl_index = reloc.target;
|
||||
const target_decl = mod.declPtr(target_decl_index);
|
||||
const target_decl_offset = target_decl.link.plan9.offset.?;
|
||||
const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index);
|
||||
const target_decl_offset = target_decl_block.offset.?;
|
||||
|
||||
const offset = reloc.offset;
|
||||
const addend = reloc.addend;
|
||||
@ -709,28 +729,36 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
|
||||
fn addDeclExports(
|
||||
self: *Plan9,
|
||||
module: *Module,
|
||||
decl: *Module.Decl,
|
||||
decl_index: Module.Decl.Index,
|
||||
exports: []const *Module.Export,
|
||||
) !void {
|
||||
const metadata = self.decls.getPtr(decl_index).?;
|
||||
const decl_block = self.getDeclBlock(metadata.index);
|
||||
|
||||
for (exports) |exp| {
|
||||
// plan9 does not support custom sections
|
||||
if (exp.options.section) |section_name| {
|
||||
if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
|
||||
try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{}));
|
||||
try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
|
||||
self.base.allocator,
|
||||
module.declPtr(decl_index).srcLoc(),
|
||||
"plan9 does not support extra sections",
|
||||
.{},
|
||||
));
|
||||
break;
|
||||
}
|
||||
}
|
||||
const sym = .{
|
||||
.value = decl.link.plan9.offset.?,
|
||||
.type = decl.link.plan9.type.toGlobal(),
|
||||
.value = decl_block.offset.?,
|
||||
.type = decl_block.type.toGlobal(),
|
||||
.name = exp.options.name,
|
||||
};
|
||||
|
||||
if (exp.link.plan9) |i| {
|
||||
if (metadata.getExport(self, exp.options.name)) |i| {
|
||||
self.syms.items[i] = sym;
|
||||
} else {
|
||||
try self.syms.append(self.base.allocator, sym);
|
||||
exp.link.plan9 = self.syms.items.len - 1;
|
||||
try metadata.exports.append(self.base.allocator, self.syms.items.len - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -760,13 +788,18 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
|
||||
self.base.allocator.free(removed_entry.value);
|
||||
}
|
||||
}
|
||||
if (decl.link.plan9.got_index) |i| {
|
||||
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
|
||||
self.got_index_free_list.append(self.base.allocator, i) catch {};
|
||||
}
|
||||
if (decl.link.plan9.sym_index) |i| {
|
||||
self.syms_index_free_list.append(self.base.allocator, i) catch {};
|
||||
self.syms.items[i] = aout.Sym.undefined_symbol;
|
||||
if (self.decls.fetchRemove(decl_index)) |const_kv| {
|
||||
var kv = const_kv;
|
||||
const decl_block = self.getDeclBlock(kv.value.index);
|
||||
if (decl_block.got_index) |i| {
|
||||
// TODO: if this catch {} is triggered, an assertion in flushModule will be triggered, because got_index_free_list will have the wrong length
|
||||
self.got_index_free_list.append(self.base.allocator, i) catch {};
|
||||
}
|
||||
if (decl_block.sym_index) |i| {
|
||||
self.syms_index_free_list.append(self.base.allocator, i) catch {};
|
||||
self.syms.items[i] = aout.Sym.undefined_symbol;
|
||||
}
|
||||
kv.value.exports.deinit(self.base.allocator);
|
||||
}
|
||||
self.freeUnnamedConsts(decl_index);
|
||||
{
|
||||
@ -786,12 +819,30 @@ fn freeUnnamedConsts(self: *Plan9, decl_index: Module.Decl.Index) void {
|
||||
unnamed_consts.clearAndFree(self.base.allocator);
|
||||
}
|
||||
|
||||
pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !void {
|
||||
const mod = self.base.options.module.?;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
if (decl.link.plan9.got_index == null) {
|
||||
decl.link.plan9.got_index = self.allocateGotIndex();
|
||||
fn createDeclBlock(self: *Plan9) !DeclBlock.Index {
|
||||
const gpa = self.base.allocator;
|
||||
const index = @intCast(DeclBlock.Index, self.decl_blocks.items.len);
|
||||
const decl_block = try self.decl_blocks.addOne(gpa);
|
||||
decl_block.* = .{
|
||||
.type = .t,
|
||||
.offset = null,
|
||||
.sym_index = null,
|
||||
.got_index = null,
|
||||
};
|
||||
return index;
|
||||
}
|
||||
|
||||
pub fn seeDecl(self: *Plan9, decl_index: Module.Decl.Index) !DeclBlock.Index {
|
||||
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
|
||||
if (!gop.found_existing) {
|
||||
const index = try self.createDeclBlock();
|
||||
self.getDeclBlockPtr(index).got_index = self.allocateGotIndex();
|
||||
gop.value_ptr.* = .{
|
||||
.index = index,
|
||||
.exports = .{},
|
||||
};
|
||||
}
|
||||
return gop.value_ptr.index;
|
||||
}
|
||||
|
||||
pub fn updateDeclExports(
|
||||
@ -800,7 +851,7 @@ pub fn updateDeclExports(
|
||||
decl_index: Module.Decl.Index,
|
||||
exports: []const *Module.Export,
|
||||
) !void {
|
||||
try self.seeDecl(decl_index);
|
||||
_ = try self.seeDecl(decl_index);
|
||||
// we do all the things in flush
|
||||
_ = module;
|
||||
_ = exports;
|
||||
@ -842,10 +893,17 @@ pub fn deinit(self: *Plan9) void {
|
||||
self.syms_index_free_list.deinit(gpa);
|
||||
self.file_segments.deinit(gpa);
|
||||
self.path_arena.deinit();
|
||||
self.decl_blocks.deinit(gpa);
|
||||
|
||||
{
|
||||
var it = self.decls.iterator();
|
||||
while (it.next()) |entry| {
|
||||
entry.value_ptr.exports.deinit(gpa);
|
||||
}
|
||||
self.decls.deinit(gpa);
|
||||
}
|
||||
}
|
||||
|
||||
pub const Export = ?usize;
|
||||
pub const base_tag = .plan9;
|
||||
pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Options) !*Plan9 {
|
||||
if (options.use_llvm)
|
||||
return error.LLVMBackendDoesNotSupportPlan9;
|
||||
@ -911,20 +969,19 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
|
||||
}
|
||||
}
|
||||
|
||||
const mod = self.base.options.module.?;
|
||||
|
||||
// write the data symbols
|
||||
{
|
||||
var it = self.data_decl_table.iterator();
|
||||
while (it.next()) |entry| {
|
||||
const decl_index = entry.key_ptr.*;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const sym = self.syms.items[decl.link.plan9.sym_index.?];
|
||||
const decl_metadata = self.decls.get(decl_index).?;
|
||||
const decl_block = self.getDeclBlock(decl_metadata.index);
|
||||
const sym = self.syms.items[decl_block.sym_index.?];
|
||||
try self.writeSym(writer, sym);
|
||||
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
|
||||
for (exports.items) |e| {
|
||||
try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
|
||||
}
|
||||
for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
|
||||
try self.writeSym(writer, self.syms.items[exp_i]);
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -943,16 +1000,17 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
|
||||
var submap_it = symidx_and_submap.functions.iterator();
|
||||
while (submap_it.next()) |entry| {
|
||||
const decl_index = entry.key_ptr.*;
|
||||
const decl = mod.declPtr(decl_index);
|
||||
const sym = self.syms.items[decl.link.plan9.sym_index.?];
|
||||
const decl_metadata = self.decls.get(decl_index).?;
|
||||
const decl_block = self.getDeclBlock(decl_metadata.index);
|
||||
const sym = self.syms.items[decl_block.sym_index.?];
|
||||
try self.writeSym(writer, sym);
|
||||
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
|
||||
for (exports.items) |e| {
|
||||
const s = self.syms.items[e.link.plan9.?];
|
||||
for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
|
||||
const s = self.syms.items[exp_i];
|
||||
if (mem.eql(u8, s.name, "_start"))
|
||||
self.entry_val = s.value;
|
||||
try self.writeSym(writer, s);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -960,10 +1018,10 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
|
||||
}
|
||||
|
||||
/// Must be called only after a successful call to `updateDecl`.
|
||||
pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void {
|
||||
pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
|
||||
_ = self;
|
||||
_ = mod;
|
||||
_ = decl;
|
||||
_ = decl_index;
|
||||
}
|
||||
|
||||
pub fn getDeclVAddr(
|
||||
@ -1004,3 +1062,11 @@ pub fn getDeclVAddr(
|
||||
});
|
||||
return undefined;
|
||||
}
|
||||
|
||||
pub fn getDeclBlock(self: *const Plan9, index: DeclBlock.Index) DeclBlock {
|
||||
return self.decl_blocks.items[index];
|
||||
}
|
||||
|
||||
fn getDeclBlockPtr(self: *Plan9, index: DeclBlock.Index) *DeclBlock {
|
||||
return &self.decl_blocks.items[index];
|
||||
}
|
||||
|
||||
@ -42,13 +42,6 @@ const SpvModule = @import("../codegen/spirv/Module.zig");
const spec = @import("../codegen/spirv/spec.zig");
const IdResult = spec.IdResult;

// TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl?
pub const FnData = struct {
// We're going to fill these in flushModule, and we're going to fill them unconditionally,
// so just set it to undefined.
id: IdResult = undefined,
};

base: link.File,

/// This linker backend does not try to incrementally link output SPIR-V code.
@ -209,16 +202,19 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
// so that we can access them before processing them.
// TODO: We're allocating an ID unconditionally now, are there
// declarations which don't generate a result?
// TODO: fn_link is used here, but thats probably not the right field. It will work anyway though.
var ids = std.AutoHashMap(Module.Decl.Index, IdResult).init(self.base.allocator);
defer ids.deinit();
try ids.ensureTotalCapacity(@intCast(u32, self.decl_table.count()));

for (self.decl_table.keys()) |decl_index| {
const decl = module.declPtr(decl_index);
if (decl.has_tv) {
decl.fn_link.spirv.id = spv.allocId();
ids.putAssumeCapacityNoClobber(decl_index, spv.allocId());
}
}

// Now, actually generate the code for all declarations.
var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv);
var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &spv, &ids);
defer decl_gen.deinit();

var it = self.decl_table.iterator();
@ -231,7 +227,7 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
const liveness = entry.value_ptr.liveness;

// Note, if `decl` is not a function, air/liveness may be undefined.
if (try decl_gen.gen(decl, air, liveness)) |msg| {
if (try decl_gen.gen(decl_index, air, liveness)) |msg| {
try module.failed_decls.put(module.gpa, decl_index, msg);
return; // TODO: Attempt to generate more decls?
}
File diff suppressed because it is too large
@ -4,7 +4,6 @@ const std = @import("std");
|
||||
const types = @import("types.zig");
|
||||
const Wasm = @import("../Wasm.zig");
|
||||
const Symbol = @import("Symbol.zig");
|
||||
const Dwarf = @import("../Dwarf.zig");
|
||||
|
||||
const leb = std.leb;
|
||||
const log = std.log.scoped(.link);
|
||||
@ -30,17 +29,17 @@ file: ?u16,
|
||||
|
||||
/// Next atom in relation to this atom.
|
||||
/// When null, this atom is the last atom
|
||||
next: ?*Atom,
|
||||
next: ?Atom.Index,
|
||||
/// Previous atom in relation to this atom.
|
||||
/// is null when this atom is the first in its order
|
||||
prev: ?*Atom,
|
||||
prev: ?Atom.Index,
|
||||
|
||||
/// Contains atoms local to a decl, all managed by this `Atom`.
|
||||
/// When the parent atom is being freed, it will also do so for all local atoms.
|
||||
locals: std.ArrayListUnmanaged(Atom) = .{},
|
||||
locals: std.ArrayListUnmanaged(Atom.Index) = .{},
|
||||
|
||||
/// Represents the debug Atom that holds all debug information of this Atom.
|
||||
dbg_info_atom: Dwarf.Atom,
|
||||
/// Alias to an unsigned 32-bit integer
|
||||
pub const Index = u32;
|
||||
|
||||
/// Represents a default empty wasm `Atom`
|
||||
pub const empty: Atom = .{
|
||||
@ -51,18 +50,15 @@ pub const empty: Atom = .{
|
||||
.prev = null,
|
||||
.size = 0,
|
||||
.sym_index = 0,
|
||||
.dbg_info_atom = undefined,
|
||||
};
|
||||
|
||||
/// Frees all resources owned by this `Atom`.
|
||||
pub fn deinit(atom: *Atom, gpa: Allocator) void {
|
||||
pub fn deinit(atom: *Atom, wasm: *Wasm) void {
|
||||
const gpa = wasm.base.allocator;
|
||||
atom.relocs.deinit(gpa);
|
||||
atom.code.deinit(gpa);
|
||||
|
||||
for (atom.locals.items) |*local| {
|
||||
local.deinit(gpa);
|
||||
}
|
||||
atom.locals.deinit(gpa);
|
||||
atom.* = undefined;
|
||||
}
|
||||
|
||||
/// Sets the length of relocations and code to '0',
|
||||
@ -83,24 +79,11 @@ pub fn format(atom: Atom, comptime fmt: []const u8, options: std.fmt.FormatOptio
|
||||
});
|
||||
}
|
||||
|
||||
/// Returns the first `Atom` from a given atom
|
||||
pub fn getFirst(atom: *Atom) *Atom {
|
||||
var tmp = atom;
|
||||
while (tmp.prev) |prev| tmp = prev;
|
||||
return tmp;
|
||||
}
|
||||
|
||||
/// Returns the location of the symbol that represents this `Atom`
|
||||
pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
|
||||
return .{ .file = atom.file, .index = atom.sym_index };
|
||||
}
|
||||
|
||||
pub fn ensureInitialized(atom: *Atom, wasm_bin: *Wasm) !void {
|
||||
if (atom.getSymbolIndex() != null) return; // already initialized
|
||||
atom.sym_index = try wasm_bin.allocateSymbol();
|
||||
try wasm_bin.symbol_atom.putNoClobber(wasm_bin.base.allocator, atom.symbolLoc(), atom);
|
||||
}
|
||||
|
||||
pub fn getSymbolIndex(atom: Atom) ?u32 {
|
||||
if (atom.sym_index == 0) return null;
|
||||
return atom.sym_index;
|
||||
@ -203,20 +186,28 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
|
||||
if (symbol.isUndefined()) {
|
||||
return 0;
|
||||
}
|
||||
const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
|
||||
const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
|
||||
// this can only occur during incremental-compilation when a relocation
|
||||
// still points to a freed decl. It is fine to emit the value 0 here
|
||||
// as no actual code will point towards it.
|
||||
return 0;
|
||||
};
|
||||
const target_atom = wasm_bin.getAtom(target_atom_index);
|
||||
const va = @intCast(i32, target_atom.getVA(wasm_bin, symbol));
|
||||
return @intCast(u32, va + relocation.addend);
|
||||
},
|
||||
.R_WASM_EVENT_INDEX_LEB => return symbol.index,
|
||||
.R_WASM_SECTION_OFFSET_I32 => {
|
||||
const target_atom = wasm_bin.symbol_atom.get(target_loc).?;
|
||||
const target_atom_index = wasm_bin.symbol_atom.get(target_loc).?;
|
||||
const target_atom = wasm_bin.getAtom(target_atom_index);
|
||||
const rel_value = @intCast(i32, target_atom.offset) + relocation.addend;
|
||||
return @intCast(u32, rel_value);
|
||||
},
|
||||
.R_WASM_FUNCTION_OFFSET_I32 => {
|
||||
const target_atom = wasm_bin.symbol_atom.get(target_loc) orelse {
|
||||
const target_atom_index = wasm_bin.symbol_atom.get(target_loc) orelse {
|
||||
return @bitCast(u32, @as(i32, -1));
|
||||
};
|
||||
const target_atom = wasm_bin.getAtom(target_atom_index);
|
||||
const offset: u32 = 11 + Wasm.getULEB128Size(target_atom.size); // Header (11 bytes fixed-size) + body size (leb-encoded)
|
||||
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
|
||||
return @intCast(u32, rel_value);
|
||||
|
||||
@ -901,14 +901,9 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
|
||||
continue; // found unknown section, so skip parsing into atom as we do not know how to handle it.
|
||||
};
|
||||
|
||||
const atom = try gpa.create(Atom);
|
||||
const atom_index = @intCast(Atom.Index, wasm_bin.managed_atoms.items.len);
|
||||
const atom = try wasm_bin.managed_atoms.addOne(gpa);
|
||||
atom.* = Atom.empty;
|
||||
errdefer {
|
||||
atom.deinit(gpa);
|
||||
gpa.destroy(atom);
|
||||
}
|
||||
|
||||
try wasm_bin.managed_atoms.append(gpa, atom);
|
||||
atom.file = object_index;
|
||||
atom.size = relocatable_data.size;
|
||||
atom.alignment = relocatable_data.getAlignment(object);
|
||||
@ -938,12 +933,12 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
|
||||
.index = relocatable_data.getIndex(),
|
||||
})) |symbols| {
|
||||
atom.sym_index = symbols.pop();
|
||||
try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom);
|
||||
try wasm_bin.symbol_atom.putNoClobber(gpa, atom.symbolLoc(), atom_index);
|
||||
|
||||
// symbols referencing the same atom will be added as alias
|
||||
// or as 'parent' when they are global.
|
||||
while (symbols.popOrNull()) |idx| {
|
||||
try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom);
|
||||
try wasm_bin.symbol_atom.putNoClobber(gpa, .{ .file = atom.file, .index = idx }, atom_index);
|
||||
const alias_symbol = object.symtable[idx];
|
||||
if (alias_symbol.isGlobal()) {
|
||||
atom.sym_index = idx;
|
||||
@ -956,7 +951,7 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
|
||||
segment.alignment = std.math.max(segment.alignment, atom.alignment);
|
||||
}
|
||||
|
||||
try wasm_bin.appendAtomAtIndex(final_index, atom);
|
||||
try wasm_bin.appendAtomAtIndex(final_index, atom_index);
|
||||
log.debug("Parsed into atom: '{s}' at segment index {d}", .{ object.string_table.get(object.symtable[atom.sym_index].name), final_index });
|
||||
}
|
||||
}
|
||||
|
||||
27
src/main.zig
27
src/main.zig
@ -3915,6 +3915,7 @@ pub const usage_build =
|
||||
;
|
||||
|
||||
pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
|
||||
var color: Color = .auto;
|
||||
var prominent_compile_errors: bool = false;
|
||||
|
||||
// We want to release all the locks before executing the child process, so we make a nice
|
||||
@ -4117,6 +4118,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
|
||||
// Here we borrow main package's table and will replace it with a fresh
|
||||
// one after this process completes.
|
||||
main_pkg.fetchAndAddDependencies(
|
||||
arena,
|
||||
&thread_pool,
|
||||
&http_client,
|
||||
build_directory,
|
||||
@ -4125,6 +4127,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
|
||||
&dependencies_source,
|
||||
&build_roots_source,
|
||||
"",
|
||||
color,
|
||||
) catch |err| switch (err) {
|
||||
error.PackageFetchFailed => process.exit(1),
|
||||
else => |e| return e,
|
||||
@ -4361,12 +4364,12 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
|
||||
};
|
||||
defer gpa.free(source_code);
|
||||
|
||||
var tree = std.zig.parse(gpa, source_code) catch |err| {
|
||||
var tree = Ast.parse(gpa, source_code, .zig) catch |err| {
|
||||
fatal("error parsing stdin: {}", .{err});
|
||||
};
|
||||
defer tree.deinit(gpa);
|
||||
|
||||
try printErrsMsgToStdErr(gpa, arena, tree.errors, tree, "<stdin>", color);
|
||||
try printErrsMsgToStdErr(gpa, arena, tree, "<stdin>", color);
|
||||
var has_ast_error = false;
|
||||
if (check_ast_flag) {
|
||||
const Module = @import("Module.zig");
|
||||
@ -4566,10 +4569,10 @@ fn fmtPathFile(
|
||||
// Add to set after no longer possible to get error.IsDir.
|
||||
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
|
||||
|
||||
var tree = try std.zig.parse(fmt.gpa, source_code);
|
||||
var tree = try Ast.parse(fmt.gpa, source_code, .zig);
|
||||
defer tree.deinit(fmt.gpa);
|
||||
|
||||
try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree.errors, tree, file_path, fmt.color);
|
||||
try printErrsMsgToStdErr(fmt.gpa, fmt.arena, tree, file_path, fmt.color);
|
||||
if (tree.errors.len != 0) {
|
||||
fmt.any_error = true;
|
||||
return;
|
||||
@ -4649,14 +4652,14 @@ fn fmtPathFile(
|
||||
}
|
||||
}
|
||||
|
||||
fn printErrsMsgToStdErr(
|
||||
pub fn printErrsMsgToStdErr(
|
||||
gpa: mem.Allocator,
|
||||
arena: mem.Allocator,
|
||||
parse_errors: []const Ast.Error,
|
||||
tree: Ast,
|
||||
path: []const u8,
|
||||
color: Color,
|
||||
) !void {
|
||||
const parse_errors: []const Ast.Error = tree.errors;
|
||||
var i: usize = 0;
|
||||
while (i < parse_errors.len) : (i += 1) {
|
||||
const parse_error = parse_errors[i];
|
||||
@ -5312,11 +5315,11 @@ pub fn cmdAstCheck(
|
||||
file.pkg = try Package.create(gpa, "root", null, file.sub_file_path);
|
||||
defer file.pkg.destroy(gpa);
|
||||
|
||||
file.tree = try std.zig.parse(gpa, file.source);
|
||||
file.tree = try Ast.parse(gpa, file.source, .zig);
|
||||
file.tree_loaded = true;
|
||||
defer file.tree.deinit(gpa);
|
||||
|
||||
try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, file.sub_file_path, color);
|
||||
try printErrsMsgToStdErr(gpa, arena, file.tree, file.sub_file_path, color);
|
||||
if (file.tree.errors.len != 0) {
|
||||
process.exit(1);
|
||||
}
|
||||
@ -5438,11 +5441,11 @@ pub fn cmdChangelist(
|
||||
file.source = source;
|
||||
file.source_loaded = true;
|
||||
|
||||
file.tree = try std.zig.parse(gpa, file.source);
|
||||
file.tree = try Ast.parse(gpa, file.source, .zig);
|
||||
file.tree_loaded = true;
|
||||
defer file.tree.deinit(gpa);
|
||||
|
||||
try printErrsMsgToStdErr(gpa, arena, file.tree.errors, file.tree, old_source_file, .auto);
|
||||
try printErrsMsgToStdErr(gpa, arena, file.tree, old_source_file, .auto);
|
||||
if (file.tree.errors.len != 0) {
|
||||
process.exit(1);
|
||||
}
|
||||
@ -5476,10 +5479,10 @@ pub fn cmdChangelist(
|
||||
if (new_amt != new_stat.size)
|
||||
return error.UnexpectedEndOfFile;
|
||||
|
||||
var new_tree = try std.zig.parse(gpa, new_source);
|
||||
var new_tree = try Ast.parse(gpa, new_source, .zig);
|
||||
defer new_tree.deinit(gpa);
|
||||
|
||||
try printErrsMsgToStdErr(gpa, arena, new_tree.errors, new_tree, new_source_file, .auto);
|
||||
try printErrsMsgToStdErr(gpa, arena, new_tree, new_source_file, .auto);
|
||||
if (new_tree.errors.len != 0) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.