Merge remote-tracking branch 'origin/master' into llvm16

This commit is contained in:
Andrew Kelley 2023-02-27 14:11:29 -07:00
commit d399f8a489
122 changed files with 15938 additions and 3837 deletions

View File

@ -434,13 +434,12 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log10.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/log10.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log2.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/log2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/modti3.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/modti3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulXi3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldf3.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldi3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulf3.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulo.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulo.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulsf3.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulsf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/multf3.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/multf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/multi3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulxf3.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negXi2.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negXi2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/negv.zig" "${CMAKE_SOURCE_DIR}/lib/compiler_rt/negv.zig"
@ -569,6 +568,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig" "${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
"${CMAKE_SOURCE_DIR}/src/codegen.zig" "${CMAKE_SOURCE_DIR}/src/codegen.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/c.zig" "${CMAKE_SOURCE_DIR}/src/codegen/c.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/c/type.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig" "${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig" "${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig"
"${CMAKE_SOURCE_DIR}/src/glibc.zig" "${CMAKE_SOURCE_DIR}/src/glibc.zig"
@ -612,7 +612,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/tapi.zig" "${CMAKE_SOURCE_DIR}/src/link/tapi.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/Tokenizer.zig" "${CMAKE_SOURCE_DIR}/src/link/tapi/Tokenizer.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/parse.zig" "${CMAKE_SOURCE_DIR}/src/link/tapi/parse.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/parse/test.zig"
"${CMAKE_SOURCE_DIR}/src/link/tapi/yaml.zig" "${CMAKE_SOURCE_DIR}/src/link/tapi/yaml.zig"
"${CMAKE_SOURCE_DIR}/src/main.zig" "${CMAKE_SOURCE_DIR}/src/main.zig"
"${CMAKE_SOURCE_DIR}/src/mingw.zig" "${CMAKE_SOURCE_DIR}/src/mingw.zig"
@ -748,10 +747,11 @@ set(BUILD_ZIG2_ARGS
build-exe src/main.zig -ofmt=c -lc build-exe src/main.zig -ofmt=c -lc
-OReleaseSmall -OReleaseSmall
--name zig2 -femit-bin="${ZIG2_C_SOURCE}" --name zig2 -femit-bin="${ZIG2_C_SOURCE}"
--pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}" --pkg-end --mod "build_options::${ZIG_CONFIG_ZIG_OUT}"
--deps build_options
-target "${HOST_TARGET_TRIPLE}" -target "${HOST_TARGET_TRIPLE}"
) )
add_custom_command( add_custom_command(
OUTPUT "${ZIG2_C_SOURCE}" OUTPUT "${ZIG2_C_SOURCE}"
COMMAND zig1 ${BUILD_ZIG2_ARGS} COMMAND zig1 ${BUILD_ZIG2_ARGS}
@ -765,10 +765,11 @@ set(BUILD_COMPILER_RT_ARGS
build-obj lib/compiler_rt.zig -ofmt=c build-obj lib/compiler_rt.zig -ofmt=c
-OReleaseSmall -OReleaseSmall
--name compiler_rt -femit-bin="${ZIG_COMPILER_RT_C_SOURCE}" --name compiler_rt -femit-bin="${ZIG_COMPILER_RT_C_SOURCE}"
--pkg-begin build_options "${ZIG_CONFIG_ZIG_OUT}" --pkg-end --mod "build_options::${ZIG_CONFIG_ZIG_OUT}"
--deps build_options
-target "${HOST_TARGET_TRIPLE}" -target "${HOST_TARGET_TRIPLE}"
) )
add_custom_command( add_custom_command(
OUTPUT "${ZIG_COMPILER_RT_C_SOURCE}" OUTPUT "${ZIG_COMPILER_RT_C_SOURCE}"
COMMAND zig1 ${BUILD_COMPILER_RT_ARGS} COMMAND zig1 ${BUILD_COMPILER_RT_ARGS}
@ -782,7 +783,7 @@ set_target_properties(zig2 PROPERTIES
COMPILE_FLAGS ${ZIG2_COMPILE_FLAGS} COMPILE_FLAGS ${ZIG2_COMPILE_FLAGS}
LINK_FLAGS ${ZIG2_LINK_FLAGS} LINK_FLAGS ${ZIG2_LINK_FLAGS}
) )
target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/lib") target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/stage1")
target_link_libraries(zig2 LINK_PUBLIC zigcpp) target_link_libraries(zig2 LINK_PUBLIC zigcpp)
if(MSVC) if(MSVC)

View File

@ -118,8 +118,11 @@ pub fn build(b: *std.Build) !void {
".gz", ".gz",
".z.0", ".z.0",
".z.9", ".z.9",
".zstd.3",
".zstd.19",
"rfc1951.txt", "rfc1951.txt",
"rfc1952.txt", "rfc1952.txt",
"rfc8478.txt",
// exclude files from lib/std/compress/deflate/testdata // exclude files from lib/std/compress/deflate/testdata
".expect", ".expect",
".expect-noinput", ".expect-noinput",
@ -513,8 +516,12 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
run_opt.addArg("-o"); run_opt.addArg("-o");
run_opt.addFileSourceArg(.{ .path = "stage1/zig1.wasm" }); run_opt.addFileSourceArg(.{ .path = "stage1/zig1.wasm" });
const copy_zig_h = b.addWriteFiles();
copy_zig_h.addCopyFileToSource(.{ .path = "lib/zig.h" }, "stage1/zig.h");
const update_zig1_step = b.step("update-zig1", "Update stage1/zig1.wasm"); const update_zig1_step = b.step("update-zig1", "Update stage1/zig1.wasm");
update_zig1_step.dependOn(&run_opt.step); update_zig1_step.dependOn(&run_opt.step);
update_zig1_step.dependOn(&copy_zig_h.step);
} }
fn addCompilerStep( fn addCompilerStep(

View File

@ -87,7 +87,8 @@ CheckLastExitCode
-OReleaseSmall ` -OReleaseSmall `
--name compiler_rt ` --name compiler_rt `
-femit-bin="compiler_rt-x86_64-windows-msvc.c" ` -femit-bin="compiler_rt-x86_64-windows-msvc.c" `
--pkg-begin build_options config.zig --pkg-end ` --mod build_options::config.zig `
--deps build_options `
-target x86_64-windows-msvc -target x86_64-windows-msvc
CheckLastExitCode CheckLastExitCode

View File

@ -87,7 +87,8 @@ CheckLastExitCode
-OReleaseSmall ` -OReleaseSmall `
--name compiler_rt ` --name compiler_rt `
-femit-bin="compiler_rt-x86_64-windows-msvc.c" ` -femit-bin="compiler_rt-x86_64-windows-msvc.c" `
--pkg-begin build_options config.zig --pkg-end ` --mod build_options::config.zig `
--deps build_options `
-target x86_64-windows-msvc -target x86_64-windows-msvc
CheckLastExitCode CheckLastExitCode

View File

@ -3,64 +3,34 @@ const builtin = @import("builtin");
pub const panic = @import("compiler_rt/common.zig").panic; pub const panic = @import("compiler_rt/common.zig").panic;
comptime { comptime {
_ = @import("compiler_rt/addf3.zig"); // Integer routines
_ = @import("compiler_rt/addhf3.zig"); _ = @import("compiler_rt/count0bits.zig");
_ = @import("compiler_rt/addsf3.zig"); _ = @import("compiler_rt/parity.zig");
_ = @import("compiler_rt/adddf3.zig"); _ = @import("compiler_rt/popcount.zig");
_ = @import("compiler_rt/addtf3.zig"); _ = @import("compiler_rt/bswap.zig");
_ = @import("compiler_rt/addxf3.zig"); _ = @import("compiler_rt/cmp.zig");
_ = @import("compiler_rt/subhf3.zig"); _ = @import("compiler_rt/shift.zig");
_ = @import("compiler_rt/subsf3.zig"); _ = @import("compiler_rt/negXi2.zig");
_ = @import("compiler_rt/subdf3.zig"); _ = @import("compiler_rt/int.zig");
_ = @import("compiler_rt/subtf3.zig"); _ = @import("compiler_rt/mulXi3.zig");
_ = @import("compiler_rt/subxf3.zig"); _ = @import("compiler_rt/divti3.zig");
_ = @import("compiler_rt/udivti3.zig");
_ = @import("compiler_rt/modti3.zig");
_ = @import("compiler_rt/umodti3.zig");
_ = @import("compiler_rt/mulf3.zig"); _ = @import("compiler_rt/absv.zig");
_ = @import("compiler_rt/mulhf3.zig"); _ = @import("compiler_rt/absvsi2.zig");
_ = @import("compiler_rt/mulsf3.zig"); _ = @import("compiler_rt/absvdi2.zig");
_ = @import("compiler_rt/muldf3.zig"); _ = @import("compiler_rt/absvti2.zig");
_ = @import("compiler_rt/multf3.zig"); _ = @import("compiler_rt/negv.zig");
_ = @import("compiler_rt/mulxf3.zig");
_ = @import("compiler_rt/powiXf2.zig"); _ = @import("compiler_rt/addo.zig");
_ = @import("compiler_rt/mulc3.zig"); _ = @import("compiler_rt/subo.zig");
_ = @import("compiler_rt/mulhc3.zig"); _ = @import("compiler_rt/mulo.zig");
_ = @import("compiler_rt/mulsc3.zig");
_ = @import("compiler_rt/muldc3.zig");
_ = @import("compiler_rt/mulxc3.zig");
_ = @import("compiler_rt/multc3.zig");
_ = @import("compiler_rt/divc3.zig");
_ = @import("compiler_rt/divhc3.zig");
_ = @import("compiler_rt/divsc3.zig");
_ = @import("compiler_rt/divdc3.zig");
_ = @import("compiler_rt/divxc3.zig");
_ = @import("compiler_rt/divtc3.zig");
_ = @import("compiler_rt/neghf2.zig");
_ = @import("compiler_rt/negsf2.zig");
_ = @import("compiler_rt/negdf2.zig");
_ = @import("compiler_rt/negtf2.zig");
_ = @import("compiler_rt/negxf2.zig");
_ = @import("compiler_rt/comparef.zig");
_ = @import("compiler_rt/cmphf2.zig");
_ = @import("compiler_rt/cmpsf2.zig");
_ = @import("compiler_rt/cmpdf2.zig");
_ = @import("compiler_rt/cmptf2.zig");
_ = @import("compiler_rt/cmpxf2.zig");
_ = @import("compiler_rt/gehf2.zig");
_ = @import("compiler_rt/gesf2.zig");
_ = @import("compiler_rt/gedf2.zig");
_ = @import("compiler_rt/gexf2.zig");
_ = @import("compiler_rt/getf2.zig");
_ = @import("compiler_rt/unordhf2.zig");
_ = @import("compiler_rt/unordsf2.zig");
_ = @import("compiler_rt/unorddf2.zig");
_ = @import("compiler_rt/unordxf2.zig");
_ = @import("compiler_rt/unordtf2.zig");
// Float routines
// conversion
_ = @import("compiler_rt/extendf.zig"); _ = @import("compiler_rt/extendf.zig");
_ = @import("compiler_rt/extendhfsf2.zig"); _ = @import("compiler_rt/extendhfsf2.zig");
_ = @import("compiler_rt/extendhfdf2.zig"); _ = @import("compiler_rt/extendhfdf2.zig");
@ -85,70 +55,6 @@ comptime {
_ = @import("compiler_rt/trunctfdf2.zig"); _ = @import("compiler_rt/trunctfdf2.zig");
_ = @import("compiler_rt/trunctfxf2.zig"); _ = @import("compiler_rt/trunctfxf2.zig");
_ = @import("compiler_rt/divhf3.zig");
_ = @import("compiler_rt/divsf3.zig");
_ = @import("compiler_rt/divdf3.zig");
_ = @import("compiler_rt/divxf3.zig");
_ = @import("compiler_rt/divtf3.zig");
_ = @import("compiler_rt/sin.zig");
_ = @import("compiler_rt/cos.zig");
_ = @import("compiler_rt/sincos.zig");
_ = @import("compiler_rt/ceil.zig");
_ = @import("compiler_rt/exp.zig");
_ = @import("compiler_rt/exp2.zig");
_ = @import("compiler_rt/fabs.zig");
_ = @import("compiler_rt/floor.zig");
_ = @import("compiler_rt/fma.zig");
_ = @import("compiler_rt/fmax.zig");
_ = @import("compiler_rt/fmin.zig");
_ = @import("compiler_rt/fmod.zig");
_ = @import("compiler_rt/log.zig");
_ = @import("compiler_rt/log10.zig");
_ = @import("compiler_rt/log2.zig");
_ = @import("compiler_rt/round.zig");
_ = @import("compiler_rt/sqrt.zig");
_ = @import("compiler_rt/tan.zig");
_ = @import("compiler_rt/trunc.zig");
_ = @import("compiler_rt/divti3.zig");
_ = @import("compiler_rt/modti3.zig");
_ = @import("compiler_rt/multi3.zig");
_ = @import("compiler_rt/udivti3.zig");
_ = @import("compiler_rt/udivmodei4.zig");
_ = @import("compiler_rt/udivmodti4.zig");
_ = @import("compiler_rt/umodti3.zig");
_ = @import("compiler_rt/int_to_float.zig");
_ = @import("compiler_rt/floatsihf.zig");
_ = @import("compiler_rt/floatsisf.zig");
_ = @import("compiler_rt/floatsidf.zig");
_ = @import("compiler_rt/floatsitf.zig");
_ = @import("compiler_rt/floatsixf.zig");
_ = @import("compiler_rt/floatdihf.zig");
_ = @import("compiler_rt/floatdisf.zig");
_ = @import("compiler_rt/floatdidf.zig");
_ = @import("compiler_rt/floatditf.zig");
_ = @import("compiler_rt/floatdixf.zig");
_ = @import("compiler_rt/floattihf.zig");
_ = @import("compiler_rt/floattisf.zig");
_ = @import("compiler_rt/floattidf.zig");
_ = @import("compiler_rt/floattitf.zig");
_ = @import("compiler_rt/floattixf.zig");
_ = @import("compiler_rt/floatundihf.zig");
_ = @import("compiler_rt/floatundisf.zig");
_ = @import("compiler_rt/floatundidf.zig");
_ = @import("compiler_rt/floatunditf.zig");
_ = @import("compiler_rt/floatundixf.zig");
_ = @import("compiler_rt/floatunsihf.zig");
_ = @import("compiler_rt/floatunsisf.zig");
_ = @import("compiler_rt/floatunsidf.zig");
_ = @import("compiler_rt/floatunsitf.zig");
_ = @import("compiler_rt/floatunsixf.zig");
_ = @import("compiler_rt/floatuntihf.zig");
_ = @import("compiler_rt/floatuntisf.zig");
_ = @import("compiler_rt/floatuntidf.zig");
_ = @import("compiler_rt/floatuntitf.zig");
_ = @import("compiler_rt/floatuntixf.zig");
_ = @import("compiler_rt/float_to_int.zig"); _ = @import("compiler_rt/float_to_int.zig");
_ = @import("compiler_rt/fixhfsi.zig"); _ = @import("compiler_rt/fixhfsi.zig");
_ = @import("compiler_rt/fixhfdi.zig"); _ = @import("compiler_rt/fixhfdi.zig");
@ -181,28 +87,131 @@ comptime {
_ = @import("compiler_rt/fixunsxfdi.zig"); _ = @import("compiler_rt/fixunsxfdi.zig");
_ = @import("compiler_rt/fixunsxfti.zig"); _ = @import("compiler_rt/fixunsxfti.zig");
_ = @import("compiler_rt/count0bits.zig"); _ = @import("compiler_rt/int_to_float.zig");
_ = @import("compiler_rt/parity.zig"); _ = @import("compiler_rt/floatsihf.zig");
_ = @import("compiler_rt/popcount.zig"); _ = @import("compiler_rt/floatsisf.zig");
_ = @import("compiler_rt/bswap.zig"); _ = @import("compiler_rt/floatsidf.zig");
_ = @import("compiler_rt/int.zig"); _ = @import("compiler_rt/floatsitf.zig");
_ = @import("compiler_rt/shift.zig"); _ = @import("compiler_rt/floatsixf.zig");
_ = @import("compiler_rt/floatdihf.zig");
_ = @import("compiler_rt/floatdisf.zig");
_ = @import("compiler_rt/floatdidf.zig");
_ = @import("compiler_rt/floatditf.zig");
_ = @import("compiler_rt/floatdixf.zig");
_ = @import("compiler_rt/floattihf.zig");
_ = @import("compiler_rt/floattisf.zig");
_ = @import("compiler_rt/floattidf.zig");
_ = @import("compiler_rt/floattitf.zig");
_ = @import("compiler_rt/floattixf.zig");
_ = @import("compiler_rt/floatundihf.zig");
_ = @import("compiler_rt/floatundisf.zig");
_ = @import("compiler_rt/floatundidf.zig");
_ = @import("compiler_rt/floatunditf.zig");
_ = @import("compiler_rt/floatundixf.zig");
_ = @import("compiler_rt/floatunsihf.zig");
_ = @import("compiler_rt/floatunsisf.zig");
_ = @import("compiler_rt/floatunsidf.zig");
_ = @import("compiler_rt/floatunsitf.zig");
_ = @import("compiler_rt/floatunsixf.zig");
_ = @import("compiler_rt/floatuntihf.zig");
_ = @import("compiler_rt/floatuntisf.zig");
_ = @import("compiler_rt/floatuntidf.zig");
_ = @import("compiler_rt/floatuntitf.zig");
_ = @import("compiler_rt/floatuntixf.zig");
_ = @import("compiler_rt/negXi2.zig"); // comparison
_ = @import("compiler_rt/comparef.zig");
_ = @import("compiler_rt/cmphf2.zig");
_ = @import("compiler_rt/cmpsf2.zig");
_ = @import("compiler_rt/cmpdf2.zig");
_ = @import("compiler_rt/cmptf2.zig");
_ = @import("compiler_rt/cmpxf2.zig");
_ = @import("compiler_rt/unordhf2.zig");
_ = @import("compiler_rt/unordsf2.zig");
_ = @import("compiler_rt/unorddf2.zig");
_ = @import("compiler_rt/unordxf2.zig");
_ = @import("compiler_rt/unordtf2.zig");
_ = @import("compiler_rt/gehf2.zig");
_ = @import("compiler_rt/gesf2.zig");
_ = @import("compiler_rt/gedf2.zig");
_ = @import("compiler_rt/gexf2.zig");
_ = @import("compiler_rt/getf2.zig");
_ = @import("compiler_rt/muldi3.zig"); // arithmetic
_ = @import("compiler_rt/addf3.zig");
_ = @import("compiler_rt/addhf3.zig");
_ = @import("compiler_rt/addsf3.zig");
_ = @import("compiler_rt/adddf3.zig");
_ = @import("compiler_rt/addtf3.zig");
_ = @import("compiler_rt/addxf3.zig");
_ = @import("compiler_rt/absv.zig"); _ = @import("compiler_rt/subhf3.zig");
_ = @import("compiler_rt/absvsi2.zig"); _ = @import("compiler_rt/subsf3.zig");
_ = @import("compiler_rt/absvdi2.zig"); _ = @import("compiler_rt/subdf3.zig");
_ = @import("compiler_rt/absvti2.zig"); _ = @import("compiler_rt/subtf3.zig");
_ = @import("compiler_rt/subxf3.zig");
_ = @import("compiler_rt/negv.zig"); _ = @import("compiler_rt/mulf3.zig");
_ = @import("compiler_rt/addo.zig"); _ = @import("compiler_rt/mulhf3.zig");
_ = @import("compiler_rt/subo.zig"); _ = @import("compiler_rt/mulsf3.zig");
_ = @import("compiler_rt/mulo.zig"); _ = @import("compiler_rt/muldf3.zig");
_ = @import("compiler_rt/cmp.zig"); _ = @import("compiler_rt/multf3.zig");
_ = @import("compiler_rt/mulxf3.zig");
_ = @import("compiler_rt/divhf3.zig");
_ = @import("compiler_rt/divsf3.zig");
_ = @import("compiler_rt/divdf3.zig");
_ = @import("compiler_rt/divxf3.zig");
_ = @import("compiler_rt/divtf3.zig");
_ = @import("compiler_rt/neghf2.zig");
_ = @import("compiler_rt/negsf2.zig");
_ = @import("compiler_rt/negdf2.zig");
_ = @import("compiler_rt/negtf2.zig");
_ = @import("compiler_rt/negxf2.zig");
// other
_ = @import("compiler_rt/powiXf2.zig");
_ = @import("compiler_rt/mulc3.zig");
_ = @import("compiler_rt/mulhc3.zig");
_ = @import("compiler_rt/mulsc3.zig");
_ = @import("compiler_rt/muldc3.zig");
_ = @import("compiler_rt/mulxc3.zig");
_ = @import("compiler_rt/multc3.zig");
_ = @import("compiler_rt/divc3.zig");
_ = @import("compiler_rt/divhc3.zig");
_ = @import("compiler_rt/divsc3.zig");
_ = @import("compiler_rt/divdc3.zig");
_ = @import("compiler_rt/divxc3.zig");
_ = @import("compiler_rt/divtc3.zig");
// Math routines. Alphabetically sorted.
_ = @import("compiler_rt/ceil.zig");
_ = @import("compiler_rt/cos.zig");
_ = @import("compiler_rt/exp.zig");
_ = @import("compiler_rt/exp2.zig");
_ = @import("compiler_rt/fabs.zig");
_ = @import("compiler_rt/floor.zig");
_ = @import("compiler_rt/fma.zig");
_ = @import("compiler_rt/fmax.zig");
_ = @import("compiler_rt/fmin.zig");
_ = @import("compiler_rt/fmod.zig");
_ = @import("compiler_rt/log.zig");
_ = @import("compiler_rt/log10.zig");
_ = @import("compiler_rt/log2.zig");
_ = @import("compiler_rt/round.zig");
_ = @import("compiler_rt/sin.zig");
_ = @import("compiler_rt/sincos.zig");
_ = @import("compiler_rt/sqrt.zig");
_ = @import("compiler_rt/tan.zig");
_ = @import("compiler_rt/trunc.zig");
// BigInt. Alphabetically sorted.
_ = @import("compiler_rt/udivmodei4.zig");
_ = @import("compiler_rt/udivmodti4.zig");
// extra
_ = @import("compiler_rt/os_version_check.zig"); _ = @import("compiler_rt/os_version_check.zig");
_ = @import("compiler_rt/emutls.zig"); _ = @import("compiler_rt/emutls.zig");
_ = @import("compiler_rt/arm.zig"); _ = @import("compiler_rt/arm.zig");

View File

@ -331,7 +331,7 @@ Integer and Float Operations
| ✓ | __negdf2 | f64 | ∅ | f64 | .. | | ✓ | __negdf2 | f64 | ∅ | f64 | .. |
| ✓ | __negtf2 | f128 | ∅ | f128 | .. | | ✓ | __negtf2 | f128 | ∅ | f128 | .. |
| ✓ | __negxf2 | f80 | ∅ | f80 | .. | | ✓ | __negxf2 | f80 | ∅ | f80 | .. |
| | | | | | **Floating point raised to integer power** | | | | | | | **Other** |
| ✓ | __powihf2 | f16 | i32 | f16 | `a ^ b` | | ✓ | __powihf2 | f16 | i32 | f16 | `a ^ b` |
| ✓ | __powisf2 | f32 | i32 | f32 | .. | | ✓ | __powisf2 | f32 | i32 | f32 | .. |
| ✓ | __powidf2 | f64 | i32 | f64 | .. | | ✓ | __powidf2 | f64 | i32 | f64 | .. |

View File

@ -1,5 +1,6 @@
const std = @import("std"); const std = @import("std");
const builtin = @import("builtin"); const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak; pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
/// Determines the symbol's visibility to other objects. /// Determines the symbol's visibility to other objects.
@ -221,3 +222,20 @@ pub inline fn fneg(a: anytype) @TypeOf(a) {
const negated = @bitCast(U, a) ^ sign_bit_mask; const negated = @bitCast(U, a) ^ sign_bit_mask;
return @bitCast(F, negated); return @bitCast(F, negated);
} }
/// Allows to access underlying bits as two equally sized lower and higher
/// signed or unsigned integers.
pub fn HalveInt(comptime T: type, comptime signed_half: bool) type {
return extern union {
pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
pub const HalfTU = std.meta.Int(.unsigned, bits);
pub const HalfTS = std.meta.Int(.signed, bits);
pub const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
s: if (native_endian == .Little)
extern struct { low: HalfT, high: HalfT }
else
extern struct { high: HalfT, low: HalfT },
};
}

View File

@ -16,7 +16,6 @@ pub const panic = common.panic;
comptime { comptime {
@export(__divmodti4, .{ .name = "__divmodti4", .linkage = common.linkage, .visibility = common.visibility }); @export(__divmodti4, .{ .name = "__divmodti4", .linkage = common.linkage, .visibility = common.visibility });
@export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = common.linkage, .visibility = common.visibility }); @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = common.linkage, .visibility = common.visibility });
@export(__mulsi3, .{ .name = "__mulsi3", .linkage = common.linkage, .visibility = common.visibility });
@export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = common.linkage, .visibility = common.visibility }); @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_aeabi) { if (common.want_aeabi) {
@export(__aeabi_idiv, .{ .name = "__aeabi_idiv", .linkage = common.linkage, .visibility = common.visibility }); @export(__aeabi_idiv, .{ .name = "__aeabi_idiv", .linkage = common.linkage, .visibility = common.visibility });
@ -663,59 +662,3 @@ fn test_one_umodsi3(a: u32, b: u32, expected_r: u32) !void {
const r: u32 = __umodsi3(a, b); const r: u32 = __umodsi3(a, b);
try testing.expect(r == expected_r); try testing.expect(r == expected_r);
} }
pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
var ua = @bitCast(u32, a);
var ub = @bitCast(u32, b);
var r: u32 = 0;
while (ua > 0) {
if ((ua & 1) != 0) r +%= ub;
ua >>= 1;
ub <<= 1;
}
return @bitCast(i32, r);
}
fn test_one_mulsi3(a: i32, b: i32, result: i32) !void {
try testing.expectEqual(result, __mulsi3(a, b));
}
test "mulsi3" {
try test_one_mulsi3(0, 0, 0);
try test_one_mulsi3(0, 1, 0);
try test_one_mulsi3(1, 0, 0);
try test_one_mulsi3(0, 10, 0);
try test_one_mulsi3(10, 0, 0);
try test_one_mulsi3(0, maxInt(i32), 0);
try test_one_mulsi3(maxInt(i32), 0, 0);
try test_one_mulsi3(0, -1, 0);
try test_one_mulsi3(-1, 0, 0);
try test_one_mulsi3(0, -10, 0);
try test_one_mulsi3(-10, 0, 0);
try test_one_mulsi3(0, minInt(i32), 0);
try test_one_mulsi3(minInt(i32), 0, 0);
try test_one_mulsi3(1, 1, 1);
try test_one_mulsi3(1, 10, 10);
try test_one_mulsi3(10, 1, 10);
try test_one_mulsi3(1, maxInt(i32), maxInt(i32));
try test_one_mulsi3(maxInt(i32), 1, maxInt(i32));
try test_one_mulsi3(1, -1, -1);
try test_one_mulsi3(1, -10, -10);
try test_one_mulsi3(-10, 1, -10);
try test_one_mulsi3(1, minInt(i32), minInt(i32));
try test_one_mulsi3(minInt(i32), 1, minInt(i32));
try test_one_mulsi3(46340, 46340, 2147395600);
try test_one_mulsi3(-46340, 46340, -2147395600);
try test_one_mulsi3(46340, -46340, -2147395600);
try test_one_mulsi3(-46340, -46340, 2147395600);
try test_one_mulsi3(4194303, 8192, @truncate(i32, 34359730176));
try test_one_mulsi3(-4194303, 8192, @truncate(i32, -34359730176));
try test_one_mulsi3(4194303, -8192, @truncate(i32, -34359730176));
try test_one_mulsi3(-4194303, -8192, @truncate(i32, 34359730176));
try test_one_mulsi3(8192, 4194303, @truncate(i32, 34359730176));
try test_one_mulsi3(-8192, 4194303, @truncate(i32, -34359730176));
try test_one_mulsi3(8192, -4194303, @truncate(i32, -34359730176));
try test_one_mulsi3(-8192, -4194303, @truncate(i32, 34359730176));
}

101
lib/compiler_rt/mulXi3.zig Normal file
View File

@ -0,0 +1,101 @@
const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const common = @import("common.zig");
const native_endian = builtin.cpu.arch.endian();
pub const panic = common.panic;
comptime {
@export(__mulsi3, .{ .name = "__mulsi3", .linkage = common.linkage, .visibility = common.visibility });
if (common.want_aeabi) {
@export(__aeabi_lmul, .{ .name = "__aeabi_lmul", .linkage = common.linkage, .visibility = common.visibility });
} else {
@export(__muldi3, .{ .name = "__muldi3", .linkage = common.linkage, .visibility = common.visibility });
}
if (common.want_windows_v2u64_abi) {
@export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage, .visibility = common.visibility });
} else {
@export(__multi3, .{ .name = "__multi3", .linkage = common.linkage, .visibility = common.visibility });
}
}
pub fn __mulsi3(a: i32, b: i32) callconv(.C) i32 {
var ua = @bitCast(u32, a);
var ub = @bitCast(u32, b);
var r: u32 = 0;
while (ua > 0) {
if ((ua & 1) != 0) r +%= ub;
ua >>= 1;
ub <<= 1;
}
return @bitCast(i32, r);
}
pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
return mulX(i64, a, b);
}
fn __aeabi_lmul(a: i64, b: i64) callconv(.AAPCS) i64 {
return mulX(i64, a, b);
}
inline fn mulX(comptime T: type, a: T, b: T) T {
const word_t = common.HalveInt(T, false);
const x = word_t{ .all = a };
const y = word_t{ .all = b };
var r = switch (T) {
i64, i128 => word_t{ .all = muldXi(word_t.HalfT, x.s.low, y.s.low) },
else => unreachable,
};
r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
return r.all;
}
fn DoubleInt(comptime T: type) type {
return switch (T) {
u32 => i64,
u64 => i128,
i32 => i64,
i64 => i128,
else => unreachable,
};
}
fn muldXi(comptime T: type, a: T, b: T) DoubleInt(T) {
const DT = DoubleInt(T);
const word_t = common.HalveInt(DT, false);
const bits_in_word_2 = @sizeOf(T) * 8 / 2;
const lower_mask = (~@as(T, 0)) >> bits_in_word_2;
var r: word_t = undefined;
r.s.low = (a & lower_mask) *% (b & lower_mask);
var t: T = r.s.low >> bits_in_word_2;
r.s.low &= lower_mask;
t += (a >> bits_in_word_2) *% (b & lower_mask);
r.s.low +%= (t & lower_mask) << bits_in_word_2;
r.s.high = t >> bits_in_word_2;
t = r.s.low >> bits_in_word_2;
r.s.low &= lower_mask;
t +%= (b >> bits_in_word_2) *% (a & lower_mask);
r.s.low +%= (t & lower_mask) << bits_in_word_2;
r.s.high +%= t >> bits_in_word_2;
r.s.high +%= (a >> bits_in_word_2) *% (b >> bits_in_word_2);
return r.all;
}
pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
return mulX(i128, a, b);
}
const v2u64 = @Vector(2, u64);
fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @bitCast(v2u64, mulX(i128, @bitCast(i128, a), @bitCast(i128, b)));
}
test {
_ = @import("mulXi3_test.zig");
}

View File

@ -0,0 +1,147 @@
const std = @import("std");
const testing = std.testing;
const mulXi3 = @import("mulXi3.zig");
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
fn test_one_mulsi3(a: i32, b: i32, result: i32) !void {
try testing.expectEqual(result, mulXi3.__mulsi3(a, b));
}
fn test__muldi3(a: i64, b: i64, expected: i64) !void {
const x = mulXi3.__muldi3(a, b);
try testing.expect(x == expected);
}
fn test__multi3(a: i128, b: i128, expected: i128) !void {
const x = mulXi3.__multi3(a, b);
try testing.expect(x == expected);
}
test "mulsi3" {
try test_one_mulsi3(0, 0, 0);
try test_one_mulsi3(0, 1, 0);
try test_one_mulsi3(1, 0, 0);
try test_one_mulsi3(0, 10, 0);
try test_one_mulsi3(10, 0, 0);
try test_one_mulsi3(0, maxInt(i32), 0);
try test_one_mulsi3(maxInt(i32), 0, 0);
try test_one_mulsi3(0, -1, 0);
try test_one_mulsi3(-1, 0, 0);
try test_one_mulsi3(0, -10, 0);
try test_one_mulsi3(-10, 0, 0);
try test_one_mulsi3(0, minInt(i32), 0);
try test_one_mulsi3(minInt(i32), 0, 0);
try test_one_mulsi3(1, 1, 1);
try test_one_mulsi3(1, 10, 10);
try test_one_mulsi3(10, 1, 10);
try test_one_mulsi3(1, maxInt(i32), maxInt(i32));
try test_one_mulsi3(maxInt(i32), 1, maxInt(i32));
try test_one_mulsi3(1, -1, -1);
try test_one_mulsi3(1, -10, -10);
try test_one_mulsi3(-10, 1, -10);
try test_one_mulsi3(1, minInt(i32), minInt(i32));
try test_one_mulsi3(minInt(i32), 1, minInt(i32));
try test_one_mulsi3(46340, 46340, 2147395600);
try test_one_mulsi3(-46340, 46340, -2147395600);
try test_one_mulsi3(46340, -46340, -2147395600);
try test_one_mulsi3(-46340, -46340, 2147395600);
try test_one_mulsi3(4194303, 8192, @truncate(i32, 34359730176));
try test_one_mulsi3(-4194303, 8192, @truncate(i32, -34359730176));
try test_one_mulsi3(4194303, -8192, @truncate(i32, -34359730176));
try test_one_mulsi3(-4194303, -8192, @truncate(i32, 34359730176));
try test_one_mulsi3(8192, 4194303, @truncate(i32, 34359730176));
try test_one_mulsi3(-8192, 4194303, @truncate(i32, -34359730176));
try test_one_mulsi3(8192, -4194303, @truncate(i32, -34359730176));
try test_one_mulsi3(-8192, -4194303, @truncate(i32, 34359730176));
}
test "muldi3" {
try test__muldi3(0, 0, 0);
try test__muldi3(0, 1, 0);
try test__muldi3(1, 0, 0);
try test__muldi3(0, 10, 0);
try test__muldi3(10, 0, 0);
try test__muldi3(0, 81985529216486895, 0);
try test__muldi3(81985529216486895, 0, 0);
try test__muldi3(0, -1, 0);
try test__muldi3(-1, 0, 0);
try test__muldi3(0, -10, 0);
try test__muldi3(-10, 0, 0);
try test__muldi3(0, -81985529216486895, 0);
try test__muldi3(-81985529216486895, 0, 0);
try test__muldi3(1, 1, 1);
try test__muldi3(1, 10, 10);
try test__muldi3(10, 1, 10);
try test__muldi3(1, 81985529216486895, 81985529216486895);
try test__muldi3(81985529216486895, 1, 81985529216486895);
try test__muldi3(1, -1, -1);
try test__muldi3(1, -10, -10);
try test__muldi3(-10, 1, -10);
try test__muldi3(1, -81985529216486895, -81985529216486895);
try test__muldi3(-81985529216486895, 1, -81985529216486895);
try test__muldi3(3037000499, 3037000499, 9223372030926249001);
try test__muldi3(-3037000499, 3037000499, -9223372030926249001);
try test__muldi3(3037000499, -3037000499, -9223372030926249001);
try test__muldi3(-3037000499, -3037000499, 9223372030926249001);
try test__muldi3(4398046511103, 2097152, 9223372036852678656);
try test__muldi3(-4398046511103, 2097152, -9223372036852678656);
try test__muldi3(4398046511103, -2097152, -9223372036852678656);
try test__muldi3(-4398046511103, -2097152, 9223372036852678656);
try test__muldi3(2097152, 4398046511103, 9223372036852678656);
try test__muldi3(-2097152, 4398046511103, -9223372036852678656);
try test__muldi3(2097152, -4398046511103, -9223372036852678656);
try test__muldi3(-2097152, -4398046511103, 9223372036852678656);
}
test "multi3" {
try test__multi3(0, 0, 0);
try test__multi3(0, 1, 0);
try test__multi3(1, 0, 0);
try test__multi3(0, 10, 0);
try test__multi3(10, 0, 0);
try test__multi3(0, 81985529216486895, 0);
try test__multi3(81985529216486895, 0, 0);
try test__multi3(0, -1, 0);
try test__multi3(-1, 0, 0);
try test__multi3(0, -10, 0);
try test__multi3(-10, 0, 0);
try test__multi3(0, -81985529216486895, 0);
try test__multi3(-81985529216486895, 0, 0);
try test__multi3(1, 1, 1);
try test__multi3(1, 10, 10);
try test__multi3(10, 1, 10);
try test__multi3(1, 81985529216486895, 81985529216486895);
try test__multi3(81985529216486895, 1, 81985529216486895);
try test__multi3(1, -1, -1);
try test__multi3(1, -10, -10);
try test__multi3(-10, 1, -10);
try test__multi3(1, -81985529216486895, -81985529216486895);
try test__multi3(-81985529216486895, 1, -81985529216486895);
try test__multi3(3037000499, 3037000499, 9223372030926249001);
try test__multi3(-3037000499, 3037000499, -9223372030926249001);
try test__multi3(3037000499, -3037000499, -9223372030926249001);
try test__multi3(-3037000499, -3037000499, 9223372030926249001);
try test__multi3(4398046511103, 2097152, 9223372036852678656);
try test__multi3(-4398046511103, 2097152, -9223372036852678656);
try test__multi3(4398046511103, -2097152, -9223372036852678656);
try test__multi3(-4398046511103, -2097152, 9223372036852678656);
try test__multi3(2097152, 4398046511103, 9223372036852678656);
try test__multi3(-2097152, 4398046511103, -9223372036852678656);
try test__multi3(2097152, -4398046511103, -9223372036852678656);
try test__multi3(-2097152, -4398046511103, 9223372036852678656);
try test__multi3(0x00000000000000B504F333F9DE5BE000, 0x000000000000000000B504F333F9DE5B, 0x7FFFFFFFFFFFF328DF915DA296E8A000);
}

View File

@ -1,71 +0,0 @@
//! Ported from
//! https://github.com/llvm/llvm-project/blob/llvmorg-9.0.0/compiler-rt/lib/builtins/muldi3.c
const std = @import("std");
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (common.want_aeabi) {
@export(__aeabi_lmul, .{ .name = "__aeabi_lmul", .linkage = common.linkage, .visibility = common.visibility });
} else {
@export(__muldi3, .{ .name = "__muldi3", .linkage = common.linkage, .visibility = common.visibility });
}
}
pub fn __muldi3(a: i64, b: i64) callconv(.C) i64 {
return mul(a, b);
}
fn __aeabi_lmul(a: i64, b: i64) callconv(.AAPCS) i64 {
return mul(a, b);
}
inline fn mul(a: i64, b: i64) i64 {
const x = dwords{ .all = a };
const y = dwords{ .all = b };
var r = dwords{ .all = muldsi3(x.s.low, y.s.low) };
r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
return r.all;
}
const dwords = extern union {
all: i64,
s: switch (native_endian) {
.Little => extern struct {
low: u32,
high: u32,
},
.Big => extern struct {
high: u32,
low: u32,
},
},
};
fn muldsi3(a: u32, b: u32) i64 {
const bits_in_word_2 = @sizeOf(i32) * 8 / 2;
const lower_mask = (~@as(u32, 0)) >> bits_in_word_2;
var r: dwords = undefined;
r.s.low = (a & lower_mask) *% (b & lower_mask);
var t: u32 = r.s.low >> bits_in_word_2;
r.s.low &= lower_mask;
t += (a >> bits_in_word_2) *% (b & lower_mask);
r.s.low +%= (t & lower_mask) << bits_in_word_2;
r.s.high = t >> bits_in_word_2;
t = r.s.low >> bits_in_word_2;
r.s.low &= lower_mask;
t +%= (b >> bits_in_word_2) *% (a & lower_mask);
r.s.low +%= (t & lower_mask) << bits_in_word_2;
r.s.high +%= t >> bits_in_word_2;
r.s.high +%= (a >> bits_in_word_2) *% (b >> bits_in_word_2);
return r.all;
}
test {
_ = @import("muldi3_test.zig");
}

View File

@ -1,51 +0,0 @@
const __muldi3 = @import("muldi3.zig").__muldi3;
const testing = @import("std").testing;
fn test__muldi3(a: i64, b: i64, expected: i64) !void {
const x = __muldi3(a, b);
try testing.expect(x == expected);
}
test "muldi3" {
try test__muldi3(0, 0, 0);
try test__muldi3(0, 1, 0);
try test__muldi3(1, 0, 0);
try test__muldi3(0, 10, 0);
try test__muldi3(10, 0, 0);
try test__muldi3(0, 81985529216486895, 0);
try test__muldi3(81985529216486895, 0, 0);
try test__muldi3(0, -1, 0);
try test__muldi3(-1, 0, 0);
try test__muldi3(0, -10, 0);
try test__muldi3(-10, 0, 0);
try test__muldi3(0, -81985529216486895, 0);
try test__muldi3(-81985529216486895, 0, 0);
try test__muldi3(1, 1, 1);
try test__muldi3(1, 10, 10);
try test__muldi3(10, 1, 10);
try test__muldi3(1, 81985529216486895, 81985529216486895);
try test__muldi3(81985529216486895, 1, 81985529216486895);
try test__muldi3(1, -1, -1);
try test__muldi3(1, -10, -10);
try test__muldi3(-10, 1, -10);
try test__muldi3(1, -81985529216486895, -81985529216486895);
try test__muldi3(-81985529216486895, 1, -81985529216486895);
try test__muldi3(3037000499, 3037000499, 9223372030926249001);
try test__muldi3(-3037000499, 3037000499, -9223372030926249001);
try test__muldi3(3037000499, -3037000499, -9223372030926249001);
try test__muldi3(-3037000499, -3037000499, 9223372030926249001);
try test__muldi3(4398046511103, 2097152, 9223372036852678656);
try test__muldi3(-4398046511103, 2097152, -9223372036852678656);
try test__muldi3(4398046511103, -2097152, -9223372036852678656);
try test__muldi3(-4398046511103, -2097152, 9223372036852678656);
try test__muldi3(2097152, 4398046511103, 9223372036852678656);
try test__muldi3(-2097152, 4398046511103, -9223372036852678656);
try test__muldi3(2097152, -4398046511103, -9223372036852678656);
try test__muldi3(-2097152, -4398046511103, 9223372036852678656);
}

View File

@ -1,75 +0,0 @@
//! Ported from git@github.com:llvm-project/llvm-project-20170507.git
//! ae684fad6d34858c014c94da69c15e7774a633c3
//! 2018-08-13
const std = @import("std");
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const common = @import("common.zig");
pub const panic = common.panic;
comptime {
if (common.want_windows_v2u64_abi) {
@export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = common.linkage, .visibility = common.visibility });
} else {
@export(__multi3, .{ .name = "__multi3", .linkage = common.linkage, .visibility = common.visibility });
}
}
pub fn __multi3(a: i128, b: i128) callconv(.C) i128 {
return mul(a, b);
}
const v2u64 = @Vector(2, u64);
fn __multi3_windows_x86_64(a: v2u64, b: v2u64) callconv(.C) v2u64 {
return @bitCast(v2u64, mul(@bitCast(i128, a), @bitCast(i128, b)));
}
inline fn mul(a: i128, b: i128) i128 {
const x = twords{ .all = a };
const y = twords{ .all = b };
var r = twords{ .all = mulddi3(x.s.low, y.s.low) };
r.s.high +%= x.s.high *% y.s.low +% x.s.low *% y.s.high;
return r.all;
}
fn mulddi3(a: u64, b: u64) i128 {
const bits_in_dword_2 = (@sizeOf(i64) * 8) / 2;
const lower_mask = ~@as(u64, 0) >> bits_in_dword_2;
var r: twords = undefined;
r.s.low = (a & lower_mask) *% (b & lower_mask);
var t: u64 = r.s.low >> bits_in_dword_2;
r.s.low &= lower_mask;
t +%= (a >> bits_in_dword_2) *% (b & lower_mask);
r.s.low +%= (t & lower_mask) << bits_in_dword_2;
r.s.high = t >> bits_in_dword_2;
t = r.s.low >> bits_in_dword_2;
r.s.low &= lower_mask;
t +%= (b >> bits_in_dword_2) *% (a & lower_mask);
r.s.low +%= (t & lower_mask) << bits_in_dword_2;
r.s.high +%= t >> bits_in_dword_2;
r.s.high +%= (a >> bits_in_dword_2) *% (b >> bits_in_dword_2);
return r.all;
}
const twords = extern union {
all: i128,
s: S,
const S = if (native_endian == .Little)
extern struct {
low: u64,
high: u64,
}
else
extern struct {
high: u64,
low: u64,
};
};
test {
_ = @import("multi3_test.zig");
}

View File

@ -1,53 +0,0 @@
const __multi3 = @import("multi3.zig").__multi3;
const testing = @import("std").testing;
fn test__multi3(a: i128, b: i128, expected: i128) !void {
const x = __multi3(a, b);
try testing.expect(x == expected);
}
test "multi3" {
try test__multi3(0, 0, 0);
try test__multi3(0, 1, 0);
try test__multi3(1, 0, 0);
try test__multi3(0, 10, 0);
try test__multi3(10, 0, 0);
try test__multi3(0, 81985529216486895, 0);
try test__multi3(81985529216486895, 0, 0);
try test__multi3(0, -1, 0);
try test__multi3(-1, 0, 0);
try test__multi3(0, -10, 0);
try test__multi3(-10, 0, 0);
try test__multi3(0, -81985529216486895, 0);
try test__multi3(-81985529216486895, 0, 0);
try test__multi3(1, 1, 1);
try test__multi3(1, 10, 10);
try test__multi3(10, 1, 10);
try test__multi3(1, 81985529216486895, 81985529216486895);
try test__multi3(81985529216486895, 1, 81985529216486895);
try test__multi3(1, -1, -1);
try test__multi3(1, -10, -10);
try test__multi3(-10, 1, -10);
try test__multi3(1, -81985529216486895, -81985529216486895);
try test__multi3(-81985529216486895, 1, -81985529216486895);
try test__multi3(3037000499, 3037000499, 9223372030926249001);
try test__multi3(-3037000499, 3037000499, -9223372030926249001);
try test__multi3(3037000499, -3037000499, -9223372030926249001);
try test__multi3(-3037000499, -3037000499, 9223372030926249001);
try test__multi3(4398046511103, 2097152, 9223372036852678656);
try test__multi3(-4398046511103, 2097152, -9223372036852678656);
try test__multi3(4398046511103, -2097152, -9223372036852678656);
try test__multi3(-4398046511103, -2097152, 9223372036852678656);
try test__multi3(2097152, 4398046511103, 9223372036852678656);
try test__multi3(-2097152, 4398046511103, -9223372036852678656);
try test__multi3(2097152, -4398046511103, -9223372036852678656);
try test__multi3(-2097152, -4398046511103, 9223372036852678656);
try test__multi3(0x00000000000000B504F333F9DE5BE000, 0x000000000000000000B504F333F9DE5B, 0x7FFFFFFFFFFFF328DF915DA296E8A000);
}

View File

@ -1,7 +1,6 @@
const std = @import("std"); const std = @import("std");
const builtin = @import("builtin"); const builtin = @import("builtin");
const Log2Int = std.math.Log2Int; const Log2Int = std.math.Log2Int;
const native_endian = builtin.cpu.arch.endian();
const common = @import("common.zig"); const common = @import("common.zig");
pub const panic = common.panic; pub const panic = common.panic;
@ -27,39 +26,24 @@ comptime {
} }
} }
fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {
const bits = @divExact(@typeInfo(T).Int.bits, 2);
const HalfTU = std.meta.Int(.unsigned, bits);
const HalfTS = std.meta.Int(.signed, bits);
const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
s: if (native_endian == .Little)
extern struct { low: HalfT, high: HalfT }
else
extern struct { high: HalfT, low: HalfT },
};
}
// Arithmetic shift left: shift in 0 from right to left // Arithmetic shift left: shift in 0 from right to left
// Precondition: 0 <= b < bits_in_dword // Precondition: 0 <= b < bits_in_dword
inline fn ashlXi3(comptime T: type, a: T, b: i32) T { inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false); const word_t = common.HalveInt(T, false);
const S = Log2Int(dwords.HalfT); const S = Log2Int(word_t.HalfT);
const input = dwords{ .all = a }; const input = word_t{ .all = a };
var output: dwords = undefined; var output: word_t = undefined;
if (b >= dwords.bits) { if (b >= word_t.bits) {
output.s.low = 0; output.s.low = 0;
output.s.high = input.s.low << @intCast(S, b - dwords.bits); output.s.high = input.s.low << @intCast(S, b - word_t.bits);
} else if (b == 0) { } else if (b == 0) {
return a; return a;
} else { } else {
output.s.low = input.s.low << @intCast(S, b); output.s.low = input.s.low << @intCast(S, b);
output.s.high = input.s.high << @intCast(S, b); output.s.high = input.s.high << @intCast(S, b);
output.s.high |= input.s.low >> @intCast(S, dwords.bits - b); output.s.high |= input.s.low >> @intCast(S, word_t.bits - b);
} }
return output.all; return output.all;
@ -68,24 +52,24 @@ inline fn ashlXi3(comptime T: type, a: T, b: i32) T {
// Arithmetic shift right: shift in 1 from left to right // Arithmetic shift right: shift in 1 from left to right
// Precondition: 0 <= b < T.bit_count // Precondition: 0 <= b < T.bit_count
inline fn ashrXi3(comptime T: type, a: T, b: i32) T { inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, true); const word_t = common.HalveInt(T, true);
const S = Log2Int(dwords.HalfT); const S = Log2Int(word_t.HalfT);
const input = dwords{ .all = a }; const input = word_t{ .all = a };
var output: dwords = undefined; var output: word_t = undefined;
if (b >= dwords.bits) { if (b >= word_t.bits) {
output.s.high = input.s.high >> (dwords.bits - 1); output.s.high = input.s.high >> (word_t.bits - 1);
output.s.low = input.s.high >> @intCast(S, b - dwords.bits); output.s.low = input.s.high >> @intCast(S, b - word_t.bits);
} else if (b == 0) { } else if (b == 0) {
return a; return a;
} else { } else {
output.s.high = input.s.high >> @intCast(S, b); output.s.high = input.s.high >> @intCast(S, b);
output.s.low = input.s.high << @intCast(S, dwords.bits - b); output.s.low = input.s.high << @intCast(S, word_t.bits - b);
// Avoid sign-extension here // Avoid sign-extension here
output.s.low |= @bitCast( output.s.low |= @bitCast(
dwords.HalfT, word_t.HalfT,
@bitCast(dwords.HalfTU, input.s.low) >> @intCast(S, b), @bitCast(word_t.HalfTU, input.s.low) >> @intCast(S, b),
); );
} }
@ -95,20 +79,20 @@ inline fn ashrXi3(comptime T: type, a: T, b: i32) T {
// Logical shift right: shift in 0 from left to right // Logical shift right: shift in 0 from left to right
// Precondition: 0 <= b < T.bit_count // Precondition: 0 <= b < T.bit_count
inline fn lshrXi3(comptime T: type, a: T, b: i32) T { inline fn lshrXi3(comptime T: type, a: T, b: i32) T {
const dwords = Dwords(T, false); const word_t = common.HalveInt(T, false);
const S = Log2Int(dwords.HalfT); const S = Log2Int(word_t.HalfT);
const input = dwords{ .all = a }; const input = word_t{ .all = a };
var output: dwords = undefined; var output: word_t = undefined;
if (b >= dwords.bits) { if (b >= word_t.bits) {
output.s.high = 0; output.s.high = 0;
output.s.low = input.s.high >> @intCast(S, b - dwords.bits); output.s.low = input.s.high >> @intCast(S, b - word_t.bits);
} else if (b == 0) { } else if (b == 0) {
return a; return a;
} else { } else {
output.s.high = input.s.high >> @intCast(S, b); output.s.high = input.s.high >> @intCast(S, b);
output.s.low = input.s.high << @intCast(S, dwords.bits - b); output.s.low = input.s.high << @intCast(S, word_t.bits - b);
output.s.low |= input.s.low >> @intCast(S, b); output.s.low |= input.s.low >> @intCast(S, b);
} }

View File

@ -106,7 +106,7 @@ const NAV_MODES = {
// empty array means refers to the package itself // empty array means refers to the package itself
declNames: [], declNames: [],
// these will be all types, except the last one may be a type or a decl // these will be all types, except the last one may be a type or a decl
declObjs: [], declObjs: [],
// (a, b, c, d) comptime call; result is the value the docs refer to // (a, b, c, d) comptime call; result is the value the docs refer to
callName: null, callName: null,
}; };
@ -200,7 +200,7 @@ const NAV_MODES = {
case NAV_MODES.GUIDES: case NAV_MODES.GUIDES:
document.title = "[G] " + curNav.activeGuide + suffix; document.title = "[G] " + curNav.activeGuide + suffix;
return; return;
} }
} }
function isDecl(x) { function isDecl(x) {
@ -401,7 +401,7 @@ const NAV_MODES = {
domGuideSwitch.classList.add("active"); domGuideSwitch.classList.add("active");
domApiSwitch.classList.remove("active"); domApiSwitch.classList.remove("active");
domDocs.classList.add("hidden"); domDocs.classList.add("hidden");
domGuides.classList.remove("hidden"); domGuides.classList.remove("hidden");
domApiMenu.classList.add("hidden"); domApiMenu.classList.add("hidden");
// sidebar guides list // sidebar guides list
@ -422,7 +422,7 @@ const NAV_MODES = {
if (list.length > 0) { if (list.length > 0) {
domGuidesMenu.classList.remove("hidden"); domGuidesMenu.classList.remove("hidden");
} }
// main content // main content
const activeGuide = zigAnalysis.guides[curNav.activeGuide]; const activeGuide = zigAnalysis.guides[curNav.activeGuide];
if (activeGuide == undefined) { if (activeGuide == undefined) {
@ -454,7 +454,7 @@ const NAV_MODES = {
Happy writing! Happy writing!
`); `);
} else { } else {
domGuides.innerHTML = markdown(activeGuide); domGuides.innerHTML = markdown(activeGuide);
} }
} }
@ -467,7 +467,7 @@ const NAV_MODES = {
domDocs.classList.remove("hidden"); domDocs.classList.remove("hidden");
domApiMenu.classList.remove("hidden"); domApiMenu.classList.remove("hidden");
domGuidesMenu.classList.add("hidden"); domGuidesMenu.classList.add("hidden");
domStatus.classList.add("hidden"); domStatus.classList.add("hidden");
domFnProto.classList.add("hidden"); domFnProto.classList.add("hidden");
domSectParams.classList.add("hidden"); domSectParams.classList.add("hidden");
@ -537,9 +537,9 @@ const NAV_MODES = {
currentType = childDecl; currentType = childDecl;
curNav.declObjs.push(currentType); curNav.declObjs.push(currentType);
} }
window.x = currentType; window.x = currentType;
renderNav(); renderNav();
@ -586,15 +586,15 @@ const NAV_MODES = {
switch (curNav.mode) { switch (curNav.mode) {
case NAV_MODES.API: case NAV_MODES.API:
case NAV_MODES.API_INTERNAL: case NAV_MODES.API_INTERNAL:
return renderApi(); return renderApi();
case NAV_MODES.GUIDES: case NAV_MODES.GUIDES:
return renderGuides(); return renderGuides();
default: default:
throw "?"; throw "?";
} }
} }
function renderDocTest(decl) { function renderDocTest(decl) {
if (!decl.decltest) return; if (!decl.decltest) return;
const astNode = getAstNode(decl.decltest); const astNode = getAstNode(decl.decltest);
@ -651,7 +651,7 @@ const NAV_MODES = {
wantLink: true, wantLink: true,
fnDecl, fnDecl,
}); });
domFnSourceLink.innerHTML = "[<a target=\"_blank\" href=\"" + sourceFileLink(fnDecl) + "\">src</a>]"; domFnSourceLink.innerHTML = "[<a target=\"_blank\" href=\"" + sourceFileLink(fnDecl) + "\">src</a>]";
let docsSource = null; let docsSource = null;
@ -895,7 +895,7 @@ const NAV_MODES = {
function navLink(pkgNames, declNames, callName) { function navLink(pkgNames, declNames, callName) {
let base = curNav.mode; let base = curNav.mode;
if (pkgNames.length === 0 && declNames.length === 0) { if (pkgNames.length === 0 && declNames.length === 0) {
return base; return base;
} else if (declNames.length === 0 && callName == null) { } else if (declNames.length === 0 && callName == null) {
@ -919,18 +919,18 @@ const NAV_MODES = {
function findDeclNavLink(declName) { function findDeclNavLink(declName) {
if (curNav.declObjs.length == 0) return null; if (curNav.declObjs.length == 0) return null;
const curFile = getAstNode(curNav.declObjs[curNav.declObjs.length-1].src).file; const curFile = getAstNode(curNav.declObjs[curNav.declObjs.length - 1].src).file;
for (let i = curNav.declObjs.length -1; i >= 0; i--) {
const curDecl = curNav.declObjs[i];
const curDeclName = curNav.declNames[i-1];
if (curDeclName == declName) {
const declPath = curNav.declNames.slice(0,i);
return navLink(curNav.pkgNames, declPath);
}
if (findSubDecl(curDecl, declName) != null) { for (let i = curNav.declObjs.length - 1; i >= 0; i--) {
const declPath = curNav.declNames.slice(0,i).concat([declName]); const curDecl = curNav.declObjs[i];
const curDeclName = curNav.declNames[i - 1];
if (curDeclName == declName) {
const declPath = curNav.declNames.slice(0, i);
return navLink(curNav.pkgNames, declPath);
}
if (findSubDecl(curDecl, declName) != null) {
const declPath = curNav.declNames.slice(0, i).concat([declName]);
return navLink(curNav.pkgNames, declPath); return navLink(curNav.pkgNames, declPath);
} }
} }
@ -1366,10 +1366,6 @@ const NAV_MODES = {
payloadHtml += "truncate"; payloadHtml += "truncate";
break; break;
} }
case "align_cast": {
payloadHtml += "alignCast";
break;
}
case "has_decl": { case "has_decl": {
payloadHtml += "hasDecl"; payloadHtml += "hasDecl";
break; break;
@ -1700,15 +1696,15 @@ const NAV_MODES = {
} }
case "declRef": { case "declRef": {
const name = getDecl(expr.declRef).name; const name = getDecl(expr.declRef).name;
if (opts.wantHtml) { if (opts.wantHtml) {
let payloadHtml = ""; let payloadHtml = "";
if (opts.wantLink) { if (opts.wantLink) {
payloadHtml += '<a href="'+ findDeclNavLink(name) +'">'; payloadHtml += '<a href="' + findDeclNavLink(name) + '">';
} }
payloadHtml += payloadHtml +=
'<span class="tok-kw" style="color:lightblue;">' + '<span class="tok-kw" style="color:lightblue;">' +
name + name +
"</span>"; "</span>";
if (opts.wantLink) payloadHtml += "</a>"; if (opts.wantLink) payloadHtml += "</a>";
return payloadHtml; return payloadHtml;
@ -1728,12 +1724,12 @@ const NAV_MODES = {
if ("string" in expr.refPath[i]) { if ("string" in expr.refPath[i]) {
component = expr.refPath[i].string; component = expr.refPath[i].string;
} else { } else {
component = exprName(expr.refPath[i], {...opts, wantLink: false}); component = exprName(expr.refPath[i], { ...opts, wantLink: false });
if (opts.wantLink && "declRef" in expr.refPath[i]) { if (opts.wantLink && "declRef" in expr.refPath[i]) {
url += "." + getDecl(expr.refPath[i].declRef).name; url += "." + getDecl(expr.refPath[i].declRef).name;
component = '<a href="'+ url +'">' + component = '<a href="' + url + '">' +
component + component +
"</a>"; "</a>";
} }
} }
name += "." + component; name += "." + component;
@ -1792,12 +1788,12 @@ const NAV_MODES = {
name = "struct { "; name = "struct { ";
} }
} }
if (structObj.fields.length > 1 && opts.wantHtml) {name += "</br>";} if (structObj.fields.length > 1 && opts.wantHtml) { name += "</br>"; }
let indent = ""; let indent = "";
if (structObj.fields.length > 1 && opts.wantHtml) { if (structObj.fields.length > 1 && opts.wantHtml) {
indent = "&nbsp;&nbsp;&nbsp;&nbsp;" indent = "&nbsp;&nbsp;&nbsp;&nbsp;"
} }
if (opts.indent) { if (opts.indent && structObj.fields.length > 1) {
indent = opts.indent + indent; indent = opts.indent + indent;
} }
let structNode = getAstNode(structObj.src); let structNode = getAstNode(structObj.src);
@ -1808,7 +1804,7 @@ const NAV_MODES = {
field_end += " "; field_end += " ";
} }
for(let i = 0; i < structObj.fields.length; i += 1) { for (let i = 0; i < structObj.fields.length; i += 1) {
let fieldNode = getAstNode(structNode.fields[i]); let fieldNode = getAstNode(structNode.fields[i]);
let fieldName = fieldNode.name; let fieldName = fieldNode.name;
let html = indent; let html = indent;
@ -1817,17 +1813,17 @@ const NAV_MODES = {
} }
let fieldTypeExpr = structObj.fields[i]; let fieldTypeExpr = structObj.fields[i];
if(!structObj.is_tuple) { if (!structObj.is_tuple) {
html += ": "; html += ": ";
} }
html += exprName(fieldTypeExpr, {...opts, indent: indent}); html += exprName(fieldTypeExpr, { ...opts, indent: indent });
html += field_end; html += field_end;
name += html; name += html;
} }
if (opts.indent) { if (opts.indent && structObj.fields.length > 1) {
name += opts.indent; name += opts.indent;
} }
name += "}"; name += "}";
@ -1850,7 +1846,7 @@ const NAV_MODES = {
if (enumObj.nonexhaustive) { if (enumObj.nonexhaustive) {
fields_len += 1; fields_len += 1;
} }
if (fields_len > 1 && opts.wantHtml) {name += "</br>";} if (fields_len > 1 && opts.wantHtml) { name += "</br>"; }
let indent = ""; let indent = "";
if (fields_len > 1) { if (fields_len > 1) {
if (opts.wantHtml) { if (opts.wantHtml) {
@ -1868,10 +1864,10 @@ const NAV_MODES = {
} else { } else {
field_end += " "; field_end += " ";
} }
for(let i = 0; i < enumNode.fields.length; i += 1) { for (let i = 0; i < enumNode.fields.length; i += 1) {
let fieldNode = getAstNode(enumNode.fields[i]); let fieldNode = getAstNode(enumNode.fields[i]);
let fieldName = fieldNode.name; let fieldName = fieldNode.name;
let html = indent + escapeHtml(fieldName); let html = indent + escapeHtml(fieldName);
html += field_end; html += field_end;
@ -1895,16 +1891,16 @@ const NAV_MODES = {
name = "union"; name = "union";
} }
if (unionObj.auto_tag) { if (unionObj.auto_tag) {
if (opts.wantHtml) { if (opts.wantHtml) {
name += " (<span class='tok-kw'>enum</span>"; name += " (<span class='tok-kw'>enum</span>";
} else { } else {
name += " (enum"; name += " (enum";
} }
if (unionObj.tag) { if (unionObj.tag) {
name += "(" + exprName(unionObj.tag, opts) + "))"; name += "(" + exprName(unionObj.tag, opts) + "))";
} else { } else {
name += ")"; name += ")";
} }
} else if (unionObj.tag) { } else if (unionObj.tag) {
name += " (" + exprName(unionObj.tag, opts) + ")"; name += " (" + exprName(unionObj.tag, opts) + ")";
} }
@ -1926,7 +1922,7 @@ const NAV_MODES = {
} else { } else {
field_end += " "; field_end += " ";
} }
for(let i = 0; i < unionObj.fields.length; i += 1) { for (let i = 0; i < unionObj.fields.length; i += 1) {
let fieldNode = getAstNode(unionNode.fields[i]); let fieldNode = getAstNode(unionNode.fields[i]);
let fieldName = fieldNode.name; let fieldName = fieldNode.name;
let html = indent + escapeHtml(fieldName); let html = indent + escapeHtml(fieldName);
@ -1934,7 +1930,7 @@ const NAV_MODES = {
let fieldTypeExpr = unionObj.fields[i]; let fieldTypeExpr = unionObj.fields[i];
html += ": "; html += ": ";
html += exprName(fieldTypeExpr, {...opts, indent: indent}); html += exprName(fieldTypeExpr, { ...opts, indent: indent });
html += field_end; html += field_end;
@ -2163,7 +2159,7 @@ const NAV_MODES = {
opts.fnDecl = null; opts.fnDecl = null;
opts.linkFnNameDecl = null; opts.linkFnNameDecl = null;
let payloadHtml = ""; let payloadHtml = "";
if (opts.addParensIfFnSignature && fnObj.src == 0){ if (opts.addParensIfFnSignature && fnObj.src == 0) {
payloadHtml += "("; payloadHtml += "(";
} }
if (opts.wantHtml) { if (opts.wantHtml) {
@ -2179,7 +2175,7 @@ const NAV_MODES = {
if (linkFnNameDecl) { if (linkFnNameDecl) {
payloadHtml += payloadHtml +=
'<a href="' + linkFnNameDecl + '">' + '<a href="' + linkFnNameDecl + '">' +
escapeHtml(fnDecl.name) + escapeHtml(fnDecl.name) +
"</a>"; "</a>";
} else { } else {
payloadHtml += escapeHtml(fnDecl.name); payloadHtml += escapeHtml(fnDecl.name);
@ -2198,7 +2194,7 @@ const NAV_MODES = {
fields = fnNode.fields; fields = fnNode.fields;
isVarArgs = fnNode.varArgs; isVarArgs = fnNode.varArgs;
} }
for (let i = 0; i < fnObj.params.length; i += 1) { for (let i = 0; i < fnObj.params.length; i += 1) {
if (i != 0) { if (i != 0) {
payloadHtml += ", "; payloadHtml += ", ";
@ -2251,13 +2247,13 @@ const NAV_MODES = {
} else if ("typeOf" in value) { } else if ("typeOf" in value) {
payloadHtml += exprName(value, opts); payloadHtml += exprName(value, opts);
} else if ("typeOf_peer" in value) { } else if ("typeOf_peer" in value) {
payloadHtml += exprName(value, opts); payloadHtml += exprName(value, opts);
} else if ("declRef" in value) { } else if ("declRef" in value) {
payloadHtml += exprName(value, opts); payloadHtml += exprName(value, opts);
} else if ("call" in value) { } else if ("call" in value) {
payloadHtml += exprName(value, opts); payloadHtml += exprName(value, opts);
} else if ("refPath" in value) { } else if ("refPath" in value) {
payloadHtml += exprName(value, opts); payloadHtml += exprName(value, opts);
} else if ("type" in value) { } else if ("type" in value) {
payloadHtml += exprName(value, opts); payloadHtml += exprName(value, opts);
//payloadHtml += '<span class="tok-kw">' + name + "</span>"; //payloadHtml += '<span class="tok-kw">' + name + "</span>";
@ -2301,7 +2297,7 @@ const NAV_MODES = {
} }
if (fnObj.ret != null) { if (fnObj.ret != null) {
payloadHtml += exprName(fnObj.ret, { payloadHtml += exprName(fnObj.ret, {
...opts, ...opts,
addParensIfFnSignature: true, addParensIfFnSignature: true,
}); });
} else if (opts.wantHtml) { } else if (opts.wantHtml) {
@ -2310,7 +2306,7 @@ const NAV_MODES = {
payloadHtml += "anytype"; payloadHtml += "anytype";
} }
if (opts.addParensIfFnSignature && fnObj.src == 0){ if (opts.addParensIfFnSignature && fnObj.src == 0) {
payloadHtml += ")"; payloadHtml += ")";
} }
return payloadHtml; return payloadHtml;
@ -2353,7 +2349,7 @@ const NAV_MODES = {
) { ) {
name = "std"; name = "std";
} else { } else {
name = exprName({ type: typeObj }, {wantHtml: false, wantLink: false}); name = exprName({ type: typeObj }, { wantHtml: false, wantLink: false });
} }
if (name != null && name != "") { if (name != null && name != "") {
domHdrName.innerText = domHdrName.innerText =
@ -2644,10 +2640,10 @@ const NAV_MODES = {
} }
function sourceFileLink(decl) { function sourceFileLink(decl) {
const srcNode = getAstNode(decl.src); const srcNode = getAstNode(decl.src);
return sourceFileUrlTemplate. return sourceFileUrlTemplate.
replace("{{file}}", zigAnalysis.files[srcNode.file]). replace("{{file}}", zigAnalysis.files[srcNode.file]).
replace("{{line}}", srcNode.line + 1); replace("{{line}}", srcNode.line + 1);
} }
function renderContainer(container) { function renderContainer(container) {
@ -2783,8 +2779,8 @@ const NAV_MODES = {
if (short != docs) { if (short != docs) {
short = markdown(short); short = markdown(short);
var long = markdown(docs); var long = markdown(docs);
tdDesc.innerHTML = tdDesc.innerHTML =
"<div class=\"expand\" ><span class=\"button\" onclick=\"toggleExpand(event)\"></span><div class=\"sum-less\">" + short + "</div>" + "<div class=\"sum-more\">" + long + "</div></details>"; "<div class=\"expand\" ><span class=\"button\" onclick=\"toggleExpand(event)\"></span><div class=\"sum-less\">" + short + "</div>" + "<div class=\"sum-more\">" + long + "</div></details>";
} }
else { else {
tdDesc.innerHTML = markdown(short); tdDesc.innerHTML = markdown(short);
@ -2818,10 +2814,10 @@ const NAV_MODES = {
html += ' = <span class="tok-number">' + fieldName + "</span>"; html += ' = <span class="tok-number">' + fieldName + "</span>";
} else { } else {
let fieldTypeExpr = container.fields[i]; let fieldTypeExpr = container.fields[i];
if(container.kind ==! typeKinds.Struct || !container.is_tuple) { if (container.kind !== typeKinds.Struct || !container.is_tuple) {
html += ": "; html += ": ";
} }
html += exprName(fieldTypeExpr, {wantHtml:true, wantLink:true}); html += exprName(fieldTypeExpr, { wantHtml: true, wantLink: true });
let tsn = typeShorthandName(fieldTypeExpr); let tsn = typeShorthandName(fieldTypeExpr);
if (tsn) { if (tsn) {
html += "<span> (" + tsn + ")</span>"; html += "<span> (" + tsn + ")</span>";
@ -3007,8 +3003,8 @@ const NAV_MODES = {
throw new Error("No type 'type' found"); throw new Error("No type 'type' found");
} }
function updateCurNav() { function updateCurNav() {
curNav = { curNav = {
mode: NAV_MODES.API, mode: NAV_MODES.API,
pkgNames: [], pkgNames: [],
@ -3021,7 +3017,7 @@ const NAV_MODES = {
const mode = location.hash.substring(0, 3); const mode = location.hash.substring(0, 3);
let query = location.hash.substring(3); let query = location.hash.substring(3);
const DEFAULT_HASH = NAV_MODES.API + zigAnalysis.packages[zigAnalysis.rootPkg].name; const DEFAULT_HASH = NAV_MODES.API + zigAnalysis.packages[zigAnalysis.rootPkg].name;
switch (mode) { switch (mode) {
case NAV_MODES.API: case NAV_MODES.API:
@ -3037,7 +3033,7 @@ const NAV_MODES = {
nonSearchPart = query.substring(0, qpos); nonSearchPart = query.substring(0, qpos);
curNavSearch = decodeURIComponent(query.substring(qpos + 1)); curNavSearch = decodeURIComponent(query.substring(qpos + 1));
} }
let parts = nonSearchPart.split(":"); let parts = nonSearchPart.split(":");
if (parts[0] == "") { if (parts[0] == "") {
location.hash = DEFAULT_HASH; location.hash = DEFAULT_HASH;
@ -3059,14 +3055,14 @@ const NAV_MODES = {
curNav.mode = mode; curNav.mode = mode;
curNav.activeGuide = query; curNav.activeGuide = query;
return; return;
default: default:
location.hash = DEFAULT_HASH; location.hash = DEFAULT_HASH;
return; return;
} }
} }
function onHashChange() { function onHashChange() {
updateCurNav(); updateCurNav();
if (domSearch.value !== curNavSearch) { if (domSearch.value !== curNavSearch) {
@ -3103,7 +3099,7 @@ const NAV_MODES = {
if (!callee.generic_ret) return null; if (!callee.generic_ret) return null;
resolvedGenericRet = resolveValue({ expr: callee.generic_ret }); resolvedGenericRet = resolveValue({ expr: callee.generic_ret });
} }
if ("type" in resolvedGenericRet.expr) { if ("type" in resolvedGenericRet.expr) {
parentType = getType(resolvedGenericRet.expr.type); parentType = getType(resolvedGenericRet.expr.type);
} }
@ -3248,7 +3244,7 @@ const NAV_MODES = {
}); });
} }
function shortDesc(docs){ function shortDesc(docs) {
const trimmed_docs = docs.trim(); const trimmed_docs = docs.trim();
let index = trimmed_docs.indexOf("\n\n"); let index = trimmed_docs.indexOf("\n\n");
let cut = false; let cut = false;
@ -3319,14 +3315,16 @@ const NAV_MODES = {
} else if (line.text.startsWith("#")) { } else if (line.text.startsWith("#")) {
line.type = "h1"; line.type = "h1";
line.text = line.text.substr(1); line.text = line.text.substr(1);
} else if (line.text.startsWith("-")) { } else if (line.text.match(/^-[ \t]+.*$/)) {
line.type = "ul"; // line starts with a hyphen, followed by spaces or tabs
line.text = line.text.substr(1); const match = line.text.match(/^-[ \t]+/);
} else if (line.text.match(/^\d+\..*$/)) {
// if line starts with {number}{dot}
const match = line.text.match(/(\d+)\./);
line.type = "ul"; line.type = "ul";
line.text = line.text.substr(match[0].length); line.text = line.text.substr(match[0].length);
} else if (line.text.match(/^\d+\.[ \t]+.*$/)) {
// line starts with {number}{dot}{spaces or tabs}
const match = line.text.match(/(\d+)\.[ \t]+/);
line.type = "ol";
line.text = line.text.substr(match[0].length);
line.ordered_number = Number(match[1].length); line.ordered_number = Number(match[1].length);
} else if (line.text == "```") { } else if (line.text == "```") {
line.type = "skip"; line.type = "skip";
@ -3536,7 +3534,7 @@ const NAV_MODES = {
case "ul": case "ul":
case "ol": case "ol":
if ( if (
!previousLineIs("ul", line_no) || !previousLineIs(line.type, line_no) ||
getPreviousLineIndent(line_no) < line.indent getPreviousLineIndent(line_no) < line.indent
) { ) {
html += "<" + line.type + ">\n"; html += "<" + line.type + ">\n";
@ -3545,7 +3543,7 @@ const NAV_MODES = {
html += "<li>" + markdownInlines(line.text) + "</li>\n"; html += "<li>" + markdownInlines(line.text) + "</li>\n";
if ( if (
!nextLineIs("ul", line_no) || !nextLineIs(line.type, line_no) ||
getNextLineIndent(line_no) < line.indent getNextLineIndent(line_no) < line.indent
) { ) {
html += "</" + line.type + ">\n"; html += "</" + line.type + ">\n";
@ -4067,4 +4065,4 @@ function toggleExpand(event) {
if (!parent.open && parent.getBoundingClientRect().top < 0) { if (!parent.open && parent.getBoundingClientRect().top < 0) {
parent.parentElement.parentElement.scrollIntoView(true); parent.parentElement.parentElement.scrollIntoView(true);
} }
} }

View File

@ -37,7 +37,7 @@ pub const FmtStep = @import("Build/FmtStep.zig");
pub const InstallArtifactStep = @import("Build/InstallArtifactStep.zig"); pub const InstallArtifactStep = @import("Build/InstallArtifactStep.zig");
pub const InstallDirStep = @import("Build/InstallDirStep.zig"); pub const InstallDirStep = @import("Build/InstallDirStep.zig");
pub const InstallFileStep = @import("Build/InstallFileStep.zig"); pub const InstallFileStep = @import("Build/InstallFileStep.zig");
pub const InstallRawStep = @import("Build/InstallRawStep.zig"); pub const ObjCopyStep = @import("Build/ObjCopyStep.zig");
pub const CompileStep = @import("Build/CompileStep.zig"); pub const CompileStep = @import("Build/CompileStep.zig");
pub const LogStep = @import("Build/LogStep.zig"); pub const LogStep = @import("Build/LogStep.zig");
pub const OptionsStep = @import("Build/OptionsStep.zig"); pub const OptionsStep = @import("Build/OptionsStep.zig");
@ -1254,11 +1254,8 @@ pub fn installLibFile(self: *Build, src_path: []const u8, dest_rel_path: []const
self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .lib, dest_rel_path).step); self.getInstallStep().dependOn(&self.addInstallFileWithDir(.{ .path = src_path }, .lib, dest_rel_path).step);
} }
/// Output format (BIN vs Intel HEX) determined by filename pub fn addObjCopy(b: *Build, source: FileSource, options: ObjCopyStep.Options) *ObjCopyStep {
pub fn installRaw(self: *Build, artifact: *CompileStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep { return ObjCopyStep.create(b, source, options);
const raw = self.addInstallRaw(artifact, dest_filename, options);
self.getInstallStep().dependOn(&raw.step);
return raw;
} }
///`dest_rel_path` is relative to install prefix path ///`dest_rel_path` is relative to install prefix path
@ -1280,10 +1277,6 @@ pub fn addInstallHeaderFile(b: *Build, src_path: []const u8, dest_rel_path: []co
return b.addInstallFileWithDir(.{ .path = src_path }, .header, dest_rel_path); return b.addInstallFileWithDir(.{ .path = src_path }, .header, dest_rel_path);
} }
pub fn addInstallRaw(self: *Build, artifact: *CompileStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
return InstallRawStep.create(self, artifact, dest_filename, options);
}
pub fn addInstallFileWithDir( pub fn addInstallFileWithDir(
self: *Build, self: *Build,
source: FileSource, source: FileSource,
@ -1771,7 +1764,7 @@ test {
_ = InstallArtifactStep; _ = InstallArtifactStep;
_ = InstallDirStep; _ = InstallDirStep;
_ = InstallFileStep; _ = InstallFileStep;
_ = InstallRawStep; _ = ObjCopyStep;
_ = CompileStep; _ = CompileStep;
_ = LogStep; _ = LogStep;
_ = OptionsStep; _ = OptionsStep;

View File

@ -21,7 +21,7 @@ const VcpkgRoot = std.Build.VcpkgRoot;
const InstallDir = std.Build.InstallDir; const InstallDir = std.Build.InstallDir;
const InstallArtifactStep = std.Build.InstallArtifactStep; const InstallArtifactStep = std.Build.InstallArtifactStep;
const GeneratedFile = std.Build.GeneratedFile; const GeneratedFile = std.Build.GeneratedFile;
const InstallRawStep = std.Build.InstallRawStep; const ObjCopyStep = std.Build.ObjCopyStep;
const EmulatableRunStep = std.Build.EmulatableRunStep; const EmulatableRunStep = std.Build.EmulatableRunStep;
const CheckObjectStep = std.Build.CheckObjectStep; const CheckObjectStep = std.Build.CheckObjectStep;
const RunStep = std.Build.RunStep; const RunStep = std.Build.RunStep;
@ -432,10 +432,6 @@ pub fn install(self: *CompileStep) void {
self.builder.installArtifact(self); self.builder.installArtifact(self);
} }
pub fn installRaw(self: *CompileStep, dest_filename: []const u8, options: InstallRawStep.CreateOptions) *InstallRawStep {
return self.builder.installRaw(self, dest_filename, options);
}
pub fn installHeader(a: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void { pub fn installHeader(a: *CompileStep, src_path: []const u8, dest_rel_path: []const u8) void {
const install_file = a.builder.addInstallHeaderFile(src_path, dest_rel_path); const install_file = a.builder.addInstallHeaderFile(src_path, dest_rel_path);
a.builder.getInstallStep().dependOn(&install_file.step); a.builder.getInstallStep().dependOn(&install_file.step);
@ -458,6 +454,7 @@ pub fn installConfigHeader(
options.install_dir, options.install_dir,
dest_rel_path, dest_rel_path,
); );
install_file.step.dependOn(&config_header.step);
cs.builder.getInstallStep().dependOn(&install_file.step); cs.builder.getInstallStep().dependOn(&install_file.step);
cs.installed_headers.append(&install_file.step) catch @panic("OOM"); cs.installed_headers.append(&install_file.step) catch @panic("OOM");
} }
@ -506,6 +503,18 @@ pub fn installLibraryHeaders(a: *CompileStep, l: *CompileStep) void {
a.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM"); a.installed_headers.appendSlice(l.installed_headers.items) catch @panic("OOM");
} }
pub fn addObjCopy(cs: *CompileStep, options: ObjCopyStep.Options) *ObjCopyStep {
var copy = options;
if (copy.basename == null) {
if (options.format) |f| {
copy.basename = cs.builder.fmt("{s}.{s}", .{ cs.name, @tagName(f) });
} else {
copy.basename = cs.name;
}
}
return cs.builder.addObjCopy(cs.getOutputSource(), copy);
}
/// Deprecated: use `std.Build.addRunArtifact` /// Deprecated: use `std.Build.addRunArtifact`
/// This function will run in the context of the package that created the executable, /// This function will run in the context of the package that created the executable,
/// which is undesirable when running an executable provided by a dependency package. /// which is undesirable when running an executable provided by a dependency package.
@ -955,7 +964,10 @@ pub fn addFrameworkPath(self: *CompileStep, dir_path: []const u8) void {
/// package's module table using `name`. /// package's module table using `name`.
pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void { pub fn addModule(cs: *CompileStep, name: []const u8, module: *Module) void {
cs.modules.put(cs.builder.dupe(name), module) catch @panic("OOM"); cs.modules.put(cs.builder.dupe(name), module) catch @panic("OOM");
cs.addRecursiveBuildDeps(module);
var done = std.AutoHashMap(*Module, void).init(cs.builder.allocator);
defer done.deinit();
cs.addRecursiveBuildDeps(module, &done) catch @panic("OOM");
} }
/// Adds a module to be used with `@import` without exposing it in the current /// Adds a module to be used with `@import` without exposing it in the current
@ -969,10 +981,12 @@ pub fn addOptions(cs: *CompileStep, module_name: []const u8, options: *OptionsSt
addModule(cs, module_name, options.createModule()); addModule(cs, module_name, options.createModule());
} }
fn addRecursiveBuildDeps(cs: *CompileStep, module: *Module) void { fn addRecursiveBuildDeps(cs: *CompileStep, module: *Module, done: *std.AutoHashMap(*Module, void)) !void {
if (done.contains(module)) return;
try done.put(module, {});
module.source_file.addStepDependencies(&cs.step); module.source_file.addStepDependencies(&cs.step);
for (module.dependencies.values()) |dep| { for (module.dependencies.values()) |dep| {
cs.addRecursiveBuildDeps(dep); try cs.addRecursiveBuildDeps(dep, done);
} }
} }
@ -1031,22 +1045,110 @@ fn linkLibraryOrObject(self: *CompileStep, other: *CompileStep) void {
fn appendModuleArgs( fn appendModuleArgs(
cs: *CompileStep, cs: *CompileStep,
zig_args: *ArrayList([]const u8), zig_args: *ArrayList([]const u8),
name: []const u8,
module: *Module,
) error{OutOfMemory}!void { ) error{OutOfMemory}!void {
try zig_args.append("--pkg-begin"); // First, traverse the whole dependency graph and give every module a unique name, ideally one
try zig_args.append(name); // named after what it's called somewhere in the graph. It will help here to have both a mapping
try zig_args.append(module.builder.pathFromRoot(module.source_file.getPath(module.builder))); // from module to name and a set of all the currently-used names.
var mod_names = std.AutoHashMap(*Module, []const u8).init(cs.builder.allocator);
var names = std.StringHashMap(void).init(cs.builder.allocator);
var to_name = std.ArrayList(struct {
name: []const u8,
mod: *Module,
}).init(cs.builder.allocator);
{ {
const keys = module.dependencies.keys(); var it = cs.modules.iterator();
for (module.dependencies.values(), 0..) |sub_module, i| { while (it.next()) |kv| {
const sub_name = keys[i]; // While we're traversing the root dependencies, let's make sure that no module names
try cs.appendModuleArgs(zig_args, sub_name, sub_module); // have colons in them, since the CLI forbids it. We handle this for transitive
// dependencies further down.
if (std.mem.indexOfScalar(u8, kv.key_ptr.*, ':') != null) {
@panic("Module names cannot contain colons");
}
try to_name.append(.{
.name = kv.key_ptr.*,
.mod = kv.value_ptr.*,
});
} }
} }
try zig_args.append("--pkg-end"); while (to_name.popOrNull()) |dep| {
if (mod_names.contains(dep.mod)) continue;
// We'll use this buffer to store the name we decide on
var buf = try cs.builder.allocator.alloc(u8, dep.name.len + 32);
// First, try just the exposed dependency name
std.mem.copy(u8, buf, dep.name);
var name = buf[0..dep.name.len];
var n: usize = 0;
while (names.contains(name)) {
// If that failed, append an incrementing number to the end
name = std.fmt.bufPrint(buf, "{s}{}", .{ dep.name, n }) catch unreachable;
n += 1;
}
try mod_names.put(dep.mod, name);
try names.put(name, {});
var it = dep.mod.dependencies.iterator();
while (it.next()) |kv| {
// Same colon-in-name check as above, but for transitive dependencies.
if (std.mem.indexOfScalar(u8, kv.key_ptr.*, ':') != null) {
@panic("Module names cannot contain colons");
}
try to_name.append(.{
.name = kv.key_ptr.*,
.mod = kv.value_ptr.*,
});
}
}
// Since the module names given to the CLI are based off of the exposed names, we already know
// that none of the CLI names have colons in them, so there's no need to check that explicitly.
// Every module in the graph is now named; output their definitions
{
var it = mod_names.iterator();
while (it.next()) |kv| {
const mod = kv.key_ptr.*;
const name = kv.value_ptr.*;
const deps_str = try constructDepString(cs.builder.allocator, mod_names, mod.dependencies);
const src = mod.builder.pathFromRoot(mod.source_file.getPath(mod.builder));
try zig_args.append("--mod");
try zig_args.append(try std.fmt.allocPrint(cs.builder.allocator, "{s}:{s}:{s}", .{ name, deps_str, src }));
}
}
// Lastly, output the root dependencies
const deps_str = try constructDepString(cs.builder.allocator, mod_names, cs.modules);
if (deps_str.len > 0) {
try zig_args.append("--deps");
try zig_args.append(deps_str);
}
}
fn constructDepString(
allocator: std.mem.Allocator,
mod_names: std.AutoHashMap(*Module, []const u8),
deps: std.StringArrayHashMap(*Module),
) ![]const u8 {
var deps_str = std.ArrayList(u8).init(allocator);
var it = deps.iterator();
while (it.next()) |kv| {
const expose = kv.key_ptr.*;
const name = mod_names.get(kv.value_ptr.*).?;
if (std.mem.eql(u8, expose, name)) {
try deps_str.writer().print("{s},", .{name});
} else {
try deps_str.writer().print("{s}={s},", .{ expose, name });
}
}
if (deps_str.items.len > 0) {
return deps_str.items[0 .. deps_str.items.len - 1]; // omit trailing comma
} else {
return "";
}
} }
fn make(step: *Step) !void { fn make(step: *Step) !void {
@ -1573,13 +1675,7 @@ fn make(step: *Step) !void {
try zig_args.append("--test-no-exec"); try zig_args.append("--test-no-exec");
} }
{ try self.appendModuleArgs(&zig_args);
const keys = self.modules.keys();
for (self.modules.values(), 0..) |module, i| {
const name = keys[i];
try self.appendModuleArgs(&zig_args, name, module);
}
}
for (self.include_dirs.items) |include_dir| { for (self.include_dirs.items) |include_dir| {
switch (include_dir) { switch (include_dir) {

View File

@ -26,7 +26,7 @@ builder: *std.Build,
exe: *CompileStep, exe: *CompileStep,
/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution /// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
expected_exit_code: ?u8 = 0, expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
/// Override this field to modify the environment /// Override this field to modify the environment
env_map: ?*EnvMap, env_map: ?*EnvMap,
@ -131,7 +131,7 @@ fn make(step: *Step) !void {
try RunStep.runCommand( try RunStep.runCommand(
argv_list.items, argv_list.items,
self.builder, self.builder,
self.expected_exit_code, self.expected_term,
self.stdout_action, self.stdout_action,
self.stderr_action, self.stderr_action,
.Inherit, .Inherit,

View File

@ -1,110 +0,0 @@
//! TODO: Rename this to ObjCopyStep now that it invokes the `zig objcopy`
//! subcommand rather than containing an implementation directly.
const std = @import("std");
const InstallRawStep = @This();
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const File = std.fs.File;
const InstallDir = std.Build.InstallDir;
const CompileStep = std.Build.CompileStep;
const Step = std.Build.Step;
const elf = std.elf;
const fs = std.fs;
const io = std.io;
const sort = std.sort;
pub const base_id = .install_raw;
pub const RawFormat = enum {
bin,
hex,
};
step: Step,
builder: *std.Build,
artifact: *CompileStep,
dest_dir: InstallDir,
dest_filename: []const u8,
options: CreateOptions,
output_file: std.Build.GeneratedFile,
pub const CreateOptions = struct {
format: ?RawFormat = null,
dest_dir: ?InstallDir = null,
only_section: ?[]const u8 = null,
pad_to: ?u64 = null,
};
pub fn create(
builder: *std.Build,
artifact: *CompileStep,
dest_filename: []const u8,
options: CreateOptions,
) *InstallRawStep {
const self = builder.allocator.create(InstallRawStep) catch @panic("OOM");
self.* = InstallRawStep{
.step = Step.init(.install_raw, builder.fmt("install raw binary {s}", .{artifact.step.name}), builder.allocator, make),
.builder = builder,
.artifact = artifact,
.dest_dir = if (options.dest_dir) |d| d else switch (artifact.kind) {
.obj => unreachable,
.@"test" => unreachable,
.exe, .test_exe => .bin,
.lib => unreachable,
},
.dest_filename = dest_filename,
.options = options,
.output_file = std.Build.GeneratedFile{ .step = &self.step },
};
self.step.dependOn(&artifact.step);
builder.pushInstalledFile(self.dest_dir, dest_filename);
return self;
}
pub fn getOutputSource(self: *const InstallRawStep) std.Build.FileSource {
return std.Build.FileSource{ .generated = &self.output_file };
}
fn make(step: *Step) !void {
const self = @fieldParentPtr(InstallRawStep, "step", step);
const b = self.builder;
if (self.artifact.target.getObjectFormat() != .elf) {
std.debug.print("InstallRawStep only works with ELF format.\n", .{});
return error.InvalidObjectFormat;
}
const full_src_path = self.artifact.getOutputSource().getPath(b);
const full_dest_path = b.getInstallPath(self.dest_dir, self.dest_filename);
self.output_file.path = full_dest_path;
try fs.cwd().makePath(b.getInstallPath(self.dest_dir, ""));
var argv_list = std.ArrayList([]const u8).init(b.allocator);
try argv_list.appendSlice(&.{ b.zig_exe, "objcopy" });
if (self.options.only_section) |only_section| {
try argv_list.appendSlice(&.{ "-j", only_section });
}
if (self.options.pad_to) |pad_to| {
try argv_list.appendSlice(&.{
"--pad-to",
b.fmt("{d}", .{pad_to}),
});
}
if (self.options.format) |format| switch (format) {
.bin => try argv_list.appendSlice(&.{ "-O", "binary" }),
.hex => try argv_list.appendSlice(&.{ "-O", "hex" }),
};
try argv_list.appendSlice(&.{ full_src_path, full_dest_path });
_ = try self.builder.execFromStep(argv_list.items, &self.step);
}
test {
std.testing.refAllDecls(InstallRawStep);
}

View File

@ -0,0 +1,138 @@
const std = @import("std");
const ObjCopyStep = @This();
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const File = std.fs.File;
const InstallDir = std.Build.InstallDir;
const CompileStep = std.Build.CompileStep;
const Step = std.Build.Step;
const elf = std.elf;
const fs = std.fs;
const io = std.io;
const sort = std.sort;
pub const base_id: Step.Id = .objcopy;
pub const RawFormat = enum {
bin,
hex,
};
step: Step,
builder: *std.Build,
file_source: std.Build.FileSource,
basename: []const u8,
output_file: std.Build.GeneratedFile,
format: ?RawFormat,
only_section: ?[]const u8,
pad_to: ?u64,
pub const Options = struct {
basename: ?[]const u8 = null,
format: ?RawFormat = null,
only_section: ?[]const u8 = null,
pad_to: ?u64 = null,
};
pub fn create(
builder: *std.Build,
file_source: std.Build.FileSource,
options: Options,
) *ObjCopyStep {
const self = builder.allocator.create(ObjCopyStep) catch @panic("OOM");
self.* = ObjCopyStep{
.step = Step.init(
base_id,
builder.fmt("objcopy {s}", .{file_source.getDisplayName()}),
builder.allocator,
make,
),
.builder = builder,
.file_source = file_source,
.basename = options.basename orelse file_source.getDisplayName(),
.output_file = std.Build.GeneratedFile{ .step = &self.step },
.format = options.format,
.only_section = options.only_section,
.pad_to = options.pad_to,
};
file_source.addStepDependencies(&self.step);
return self;
}
pub fn getOutputSource(self: *const ObjCopyStep) std.Build.FileSource {
return .{ .generated = &self.output_file };
}
fn make(step: *Step) !void {
const self = @fieldParentPtr(ObjCopyStep, "step", step);
const b = self.builder;
var man = b.cache.obtain();
defer man.deinit();
// Random bytes to make ObjCopyStep unique. Refresh this with new random
// bytes when ObjCopyStep implementation is modified incompatibly.
man.hash.add(@as(u32, 0xe18b7baf));
const full_src_path = self.file_source.getPath(b);
_ = try man.addFile(full_src_path, null);
man.hash.addOptionalBytes(self.only_section);
man.hash.addOptional(self.pad_to);
man.hash.addOptional(self.format);
if (man.hit() catch |err| failWithCacheError(man, err)) {
// Cache hit, skip subprocess execution.
const digest = man.final();
self.output_file.path = try b.cache_root.join(b.allocator, &.{
"o", &digest, self.basename,
});
return;
}
const digest = man.final();
const full_dest_path = try b.cache_root.join(b.allocator, &.{ "o", &digest, self.basename });
const cache_path = "o" ++ fs.path.sep_str ++ digest;
b.cache_root.handle.makePath(cache_path) catch |err| {
std.debug.print("unable to make path {s}: {s}\n", .{ cache_path, @errorName(err) });
return err;
};
var argv = std.ArrayList([]const u8).init(b.allocator);
try argv.appendSlice(&.{ b.zig_exe, "objcopy" });
if (self.only_section) |only_section| {
try argv.appendSlice(&.{ "-j", only_section });
}
if (self.pad_to) |pad_to| {
try argv.appendSlice(&.{ "--pad-to", b.fmt("{d}", .{pad_to}) });
}
if (self.format) |format| switch (format) {
.bin => try argv.appendSlice(&.{ "-O", "binary" }),
.hex => try argv.appendSlice(&.{ "-O", "hex" }),
};
try argv.appendSlice(&.{ full_src_path, full_dest_path });
_ = try self.builder.execFromStep(argv.items, &self.step);
self.output_file.path = full_dest_path;
try man.writeManifest();
}
/// TODO consolidate this with the same function in RunStep?
/// Also properly deal with concurrency (see open PR)
fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
const i = man.failed_file_index orelse failWithSimpleError(err);
const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
std.process.exit(1);
}
fn failWithSimpleError(err: anyerror) noreturn {
std.debug.print("{s}\n", .{@errorName(err)});
std.process.exit(1);
}

View File

@ -35,7 +35,7 @@ stderr_action: StdIoAction = .inherit,
stdin_behavior: std.ChildProcess.StdIo = .Inherit, stdin_behavior: std.ChildProcess.StdIo = .Inherit,
/// Set this to `null` to ignore the exit code for the purpose of determining a successful execution /// Set this to `null` to ignore the exit code for the purpose of determining a successful execution
expected_exit_code: ?u8 = 0, expected_term: ?std.ChildProcess.Term = .{ .Exited = 0 },
/// Print the command before running it /// Print the command before running it
print: bool, print: bool,
@ -290,7 +290,7 @@ fn make(step: *Step) !void {
try runCommand( try runCommand(
argv_list.items, argv_list.items,
self.builder, self.builder,
self.expected_exit_code, self.expected_term,
self.stdout_action, self.stdout_action,
self.stderr_action, self.stderr_action,
self.stdin_behavior, self.stdin_behavior,
@ -304,10 +304,55 @@ fn make(step: *Step) !void {
} }
} }
fn formatTerm(
term: ?std.ChildProcess.Term,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
if (term) |t| switch (t) {
.Exited => |code| try writer.print("exited with code {}", .{code}),
.Signal => |sig| try writer.print("terminated with signal {}", .{sig}),
.Stopped => |sig| try writer.print("stopped with signal {}", .{sig}),
.Unknown => |code| try writer.print("terminated for unknown reason with code {}", .{code}),
} else {
try writer.writeAll("exited with any code");
}
}
fn fmtTerm(term: ?std.ChildProcess.Term) std.fmt.Formatter(formatTerm) {
return .{ .data = term };
}
fn termMatches(expected: ?std.ChildProcess.Term, actual: std.ChildProcess.Term) bool {
return if (expected) |e| switch (e) {
.Exited => |expected_code| switch (actual) {
.Exited => |actual_code| expected_code == actual_code,
else => false,
},
.Signal => |expected_sig| switch (actual) {
.Signal => |actual_sig| expected_sig == actual_sig,
else => false,
},
.Stopped => |expected_sig| switch (actual) {
.Stopped => |actual_sig| expected_sig == actual_sig,
else => false,
},
.Unknown => |expected_code| switch (actual) {
.Unknown => |actual_code| expected_code == actual_code,
else => false,
},
} else switch (actual) {
.Exited => true,
else => false,
};
}
pub fn runCommand( pub fn runCommand(
argv: []const []const u8, argv: []const []const u8,
builder: *std.Build, builder: *std.Build,
expected_exit_code: ?u8, expected_term: ?std.ChildProcess.Term,
stdout_action: StdIoAction, stdout_action: StdIoAction,
stderr_action: StdIoAction, stderr_action: StdIoAction,
stdin_behavior: std.ChildProcess.StdIo, stdin_behavior: std.ChildProcess.StdIo,
@ -369,32 +414,14 @@ pub fn runCommand(
return err; return err;
}; };
switch (term) { if (!termMatches(expected_term, term)) {
.Exited => |code| blk: { if (builder.prominent_compile_errors) {
const expected_code = expected_exit_code orelse break :blk; std.debug.print("Run step {} (expected {})\n", .{ fmtTerm(term), fmtTerm(expected_term) });
} else {
if (code != expected_code) { std.debug.print("The following command {} (expected {}):\n", .{ fmtTerm(term), fmtTerm(expected_term) });
if (builder.prominent_compile_errors) {
std.debug.print("Run step exited with error code {} (expected {})\n", .{
code,
expected_code,
});
} else {
std.debug.print("The following command exited with error code {} (expected {}):\n", .{
code,
expected_code,
});
printCmd(cwd, argv);
}
return error.UnexpectedExitCode;
}
},
else => {
std.debug.print("The following command terminated unexpectedly:\n", .{});
printCmd(cwd, argv); printCmd(cwd, argv);
return error.UncleanExit; }
}, return error.UnexpectedExit;
} }
switch (stderr_action) { switch (stderr_action) {

View File

@ -21,7 +21,7 @@ pub const Id = enum {
check_file, check_file,
check_object, check_object,
config_header, config_header,
install_raw, objcopy,
options, options,
custom, custom,
@ -42,7 +42,7 @@ pub const Id = enum {
.check_file => Build.CheckFileStep, .check_file => Build.CheckFileStep,
.check_object => Build.CheckObjectStep, .check_object => Build.CheckObjectStep,
.config_header => Build.ConfigHeaderStep, .config_header => Build.ConfigHeaderStep,
.install_raw => Build.InstallRawStep, .objcopy => Build.ObjCopyStep,
.options => Build.OptionsStep, .options => Build.OptionsStep,
.custom => @compileError("no type available for custom step"), .custom => @compileError("no type available for custom step"),
}; };

View File

@ -1,55 +1,117 @@
const std = @import("../std.zig"); //! WriteFileStep is primarily used to create a directory in an appropriate
const Step = std.Build.Step; //! location inside the local cache which has a set of files that have either
const fs = std.fs; //! been generated during the build, or are copied from the source package.
const ArrayList = std.ArrayList; //!
//! However, this step has an additional capability of writing data to paths
const WriteFileStep = @This(); //! relative to the package root, effectively mutating the package's source
//! files. Be careful with the latter functionality; it should not be used
pub const base_id = .write_file; //! during the normal build process, but as a utility run by a developer with
//! intention to update source files, which will then be committed to version
//! control.
step: Step, step: Step,
builder: *std.Build, builder: *std.Build,
files: std.TailQueue(File), /// The elements here are pointers because we need stable pointers for the
/// GeneratedFile field.
files: std.ArrayListUnmanaged(*File),
output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
pub const base_id = .write_file;
pub const File = struct { pub const File = struct {
source: std.Build.GeneratedFile, generated_file: std.Build.GeneratedFile,
basename: []const u8, sub_path: []const u8,
contents: Contents,
};
pub const OutputSourceFile = struct {
contents: Contents,
sub_path: []const u8,
};
pub const Contents = union(enum) {
bytes: []const u8, bytes: []const u8,
copy: std.Build.FileSource,
}; };
pub fn init(builder: *std.Build) WriteFileStep { pub fn init(builder: *std.Build) WriteFileStep {
return WriteFileStep{ return .{
.builder = builder, .builder = builder,
.step = Step.init(.write_file, "writefile", builder.allocator, make), .step = Step.init(.write_file, "writefile", builder.allocator, make),
.files = .{}, .files = .{},
.output_source_files = .{},
}; };
} }
pub fn add(self: *WriteFileStep, basename: []const u8, bytes: []const u8) void { pub fn add(wf: *WriteFileStep, sub_path: []const u8, bytes: []const u8) void {
const node = self.builder.allocator.create(std.TailQueue(File).Node) catch @panic("unhandled error"); const gpa = wf.builder.allocator;
node.* = .{ const file = gpa.create(File) catch @panic("OOM");
.data = .{ file.* = .{
.source = std.Build.GeneratedFile{ .step = &self.step }, .generated_file = .{ .step = &wf.step },
.basename = self.builder.dupePath(basename), .sub_path = wf.builder.dupePath(sub_path),
.bytes = self.builder.dupe(bytes), .contents = .{ .bytes = wf.builder.dupe(bytes) },
},
}; };
wf.files.append(gpa, file) catch @panic("OOM");
self.files.append(node);
} }
/// Gets a file source for the given basename. If the file does not exist, returns `null`. /// Place the file into the generated directory within the local cache,
pub fn getFileSource(step: *WriteFileStep, basename: []const u8) ?std.Build.FileSource { /// along with all the rest of the files added to this step. The parameter
var it = step.files.first; /// here is the destination path relative to the local cache directory
while (it) |node| : (it = node.next) { /// associated with this WriteFileStep. It may be a basename, or it may
if (std.mem.eql(u8, node.data.basename, basename)) /// include sub-directories, in which case this step will ensure the
return std.Build.FileSource{ .generated = &node.data.source }; /// required sub-path exists.
/// This is the option expected to be used most commonly with `addCopyFile`.
pub fn addCopyFile(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
const gpa = wf.builder.allocator;
const file = gpa.create(File) catch @panic("OOM");
file.* = .{
.generated_file = .{ .step = &wf.step },
.sub_path = wf.builder.dupePath(sub_path),
.contents = .{ .copy = source },
};
wf.files.append(gpa, file) catch @panic("OOM");
}
/// A path relative to the package root.
/// Be careful with this because it updates source files. This should not be
/// used as part of the normal build process, but as a utility occasionally
/// run by a developer with intent to modify source files and then commit
/// those changes to version control.
/// A file added this way is not available with `getFileSource`.
pub fn addCopyFileToSource(wf: *WriteFileStep, source: std.Build.FileSource, sub_path: []const u8) void {
wf.output_source_files.append(wf.builder.allocator, .{
.contents = .{ .copy = source },
.sub_path = sub_path,
}) catch @panic("OOM");
}
/// Gets a file source for the given sub_path. If the file does not exist, returns `null`.
pub fn getFileSource(wf: *WriteFileStep, sub_path: []const u8) ?std.Build.FileSource {
for (wf.files.items) |file| {
if (std.mem.eql(u8, file.sub_path, sub_path)) {
return .{ .generated = &file.generated_file };
}
} }
return null; return null;
} }
fn make(step: *Step) !void { fn make(step: *Step) !void {
const self = @fieldParentPtr(WriteFileStep, "step", step); const wf = @fieldParentPtr(WriteFileStep, "step", step);
// Writing to source files is kind of an extra capability of this
// WriteFileStep - arguably it should be a different step. But anyway here
// it is, it happens unconditionally and does not interact with the other
// files here.
for (wf.output_source_files.items) |output_source_file| {
const basename = fs.path.basename(output_source_file.sub_path);
if (fs.path.dirname(output_source_file.sub_path)) |dirname| {
var dir = try wf.builder.build_root.handle.makeOpenPath(dirname, .{});
defer dir.close();
try writeFile(wf, dir, output_source_file.contents, basename);
} else {
try writeFile(wf, wf.builder.build_root.handle, output_source_file.contents, basename);
}
}
// The cache is used here not really as a way to speed things up - because writing // The cache is used here not really as a way to speed things up - because writing
// the data to a file would probably be very fast - but as a way to find a canonical // the data to a file would probably be very fast - but as a way to find a canonical
@ -58,56 +120,96 @@ fn make(step: *Step) !void {
// If, for example, a hard-coded path was used as the location to put WriteFileStep // If, for example, a hard-coded path was used as the location to put WriteFileStep
// files, then two WriteFileSteps executing in parallel might clobber each other. // files, then two WriteFileSteps executing in parallel might clobber each other.
// TODO port the cache system from the compiler to zig std lib. Until then var man = wf.builder.cache.obtain();
// we directly construct the path, and no "cache hit" detection happens; defer man.deinit();
// the files are always written.
// Note there is similar code over in ConfigHeaderStep.
const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
// Random bytes to make WriteFileStep unique. Refresh this with // Random bytes to make WriteFileStep unique. Refresh this with
// new random bytes when WriteFileStep implementation is modified // new random bytes when WriteFileStep implementation is modified
// in a non-backwards-compatible way. // in a non-backwards-compatible way.
var hash = Hasher.init("eagVR1dYXoE7ARDP"); man.hash.add(@as(u32, 0xd767ee59));
{ for (wf.files.items) |file| {
var it = self.files.first; man.hash.addBytes(file.sub_path);
while (it) |node| : (it = node.next) { switch (file.contents) {
hash.update(node.data.basename); .bytes => |bytes| {
hash.update(node.data.bytes); man.hash.addBytes(bytes);
hash.update("|"); },
.copy => |file_source| {
_ = try man.addFile(file_source.getPath(wf.builder), null);
},
} }
} }
var digest: [16]u8 = undefined;
hash.final(&digest);
var hash_basename: [digest.len * 2]u8 = undefined;
_ = std.fmt.bufPrint(
&hash_basename,
"{s}",
.{std.fmt.fmtSliceHexLower(&digest)},
) catch unreachable;
const output_dir = try self.builder.cache_root.join(self.builder.allocator, &.{ if (man.hit() catch |err| failWithCacheError(man, err)) {
"o", &hash_basename, // Cache hit, skip writing file data.
}); const digest = man.final();
var dir = fs.cwd().makeOpenPath(output_dir, .{}) catch |err| { for (wf.files.items) |file| {
std.debug.print("unable to make path {s}: {s}\n", .{ output_dir, @errorName(err) }); file.generated_file.path = try wf.builder.cache_root.join(
return err; wf.builder.allocator,
}; &.{ "o", &digest, file.sub_path },
defer dir.close();
{
var it = self.files.first;
while (it) |node| : (it = node.next) {
dir.writeFile(node.data.basename, node.data.bytes) catch |err| {
std.debug.print("unable to write {s} into {s}: {s}\n", .{
node.data.basename,
output_dir,
@errorName(err),
});
return err;
};
node.data.source.path = try fs.path.join(
self.builder.allocator,
&[_][]const u8{ output_dir, node.data.basename },
); );
} }
return;
}
const digest = man.final();
const cache_path = "o" ++ fs.path.sep_str ++ digest;
var cache_dir = wf.builder.cache_root.handle.makeOpenPath(cache_path, .{}) catch |err| {
std.debug.print("unable to make path {s}: {s}\n", .{ cache_path, @errorName(err) });
return err;
};
defer cache_dir.close();
for (wf.files.items) |file| {
const basename = fs.path.basename(file.sub_path);
if (fs.path.dirname(file.sub_path)) |dirname| {
var dir = try wf.builder.cache_root.handle.makeOpenPath(dirname, .{});
defer dir.close();
try writeFile(wf, dir, file.contents, basename);
} else {
try writeFile(wf, cache_dir, file.contents, basename);
}
file.generated_file.path = try wf.builder.cache_root.join(
wf.builder.allocator,
&.{ cache_path, file.sub_path },
);
}
try man.writeManifest();
}
fn writeFile(wf: *WriteFileStep, dir: fs.Dir, contents: Contents, basename: []const u8) !void {
// TODO after landing concurrency PR, improve error reporting here
switch (contents) {
.bytes => |bytes| return dir.writeFile(basename, bytes),
.copy => |file_source| {
const source_path = file_source.getPath(wf.builder);
const prev_status = try fs.Dir.updateFile(fs.cwd(), source_path, dir, basename, .{});
_ = prev_status; // TODO logging (affected by open PR regarding concurrency)
},
} }
} }
/// TODO consolidate this with the same function in RunStep?
/// Also properly deal with concurrency (see open PR)
fn failWithCacheError(man: std.Build.Cache.Manifest, err: anyerror) noreturn {
const i = man.failed_file_index orelse failWithSimpleError(err);
const pp = man.files.items[i].prefixed_path orelse failWithSimpleError(err);
const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
std.debug.print("{s}: {s}/{s}\n", .{ @errorName(err), prefix, pp.sub_path });
std.process.exit(1);
}
fn failWithSimpleError(err: anyerror) noreturn {
std.debug.print("{s}\n", .{@errorName(err)});
std.process.exit(1);
}
const std = @import("../std.zig");
const Step = std.Build.Step;
const fs = std.fs;
const ArrayList = std.ArrayList;
const WriteFileStep = @This();

136
lib/std/RingBuffer.zig Normal file
View File

@ -0,0 +1,136 @@
//! This ring buffer stores read and write indices while being able to utilise
//! the full backing slice by incrementing the indices modulo twice the slice's
//! length and reducing indices modulo the slice's length on slice access. This
//! means that whether the ring buffer if full or empty can be distinguished by
//! looking at the difference between the read and write indices without adding
//! an extra boolean flag or having to reserve a slot in the buffer.
//!
//! This ring buffer has not been implemented with thread safety in mind, and
//! therefore should not be assumed to be suitable for use cases involving
//! separate reader and writer threads.
const Allocator = @import("std").mem.Allocator;
const assert = @import("std").debug.assert;
const RingBuffer = @This();
data: []u8,
read_index: usize,
write_index: usize,
pub const Error = error{Full};
/// Allocate a new `RingBuffer`; `deinit()` should be called to free the buffer.
pub fn init(allocator: Allocator, capacity: usize) Allocator.Error!RingBuffer {
const bytes = try allocator.alloc(u8, capacity);
return RingBuffer{
.data = bytes,
.write_index = 0,
.read_index = 0,
};
}
/// Free the data backing a `RingBuffer`; must be passed the same `Allocator` as
/// `init()`.
pub fn deinit(self: *RingBuffer, allocator: Allocator) void {
allocator.free(self.data);
self.* = undefined;
}
/// Returns `index` modulo the length of the backing slice.
pub fn mask(self: RingBuffer, index: usize) usize {
return index % self.data.len;
}
/// Returns `index` modulo twice the length of the backing slice.
pub fn mask2(self: RingBuffer, index: usize) usize {
return index % (2 * self.data.len);
}
/// Write `byte` into the ring buffer. Returns `error.Full` if the ring
/// buffer is full.
pub fn write(self: *RingBuffer, byte: u8) Error!void {
if (self.isFull()) return error.Full;
self.writeAssumeCapacity(byte);
}
/// Write `byte` into the ring buffer. If the ring buffer is full, the
/// oldest byte is overwritten.
pub fn writeAssumeCapacity(self: *RingBuffer, byte: u8) void {
self.data[self.mask(self.write_index)] = byte;
self.write_index = self.mask2(self.write_index + 1);
}
/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
/// buffer does not have enough space, without writing any data.
pub fn writeSlice(self: *RingBuffer, bytes: []const u8) Error!void {
if (self.len() + bytes.len > self.data.len) return error.Full;
self.writeSliceAssumeCapacity(bytes);
}
/// Write `bytes` into the ring buffer. If there is not enough space, older
/// bytes will be overwritten.
pub fn writeSliceAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
for (bytes) |b| self.writeAssumeCapacity(b);
}
/// Consume a byte from the ring buffer and return it. Returns `null` if the
/// ring buffer is empty.
pub fn read(self: *RingBuffer) ?u8 {
if (self.isEmpty()) return null;
return self.readAssumeLength();
}
/// Consume a byte from the ring buffer and return it; asserts that the buffer
/// is not empty.
pub fn readAssumeLength(self: *RingBuffer) u8 {
assert(!self.isEmpty());
const byte = self.data[self.mask(self.read_index)];
self.read_index = self.mask2(self.read_index + 1);
return byte;
}
/// Returns `true` if the ring buffer is empty and `false` otherwise.
pub fn isEmpty(self: RingBuffer) bool {
return self.write_index == self.read_index;
}
/// Returns `true` if the ring buffer is full and `false` otherwise.
pub fn isFull(self: RingBuffer) bool {
return self.mask2(self.write_index + self.data.len) == self.read_index;
}
/// Returns the length
pub fn len(self: RingBuffer) usize {
const wrap_offset = 2 * self.data.len * @boolToInt(self.write_index < self.read_index);
const adjusted_write_index = self.write_index + wrap_offset;
return adjusted_write_index - self.read_index;
}
/// A `Slice` represents a region of a ring buffer. The region is split into two
/// sections as the ring buffer data will not be contiguous if the desired
/// region wraps to the start of the backing slice.
pub const Slice = struct {
first: []u8,
second: []u8,
};
/// Returns a `Slice` for the region of the ring buffer starting at
/// `self.mask(start_unmasked)` with the specified length.
pub fn sliceAt(self: RingBuffer, start_unmasked: usize, length: usize) Slice {
assert(length <= self.data.len);
const slice1_start = self.mask(start_unmasked);
const slice1_end = @min(self.data.len, slice1_start + length);
const slice1 = self.data[slice1_start..slice1_end];
const slice2 = self.data[0 .. length - slice1.len];
return Slice{
.first = slice1,
.second = slice2,
};
}
/// Returns a `Slice` for the last `length` bytes written to the ring buffer.
/// Does not check that any bytes have been written into the region.
pub fn sliceLast(self: RingBuffer, length: usize) Slice {
return self.sliceAt(self.write_index + self.data.len - length, length);
}

View File

@ -197,6 +197,32 @@ pub const ChildProcess = struct {
stderr: []u8, stderr: []u8,
}; };
/// Collect the output from the process's stdout and stderr. Will return once all output
/// has been collected. This does not mean that the process has ended. `wait` should still
/// be called to wait for and clean up the process.
///
/// The process must be started with stdout_behavior and stderr_behavior == .Pipe
pub fn collectOutput(
child: ChildProcess,
stdout: *std.ArrayList(u8),
stderr: *std.ArrayList(u8),
max_output_bytes: usize,
) !void {
debug.assert(child.stdout_behavior == .Pipe);
debug.assert(child.stderr_behavior == .Pipe);
if (builtin.os.tag == .haiku) {
const stdout_in = child.stdout.?.reader();
const stderr_in = child.stderr.?.reader();
try stdout_in.readAllArrayList(stdout, max_output_bytes);
try stderr_in.readAllArrayList(stderr, max_output_bytes);
} else if (builtin.os.tag == .windows) {
try collectOutputWindows(child, stdout, stderr, max_output_bytes);
} else {
try collectOutputPosix(child, stdout, stderr, max_output_bytes);
}
}
fn collectOutputPosix( fn collectOutputPosix(
child: ChildProcess, child: ChildProcess,
stdout: *std.ArrayList(u8), stdout: *std.ArrayList(u8),
@ -297,8 +323,12 @@ pub const ChildProcess = struct {
} }
} }
fn collectOutputWindows(child: ChildProcess, outs: [2]*std.ArrayList(u8), max_output_bytes: usize) !void { fn collectOutputWindows(child: ChildProcess, stdout: *std.ArrayList(u8), stderr: *std.ArrayList(u8), max_output_bytes: usize) !void {
const bump_amt = 512; const bump_amt = 512;
const outs = [_]*std.ArrayList(u8){
stdout,
stderr,
};
const handles = [_]windows.HANDLE{ const handles = [_]windows.HANDLE{
child.stdout.?.handle, child.stdout.?.handle,
child.stderr.?.handle, child.stderr.?.handle,
@ -391,24 +421,6 @@ pub const ChildProcess = struct {
child.env_map = args.env_map; child.env_map = args.env_map;
child.expand_arg0 = args.expand_arg0; child.expand_arg0 = args.expand_arg0;
try child.spawn();
if (builtin.os.tag == .haiku) {
const stdout_in = child.stdout.?.reader();
const stderr_in = child.stderr.?.reader();
const stdout = try stdout_in.readAllAlloc(args.allocator, args.max_output_bytes);
errdefer args.allocator.free(stdout);
const stderr = try stderr_in.readAllAlloc(args.allocator, args.max_output_bytes);
errdefer args.allocator.free(stderr);
return ExecResult{
.term = try child.wait(),
.stdout = stdout,
.stderr = stderr,
};
}
var stdout = std.ArrayList(u8).init(args.allocator); var stdout = std.ArrayList(u8).init(args.allocator);
var stderr = std.ArrayList(u8).init(args.allocator); var stderr = std.ArrayList(u8).init(args.allocator);
errdefer { errdefer {
@ -416,11 +428,8 @@ pub const ChildProcess = struct {
stderr.deinit(); stderr.deinit();
} }
if (builtin.os.tag == .windows) { try child.spawn();
try collectOutputWindows(child, [_]*std.ArrayList(u8){ &stdout, &stderr }, args.max_output_bytes); try child.collectOutput(&stdout, &stderr, args.max_output_bytes);
} else {
try collectOutputPosix(child, &stdout, &stderr, args.max_output_bytes);
}
return ExecResult{ return ExecResult{
.term = try child.wait(), .term = try child.wait(),

View File

@ -6,6 +6,7 @@ pub const lzma = @import("compress/lzma.zig");
pub const lzma2 = @import("compress/lzma2.zig"); pub const lzma2 = @import("compress/lzma2.zig");
pub const xz = @import("compress/xz.zig"); pub const xz = @import("compress/xz.zig");
pub const zlib = @import("compress/zlib.zig"); pub const zlib = @import("compress/zlib.zig");
pub const zstd = @import("compress/zstandard.zig");
pub fn HashedReader( pub fn HashedReader(
comptime ReaderType: anytype, comptime ReaderType: anytype,
@ -44,4 +45,5 @@ test {
_ = lzma2; _ = lzma2;
_ = xz; _ = xz;
_ = zlib; _ = zlib;
_ = zstd;
} }

3027
lib/std/compress/testdata/rfc8478.txt vendored Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,286 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const RingBuffer = std.RingBuffer;
const types = @import("zstandard/types.zig");
pub const frame = types.frame;
pub const compressed_block = types.compressed_block;
pub const decompress = @import("zstandard/decompress.zig");
pub const DecompressStreamOptions = struct {
verify_checksum: bool = true,
window_size_max: usize = 1 << 23, // 8MiB default maximum window size,
};
pub fn DecompressStream(
comptime ReaderType: type,
comptime options: DecompressStreamOptions,
) type {
return struct {
const Self = @This();
allocator: Allocator,
source: std.io.CountingReader(ReaderType),
state: enum { NewFrame, InFrame, LastBlock },
decode_state: decompress.block.DecodeState,
frame_context: decompress.FrameContext,
buffer: RingBuffer,
literal_fse_buffer: []types.compressed_block.Table.Fse,
match_fse_buffer: []types.compressed_block.Table.Fse,
offset_fse_buffer: []types.compressed_block.Table.Fse,
literals_buffer: []u8,
sequence_buffer: []u8,
checksum: if (options.verify_checksum) ?u32 else void,
current_frame_decompressed_size: usize,
pub const Error = ReaderType.Error || error{
ChecksumFailure,
DictionaryIdFlagUnsupported,
MalformedBlock,
MalformedFrame,
OutOfMemory,
};
pub const Reader = std.io.Reader(*Self, Error, read);
pub fn init(allocator: Allocator, source: ReaderType) Self {
return Self{
.allocator = allocator,
.source = std.io.countingReader(source),
.state = .NewFrame,
.decode_state = undefined,
.frame_context = undefined,
.buffer = undefined,
.literal_fse_buffer = undefined,
.match_fse_buffer = undefined,
.offset_fse_buffer = undefined,
.literals_buffer = undefined,
.sequence_buffer = undefined,
.checksum = undefined,
.current_frame_decompressed_size = undefined,
};
}
fn frameInit(self: *Self) !void {
const source_reader = self.source.reader();
switch (try decompress.decodeFrameHeader(source_reader)) {
.skippable => |header| {
try source_reader.skipBytes(header.frame_size, .{});
self.state = .NewFrame;
},
.zstandard => |header| {
const frame_context = context: {
break :context try decompress.FrameContext.init(
header,
options.window_size_max,
options.verify_checksum,
);
};
const literal_fse_buffer = try self.allocator.alloc(
types.compressed_block.Table.Fse,
types.compressed_block.table_size_max.literal,
);
errdefer self.allocator.free(literal_fse_buffer);
const match_fse_buffer = try self.allocator.alloc(
types.compressed_block.Table.Fse,
types.compressed_block.table_size_max.match,
);
errdefer self.allocator.free(match_fse_buffer);
const offset_fse_buffer = try self.allocator.alloc(
types.compressed_block.Table.Fse,
types.compressed_block.table_size_max.offset,
);
errdefer self.allocator.free(offset_fse_buffer);
const decode_state = decompress.block.DecodeState.init(
literal_fse_buffer,
match_fse_buffer,
offset_fse_buffer,
);
const buffer = try RingBuffer.init(self.allocator, frame_context.window_size);
const literals_data = try self.allocator.alloc(u8, options.window_size_max);
errdefer self.allocator.free(literals_data);
const sequence_data = try self.allocator.alloc(u8, options.window_size_max);
errdefer self.allocator.free(sequence_data);
self.literal_fse_buffer = literal_fse_buffer;
self.match_fse_buffer = match_fse_buffer;
self.offset_fse_buffer = offset_fse_buffer;
self.literals_buffer = literals_data;
self.sequence_buffer = sequence_data;
self.buffer = buffer;
self.decode_state = decode_state;
self.frame_context = frame_context;
self.checksum = if (options.verify_checksum) null else {};
self.current_frame_decompressed_size = 0;
self.state = .InFrame;
},
}
}
pub fn deinit(self: *Self) void {
if (self.state == .NewFrame) return;
self.allocator.free(self.decode_state.literal_fse_buffer);
self.allocator.free(self.decode_state.match_fse_buffer);
self.allocator.free(self.decode_state.offset_fse_buffer);
self.allocator.free(self.literals_buffer);
self.allocator.free(self.sequence_buffer);
self.buffer.deinit(self.allocator);
}
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
pub fn read(self: *Self, buffer: []u8) Error!usize {
if (buffer.len == 0) return 0;
var size: usize = 0;
while (size == 0) {
while (self.state == .NewFrame) {
const initial_count = self.source.bytes_read;
self.frameInit() catch |err| switch (err) {
error.DictionaryIdFlagUnsupported => return error.DictionaryIdFlagUnsupported,
error.EndOfStream => return if (self.source.bytes_read == initial_count)
0
else
error.MalformedFrame,
error.OutOfMemory => return error.OutOfMemory,
else => return error.MalformedFrame,
};
}
size = try self.readInner(buffer);
}
return size;
}
fn readInner(self: *Self, buffer: []u8) Error!usize {
std.debug.assert(self.state != .NewFrame);
const source_reader = self.source.reader();
while (self.buffer.isEmpty() and self.state != .LastBlock) {
const header_bytes = source_reader.readBytesNoEof(3) catch
return error.MalformedFrame;
const block_header = decompress.block.decodeBlockHeader(&header_bytes);
decompress.block.decodeBlockReader(
&self.buffer,
source_reader,
block_header,
&self.decode_state,
self.frame_context.block_size_max,
self.literals_buffer,
self.sequence_buffer,
) catch
return error.MalformedBlock;
if (self.frame_context.content_size) |size| {
if (self.current_frame_decompressed_size > size) return error.MalformedFrame;
}
const size = self.buffer.len();
self.current_frame_decompressed_size += size;
if (self.frame_context.hasher_opt) |*hasher| {
if (size > 0) {
const written_slice = self.buffer.sliceLast(size);
hasher.update(written_slice.first);
hasher.update(written_slice.second);
}
}
if (block_header.last_block) {
self.state = .LastBlock;
if (self.frame_context.has_checksum) {
const checksum = source_reader.readIntLittle(u32) catch
return error.MalformedFrame;
if (comptime options.verify_checksum) {
if (self.frame_context.hasher_opt) |*hasher| {
if (checksum != decompress.computeChecksum(hasher))
return error.ChecksumFailure;
}
}
}
if (self.frame_context.content_size) |content_size| {
if (content_size != self.current_frame_decompressed_size) {
return error.MalformedFrame;
}
}
}
}
const size = @min(self.buffer.len(), buffer.len);
for (0..size) |i| {
buffer[i] = self.buffer.read().?;
}
if (self.state == .LastBlock and self.buffer.len() == 0) {
self.state = .NewFrame;
self.allocator.free(self.literal_fse_buffer);
self.allocator.free(self.match_fse_buffer);
self.allocator.free(self.offset_fse_buffer);
self.allocator.free(self.literals_buffer);
self.allocator.free(self.sequence_buffer);
self.buffer.deinit(self.allocator);
}
return size;
}
};
}
pub fn decompressStreamOptions(
allocator: Allocator,
reader: anytype,
comptime options: DecompressStreamOptions,
) DecompressStream(@TypeOf(reader, options)) {
return DecompressStream(@TypeOf(reader), options).init(allocator, reader);
}
pub fn decompressStream(
allocator: Allocator,
reader: anytype,
) DecompressStream(@TypeOf(reader), .{}) {
return DecompressStream(@TypeOf(reader), .{}).init(allocator, reader);
}
fn testDecompress(data: []const u8) ![]u8 {
var in_stream = std.io.fixedBufferStream(data);
var zstd_stream = decompressStream(std.testing.allocator, in_stream.reader());
defer zstd_stream.deinit();
const result = zstd_stream.reader().readAllAlloc(std.testing.allocator, std.math.maxInt(usize));
return result;
}
fn testReader(data: []const u8, comptime expected: []const u8) !void {
const buf = try testDecompress(data);
defer std.testing.allocator.free(buf);
try std.testing.expectEqualSlices(u8, expected, buf);
}
test "zstandard decompression" {
const uncompressed = @embedFile("testdata/rfc8478.txt");
const compressed3 = @embedFile("testdata/rfc8478.txt.zst.3");
const compressed19 = @embedFile("testdata/rfc8478.txt.zst.19");
var buffer = try std.testing.allocator.alloc(u8, uncompressed.len);
defer std.testing.allocator.free(buffer);
const res3 = try decompress.decode(buffer, compressed3, true);
try std.testing.expectEqual(uncompressed.len, res3);
try std.testing.expectEqualSlices(u8, uncompressed, buffer);
const res19 = try decompress.decode(buffer, compressed19, true);
try std.testing.expectEqual(uncompressed.len, res19);
try std.testing.expectEqualSlices(u8, uncompressed, buffer);
try testReader(compressed3, uncompressed);
try testReader(compressed19, uncompressed);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,153 @@
const std = @import("std");
const assert = std.debug.assert;
const types = @import("../types.zig");
const Table = types.compressed_block.Table;
pub fn decodeFseTable(
bit_reader: anytype,
expected_symbol_count: usize,
max_accuracy_log: u4,
entries: []Table.Fse,
) !usize {
const accuracy_log_biased = try bit_reader.readBitsNoEof(u4, 4);
if (accuracy_log_biased > max_accuracy_log -| 5) return error.MalformedAccuracyLog;
const accuracy_log = accuracy_log_biased + 5;
var values: [256]u16 = undefined;
var value_count: usize = 0;
const total_probability = @as(u16, 1) << accuracy_log;
var accumulated_probability: u16 = 0;
while (accumulated_probability < total_probability) {
// WARNING: The RFC in poorly worded, and would suggest std.math.log2_int_ceil is correct here,
// but power of two (remaining probabilities + 1) need max bits set to 1 more.
const max_bits = std.math.log2_int(u16, total_probability - accumulated_probability + 1) + 1;
const small = try bit_reader.readBitsNoEof(u16, max_bits - 1);
const cutoff = (@as(u16, 1) << max_bits) - 1 - (total_probability - accumulated_probability + 1);
const value = if (small < cutoff)
small
else value: {
const value_read = small + (try bit_reader.readBitsNoEof(u16, 1) << (max_bits - 1));
break :value if (value_read < @as(u16, 1) << (max_bits - 1))
value_read
else
value_read - cutoff;
};
accumulated_probability += if (value != 0) value - 1 else 1;
values[value_count] = value;
value_count += 1;
if (value == 1) {
while (true) {
const repeat_flag = try bit_reader.readBitsNoEof(u2, 2);
if (repeat_flag + value_count > 256) return error.MalformedFseTable;
for (0..repeat_flag) |_| {
values[value_count] = 1;
value_count += 1;
}
if (repeat_flag < 3) break;
}
}
if (value_count == 256) break;
}
bit_reader.alignToByte();
if (value_count < 2) return error.MalformedFseTable;
if (accumulated_probability != total_probability) return error.MalformedFseTable;
if (value_count > expected_symbol_count) return error.MalformedFseTable;
const table_size = total_probability;
try buildFseTable(values[0..value_count], entries[0..table_size]);
return table_size;
}
fn buildFseTable(values: []const u16, entries: []Table.Fse) !void {
const total_probability = @intCast(u16, entries.len);
const accuracy_log = std.math.log2_int(u16, total_probability);
assert(total_probability <= 1 << 9);
var less_than_one_count: usize = 0;
for (values, 0..) |value, i| {
if (value == 0) {
entries[entries.len - 1 - less_than_one_count] = Table.Fse{
.symbol = @intCast(u8, i),
.baseline = 0,
.bits = accuracy_log,
};
less_than_one_count += 1;
}
}
var position: usize = 0;
var temp_states: [1 << 9]u16 = undefined;
for (values, 0..) |value, symbol| {
if (value == 0 or value == 1) continue;
const probability = value - 1;
const state_share_dividend = std.math.ceilPowerOfTwo(u16, probability) catch
return error.MalformedFseTable;
const share_size = @divExact(total_probability, state_share_dividend);
const double_state_count = state_share_dividend - probability;
const single_state_count = probability - double_state_count;
const share_size_log = std.math.log2_int(u16, share_size);
for (0..probability) |i| {
temp_states[i] = @intCast(u16, position);
position += (entries.len >> 1) + (entries.len >> 3) + 3;
position &= entries.len - 1;
while (position >= entries.len - less_than_one_count) {
position += (entries.len >> 1) + (entries.len >> 3) + 3;
position &= entries.len - 1;
}
}
std.sort.sort(u16, temp_states[0..probability], {}, std.sort.asc(u16));
for (0..probability) |i| {
entries[temp_states[i]] = if (i < double_state_count) Table.Fse{
.symbol = @intCast(u8, symbol),
.bits = share_size_log + 1,
.baseline = single_state_count * share_size + @intCast(u16, i) * 2 * share_size,
} else Table.Fse{
.symbol = @intCast(u8, symbol),
.bits = share_size_log,
.baseline = (@intCast(u16, i) - double_state_count) * share_size,
};
}
}
}
test buildFseTable {
const literals_length_default_values = [36]u16{
5, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2,
3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 2, 2, 2, 2, 2,
0, 0, 0, 0,
};
const match_lengths_default_values = [53]u16{
2, 5, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0,
0, 0, 0, 0, 0,
};
const offset_codes_default_values = [29]u16{
2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0,
};
var entries: [64]Table.Fse = undefined;
try buildFseTable(&literals_length_default_values, &entries);
try std.testing.expectEqualSlices(Table.Fse, types.compressed_block.predefined_literal_fse_table.fse, &entries);
try buildFseTable(&match_lengths_default_values, &entries);
try std.testing.expectEqualSlices(Table.Fse, types.compressed_block.predefined_match_fse_table.fse, &entries);
try buildFseTable(&offset_codes_default_values, entries[0..32]);
try std.testing.expectEqualSlices(Table.Fse, types.compressed_block.predefined_offset_fse_table.fse, entries[0..32]);
}

View File

@ -0,0 +1,234 @@
const std = @import("std");
const types = @import("../types.zig");
const LiteralsSection = types.compressed_block.LiteralsSection;
const Table = types.compressed_block.Table;
const readers = @import("../readers.zig");
const decodeFseTable = @import("fse.zig").decodeFseTable;
pub const Error = error{
MalformedHuffmanTree,
MalformedFseTable,
MalformedAccuracyLog,
EndOfStream,
};
fn decodeFseHuffmanTree(
source: anytype,
compressed_size: usize,
buffer: []u8,
weights: *[256]u4,
) !usize {
var stream = std.io.limitedReader(source, compressed_size);
var bit_reader = readers.bitReader(stream.reader());
var entries: [1 << 6]Table.Fse = undefined;
const table_size = decodeFseTable(&bit_reader, 256, 6, &entries) catch |err| switch (err) {
error.MalformedAccuracyLog, error.MalformedFseTable => |e| return e,
error.EndOfStream => return error.MalformedFseTable,
else => |e| return e,
};
const accuracy_log = std.math.log2_int_ceil(usize, table_size);
const amount = try stream.reader().readAll(buffer);
var huff_bits: readers.ReverseBitReader = undefined;
huff_bits.init(buffer[0..amount]) catch return error.MalformedHuffmanTree;
return assignWeights(&huff_bits, accuracy_log, &entries, weights);
}
fn decodeFseHuffmanTreeSlice(src: []const u8, compressed_size: usize, weights: *[256]u4) !usize {
if (src.len < compressed_size) return error.MalformedHuffmanTree;
var stream = std.io.fixedBufferStream(src[0..compressed_size]);
var counting_reader = std.io.countingReader(stream.reader());
var bit_reader = readers.bitReader(counting_reader.reader());
var entries: [1 << 6]Table.Fse = undefined;
const table_size = decodeFseTable(&bit_reader, 256, 6, &entries) catch |err| switch (err) {
error.MalformedAccuracyLog, error.MalformedFseTable => |e| return e,
error.EndOfStream => return error.MalformedFseTable,
};
const accuracy_log = std.math.log2_int_ceil(usize, table_size);
const start_index = std.math.cast(usize, counting_reader.bytes_read) orelse
return error.MalformedHuffmanTree;
var huff_data = src[start_index..compressed_size];
var huff_bits: readers.ReverseBitReader = undefined;
huff_bits.init(huff_data) catch return error.MalformedHuffmanTree;
return assignWeights(&huff_bits, accuracy_log, &entries, weights);
}
fn assignWeights(
huff_bits: *readers.ReverseBitReader,
accuracy_log: usize,
entries: *[1 << 6]Table.Fse,
weights: *[256]u4,
) !usize {
var i: usize = 0;
var even_state: u32 = huff_bits.readBitsNoEof(u32, accuracy_log) catch return error.MalformedHuffmanTree;
var odd_state: u32 = huff_bits.readBitsNoEof(u32, accuracy_log) catch return error.MalformedHuffmanTree;
while (i < 254) {
const even_data = entries[even_state];
var read_bits: usize = 0;
const even_bits = huff_bits.readBits(u32, even_data.bits, &read_bits) catch unreachable;
weights[i] = std.math.cast(u4, even_data.symbol) orelse return error.MalformedHuffmanTree;
i += 1;
if (read_bits < even_data.bits) {
weights[i] = std.math.cast(u4, entries[odd_state].symbol) orelse return error.MalformedHuffmanTree;
i += 1;
break;
}
even_state = even_data.baseline + even_bits;
read_bits = 0;
const odd_data = entries[odd_state];
const odd_bits = huff_bits.readBits(u32, odd_data.bits, &read_bits) catch unreachable;
weights[i] = std.math.cast(u4, odd_data.symbol) orelse return error.MalformedHuffmanTree;
i += 1;
if (read_bits < odd_data.bits) {
if (i == 255) return error.MalformedHuffmanTree;
weights[i] = std.math.cast(u4, entries[even_state].symbol) orelse return error.MalformedHuffmanTree;
i += 1;
break;
}
odd_state = odd_data.baseline + odd_bits;
} else return error.MalformedHuffmanTree;
if (!huff_bits.isEmpty()) {
return error.MalformedHuffmanTree;
}
return i + 1; // stream contains all but the last symbol
}
fn decodeDirectHuffmanTree(source: anytype, encoded_symbol_count: usize, weights: *[256]u4) !usize {
const weights_byte_count = (encoded_symbol_count + 1) / 2;
for (0..weights_byte_count) |i| {
const byte = try source.readByte();
weights[2 * i] = @intCast(u4, byte >> 4);
weights[2 * i + 1] = @intCast(u4, byte & 0xF);
}
return encoded_symbol_count + 1;
}
fn assignSymbols(weight_sorted_prefixed_symbols: []LiteralsSection.HuffmanTree.PrefixedSymbol, weights: [256]u4) usize {
for (0..weight_sorted_prefixed_symbols.len) |i| {
weight_sorted_prefixed_symbols[i] = .{
.symbol = @intCast(u8, i),
.weight = undefined,
.prefix = undefined,
};
}
std.sort.sort(
LiteralsSection.HuffmanTree.PrefixedSymbol,
weight_sorted_prefixed_symbols,
weights,
lessThanByWeight,
);
var prefix: u16 = 0;
var prefixed_symbol_count: usize = 0;
var sorted_index: usize = 0;
const symbol_count = weight_sorted_prefixed_symbols.len;
while (sorted_index < symbol_count) {
var symbol = weight_sorted_prefixed_symbols[sorted_index].symbol;
const weight = weights[symbol];
if (weight == 0) {
sorted_index += 1;
continue;
}
while (sorted_index < symbol_count) : ({
sorted_index += 1;
prefixed_symbol_count += 1;
prefix += 1;
}) {
symbol = weight_sorted_prefixed_symbols[sorted_index].symbol;
if (weights[symbol] != weight) {
prefix = ((prefix - 1) >> (weights[symbol] - weight)) + 1;
break;
}
weight_sorted_prefixed_symbols[prefixed_symbol_count].symbol = symbol;
weight_sorted_prefixed_symbols[prefixed_symbol_count].prefix = prefix;
weight_sorted_prefixed_symbols[prefixed_symbol_count].weight = weight;
}
}
return prefixed_symbol_count;
}
fn buildHuffmanTree(weights: *[256]u4, symbol_count: usize) error{MalformedHuffmanTree}!LiteralsSection.HuffmanTree {
var weight_power_sum_big: u32 = 0;
for (weights[0 .. symbol_count - 1]) |value| {
weight_power_sum_big += (@as(u16, 1) << value) >> 1;
}
if (weight_power_sum_big >= 1 << 11) return error.MalformedHuffmanTree;
const weight_power_sum = @intCast(u16, weight_power_sum_big);
// advance to next power of two (even if weight_power_sum is a power of 2)
// TODO: is it valid to have weight_power_sum == 0?
const max_number_of_bits = if (weight_power_sum == 0) 1 else std.math.log2_int(u16, weight_power_sum) + 1;
const next_power_of_two = @as(u16, 1) << max_number_of_bits;
weights[symbol_count - 1] = std.math.log2_int(u16, next_power_of_two - weight_power_sum) + 1;
var weight_sorted_prefixed_symbols: [256]LiteralsSection.HuffmanTree.PrefixedSymbol = undefined;
const prefixed_symbol_count = assignSymbols(weight_sorted_prefixed_symbols[0..symbol_count], weights.*);
const tree = LiteralsSection.HuffmanTree{
.max_bit_count = max_number_of_bits,
.symbol_count_minus_one = @intCast(u8, prefixed_symbol_count - 1),
.nodes = weight_sorted_prefixed_symbols,
};
return tree;
}
pub fn decodeHuffmanTree(
source: anytype,
buffer: []u8,
) (@TypeOf(source).Error || Error)!LiteralsSection.HuffmanTree {
const header = try source.readByte();
var weights: [256]u4 = undefined;
const symbol_count = if (header < 128)
// FSE compressed weights
try decodeFseHuffmanTree(source, header, buffer, &weights)
else
try decodeDirectHuffmanTree(source, header - 127, &weights);
return buildHuffmanTree(&weights, symbol_count);
}
pub fn decodeHuffmanTreeSlice(
src: []const u8,
consumed_count: *usize,
) Error!LiteralsSection.HuffmanTree {
if (src.len == 0) return error.MalformedHuffmanTree;
const header = src[0];
var bytes_read: usize = 1;
var weights: [256]u4 = undefined;
const symbol_count = if (header < 128) count: {
// FSE compressed weights
bytes_read += header;
break :count try decodeFseHuffmanTreeSlice(src[1..], header, &weights);
} else count: {
var fbs = std.io.fixedBufferStream(src[1..]);
defer bytes_read += fbs.pos;
break :count try decodeDirectHuffmanTree(fbs.reader(), header - 127, &weights);
};
consumed_count.* += bytes_read;
return buildHuffmanTree(&weights, symbol_count);
}
fn lessThanByWeight(
weights: [256]u4,
lhs: LiteralsSection.HuffmanTree.PrefixedSymbol,
rhs: LiteralsSection.HuffmanTree.PrefixedSymbol,
) bool {
// NOTE: this function relies on the use of a stable sorting algorithm,
// otherwise a special case of if (weights[lhs] == weights[rhs]) return lhs < rhs;
// should be added
return weights[lhs.symbol] < weights[rhs.symbol];
}

View File

@ -0,0 +1,636 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const RingBuffer = std.RingBuffer;
const types = @import("types.zig");
const frame = types.frame;
const LiteralsSection = types.compressed_block.LiteralsSection;
const SequencesSection = types.compressed_block.SequencesSection;
const SkippableHeader = types.frame.Skippable.Header;
const ZstandardHeader = types.frame.Zstandard.Header;
const Table = types.compressed_block.Table;
pub const block = @import("decode/block.zig");
const readers = @import("readers.zig");
const readInt = std.mem.readIntLittle;
const readIntSlice = std.mem.readIntSliceLittle;
/// Returns `true` is `magic` is a valid magic number for a skippable frame
pub fn isSkippableMagic(magic: u32) bool {
return frame.Skippable.magic_number_min <= magic and magic <= frame.Skippable.magic_number_max;
}
/// Returns the kind of frame at the beginning of `source`.
///
/// Errors returned:
/// - `error.BadMagic` if `source` begins with bytes not equal to the
/// Zstandard frame magic number, or outside the range of magic numbers for
/// skippable frames.
/// - `error.EndOfStream` if `source` contains fewer than 4 bytes
pub fn decodeFrameType(source: anytype) error{ BadMagic, EndOfStream }!frame.Kind {
const magic = try source.readIntLittle(u32);
return frameType(magic);
}
/// Returns the kind of frame associated to `magic`.
///
/// Errors returned:
/// - `error.BadMagic` if `magic` is not a valid magic number.
pub fn frameType(magic: u32) error{BadMagic}!frame.Kind {
return if (magic == frame.Zstandard.magic_number)
.zstandard
else if (isSkippableMagic(magic))
.skippable
else
error.BadMagic;
}
pub const FrameHeader = union(enum) {
zstandard: ZstandardHeader,
skippable: SkippableHeader,
};
pub const HeaderError = error{ BadMagic, EndOfStream, ReservedBitSet };
/// Returns the header of the frame at the beginning of `source`.
///
/// Errors returned:
/// - `error.BadMagic` if `source` begins with bytes not equal to the
/// Zstandard frame magic number, or outside the range of magic numbers for
/// skippable frames.
/// - `error.EndOfStream` if `source` contains fewer than 4 bytes
/// - `error.ReservedBitSet` if the frame is a Zstandard frame and any of the
/// reserved bits are set
pub fn decodeFrameHeader(source: anytype) (@TypeOf(source).Error || HeaderError)!FrameHeader {
const magic = try source.readIntLittle(u32);
const frame_type = try frameType(magic);
switch (frame_type) {
.zstandard => return FrameHeader{ .zstandard = try decodeZstandardHeader(source) },
.skippable => return FrameHeader{
.skippable = .{
.magic_number = magic,
.frame_size = try source.readIntLittle(u32),
},
},
}
}
pub const ReadWriteCount = struct {
read_count: usize,
write_count: usize,
};
/// Decodes frames from `src` into `dest`; returns the length of the result.
/// The stream should not have extra trailing bytes - either all bytes in `src`
/// will be decoded, or an error will be returned. An error will be returned if
/// a Zstandard frame in `src` does not declare its content size.
///
/// Errors returned:
/// - `error.DictionaryIdFlagUnsupported` if a `src` contains a frame that
/// uses a dictionary
/// - `error.MalformedFrame` if a frame in `src` is invalid
/// - `error.UnknownContentSizeUnsupported` if a frame in `src` does not
/// declare its content size
pub fn decode(dest: []u8, src: []const u8, verify_checksum: bool) error{
MalformedFrame,
UnknownContentSizeUnsupported,
DictionaryIdFlagUnsupported,
}!usize {
var write_count: usize = 0;
var read_count: usize = 0;
while (read_count < src.len) {
const counts = decodeFrame(dest, src[read_count..], verify_checksum) catch |err| {
switch (err) {
error.UnknownContentSizeUnsupported => return error.UnknownContentSizeUnsupported,
error.DictionaryIdFlagUnsupported => return error.DictionaryIdFlagUnsupported,
else => return error.MalformedFrame,
}
};
read_count += counts.read_count;
write_count += counts.write_count;
}
return write_count;
}
/// Decodes a stream of frames from `src`; returns the decoded bytes. The stream
/// should not have extra trailing bytes - either all bytes in `src` will be
/// decoded, or an error will be returned.
///
/// Errors returned:
/// - `error.DictionaryIdFlagUnsupported` if a `src` contains a frame that
/// uses a dictionary
/// - `error.MalformedFrame` if a frame in `src` is invalid
/// - `error.OutOfMemory` if `allocator` cannot allocate enough memory
pub fn decodeAlloc(
allocator: Allocator,
src: []const u8,
verify_checksum: bool,
window_size_max: usize,
) error{ DictionaryIdFlagUnsupported, MalformedFrame, OutOfMemory }![]u8 {
var result = std.ArrayList(u8).init(allocator);
errdefer result.deinit();
var read_count: usize = 0;
while (read_count < src.len) {
read_count += decodeFrameArrayList(
allocator,
&result,
src[read_count..],
verify_checksum,
window_size_max,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.DictionaryIdFlagUnsupported => return error.DictionaryIdFlagUnsupported,
else => return error.MalformedFrame,
};
}
return result.toOwnedSlice();
}
/// Decodes the frame at the start of `src` into `dest`. Returns the number of
/// bytes read from `src` and written to `dest`. This function can only decode
/// frames that declare the decompressed content size.
///
/// Errors returned:
/// - `error.BadMagic` if the first 4 bytes of `src` is not a valid magic
/// number for a Zstandard or skippable frame
/// - `error.UnknownContentSizeUnsupported` if the frame does not declare the
/// uncompressed content size
/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
/// - `error.ContentTooLarge` if `dest` is smaller than the uncompressed data
/// size declared by the frame header
/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
/// that is larger than `std.math.maxInt(usize)`
/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
/// contains a checksum that does not match the checksum of the decompressed
/// data
/// - `error.ReservedBitSet` if any of the reserved bits of the frame header
/// are set
/// - `error.EndOfStream` if `src` does not contain a complete frame
/// - `error.BadContentSize` if the content size declared by the frame does
/// not equal the actual size of decompressed data
/// - an error in `block.Error` if there are errors decoding a block
/// - `error.SkippableSizeTooLarge` if the frame is skippable and reports a
/// size greater than `src.len`
pub fn decodeFrame(
dest: []u8,
src: []const u8,
verify_checksum: bool,
) (error{
BadMagic,
UnknownContentSizeUnsupported,
ContentTooLarge,
ContentSizeTooLarge,
WindowSizeUnknown,
DictionaryIdFlagUnsupported,
SkippableSizeTooLarge,
} || FrameError)!ReadWriteCount {
var fbs = std.io.fixedBufferStream(src);
switch (try decodeFrameType(fbs.reader())) {
.zstandard => return decodeZstandardFrame(dest, src, verify_checksum),
.skippable => {
const content_size = try fbs.reader().readIntLittle(u32);
if (content_size > std.math.maxInt(usize) - 8) return error.SkippableSizeTooLarge;
const read_count = @as(usize, content_size) + 8;
if (read_count > src.len) return error.SkippableSizeTooLarge;
return ReadWriteCount{
.read_count = read_count,
.write_count = 0,
};
},
}
}
/// Decodes the frame at the start of `src` into `dest`. Returns the number of
/// bytes read from `src`.
///
/// Errors returned:
/// - `error.BadMagic` if the first 4 bytes of `src` is not a valid magic
/// number for a Zstandard or skippable frame
/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
/// - `error.WindowTooLarge` if the window size is larger than
/// `window_size_max`
/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
/// that is larger than `std.math.maxInt(usize)`
/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
/// contains a checksum that does not match the checksum of the decompressed
/// data
/// - `error.ReservedBitSet` if any of the reserved bits of the frame header
/// are set
/// - `error.EndOfStream` if `src` does not contain a complete frame
/// - `error.BadContentSize` if the content size declared by the frame does
/// not equal the actual size of decompressed data
/// - `error.OutOfMemory` if `allocator` cannot allocate enough memory
/// - an error in `block.Error` if there are errors decoding a block
/// - `error.SkippableSizeTooLarge` if the frame is skippable and reports a
/// size greater than `src.len`
pub fn decodeFrameArrayList(
allocator: Allocator,
dest: *std.ArrayList(u8),
src: []const u8,
verify_checksum: bool,
window_size_max: usize,
) (error{ BadMagic, OutOfMemory, SkippableSizeTooLarge } || FrameContext.Error || FrameError)!usize {
var fbs = std.io.fixedBufferStream(src);
const reader = fbs.reader();
const magic = try reader.readIntLittle(u32);
switch (try frameType(magic)) {
.zstandard => return decodeZstandardFrameArrayList(
allocator,
dest,
src,
verify_checksum,
window_size_max,
),
.skippable => {
const content_size = try fbs.reader().readIntLittle(u32);
if (content_size > std.math.maxInt(usize) - 8) return error.SkippableSizeTooLarge;
const read_count = @as(usize, content_size) + 8;
if (read_count > src.len) return error.SkippableSizeTooLarge;
return read_count;
},
}
}
/// Returns the frame checksum corresponding to the data fed into `hasher`
pub fn computeChecksum(hasher: *std.hash.XxHash64) u32 {
const hash = hasher.final();
return @intCast(u32, hash & 0xFFFFFFFF);
}
const FrameError = error{
ChecksumFailure,
BadContentSize,
EndOfStream,
ReservedBitSet,
} || block.Error;
/// Decode a Zstandard frame from `src` into `dest`, returning the number of
/// bytes read from `src` and written to `dest`. The first four bytes of `src`
/// must be the magic number for a Zstandard frame.
///
/// Error returned:
/// - `error.UnknownContentSizeUnsupported` if the frame does not declare the
/// uncompressed content size
/// - `error.ContentTooLarge` if `dest` is smaller than the uncompressed data
/// size declared by the frame header
/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
/// that is larger than `std.math.maxInt(usize)`
/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
/// contains a checksum that does not match the checksum of the decompressed
/// data
/// - `error.ReservedBitSet` if the reserved bit of the frame header is set
/// - `error.EndOfStream` if `src` does not contain a complete frame
/// - an error in `block.Error` if there are errors decoding a block
/// - `error.BadContentSize` if the content size declared by the frame does
/// not equal the actual size of decompressed data
pub fn decodeZstandardFrame(
dest: []u8,
src: []const u8,
verify_checksum: bool,
) (error{
UnknownContentSizeUnsupported,
ContentTooLarge,
ContentSizeTooLarge,
WindowSizeUnknown,
DictionaryIdFlagUnsupported,
} || FrameError)!ReadWriteCount {
assert(readInt(u32, src[0..4]) == frame.Zstandard.magic_number);
var consumed_count: usize = 4;
var frame_context = context: {
var fbs = std.io.fixedBufferStream(src[consumed_count..]);
var source = fbs.reader();
const frame_header = try decodeZstandardHeader(source);
consumed_count += fbs.pos;
break :context FrameContext.init(
frame_header,
std.math.maxInt(usize),
verify_checksum,
) catch |err| switch (err) {
error.WindowTooLarge => unreachable,
inline else => |e| return e,
};
};
const counts = try decodeZStandardFrameBlocks(
dest,
src[consumed_count..],
&frame_context,
);
return ReadWriteCount{
.read_count = counts.read_count + consumed_count,
.write_count = counts.write_count,
};
}
pub fn decodeZStandardFrameBlocks(
dest: []u8,
src: []const u8,
frame_context: *FrameContext,
) (error{ ContentTooLarge, UnknownContentSizeUnsupported } || FrameError)!ReadWriteCount {
const content_size = frame_context.content_size orelse
return error.UnknownContentSizeUnsupported;
if (dest.len < content_size) return error.ContentTooLarge;
var consumed_count: usize = 0;
const written_count = decodeFrameBlocksInner(
dest[0..content_size],
src[consumed_count..],
&consumed_count,
if (frame_context.hasher_opt) |*hasher| hasher else null,
frame_context.block_size_max,
) catch |err| switch (err) {
error.DestTooSmall => return error.BadContentSize,
inline else => |e| return e,
};
if (written_count != content_size) return error.BadContentSize;
if (frame_context.has_checksum) {
if (src.len < consumed_count + 4) return error.EndOfStream;
const checksum = readIntSlice(u32, src[consumed_count .. consumed_count + 4]);
consumed_count += 4;
if (frame_context.hasher_opt) |*hasher| {
if (checksum != computeChecksum(hasher)) return error.ChecksumFailure;
}
}
return ReadWriteCount{ .read_count = consumed_count, .write_count = written_count };
}
pub const FrameContext = struct {
hasher_opt: ?std.hash.XxHash64,
window_size: usize,
has_checksum: bool,
block_size_max: usize,
content_size: ?usize,
const Error = error{
DictionaryIdFlagUnsupported,
WindowSizeUnknown,
WindowTooLarge,
ContentSizeTooLarge,
};
/// Validates `frame_header` and returns the associated `FrameContext`.
///
/// Errors returned:
/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
/// - `error.WindowSizeUnknown` if the frame does not have a valid window
/// size
/// - `error.WindowTooLarge` if the window size is larger than
/// `window_size_max`
/// - `error.ContentSizeTooLarge` if the frame header indicates a content
/// size larger than `std.math.maxInt(usize)`
pub fn init(
frame_header: ZstandardHeader,
window_size_max: usize,
verify_checksum: bool,
) Error!FrameContext {
if (frame_header.descriptor.dictionary_id_flag != 0)
return error.DictionaryIdFlagUnsupported;
const window_size_raw = frameWindowSize(frame_header) orelse return error.WindowSizeUnknown;
const window_size = if (window_size_raw > window_size_max)
return error.WindowTooLarge
else
@intCast(usize, window_size_raw);
const should_compute_checksum =
frame_header.descriptor.content_checksum_flag and verify_checksum;
const content_size = if (frame_header.content_size) |size|
std.math.cast(usize, size) orelse return error.ContentSizeTooLarge
else
null;
return .{
.hasher_opt = if (should_compute_checksum) std.hash.XxHash64.init(0) else null,
.window_size = window_size,
.has_checksum = frame_header.descriptor.content_checksum_flag,
.block_size_max = @min(1 << 17, window_size),
.content_size = content_size,
};
}
};
/// Decode a Zstandard from from `src` and return number of bytes read; see
/// `decodeZstandardFrame()`. The first four bytes of `src` must be the magic
/// number for a Zstandard frame.
///
/// Errors returned:
/// - `error.WindowSizeUnknown` if the frame does not have a valid window size
/// - `error.WindowTooLarge` if the window size is larger than
/// `window_size_max`
/// - `error.DictionaryIdFlagUnsupported` if the frame uses a dictionary
/// - `error.ContentSizeTooLarge` if the frame header indicates a content size
/// that is larger than `std.math.maxInt(usize)`
/// - `error.ChecksumFailure` if `verify_checksum` is true and the frame
/// contains a checksum that does not match the checksum of the decompressed
/// data
/// - `error.ReservedBitSet` if the reserved bit of the frame header is set
/// - `error.EndOfStream` if `src` does not contain a complete frame
/// - `error.OutOfMemory` if `allocator` cannot allocate enough memory
/// - an error in `block.Error` if there are errors decoding a block
/// - `error.BadContentSize` if the content size declared by the frame does
/// not equal the size of decompressed data
pub fn decodeZstandardFrameArrayList(
allocator: Allocator,
dest: *std.ArrayList(u8),
src: []const u8,
verify_checksum: bool,
window_size_max: usize,
) (error{OutOfMemory} || FrameContext.Error || FrameError)!usize {
assert(readInt(u32, src[0..4]) == frame.Zstandard.magic_number);
var consumed_count: usize = 4;
var frame_context = context: {
var fbs = std.io.fixedBufferStream(src[consumed_count..]);
var source = fbs.reader();
const frame_header = try decodeZstandardHeader(source);
consumed_count += fbs.pos;
break :context try FrameContext.init(frame_header, window_size_max, verify_checksum);
};
consumed_count += try decodeZstandardFrameBlocksArrayList(
allocator,
dest,
src[consumed_count..],
&frame_context,
);
return consumed_count;
}
pub fn decodeZstandardFrameBlocksArrayList(
allocator: Allocator,
dest: *std.ArrayList(u8),
src: []const u8,
frame_context: *FrameContext,
) (error{OutOfMemory} || FrameError)!usize {
const initial_len = dest.items.len;
var ring_buffer = try RingBuffer.init(allocator, frame_context.window_size);
defer ring_buffer.deinit(allocator);
// These tables take 7680 bytes
var literal_fse_data: [types.compressed_block.table_size_max.literal]Table.Fse = undefined;
var match_fse_data: [types.compressed_block.table_size_max.match]Table.Fse = undefined;
var offset_fse_data: [types.compressed_block.table_size_max.offset]Table.Fse = undefined;
var block_header = try block.decodeBlockHeaderSlice(src);
var consumed_count: usize = 3;
var decode_state = block.DecodeState.init(&literal_fse_data, &match_fse_data, &offset_fse_data);
while (true) : ({
block_header = try block.decodeBlockHeaderSlice(src[consumed_count..]);
consumed_count += 3;
}) {
const written_size = try block.decodeBlockRingBuffer(
&ring_buffer,
src[consumed_count..],
block_header,
&decode_state,
&consumed_count,
frame_context.block_size_max,
);
if (frame_context.content_size) |size| {
if (dest.items.len - initial_len > size) {
return error.BadContentSize;
}
}
if (written_size > 0) {
const written_slice = ring_buffer.sliceLast(written_size);
try dest.appendSlice(written_slice.first);
try dest.appendSlice(written_slice.second);
if (frame_context.hasher_opt) |*hasher| {
hasher.update(written_slice.first);
hasher.update(written_slice.second);
}
}
if (block_header.last_block) break;
}
if (frame_context.content_size) |size| {
if (dest.items.len - initial_len != size) {
return error.BadContentSize;
}
}
if (frame_context.has_checksum) {
if (src.len < consumed_count + 4) return error.EndOfStream;
const checksum = readIntSlice(u32, src[consumed_count .. consumed_count + 4]);
consumed_count += 4;
if (frame_context.hasher_opt) |*hasher| {
if (checksum != computeChecksum(hasher)) return error.ChecksumFailure;
}
}
return consumed_count;
}
fn decodeFrameBlocksInner(
dest: []u8,
src: []const u8,
consumed_count: *usize,
hash: ?*std.hash.XxHash64,
block_size_max: usize,
) (error{ EndOfStream, DestTooSmall } || block.Error)!usize {
// These tables take 7680 bytes
var literal_fse_data: [types.compressed_block.table_size_max.literal]Table.Fse = undefined;
var match_fse_data: [types.compressed_block.table_size_max.match]Table.Fse = undefined;
var offset_fse_data: [types.compressed_block.table_size_max.offset]Table.Fse = undefined;
var block_header = try block.decodeBlockHeaderSlice(src);
var bytes_read: usize = 3;
defer consumed_count.* += bytes_read;
var decode_state = block.DecodeState.init(&literal_fse_data, &match_fse_data, &offset_fse_data);
var count: usize = 0;
while (true) : ({
block_header = try block.decodeBlockHeaderSlice(src[bytes_read..]);
bytes_read += 3;
}) {
const written_size = try block.decodeBlock(
dest,
src[bytes_read..],
block_header,
&decode_state,
&bytes_read,
block_size_max,
count,
);
if (hash) |hash_state| hash_state.update(dest[count .. count + written_size]);
count += written_size;
if (block_header.last_block) break;
}
return count;
}
/// Decode the header of a skippable frame. The first four bytes of `src` must
/// be a valid magic number for a skippable frame.
pub fn decodeSkippableHeader(src: *const [8]u8) SkippableHeader {
const magic = readInt(u32, src[0..4]);
assert(isSkippableMagic(magic));
const frame_size = readInt(u32, src[4..8]);
return .{
.magic_number = magic,
.frame_size = frame_size,
};
}
/// Returns the window size required to decompress a frame, or `null` if it
/// cannot be determined (which indicates a malformed frame header).
pub fn frameWindowSize(header: ZstandardHeader) ?u64 {
if (header.window_descriptor) |descriptor| {
const exponent = (descriptor & 0b11111000) >> 3;
const mantissa = descriptor & 0b00000111;
const window_log = 10 + exponent;
const window_base = @as(u64, 1) << @intCast(u6, window_log);
const window_add = (window_base / 8) * mantissa;
return window_base + window_add;
} else return header.content_size;
}
/// Decode the header of a Zstandard frame.
///
/// Errors returned:
/// - `error.ReservedBitSet` if any of the reserved bits of the header are set
/// - `error.EndOfStream` if `source` does not contain a complete header
pub fn decodeZstandardHeader(
source: anytype,
) (@TypeOf(source).Error || error{ EndOfStream, ReservedBitSet })!ZstandardHeader {
const descriptor = @bitCast(ZstandardHeader.Descriptor, try source.readByte());
if (descriptor.reserved) return error.ReservedBitSet;
var window_descriptor: ?u8 = null;
if (!descriptor.single_segment_flag) {
window_descriptor = try source.readByte();
}
var dictionary_id: ?u32 = null;
if (descriptor.dictionary_id_flag > 0) {
// if flag is 3 then field_size = 4, else field_size = flag
const field_size = (@as(u4, 1) << descriptor.dictionary_id_flag) >> 1;
dictionary_id = try source.readVarInt(u32, .Little, field_size);
}
var content_size: ?u64 = null;
if (descriptor.single_segment_flag or descriptor.content_size_flag > 0) {
const field_size = @as(u4, 1) << descriptor.content_size_flag;
content_size = try source.readVarInt(u64, .Little, field_size);
if (field_size == 2) content_size.? += 256;
}
const header = ZstandardHeader{
.descriptor = descriptor,
.window_descriptor = window_descriptor,
.dictionary_id = dictionary_id,
.content_size = content_size,
};
return header;
}
test {
std.testing.refAllDecls(@This());
}

View File

@ -0,0 +1,82 @@
const std = @import("std");
pub const ReversedByteReader = struct {
remaining_bytes: usize,
bytes: []const u8,
const Reader = std.io.Reader(*ReversedByteReader, error{}, readFn);
pub fn init(bytes: []const u8) ReversedByteReader {
return .{
.bytes = bytes,
.remaining_bytes = bytes.len,
};
}
pub fn reader(self: *ReversedByteReader) Reader {
return .{ .context = self };
}
fn readFn(ctx: *ReversedByteReader, buffer: []u8) !usize {
if (ctx.remaining_bytes == 0) return 0;
const byte_index = ctx.remaining_bytes - 1;
buffer[0] = ctx.bytes[byte_index];
// buffer[0] = @bitReverse(ctx.bytes[byte_index]);
ctx.remaining_bytes = byte_index;
return 1;
}
};
/// A bit reader for reading the reversed bit streams used to encode
/// FSE compressed data.
pub const ReverseBitReader = struct {
byte_reader: ReversedByteReader,
bit_reader: std.io.BitReader(.Big, ReversedByteReader.Reader),
pub fn init(self: *ReverseBitReader, bytes: []const u8) error{BitStreamHasNoStartBit}!void {
self.byte_reader = ReversedByteReader.init(bytes);
self.bit_reader = std.io.bitReader(.Big, self.byte_reader.reader());
if (bytes.len == 0) return;
var i: usize = 0;
while (i < 8 and 0 == self.readBitsNoEof(u1, 1) catch unreachable) : (i += 1) {}
if (i == 8) return error.BitStreamHasNoStartBit;
}
pub fn readBitsNoEof(self: *@This(), comptime U: type, num_bits: usize) error{EndOfStream}!U {
return self.bit_reader.readBitsNoEof(U, num_bits);
}
pub fn readBits(self: *@This(), comptime U: type, num_bits: usize, out_bits: *usize) error{}!U {
return try self.bit_reader.readBits(U, num_bits, out_bits);
}
pub fn alignToByte(self: *@This()) void {
self.bit_reader.alignToByte();
}
pub fn isEmpty(self: ReverseBitReader) bool {
return self.byte_reader.remaining_bytes == 0 and self.bit_reader.bit_count == 0;
}
};
pub fn BitReader(comptime Reader: type) type {
return struct {
underlying: std.io.BitReader(.Little, Reader),
pub fn readBitsNoEof(self: *@This(), comptime U: type, num_bits: usize) !U {
return self.underlying.readBitsNoEof(U, num_bits);
}
pub fn readBits(self: *@This(), comptime U: type, num_bits: usize, out_bits: *usize) !U {
return self.underlying.readBits(U, num_bits, out_bits);
}
pub fn alignToByte(self: *@This()) void {
self.underlying.alignToByte();
}
};
}
pub fn bitReader(reader: anytype) BitReader(@TypeOf(reader)) {
return .{ .underlying = std.io.bitReader(.Little, reader) };
}

View File

@ -0,0 +1,401 @@
pub const frame = struct {
pub const Kind = enum { zstandard, skippable };
pub const Zstandard = struct {
pub const magic_number = 0xFD2FB528;
header: Header,
data_blocks: []Block,
checksum: ?u32,
pub const Header = struct {
descriptor: Descriptor,
window_descriptor: ?u8,
dictionary_id: ?u32,
content_size: ?u64,
pub const Descriptor = packed struct {
dictionary_id_flag: u2,
content_checksum_flag: bool,
reserved: bool,
unused: bool,
single_segment_flag: bool,
content_size_flag: u2,
};
};
pub const Block = struct {
pub const Header = struct {
last_block: bool,
block_type: Block.Type,
block_size: u21,
};
pub const Type = enum(u2) {
raw,
rle,
compressed,
reserved,
};
};
};
pub const Skippable = struct {
pub const magic_number_min = 0x184D2A50;
pub const magic_number_max = 0x184D2A5F;
pub const Header = struct {
magic_number: u32,
frame_size: u32,
};
};
};
pub const compressed_block = struct {
pub const LiteralsSection = struct {
header: Header,
huffman_tree: ?HuffmanTree,
streams: Streams,
pub const Streams = union(enum) {
one: []const u8,
four: [4][]const u8,
};
pub const Header = struct {
block_type: BlockType,
size_format: u2,
regenerated_size: u20,
compressed_size: ?u18,
};
pub const BlockType = enum(u2) {
raw,
rle,
compressed,
treeless,
};
pub const HuffmanTree = struct {
max_bit_count: u4,
symbol_count_minus_one: u8,
nodes: [256]PrefixedSymbol,
pub const PrefixedSymbol = struct {
symbol: u8,
prefix: u16,
weight: u4,
};
pub const Result = union(enum) {
symbol: u8,
index: usize,
};
pub fn query(self: HuffmanTree, index: usize, prefix: u16) error{NotFound}!Result {
var node = self.nodes[index];
const weight = node.weight;
var i: usize = index;
while (node.weight == weight) {
if (node.prefix == prefix) return Result{ .symbol = node.symbol };
if (i == 0) return error.NotFound;
i -= 1;
node = self.nodes[i];
}
return Result{ .index = i };
}
pub fn weightToBitCount(weight: u4, max_bit_count: u4) u4 {
return if (weight == 0) 0 else ((max_bit_count + 1) - weight);
}
};
pub const StreamCount = enum { one, four };
pub fn streamCount(size_format: u2, block_type: BlockType) StreamCount {
return switch (block_type) {
.raw, .rle => .one,
.compressed, .treeless => if (size_format == 0) .one else .four,
};
}
};
pub const SequencesSection = struct {
header: SequencesSection.Header,
literals_length_table: Table,
offset_table: Table,
match_length_table: Table,
pub const Header = struct {
sequence_count: u24,
match_lengths: Mode,
offsets: Mode,
literal_lengths: Mode,
pub const Mode = enum(u2) {
predefined,
rle,
fse,
repeat,
};
};
};
pub const Table = union(enum) {
fse: []const Fse,
rle: u8,
pub const Fse = struct {
symbol: u8,
baseline: u16,
bits: u8,
};
};
pub const literals_length_code_table = [36]struct { u32, u5 }{
.{ 0, 0 }, .{ 1, 0 }, .{ 2, 0 }, .{ 3, 0 },
.{ 4, 0 }, .{ 5, 0 }, .{ 6, 0 }, .{ 7, 0 },
.{ 8, 0 }, .{ 9, 0 }, .{ 10, 0 }, .{ 11, 0 },
.{ 12, 0 }, .{ 13, 0 }, .{ 14, 0 }, .{ 15, 0 },
.{ 16, 1 }, .{ 18, 1 }, .{ 20, 1 }, .{ 22, 1 },
.{ 24, 2 }, .{ 28, 2 }, .{ 32, 3 }, .{ 40, 3 },
.{ 48, 4 }, .{ 64, 6 }, .{ 128, 7 }, .{ 256, 8 },
.{ 512, 9 }, .{ 1024, 10 }, .{ 2048, 11 }, .{ 4096, 12 },
.{ 8192, 13 }, .{ 16384, 14 }, .{ 32768, 15 }, .{ 65536, 16 },
};
pub const match_length_code_table = [53]struct { u32, u5 }{
.{ 3, 0 }, .{ 4, 0 }, .{ 5, 0 }, .{ 6, 0 }, .{ 7, 0 }, .{ 8, 0 },
.{ 9, 0 }, .{ 10, 0 }, .{ 11, 0 }, .{ 12, 0 }, .{ 13, 0 }, .{ 14, 0 },
.{ 15, 0 }, .{ 16, 0 }, .{ 17, 0 }, .{ 18, 0 }, .{ 19, 0 }, .{ 20, 0 },
.{ 21, 0 }, .{ 22, 0 }, .{ 23, 0 }, .{ 24, 0 }, .{ 25, 0 }, .{ 26, 0 },
.{ 27, 0 }, .{ 28, 0 }, .{ 29, 0 }, .{ 30, 0 }, .{ 31, 0 }, .{ 32, 0 },
.{ 33, 0 }, .{ 34, 0 }, .{ 35, 1 }, .{ 37, 1 }, .{ 39, 1 }, .{ 41, 1 },
.{ 43, 2 }, .{ 47, 2 }, .{ 51, 3 }, .{ 59, 3 }, .{ 67, 4 }, .{ 83, 4 },
.{ 99, 5 }, .{ 131, 7 }, .{ 259, 8 }, .{ 515, 9 }, .{ 1027, 10 }, .{ 2051, 11 },
.{ 4099, 12 }, .{ 8195, 13 }, .{ 16387, 14 }, .{ 32771, 15 }, .{ 65539, 16 },
};
pub const literals_length_default_distribution = [36]i16{
4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
-1, -1, -1, -1,
};
pub const match_lengths_default_distribution = [53]i16{
1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1,
-1, -1, -1, -1, -1,
};
pub const offset_codes_default_distribution = [29]i16{
1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1,
};
pub const predefined_literal_fse_table = Table{
.fse = &[64]Table.Fse{
.{ .symbol = 0, .bits = 4, .baseline = 0 },
.{ .symbol = 0, .bits = 4, .baseline = 16 },
.{ .symbol = 1, .bits = 5, .baseline = 32 },
.{ .symbol = 3, .bits = 5, .baseline = 0 },
.{ .symbol = 4, .bits = 5, .baseline = 0 },
.{ .symbol = 6, .bits = 5, .baseline = 0 },
.{ .symbol = 7, .bits = 5, .baseline = 0 },
.{ .symbol = 9, .bits = 5, .baseline = 0 },
.{ .symbol = 10, .bits = 5, .baseline = 0 },
.{ .symbol = 12, .bits = 5, .baseline = 0 },
.{ .symbol = 14, .bits = 6, .baseline = 0 },
.{ .symbol = 16, .bits = 5, .baseline = 0 },
.{ .symbol = 18, .bits = 5, .baseline = 0 },
.{ .symbol = 19, .bits = 5, .baseline = 0 },
.{ .symbol = 21, .bits = 5, .baseline = 0 },
.{ .symbol = 22, .bits = 5, .baseline = 0 },
.{ .symbol = 24, .bits = 5, .baseline = 0 },
.{ .symbol = 25, .bits = 5, .baseline = 32 },
.{ .symbol = 26, .bits = 5, .baseline = 0 },
.{ .symbol = 27, .bits = 6, .baseline = 0 },
.{ .symbol = 29, .bits = 6, .baseline = 0 },
.{ .symbol = 31, .bits = 6, .baseline = 0 },
.{ .symbol = 0, .bits = 4, .baseline = 32 },
.{ .symbol = 1, .bits = 4, .baseline = 0 },
.{ .symbol = 2, .bits = 5, .baseline = 0 },
.{ .symbol = 4, .bits = 5, .baseline = 32 },
.{ .symbol = 5, .bits = 5, .baseline = 0 },
.{ .symbol = 7, .bits = 5, .baseline = 32 },
.{ .symbol = 8, .bits = 5, .baseline = 0 },
.{ .symbol = 10, .bits = 5, .baseline = 32 },
.{ .symbol = 11, .bits = 5, .baseline = 0 },
.{ .symbol = 13, .bits = 6, .baseline = 0 },
.{ .symbol = 16, .bits = 5, .baseline = 32 },
.{ .symbol = 17, .bits = 5, .baseline = 0 },
.{ .symbol = 19, .bits = 5, .baseline = 32 },
.{ .symbol = 20, .bits = 5, .baseline = 0 },
.{ .symbol = 22, .bits = 5, .baseline = 32 },
.{ .symbol = 23, .bits = 5, .baseline = 0 },
.{ .symbol = 25, .bits = 4, .baseline = 0 },
.{ .symbol = 25, .bits = 4, .baseline = 16 },
.{ .symbol = 26, .bits = 5, .baseline = 32 },
.{ .symbol = 28, .bits = 6, .baseline = 0 },
.{ .symbol = 30, .bits = 6, .baseline = 0 },
.{ .symbol = 0, .bits = 4, .baseline = 48 },
.{ .symbol = 1, .bits = 4, .baseline = 16 },
.{ .symbol = 2, .bits = 5, .baseline = 32 },
.{ .symbol = 3, .bits = 5, .baseline = 32 },
.{ .symbol = 5, .bits = 5, .baseline = 32 },
.{ .symbol = 6, .bits = 5, .baseline = 32 },
.{ .symbol = 8, .bits = 5, .baseline = 32 },
.{ .symbol = 9, .bits = 5, .baseline = 32 },
.{ .symbol = 11, .bits = 5, .baseline = 32 },
.{ .symbol = 12, .bits = 5, .baseline = 32 },
.{ .symbol = 15, .bits = 6, .baseline = 0 },
.{ .symbol = 17, .bits = 5, .baseline = 32 },
.{ .symbol = 18, .bits = 5, .baseline = 32 },
.{ .symbol = 20, .bits = 5, .baseline = 32 },
.{ .symbol = 21, .bits = 5, .baseline = 32 },
.{ .symbol = 23, .bits = 5, .baseline = 32 },
.{ .symbol = 24, .bits = 5, .baseline = 32 },
.{ .symbol = 35, .bits = 6, .baseline = 0 },
.{ .symbol = 34, .bits = 6, .baseline = 0 },
.{ .symbol = 33, .bits = 6, .baseline = 0 },
.{ .symbol = 32, .bits = 6, .baseline = 0 },
},
};
pub const predefined_match_fse_table = Table{
.fse = &[64]Table.Fse{
.{ .symbol = 0, .bits = 6, .baseline = 0 },
.{ .symbol = 1, .bits = 4, .baseline = 0 },
.{ .symbol = 2, .bits = 5, .baseline = 32 },
.{ .symbol = 3, .bits = 5, .baseline = 0 },
.{ .symbol = 5, .bits = 5, .baseline = 0 },
.{ .symbol = 6, .bits = 5, .baseline = 0 },
.{ .symbol = 8, .bits = 5, .baseline = 0 },
.{ .symbol = 10, .bits = 6, .baseline = 0 },
.{ .symbol = 13, .bits = 6, .baseline = 0 },
.{ .symbol = 16, .bits = 6, .baseline = 0 },
.{ .symbol = 19, .bits = 6, .baseline = 0 },
.{ .symbol = 22, .bits = 6, .baseline = 0 },
.{ .symbol = 25, .bits = 6, .baseline = 0 },
.{ .symbol = 28, .bits = 6, .baseline = 0 },
.{ .symbol = 31, .bits = 6, .baseline = 0 },
.{ .symbol = 33, .bits = 6, .baseline = 0 },
.{ .symbol = 35, .bits = 6, .baseline = 0 },
.{ .symbol = 37, .bits = 6, .baseline = 0 },
.{ .symbol = 39, .bits = 6, .baseline = 0 },
.{ .symbol = 41, .bits = 6, .baseline = 0 },
.{ .symbol = 43, .bits = 6, .baseline = 0 },
.{ .symbol = 45, .bits = 6, .baseline = 0 },
.{ .symbol = 1, .bits = 4, .baseline = 16 },
.{ .symbol = 2, .bits = 4, .baseline = 0 },
.{ .symbol = 3, .bits = 5, .baseline = 32 },
.{ .symbol = 4, .bits = 5, .baseline = 0 },
.{ .symbol = 6, .bits = 5, .baseline = 32 },
.{ .symbol = 7, .bits = 5, .baseline = 0 },
.{ .symbol = 9, .bits = 6, .baseline = 0 },
.{ .symbol = 12, .bits = 6, .baseline = 0 },
.{ .symbol = 15, .bits = 6, .baseline = 0 },
.{ .symbol = 18, .bits = 6, .baseline = 0 },
.{ .symbol = 21, .bits = 6, .baseline = 0 },
.{ .symbol = 24, .bits = 6, .baseline = 0 },
.{ .symbol = 27, .bits = 6, .baseline = 0 },
.{ .symbol = 30, .bits = 6, .baseline = 0 },
.{ .symbol = 32, .bits = 6, .baseline = 0 },
.{ .symbol = 34, .bits = 6, .baseline = 0 },
.{ .symbol = 36, .bits = 6, .baseline = 0 },
.{ .symbol = 38, .bits = 6, .baseline = 0 },
.{ .symbol = 40, .bits = 6, .baseline = 0 },
.{ .symbol = 42, .bits = 6, .baseline = 0 },
.{ .symbol = 44, .bits = 6, .baseline = 0 },
.{ .symbol = 1, .bits = 4, .baseline = 32 },
.{ .symbol = 1, .bits = 4, .baseline = 48 },
.{ .symbol = 2, .bits = 4, .baseline = 16 },
.{ .symbol = 4, .bits = 5, .baseline = 32 },
.{ .symbol = 5, .bits = 5, .baseline = 32 },
.{ .symbol = 7, .bits = 5, .baseline = 32 },
.{ .symbol = 8, .bits = 5, .baseline = 32 },
.{ .symbol = 11, .bits = 6, .baseline = 0 },
.{ .symbol = 14, .bits = 6, .baseline = 0 },
.{ .symbol = 17, .bits = 6, .baseline = 0 },
.{ .symbol = 20, .bits = 6, .baseline = 0 },
.{ .symbol = 23, .bits = 6, .baseline = 0 },
.{ .symbol = 26, .bits = 6, .baseline = 0 },
.{ .symbol = 29, .bits = 6, .baseline = 0 },
.{ .symbol = 52, .bits = 6, .baseline = 0 },
.{ .symbol = 51, .bits = 6, .baseline = 0 },
.{ .symbol = 50, .bits = 6, .baseline = 0 },
.{ .symbol = 49, .bits = 6, .baseline = 0 },
.{ .symbol = 48, .bits = 6, .baseline = 0 },
.{ .symbol = 47, .bits = 6, .baseline = 0 },
.{ .symbol = 46, .bits = 6, .baseline = 0 },
},
};
pub const predefined_offset_fse_table = Table{
.fse = &[32]Table.Fse{
.{ .symbol = 0, .bits = 5, .baseline = 0 },
.{ .symbol = 6, .bits = 4, .baseline = 0 },
.{ .symbol = 9, .bits = 5, .baseline = 0 },
.{ .symbol = 15, .bits = 5, .baseline = 0 },
.{ .symbol = 21, .bits = 5, .baseline = 0 },
.{ .symbol = 3, .bits = 5, .baseline = 0 },
.{ .symbol = 7, .bits = 4, .baseline = 0 },
.{ .symbol = 12, .bits = 5, .baseline = 0 },
.{ .symbol = 18, .bits = 5, .baseline = 0 },
.{ .symbol = 23, .bits = 5, .baseline = 0 },
.{ .symbol = 5, .bits = 5, .baseline = 0 },
.{ .symbol = 8, .bits = 4, .baseline = 0 },
.{ .symbol = 14, .bits = 5, .baseline = 0 },
.{ .symbol = 20, .bits = 5, .baseline = 0 },
.{ .symbol = 2, .bits = 5, .baseline = 0 },
.{ .symbol = 7, .bits = 4, .baseline = 16 },
.{ .symbol = 11, .bits = 5, .baseline = 0 },
.{ .symbol = 17, .bits = 5, .baseline = 0 },
.{ .symbol = 22, .bits = 5, .baseline = 0 },
.{ .symbol = 4, .bits = 5, .baseline = 0 },
.{ .symbol = 8, .bits = 4, .baseline = 16 },
.{ .symbol = 13, .bits = 5, .baseline = 0 },
.{ .symbol = 19, .bits = 5, .baseline = 0 },
.{ .symbol = 1, .bits = 5, .baseline = 0 },
.{ .symbol = 6, .bits = 4, .baseline = 16 },
.{ .symbol = 10, .bits = 5, .baseline = 0 },
.{ .symbol = 16, .bits = 5, .baseline = 0 },
.{ .symbol = 28, .bits = 5, .baseline = 0 },
.{ .symbol = 27, .bits = 5, .baseline = 0 },
.{ .symbol = 26, .bits = 5, .baseline = 0 },
.{ .symbol = 25, .bits = 5, .baseline = 0 },
.{ .symbol = 24, .bits = 5, .baseline = 0 },
},
};
pub const start_repeated_offset_1 = 1;
pub const start_repeated_offset_2 = 4;
pub const start_repeated_offset_3 = 8;
pub const table_accuracy_log_max = struct {
pub const literal = 9;
pub const match = 9;
pub const offset = 8;
};
pub const table_symbol_count_max = struct {
pub const literal = 36;
pub const match = 53;
pub const offset = 32;
};
pub const default_accuracy_log = struct {
pub const literal = 6;
pub const match = 6;
pub const offset = 5;
};
pub const table_size_max = struct {
pub const literal = 1 << table_accuracy_log_max.literal;
pub const match = 1 << table_accuracy_log_max.match;
pub const offset = 1 << table_accuracy_log_max.match;
};
};
test {
const testing = @import("std").testing;
testing.refAllDeclsRecursive(@This());
}

View File

@ -178,7 +178,7 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime
const sig = try key_pair.sign(&msg, null); const sig = try key_pair.sign(&msg, null);
var batch: [64]Signature.BatchElement = undefined; var batch: [64]Signature.BatchElement = undefined;
for (batch) |*element| { for (&batch) |*element| {
element.* = Signature.BatchElement{ .sig = sig, .msg = &msg, .public_key = key_pair.public_key }; element.* = Signature.BatchElement{ .sig = sig, .msg = &msg, .public_key = key_pair.public_key };
} }

View File

@ -32,6 +32,10 @@ pub const CityHash64 = cityhash.CityHash64;
const wyhash = @import("hash/wyhash.zig"); const wyhash = @import("hash/wyhash.zig");
pub const Wyhash = wyhash.Wyhash; pub const Wyhash = wyhash.Wyhash;
const xxhash = @import("hash/xxhash.zig");
pub const XxHash64 = xxhash.XxHash64;
pub const XxHash32 = xxhash.XxHash32;
test "hash" { test "hash" {
_ = adler; _ = adler;
_ = auto_hash; _ = auto_hash;
@ -40,4 +44,5 @@ test "hash" {
_ = murmur; _ = murmur;
_ = cityhash; _ = cityhash;
_ = wyhash; _ = wyhash;
_ = xxhash;
} }

268
lib/std/hash/xxhash.zig Normal file
View File

@ -0,0 +1,268 @@
const std = @import("std");
const mem = std.mem;
const expectEqual = std.testing.expectEqual;
const rotl = std.math.rotl;
pub const XxHash64 = struct {
acc1: u64,
acc2: u64,
acc3: u64,
acc4: u64,
seed: u64,
buf: [32]u8,
buf_len: usize,
byte_count: usize,
const prime_1 = 0x9E3779B185EBCA87; // 0b1001111000110111011110011011000110000101111010111100101010000111
const prime_2 = 0xC2B2AE3D27D4EB4F; // 0b1100001010110010101011100011110100100111110101001110101101001111
const prime_3 = 0x165667B19E3779F9; // 0b0001011001010110011001111011000110011110001101110111100111111001
const prime_4 = 0x85EBCA77C2B2AE63; // 0b1000010111101011110010100111011111000010101100101010111001100011
const prime_5 = 0x27D4EB2F165667C5; // 0b0010011111010100111010110010111100010110010101100110011111000101
pub fn init(seed: u64) XxHash64 {
return XxHash64{
.seed = seed,
.acc1 = seed +% prime_1 +% prime_2,
.acc2 = seed +% prime_2,
.acc3 = seed,
.acc4 = seed -% prime_1,
.buf = undefined,
.buf_len = 0,
.byte_count = 0,
};
}
pub fn update(self: *XxHash64, input: []const u8) void {
if (input.len < 32 - self.buf_len) {
mem.copy(u8, self.buf[self.buf_len..], input);
self.buf_len += input.len;
return;
}
var i: usize = 0;
if (self.buf_len > 0) {
i = 32 - self.buf_len;
mem.copy(u8, self.buf[self.buf_len..], input[0..i]);
self.processStripe(&self.buf);
self.buf_len = 0;
}
while (i + 32 <= input.len) : (i += 32) {
self.processStripe(input[i..][0..32]);
}
const remaining_bytes = input[i..];
mem.copy(u8, &self.buf, remaining_bytes);
self.buf_len = remaining_bytes.len;
}
inline fn processStripe(self: *XxHash64, buf: *const [32]u8) void {
self.acc1 = round(self.acc1, mem.readIntLittle(u64, buf[0..8]));
self.acc2 = round(self.acc2, mem.readIntLittle(u64, buf[8..16]));
self.acc3 = round(self.acc3, mem.readIntLittle(u64, buf[16..24]));
self.acc4 = round(self.acc4, mem.readIntLittle(u64, buf[24..32]));
self.byte_count += 32;
}
inline fn round(acc: u64, lane: u64) u64 {
const a = acc +% (lane *% prime_2);
const b = rotl(u64, a, 31);
return b *% prime_1;
}
pub fn final(self: *XxHash64) u64 {
var acc: u64 = undefined;
if (self.byte_count < 32) {
acc = self.seed +% prime_5;
} else {
acc = rotl(u64, self.acc1, 1) +% rotl(u64, self.acc2, 7) +%
rotl(u64, self.acc3, 12) +% rotl(u64, self.acc4, 18);
acc = mergeAccumulator(acc, self.acc1);
acc = mergeAccumulator(acc, self.acc2);
acc = mergeAccumulator(acc, self.acc3);
acc = mergeAccumulator(acc, self.acc4);
}
acc = acc +% @as(u64, self.byte_count) +% @as(u64, self.buf_len);
var pos: usize = 0;
while (pos + 8 <= self.buf_len) : (pos += 8) {
const lane = mem.readIntLittle(u64, self.buf[pos..][0..8]);
acc ^= round(0, lane);
acc = rotl(u64, acc, 27) *% prime_1;
acc +%= prime_4;
}
if (pos + 4 <= self.buf_len) {
const lane = @as(u64, mem.readIntLittle(u32, self.buf[pos..][0..4]));
acc ^= lane *% prime_1;
acc = rotl(u64, acc, 23) *% prime_2;
acc +%= prime_3;
pos += 4;
}
while (pos < self.buf_len) : (pos += 1) {
const lane = @as(u64, self.buf[pos]);
acc ^= lane *% prime_5;
acc = rotl(u64, acc, 11) *% prime_1;
}
acc ^= acc >> 33;
acc *%= prime_2;
acc ^= acc >> 29;
acc *%= prime_3;
acc ^= acc >> 32;
return acc;
}
inline fn mergeAccumulator(acc: u64, other: u64) u64 {
const a = acc ^ round(0, other);
const b = a *% prime_1;
return b +% prime_4;
}
pub fn hash(input: []const u8) u64 {
var hasher = XxHash64.init(0);
hasher.update(input);
return hasher.final();
}
};
pub const XxHash32 = struct {
acc1: u32,
acc2: u32,
acc3: u32,
acc4: u32,
seed: u32,
buf: [16]u8,
buf_len: usize,
byte_count: usize,
const prime_1 = 0x9E3779B1; // 0b10011110001101110111100110110001
const prime_2 = 0x85EBCA77; // 0b10000101111010111100101001110111
const prime_3 = 0xC2B2AE3D; // 0b11000010101100101010111000111101
const prime_4 = 0x27D4EB2F; // 0b00100111110101001110101100101111
const prime_5 = 0x165667B1; // 0b00010110010101100110011110110001
pub fn init(seed: u32) XxHash32 {
return XxHash32{
.seed = seed,
.acc1 = seed +% prime_1 +% prime_2,
.acc2 = seed +% prime_2,
.acc3 = seed,
.acc4 = seed -% prime_1,
.buf = undefined,
.buf_len = 0,
.byte_count = 0,
};
}
pub fn update(self: *XxHash32, input: []const u8) void {
if (input.len < 16 - self.buf_len) {
mem.copy(u8, self.buf[self.buf_len..], input);
self.buf_len += input.len;
return;
}
var i: usize = 0;
if (self.buf_len > 0) {
i = 16 - self.buf_len;
mem.copy(u8, self.buf[self.buf_len..], input[0..i]);
self.processStripe(&self.buf);
self.buf_len = 0;
}
while (i + 16 <= input.len) : (i += 16) {
self.processStripe(input[i..][0..16]);
}
const remaining_bytes = input[i..];
mem.copy(u8, &self.buf, remaining_bytes);
self.buf_len = remaining_bytes.len;
}
inline fn processStripe(self: *XxHash32, buf: *const [16]u8) void {
self.acc1 = round(self.acc1, mem.readIntLittle(u32, buf[0..4]));
self.acc2 = round(self.acc2, mem.readIntLittle(u32, buf[4..8]));
self.acc3 = round(self.acc3, mem.readIntLittle(u32, buf[8..12]));
self.acc4 = round(self.acc4, mem.readIntLittle(u32, buf[12..16]));
self.byte_count += 16;
}
inline fn round(acc: u32, lane: u32) u32 {
const a = acc +% (lane *% prime_2);
const b = rotl(u32, a, 13);
return b *% prime_1;
}
pub fn final(self: *XxHash32) u32 {
var acc: u32 = undefined;
if (self.byte_count < 16) {
acc = self.seed +% prime_5;
} else {
acc = rotl(u32, self.acc1, 1) +% rotl(u32, self.acc2, 7) +%
rotl(u32, self.acc3, 12) +% rotl(u32, self.acc4, 18);
}
acc = acc +% @intCast(u32, self.byte_count) +% @intCast(u32, self.buf_len);
var pos: usize = 0;
while (pos + 4 <= self.buf_len) : (pos += 4) {
const lane = mem.readIntLittle(u32, self.buf[pos..][0..4]);
acc +%= lane *% prime_3;
acc = rotl(u32, acc, 17) *% prime_4;
}
while (pos < self.buf_len) : (pos += 1) {
const lane = @as(u32, self.buf[pos]);
acc +%= lane *% prime_5;
acc = rotl(u32, acc, 11) *% prime_1;
}
acc ^= acc >> 15;
acc *%= prime_2;
acc ^= acc >> 13;
acc *%= prime_3;
acc ^= acc >> 16;
return acc;
}
pub fn hash(input: []const u8) u32 {
var hasher = XxHash32.init(0);
hasher.update(input);
return hasher.final();
}
};
test "xxhash64" {
const hash = XxHash64.hash;
try expectEqual(hash(""), 0xef46db3751d8e999);
try expectEqual(hash("a"), 0xd24ec4f1a98c6e5b);
try expectEqual(hash("abc"), 0x44bc2cf5ad770999);
try expectEqual(hash("message digest"), 0x066ed728fceeb3be);
try expectEqual(hash("abcdefghijklmnopqrstuvwxyz"), 0xcfe1f278fa89835c);
try expectEqual(hash("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 0xaaa46907d3047814);
try expectEqual(hash("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 0xe04a477f19ee145d);
}
test "xxhash32" {
const hash = XxHash32.hash;
try expectEqual(hash(""), 0x02cc5d05);
try expectEqual(hash("a"), 0x550d7456);
try expectEqual(hash("abc"), 0x32d153ff);
try expectEqual(hash("message digest"), 0x7c948494);
try expectEqual(hash("abcdefghijklmnopqrstuvwxyz"), 0x63a14d5f);
try expectEqual(hash("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 0x9c285e64);
try expectEqual(hash("12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 0x9c05f475);
}

View File

@ -508,7 +508,7 @@ pub fn HashMap(
/// If a new entry needs to be stored, this function asserts there /// If a new entry needs to be stored, this function asserts there
/// is enough capacity to store it. /// is enough capacity to store it.
pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult { pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
return self.unmanaged.getOrPutAssumeCapacityAdapted(self.allocator, key, ctx); return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
} }
pub fn getOrPutValue(self: *Self, key: K, value: V) Allocator.Error!Entry { pub fn getOrPutValue(self: *Self, key: K, value: V) Allocator.Error!Entry {
@ -2130,7 +2130,7 @@ test "std.hash_map getOrPutAdapted" {
try testing.expectEqual(map.count(), keys.len); try testing.expectEqual(map.count(), keys.len);
inline for (keys, 0..) |key_str, i| { inline for (keys, 0..) |key_str, i| {
const result = try map.getOrPutAdapted(key_str, AdaptedContext{}); const result = map.getOrPutAssumeCapacityAdapted(key_str, AdaptedContext{});
try testing.expect(result.found_existing); try testing.expect(result.found_existing);
try testing.expectEqual(real_keys[i], result.key_ptr.*); try testing.expectEqual(real_keys[i], result.key_ptr.*);
try testing.expectEqual(@as(u64, i) * 2, result.value_ptr.*); try testing.expectEqual(@as(u64, i) * 2, result.value_ptr.*);

View File

@ -941,21 +941,21 @@ pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
return true; return true;
} }
/// Remove values from the beginning of a slice. /// Remove a set of values from the beginning of a slice.
pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { pub fn trimLeft(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var begin: usize = 0; var begin: usize = 0;
while (begin < slice.len and indexOfScalar(T, values_to_strip, slice[begin]) != null) : (begin += 1) {} while (begin < slice.len and indexOfScalar(T, values_to_strip, slice[begin]) != null) : (begin += 1) {}
return slice[begin..]; return slice[begin..];
} }
/// Remove values from the end of a slice. /// Remove a set of values from the end of a slice.
pub fn trimRight(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { pub fn trimRight(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var end: usize = slice.len; var end: usize = slice.len;
while (end > 0 and indexOfScalar(T, values_to_strip, slice[end - 1]) != null) : (end -= 1) {} while (end > 0 and indexOfScalar(T, values_to_strip, slice[end - 1]) != null) : (end -= 1) {}
return slice[0..end]; return slice[0..end];
} }
/// Remove values from the beginning and end of a slice. /// Remove a set of values from the beginning and end of a slice.
pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T { pub fn trim(comptime T: type, slice: []const T, values_to_strip: []const T) []const T {
var begin: usize = 0; var begin: usize = 0;
var end: usize = slice.len; var end: usize = slice.len;

View File

@ -433,15 +433,9 @@ pub fn MultiArrayList(comptime S: type) type {
} }
fn capacityInBytes(capacity: usize) usize { fn capacityInBytes(capacity: usize) usize {
if (builtin.zig_backend == .stage2_c) { comptime var elem_bytes: usize = 0;
var bytes: usize = 0; inline for (sizes.bytes) |size| elem_bytes += size;
for (sizes.bytes) |size| bytes += size * capacity; return elem_bytes * capacity;
return bytes;
} else {
const sizes_vector: @Vector(sizes.bytes.len, usize) = sizes.bytes;
const capacity_vector = @splat(sizes.bytes.len, capacity);
return @reduce(.Add, capacity_vector * sizes_vector);
}
} }
fn allocatedBytes(self: Self) []align(@alignOf(S)) u8 { fn allocatedBytes(self: Self) []align(@alignOf(S)) u8 {

View File

@ -7056,3 +7056,21 @@ pub fn timerfd_gettime(fd: i32) TimerFdGetError!linux.itimerspec {
else => |err| return unexpectedErrno(err), else => |err| return unexpectedErrno(err),
}; };
} }
pub const have_sigpipe_support = @hasDecl(@This(), "SIG") and @hasDecl(SIG, "PIPE");
fn noopSigHandler(_: c_int) callconv(.C) void {}
pub fn maybeIgnoreSigpipe() void {
if (have_sigpipe_support and !std.options.keep_sigpipe) {
const act = Sigaction{
// We set handler to a noop function instead of SIG.IGN so we don't leak our
// signal disposition to a child process
.handler = .{ .handler = noopSigHandler },
.mask = empty_sigset,
.flags = 0,
};
sigaction(SIG.PIPE, &act, null) catch |err|
std.debug.panic("failed to install noop SIGPIPE handler with '{s}'", .{@errorName(err)});
}
}

View File

@ -21,10 +21,10 @@ pub extern "advapi32" fn RegOpenKeyExW(
pub extern "advapi32" fn RegQueryValueExW( pub extern "advapi32" fn RegQueryValueExW(
hKey: HKEY, hKey: HKEY,
lpValueName: LPCWSTR, lpValueName: LPCWSTR,
lpReserved: *DWORD, lpReserved: ?*DWORD,
lpType: *DWORD, lpType: ?*DWORD,
lpData: *BYTE, lpData: ?*BYTE,
lpcbData: *DWORD, lpcbData: ?*DWORD,
) callconv(WINAPI) LSTATUS; ) callconv(WINAPI) LSTATUS;
// RtlGenRandom is known as SystemFunction036 under advapi32 // RtlGenRandom is known as SystemFunction036 under advapi32

View File

@ -6,10 +6,10 @@ const math = std.math;
pub fn binarySearch( pub fn binarySearch(
comptime T: type, comptime T: type,
key: T, key: anytype,
items: []const T, items: []const T,
context: anytype, context: anytype,
comptime compareFn: fn (context: @TypeOf(context), lhs: T, rhs: T) math.Order, comptime compareFn: fn (context: @TypeOf(context), key: @TypeOf(key), mid: T) math.Order,
) ?usize { ) ?usize {
var left: usize = 0; var left: usize = 0;
var right: usize = items.len; var right: usize = items.len;
@ -41,35 +41,69 @@ test "binarySearch" {
}; };
try testing.expectEqual( try testing.expectEqual(
@as(?usize, null), @as(?usize, null),
binarySearch(u32, 1, &[_]u32{}, {}, S.order_u32), binarySearch(u32, @as(u32, 1), &[_]u32{}, {}, S.order_u32),
); );
try testing.expectEqual( try testing.expectEqual(
@as(?usize, 0), @as(?usize, 0),
binarySearch(u32, 1, &[_]u32{1}, {}, S.order_u32), binarySearch(u32, @as(u32, 1), &[_]u32{1}, {}, S.order_u32),
); );
try testing.expectEqual( try testing.expectEqual(
@as(?usize, null), @as(?usize, null),
binarySearch(u32, 1, &[_]u32{0}, {}, S.order_u32), binarySearch(u32, @as(u32, 1), &[_]u32{0}, {}, S.order_u32),
); );
try testing.expectEqual( try testing.expectEqual(
@as(?usize, null), @as(?usize, null),
binarySearch(u32, 0, &[_]u32{1}, {}, S.order_u32), binarySearch(u32, @as(u32, 0), &[_]u32{1}, {}, S.order_u32),
); );
try testing.expectEqual( try testing.expectEqual(
@as(?usize, 4), @as(?usize, 4),
binarySearch(u32, 5, &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32), binarySearch(u32, @as(u32, 5), &[_]u32{ 1, 2, 3, 4, 5 }, {}, S.order_u32),
); );
try testing.expectEqual( try testing.expectEqual(
@as(?usize, 0), @as(?usize, 0),
binarySearch(u32, 2, &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32), binarySearch(u32, @as(u32, 2), &[_]u32{ 2, 4, 8, 16, 32, 64 }, {}, S.order_u32),
); );
try testing.expectEqual( try testing.expectEqual(
@as(?usize, 1), @as(?usize, 1),
binarySearch(i32, -4, &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32), binarySearch(i32, @as(i32, -4), &[_]i32{ -7, -4, 0, 9, 10 }, {}, S.order_i32),
); );
try testing.expectEqual( try testing.expectEqual(
@as(?usize, 3), @as(?usize, 3),
binarySearch(i32, 98, &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32), binarySearch(i32, @as(i32, 98), &[_]i32{ -100, -25, 2, 98, 99, 100 }, {}, S.order_i32),
);
const R = struct {
b: i32,
e: i32,
fn r(b: i32, e: i32) @This() {
return @This(){ .b = b, .e = e };
}
fn order(context: void, key: i32, mid: @This()) math.Order {
_ = context;
if (key < mid.b) {
return .lt;
}
if (key > mid.e) {
return .gt;
}
return .eq;
}
};
try testing.expectEqual(
@as(?usize, null),
binarySearch(R, @as(i32, -45), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
);
try testing.expectEqual(
@as(?usize, 2),
binarySearch(R, @as(i32, 10), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
);
try testing.expectEqual(
@as(?usize, 1),
binarySearch(R, @as(i32, -20), &[_]R{ R.r(-100, -50), R.r(-40, -20), R.r(-10, 20), R.r(30, 40) }, {}, R.order),
); );
} }

View File

@ -496,6 +496,7 @@ fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {
std.os.environ = envp; std.os.environ = envp;
std.debug.maybeEnableSegfaultHandler(); std.debug.maybeEnableSegfaultHandler();
std.os.maybeIgnoreSigpipe();
return initEventLoopAndCallMain(); return initEventLoopAndCallMain();
} }

View File

@ -31,6 +31,7 @@ pub const PackedIntSliceEndian = @import("packed_int_array.zig").PackedIntSliceE
pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue; pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue;
pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue; pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue;
pub const Progress = @import("Progress.zig"); pub const Progress = @import("Progress.zig");
pub const RingBuffer = @import("RingBuffer.zig");
pub const SegmentedList = @import("segmented_list.zig").SegmentedList; pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const SemanticVersion = @import("SemanticVersion.zig"); pub const SemanticVersion = @import("SemanticVersion.zig");
pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList; pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
@ -167,6 +168,22 @@ pub const options = struct {
options_override.crypto_always_getrandom options_override.crypto_always_getrandom
else else
false; false;
/// By default Zig disables SIGPIPE by setting a "no-op" handler for it. Set this option
/// to `true` to prevent that.
///
/// Note that we use a "no-op" handler instead of SIG_IGN because it will not be inherited by
/// any child process.
///
/// SIGPIPE is triggered when a process attempts to write to a broken pipe. By default, SIGPIPE
/// will terminate the process instead of exiting. It doesn't trigger the panic handler so in many
/// cases it's unclear why the process was terminated. By capturing SIGPIPE instead, functions that
/// write to broken pipes will return the EPIPE error (error.BrokenPipe) and the program can handle
/// it like any other error.
pub const keep_sigpipe: bool = if (@hasDecl(options_override, "keep_sigpipe"))
options_override.keep_sigpipe
else
false;
}; };
// This forces the start.zig file to be imported, and the comptime logic inside that // This forces the start.zig file to be imported, and the comptime logic inside that

View File

@ -89,7 +89,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
const ti = @typeInfo(Feature); const ti = @typeInfo(Feature);
for (result) |*elem, i| { for (&result, 0..) |*elem, i| {
elem.index = i; elem.index = i;
elem.name = ti.Enum.fields[i].name; elem.name = ti.Enum.fields[i].name;
} }

View File

@ -23,7 +23,7 @@ pub const all_features = blk: {
.dependencies = featureSet(&[_]Feature{}), .dependencies = featureSet(&[_]Feature{}),
}; };
const ti = @typeInfo(Feature); const ti = @typeInfo(Feature);
for (result) |*elem, i| { for (&result, 0..) |*elem, i| {
elem.index = i; elem.index = i;
elem.name = ti.Enum.fields[i].name; elem.name = ti.Enum.fields[i].name;
} }

1696
lib/zig.h

File diff suppressed because it is too large Load Diff

View File

@ -860,17 +860,9 @@ fn walkInstruction(
const str_tok = data[inst_index].str_tok; const str_tok = data[inst_index].str_tok;
var path = str_tok.get(file.zir); var path = str_tok.get(file.zir);
const maybe_other_package: ?*Package = blk: {
if (self.module.main_pkg_is_std and std.mem.eql(u8, path, "std")) {
path = "std";
break :blk self.module.main_pkg;
} else {
break :blk file.pkg.table.get(path);
}
};
// importFile cannot error out since all files // importFile cannot error out since all files
// are already loaded at this point // are already loaded at this point
if (maybe_other_package) |other_package| { if (file.pkg.table.get(path)) |other_package| {
const result = try self.packages.getOrPut(self.arena, other_package); const result = try self.packages.getOrPut(self.arena, other_package);
// Immediately add this package to the import table of our // Immediately add this package to the import table of our
@ -3629,7 +3621,7 @@ fn tryResolveRefPath(
} }
if (self.pending_ref_paths.get(&path[path.len - 1])) |waiter_list| { if (self.pending_ref_paths.get(&path[path.len - 1])) |waiter_list| {
// It's important to de-register oureslves as pending before // It's important to de-register ourselves as pending before
// attempting to resolve any other decl. // attempting to resolve any other decl.
_ = self.pending_ref_paths.remove(&path[path.len - 1]); _ = self.pending_ref_paths.remove(&path[path.len - 1]);

View File

@ -1596,36 +1596,53 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const builtin_pkg = try Package.createWithDir( const builtin_pkg = try Package.createWithDir(
gpa, gpa,
"builtin",
zig_cache_artifact_directory, zig_cache_artifact_directory,
null, null,
"builtin.zig", "builtin.zig",
); );
errdefer builtin_pkg.destroy(gpa); errdefer builtin_pkg.destroy(gpa);
const std_pkg = try Package.createWithDir( // When you're testing std, the main module is std. In that case, we'll just set the std
gpa, // module to the main one, since avoiding the errors caused by duplicating it is more
"std", // effort than it's worth.
options.zig_lib_directory, const main_pkg_is_std = m: {
"std", const std_path = try std.fs.path.resolve(arena, &[_][]const u8{
"std.zig", options.zig_lib_directory.path orelse ".",
); "std",
errdefer std_pkg.destroy(gpa); "std.zig",
});
defer arena.free(std_path);
const main_path = try std.fs.path.resolve(arena, &[_][]const u8{
main_pkg.root_src_directory.path orelse ".",
main_pkg.root_src_path,
});
defer arena.free(main_path);
break :m mem.eql(u8, main_path, std_path);
};
const std_pkg = if (main_pkg_is_std)
main_pkg
else
try Package.createWithDir(
gpa,
options.zig_lib_directory,
"std",
"std.zig",
);
errdefer if (!main_pkg_is_std) std_pkg.destroy(gpa);
const root_pkg = if (options.is_test) root_pkg: { const root_pkg = if (options.is_test) root_pkg: {
// TODO: we currently have two packages named 'root' here, which is weird. This
// should be changed as part of the resolution of #12201
const test_pkg = if (options.test_runner_path) |test_runner| test_pkg: { const test_pkg = if (options.test_runner_path) |test_runner| test_pkg: {
const test_dir = std.fs.path.dirname(test_runner); const test_dir = std.fs.path.dirname(test_runner);
const basename = std.fs.path.basename(test_runner); const basename = std.fs.path.basename(test_runner);
const pkg = try Package.create(gpa, "root", test_dir, basename); const pkg = try Package.create(gpa, test_dir, basename);
// copy package table from main_pkg to root_pkg // copy package table from main_pkg to root_pkg
pkg.table = try main_pkg.table.clone(gpa); pkg.table = try main_pkg.table.clone(gpa);
break :test_pkg pkg; break :test_pkg pkg;
} else try Package.createWithDir( } else try Package.createWithDir(
gpa, gpa,
"root",
options.zig_lib_directory, options.zig_lib_directory,
null, null,
"test_runner.zig", "test_runner.zig",
@ -1639,7 +1656,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
const compiler_rt_pkg = if (include_compiler_rt and options.output_mode == .Obj) compiler_rt_pkg: { const compiler_rt_pkg = if (include_compiler_rt and options.output_mode == .Obj) compiler_rt_pkg: {
break :compiler_rt_pkg try Package.createWithDir( break :compiler_rt_pkg try Package.createWithDir(
gpa, gpa,
"compiler_rt",
options.zig_lib_directory, options.zig_lib_directory,
null, null,
"compiler_rt.zig", "compiler_rt.zig",
@ -1647,28 +1663,14 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
} else null; } else null;
errdefer if (compiler_rt_pkg) |p| p.destroy(gpa); errdefer if (compiler_rt_pkg) |p| p.destroy(gpa);
try main_pkg.addAndAdopt(gpa, builtin_pkg); try main_pkg.add(gpa, "builtin", builtin_pkg);
try main_pkg.add(gpa, root_pkg); try main_pkg.add(gpa, "root", root_pkg);
try main_pkg.addAndAdopt(gpa, std_pkg); try main_pkg.add(gpa, "std", std_pkg);
if (compiler_rt_pkg) |p| { if (compiler_rt_pkg) |p| {
try main_pkg.addAndAdopt(gpa, p); try main_pkg.add(gpa, "compiler_rt", p);
} }
const main_pkg_is_std = m: {
const std_path = try std.fs.path.resolve(arena, &[_][]const u8{
std_pkg.root_src_directory.path orelse ".",
std_pkg.root_src_path,
});
defer arena.free(std_path);
const main_path = try std.fs.path.resolve(arena, &[_][]const u8{
main_pkg.root_src_directory.path orelse ".",
main_pkg.root_src_path,
});
defer arena.free(main_path);
break :m mem.eql(u8, main_path, std_path);
};
// Pre-open the directory handles for cached ZIR code so that it does not need // Pre-open the directory handles for cached ZIR code so that it does not need
// to redundantly happen for each AstGen operation. // to redundantly happen for each AstGen operation.
const zir_sub_dir = "z"; const zir_sub_dir = "z";
@ -1705,7 +1707,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.gpa = gpa, .gpa = gpa,
.comp = comp, .comp = comp,
.main_pkg = main_pkg, .main_pkg = main_pkg,
.main_pkg_is_std = main_pkg_is_std,
.root_pkg = root_pkg, .root_pkg = root_pkg,
.zig_cache_artifact_directory = zig_cache_artifact_directory, .zig_cache_artifact_directory = zig_cache_artifact_directory,
.global_zir_cache = global_zir_cache, .global_zir_cache = global_zir_cache,
@ -2772,6 +2773,111 @@ fn emitOthers(comp: *Compilation) void {
} }
} }
fn reportMultiModuleErrors(mod: *Module) !void {
// Some cases can give you a whole bunch of multi-module errors, which it's not helpful to
// print all of, so we'll cap the number of these to emit.
var num_errors: u32 = 0;
const max_errors = 5;
// Attach the "some omitted" note to the final error message
var last_err: ?*Module.ErrorMsg = null;
for (mod.import_table.values()) |file| {
if (!file.multi_pkg) continue;
num_errors += 1;
if (num_errors > max_errors) continue;
const err = err_blk: {
// Like with errors, let's cap the number of notes to prevent a huge error spew.
const max_notes = 5;
const omitted = file.references.items.len -| max_notes;
const num_notes = file.references.items.len - omitted;
const notes = try mod.gpa.alloc(Module.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes);
errdefer mod.gpa.free(notes);
for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| {
errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa);
note.* = switch (ref) {
.import => |loc| blk: {
const name = try loc.file_scope.pkg.getName(mod.gpa, mod.*);
defer mod.gpa.free(name);
break :blk try Module.ErrorMsg.init(
mod.gpa,
loc,
"imported from module {s}",
.{name},
);
},
.root => |pkg| blk: {
const name = try pkg.getName(mod.gpa, mod.*);
defer mod.gpa.free(name);
break :blk try Module.ErrorMsg.init(
mod.gpa,
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file },
"root of module {s}",
.{name},
);
},
};
}
errdefer for (notes[0..num_notes]) |*n| n.deinit(mod.gpa);
if (omitted > 0) {
notes[num_notes] = try Module.ErrorMsg.init(
mod.gpa,
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file },
"{} more references omitted",
.{omitted},
);
}
errdefer if (omitted > 0) notes[num_notes].deinit(mod.gpa);
const err = try Module.ErrorMsg.create(
mod.gpa,
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file },
"file exists in multiple modules",
.{},
);
err.notes = notes;
break :err_blk err;
};
errdefer err.destroy(mod.gpa);
try mod.failed_files.putNoClobber(mod.gpa, file, err);
last_err = err;
}
// If we omitted any errors, add a note saying that
if (num_errors > max_errors) {
const err = last_err.?;
// There isn't really any meaningful place to put this note, so just attach it to the
// last failed file
var note = try Module.ErrorMsg.init(
mod.gpa,
err.src_loc,
"{} more errors omitted",
.{num_errors - max_errors},
);
errdefer note.deinit(mod.gpa);
const i = err.notes.len;
err.notes = try mod.gpa.realloc(err.notes, i + 1);
err.notes[i] = note;
}
// Now that we've reported the errors, we need to deal with
// dependencies. Any file referenced by a multi_pkg file should also be
// marked multi_pkg and have its status set to astgen_failure, as it's
// ambiguous which package they should be analyzed as a part of. We need
// to add this flag after reporting the errors however, as otherwise
// we'd get an error for every single downstream file, which wouldn't be
// very useful.
for (mod.import_table.values()) |file| {
if (file.multi_pkg) file.recursiveMarkMultiPkg(mod);
}
}
/// Having the file open for writing is problematic as far as executing the /// Having the file open for writing is problematic as far as executing the
/// binary is concerned. This will remove the write flag, or close the file, /// binary is concerned. This will remove the write flag, or close the file,
/// or whatever is needed so that it can be executed. /// or whatever is needed so that it can be executed.
@ -3098,54 +3204,7 @@ pub fn performAllTheWork(
} }
if (comp.bin_file.options.module) |mod| { if (comp.bin_file.options.module) |mod| {
for (mod.import_table.values()) |file| { try reportMultiModuleErrors(mod);
if (!file.multi_pkg) continue;
const err = err_blk: {
const notes = try mod.gpa.alloc(Module.ErrorMsg, file.references.items.len);
errdefer mod.gpa.free(notes);
for (notes, 0..) |*note, i| {
errdefer for (notes[0..i]) |*n| n.deinit(mod.gpa);
note.* = switch (file.references.items[i]) {
.import => |loc| try Module.ErrorMsg.init(
mod.gpa,
loc,
"imported from package {s}",
.{loc.file_scope.pkg.name},
),
.root => |pkg| try Module.ErrorMsg.init(
mod.gpa,
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file },
"root of package {s}",
.{pkg.name},
),
};
}
errdefer for (notes) |*n| n.deinit(mod.gpa);
const err = try Module.ErrorMsg.create(
mod.gpa,
.{ .file_scope = file, .parent_decl_node = 0, .lazy = .entire_file },
"file exists in multiple packages",
.{},
);
err.notes = notes;
break :err_blk err;
};
errdefer err.destroy(mod.gpa);
try mod.failed_files.putNoClobber(mod.gpa, file, err);
}
// Now that we've reported the errors, we need to deal with
// dependencies. Any file referenced by a multi_pkg file should also be
// marked multi_pkg and have its status set to astgen_failure, as it's
// ambiguous which package they should be analyzed as a part of. We need
// to add this flag after reporting the errors however, as otherwise
// we'd get an error for every single downstream file, which wouldn't be
// very useful.
for (mod.import_table.values()) |file| {
if (file.multi_pkg) file.recursiveMarkMultiPkg(mod);
}
} }
{ {
@ -3266,24 +3325,20 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
const decl_emit_h = emit_h.declPtr(decl_index); const decl_emit_h = emit_h.declPtr(decl_index);
const fwd_decl = &decl_emit_h.fwd_decl; const fwd_decl = &decl_emit_h.fwd_decl;
fwd_decl.shrinkRetainingCapacity(0); fwd_decl.shrinkRetainingCapacity(0);
var typedefs_arena = std.heap.ArenaAllocator.init(gpa); var ctypes_arena = std.heap.ArenaAllocator.init(gpa);
defer typedefs_arena.deinit(); defer ctypes_arena.deinit();
var dg: c_codegen.DeclGen = .{ var dg: c_codegen.DeclGen = .{
.gpa = gpa, .gpa = gpa,
.module = module, .module = module,
.error_msg = null, .error_msg = null,
.decl_index = decl_index, .decl_index = decl_index.toOptional(),
.decl = decl, .decl = decl,
.fwd_decl = fwd_decl.toManaged(gpa), .fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = c_codegen.TypedefMap.initContext(gpa, .{ .mod = module }), .ctypes = .{},
.typedefs_arena = typedefs_arena.allocator(),
}; };
defer { defer {
for (dg.typedefs.values()) |typedef| { dg.ctypes.deinit(gpa);
module.gpa.free(typedef.rendered);
}
dg.typedefs.deinit();
dg.fwd_decl.deinit(); dg.fwd_decl.deinit();
} }
@ -5415,7 +5470,6 @@ fn buildOutputFromZig(
var main_pkg: Package = .{ var main_pkg: Package = .{
.root_src_directory = comp.zig_lib_directory, .root_src_directory = comp.zig_lib_directory,
.root_src_path = src_basename, .root_src_path = src_basename,
.name = "root",
}; };
defer main_pkg.deinitTable(comp.gpa); defer main_pkg.deinitTable(comp.gpa);
const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len]; const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len];

View File

@ -144,10 +144,6 @@ stage1_flags: packed struct {
} = .{}, } = .{},
job_queued_update_builtin_zig: bool = true, job_queued_update_builtin_zig: bool = true,
/// This makes it so that we can run `zig test` on the standard library.
/// Otherwise, the logic for scanning test decls skips all of them because
/// `main_pkg != std_pkg`.
main_pkg_is_std: bool,
compile_log_text: ArrayListUnmanaged(u8) = .{}, compile_log_text: ArrayListUnmanaged(u8) = .{},
@ -1950,7 +1946,7 @@ pub const File = struct {
prev_zir: ?*Zir = null, prev_zir: ?*Zir = null,
/// A single reference to a file. /// A single reference to a file.
const Reference = union(enum) { pub const Reference = union(enum) {
/// The file is imported directly (i.e. not as a package) with @import. /// The file is imported directly (i.e. not as a package) with @import.
import: SrcLoc, import: SrcLoc,
/// The file is the root of a package. /// The file is the root of a package.
@ -2113,7 +2109,27 @@ pub const File = struct {
/// Add a reference to this file during AstGen. /// Add a reference to this file during AstGen.
pub fn addReference(file: *File, mod: Module, ref: Reference) !void { pub fn addReference(file: *File, mod: Module, ref: Reference) !void {
try file.references.append(mod.gpa, ref); // Don't add the same module root twice. Note that since we always add module roots at the
// front of the references array (see below), this loop is actually O(1) on valid code.
if (ref == .root) {
for (file.references.items) |other| {
switch (other) {
.root => |r| if (ref.root == r) return,
else => break, // reached the end of the "is-root" references
}
}
}
switch (ref) {
// We put root references at the front of the list both to make the above loop fast and
// to make multi-module errors more helpful (since "root-of" notes are generally more
// informative than "imported-from" notes). This path is hit very rarely, so the speed
// of the insert operation doesn't matter too much.
.root => try file.references.insert(mod.gpa, 0, ref),
// Other references we'll just put at the end.
else => try file.references.append(mod.gpa, ref),
}
const pkg = switch (ref) { const pkg = switch (ref) {
.import => |loc| loc.file_scope.pkg, .import => |loc| loc.file_scope.pkg,
@ -2128,7 +2144,10 @@ pub const File = struct {
file.multi_pkg = true; file.multi_pkg = true;
file.status = .astgen_failure; file.status = .astgen_failure;
std.debug.assert(file.zir_loaded); // We can only mark children as failed if the ZIR is loaded, which may not
// be the case if there were other astgen failures in this file
if (!file.zir_loaded) return;
const imports_index = file.zir.extra[@enumToInt(Zir.ExtraIndex.imports)]; const imports_index = file.zir.extra[@enumToInt(Zir.ExtraIndex.imports)];
if (imports_index == 0) return; if (imports_index == 0) return;
const extra = file.zir.extraData(Zir.Inst.Imports, imports_index); const extra = file.zir.extraData(Zir.Inst.Imports, imports_index);
@ -3323,10 +3342,19 @@ pub fn deinit(mod: *Module) void {
// The callsite of `Compilation.create` owns the `main_pkg`, however // The callsite of `Compilation.create` owns the `main_pkg`, however
// Module owns the builtin and std packages that it adds. // Module owns the builtin and std packages that it adds.
if (mod.main_pkg.table.fetchRemove("builtin")) |kv| { if (mod.main_pkg.table.fetchRemove("builtin")) |kv| {
gpa.free(kv.key);
kv.value.destroy(gpa); kv.value.destroy(gpa);
} }
if (mod.main_pkg.table.fetchRemove("std")) |kv| { if (mod.main_pkg.table.fetchRemove("std")) |kv| {
kv.value.destroy(gpa); gpa.free(kv.key);
// It's possible for main_pkg to be std when running 'zig test'! In this case, we must not
// destroy it, since it would lead to a double-free.
if (kv.value != mod.main_pkg) {
kv.value.destroy(gpa);
}
}
if (mod.main_pkg.table.fetchRemove("root")) |kv| {
gpa.free(kv.key);
} }
if (mod.root_pkg != mod.main_pkg) { if (mod.root_pkg != mod.main_pkg) {
mod.root_pkg.destroy(gpa); mod.root_pkg.destroy(gpa);
@ -4808,11 +4836,14 @@ pub fn importPkg(mod: *Module, pkg: *Package) !ImportFileResult {
const gop = try mod.import_table.getOrPut(gpa, resolved_path); const gop = try mod.import_table.getOrPut(gpa, resolved_path);
errdefer _ = mod.import_table.pop(); errdefer _ = mod.import_table.pop();
if (gop.found_existing) return ImportFileResult{ if (gop.found_existing) {
.file = gop.value_ptr.*, try gop.value_ptr.*.addReference(mod.*, .{ .root = pkg });
.is_new = false, return ImportFileResult{
.is_pkg = true, .file = gop.value_ptr.*,
}; .is_new = false,
.is_pkg = true,
};
}
const sub_file_path = try gpa.dupe(u8, pkg.root_src_path); const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
errdefer gpa.free(sub_file_path); errdefer gpa.free(sub_file_path);
@ -5208,22 +5239,14 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) Allocator.Err
// test decl with no name. Skip the part where we check against // test decl with no name. Skip the part where we check against
// the test name filter. // the test name filter.
if (!comp.bin_file.options.is_test) break :blk false; if (!comp.bin_file.options.is_test) break :blk false;
if (decl_pkg != mod.main_pkg) { if (decl_pkg != mod.main_pkg) break :blk false;
if (!mod.main_pkg_is_std) break :blk false;
const std_pkg = mod.main_pkg.table.get("std").?;
if (std_pkg != decl_pkg) break :blk false;
}
try mod.test_functions.put(gpa, new_decl_index, {}); try mod.test_functions.put(gpa, new_decl_index, {});
break :blk true; break :blk true;
}, },
else => blk: { else => blk: {
if (!is_named_test) break :blk false; if (!is_named_test) break :blk false;
if (!comp.bin_file.options.is_test) break :blk false; if (!comp.bin_file.options.is_test) break :blk false;
if (decl_pkg != mod.main_pkg) { if (decl_pkg != mod.main_pkg) break :blk false;
if (!mod.main_pkg_is_std) break :blk false;
const std_pkg = mod.main_pkg.table.get("std").?;
if (std_pkg != decl_pkg) break :blk false;
}
if (comp.test_filter) |test_filter| { if (comp.test_filter) |test_filter| {
if (mem.indexOf(u8, decl_name, test_filter) == null) { if (mem.indexOf(u8, decl_name, test_filter) == null) {
break :blk false; break :blk false;

View File

@ -22,17 +22,16 @@ pub const Table = std.StringHashMapUnmanaged(*Package);
root_src_directory: Compilation.Directory, root_src_directory: Compilation.Directory,
/// Relative to `root_src_directory`. May contain path separators. /// Relative to `root_src_directory`. May contain path separators.
root_src_path: []const u8, root_src_path: []const u8,
/// The dependency table of this module. Shared dependencies such as 'std', 'builtin', and 'root'
/// are not specified in every dependency table, but instead only in the table of `main_pkg`.
/// `Module.importFile` is responsible for detecting these names and using the correct package.
table: Table = .{}, table: Table = .{},
parent: ?*Package = null,
/// Whether to free `root_src_directory` on `destroy`. /// Whether to free `root_src_directory` on `destroy`.
root_src_directory_owned: bool = false, root_src_directory_owned: bool = false,
/// This information can be recovered from 'table', but it's more convenient to store on the package.
name: []const u8,
/// Allocate a Package. No references to the slices passed are kept. /// Allocate a Package. No references to the slices passed are kept.
pub fn create( pub fn create(
gpa: Allocator, gpa: Allocator,
name: []const u8,
/// Null indicates the current working directory /// Null indicates the current working directory
root_src_dir_path: ?[]const u8, root_src_dir_path: ?[]const u8,
/// Relative to root_src_dir_path /// Relative to root_src_dir_path
@ -47,9 +46,6 @@ pub fn create(
const owned_src_path = try gpa.dupe(u8, root_src_path); const owned_src_path = try gpa.dupe(u8, root_src_path);
errdefer gpa.free(owned_src_path); errdefer gpa.free(owned_src_path);
const owned_name = try gpa.dupe(u8, name);
errdefer gpa.free(owned_name);
ptr.* = .{ ptr.* = .{
.root_src_directory = .{ .root_src_directory = .{
.path = owned_dir_path, .path = owned_dir_path,
@ -57,7 +53,6 @@ pub fn create(
}, },
.root_src_path = owned_src_path, .root_src_path = owned_src_path,
.root_src_directory_owned = true, .root_src_directory_owned = true,
.name = owned_name,
}; };
return ptr; return ptr;
@ -65,7 +60,6 @@ pub fn create(
pub fn createWithDir( pub fn createWithDir(
gpa: Allocator, gpa: Allocator,
name: []const u8,
directory: Compilation.Directory, directory: Compilation.Directory,
/// Relative to `directory`. If null, means `directory` is the root src dir /// Relative to `directory`. If null, means `directory` is the root src dir
/// and is owned externally. /// and is owned externally.
@ -79,9 +73,6 @@ pub fn createWithDir(
const owned_src_path = try gpa.dupe(u8, root_src_path); const owned_src_path = try gpa.dupe(u8, root_src_path);
errdefer gpa.free(owned_src_path); errdefer gpa.free(owned_src_path);
const owned_name = try gpa.dupe(u8, name);
errdefer gpa.free(owned_name);
if (root_src_dir_path) |p| { if (root_src_dir_path) |p| {
const owned_dir_path = try directory.join(gpa, &[1][]const u8{p}); const owned_dir_path = try directory.join(gpa, &[1][]const u8{p});
errdefer gpa.free(owned_dir_path); errdefer gpa.free(owned_dir_path);
@ -93,14 +84,12 @@ pub fn createWithDir(
}, },
.root_src_directory_owned = true, .root_src_directory_owned = true,
.root_src_path = owned_src_path, .root_src_path = owned_src_path,
.name = owned_name,
}; };
} else { } else {
ptr.* = .{ ptr.* = .{
.root_src_directory = directory, .root_src_directory = directory,
.root_src_directory_owned = false, .root_src_directory_owned = false,
.root_src_path = owned_src_path, .root_src_path = owned_src_path,
.name = owned_name,
}; };
} }
return ptr; return ptr;
@ -110,7 +99,6 @@ pub fn createWithDir(
/// inside its table; the caller is responsible for calling destroy() on them. /// inside its table; the caller is responsible for calling destroy() on them.
pub fn destroy(pkg: *Package, gpa: Allocator) void { pub fn destroy(pkg: *Package, gpa: Allocator) void {
gpa.free(pkg.root_src_path); gpa.free(pkg.root_src_path);
gpa.free(pkg.name);
if (pkg.root_src_directory_owned) { if (pkg.root_src_directory_owned) {
// If root_src_directory.path is null then the handle is the cwd() // If root_src_directory.path is null then the handle is the cwd()
@ -130,15 +118,97 @@ pub fn deinitTable(pkg: *Package, gpa: Allocator) void {
pkg.table.deinit(gpa); pkg.table.deinit(gpa);
} }
pub fn add(pkg: *Package, gpa: Allocator, package: *Package) !void { pub fn add(pkg: *Package, gpa: Allocator, name: []const u8, package: *Package) !void {
try pkg.table.ensureUnusedCapacity(gpa, 1); try pkg.table.ensureUnusedCapacity(gpa, 1);
pkg.table.putAssumeCapacityNoClobber(package.name, package); const name_dupe = try gpa.dupe(u8, name);
pkg.table.putAssumeCapacityNoClobber(name_dupe, package);
} }
pub fn addAndAdopt(parent: *Package, gpa: Allocator, child: *Package) !void { /// Compute a readable name for the package. The returned name should be freed from gpa. This
assert(child.parent == null); // make up your mind, who is the parent?? /// function is very slow, as it traverses the whole package hierarchy to find a path to this
child.parent = parent; /// package. It should only be used for error output.
return parent.add(gpa, child); pub fn getName(target: *const Package, gpa: Allocator, mod: Module) ![]const u8 {
// we'll do a breadth-first search from the root module to try and find a short name for this
// module, using a TailQueue of module/parent pairs. note that the "parent" there is just the
// first-found shortest path - a module may be children of arbitrarily many other modules.
// also, this path may vary between executions due to hashmap iteration order, but that doesn't
// matter too much.
var node_arena = std.heap.ArenaAllocator.init(gpa);
defer node_arena.deinit();
const Parented = struct {
parent: ?*const @This(),
mod: *const Package,
};
const Queue = std.TailQueue(Parented);
var to_check: Queue = .{};
{
const new = try node_arena.allocator().create(Queue.Node);
new.* = .{ .data = .{ .parent = null, .mod = mod.root_pkg } };
to_check.prepend(new);
}
if (mod.main_pkg != mod.root_pkg) {
const new = try node_arena.allocator().create(Queue.Node);
// TODO: once #12201 is resolved, we may want a way of indicating a different name for this
new.* = .{ .data = .{ .parent = null, .mod = mod.main_pkg } };
to_check.prepend(new);
}
// set of modules we've already checked to prevent loops
var checked = std.AutoHashMap(*const Package, void).init(gpa);
defer checked.deinit();
const linked = while (to_check.pop()) |node| {
const check = &node.data;
if (checked.contains(check.mod)) continue;
try checked.put(check.mod, {});
if (check.mod == target) break check;
var it = check.mod.table.iterator();
while (it.next()) |kv| {
var new = try node_arena.allocator().create(Queue.Node);
new.* = .{ .data = .{
.parent = check,
.mod = kv.value_ptr.*,
} };
to_check.prepend(new);
}
} else {
// this can happen for e.g. @cImport packages
return gpa.dupe(u8, "<unnamed>");
};
// we found a path to the module! unfortunately, we can only traverse *up* it, so we have to put
// all the names into a buffer so we can then print them in order.
var names = std.ArrayList([]const u8).init(gpa);
defer names.deinit();
var cur: *const Parented = linked;
while (cur.parent) |parent| : (cur = parent) {
// find cur's name in parent
var it = parent.mod.table.iterator();
const name = while (it.next()) |kv| {
if (kv.value_ptr.* == cur.mod) {
break kv.key_ptr.*;
}
} else unreachable;
try names.append(name);
}
// finally, print the names into a buffer!
var buf = std.ArrayList(u8).init(gpa);
defer buf.deinit();
try buf.writer().writeAll("root");
var i: usize = names.items.len;
while (i > 0) {
i -= 1;
try buf.writer().print(".{s}", .{names.items[i]});
}
return buf.toOwnedSlice();
} }
pub const build_zig_basename = "build.zig"; pub const build_zig_basename = "build.zig";
@ -236,7 +306,7 @@ pub fn fetchAndAddDependencies(
color, color,
); );
try addAndAdopt(pkg, gpa, sub_pkg); try add(pkg, gpa, fqn, sub_pkg);
try dependencies_source.writer().print(" pub const {s} = @import(\"{}\");\n", .{ try dependencies_source.writer().print(" pub const {s} = @import(\"{}\");\n", .{
std.zig.fmtId(fqn), std.zig.fmtEscapes(fqn), std.zig.fmtId(fqn), std.zig.fmtEscapes(fqn),
@ -248,7 +318,6 @@ pub fn fetchAndAddDependencies(
pub fn createFilePkg( pub fn createFilePkg(
gpa: Allocator, gpa: Allocator,
name: []const u8,
cache_directory: Compilation.Directory, cache_directory: Compilation.Directory,
basename: []const u8, basename: []const u8,
contents: []const u8, contents: []const u8,
@ -269,7 +338,7 @@ pub fn createFilePkg(
const o_dir_sub_path = "o" ++ fs.path.sep_str ++ hex_digest; const o_dir_sub_path = "o" ++ fs.path.sep_str ++ hex_digest;
try renameTmpIntoCache(cache_directory.handle, tmp_dir_sub_path, o_dir_sub_path); try renameTmpIntoCache(cache_directory.handle, tmp_dir_sub_path, o_dir_sub_path);
return createWithDir(gpa, name, cache_directory, o_dir_sub_path, basename); return createWithDir(gpa, cache_directory, o_dir_sub_path, basename);
} }
const Report = struct { const Report = struct {
@ -363,9 +432,6 @@ fn fetchAndUnpack(
const owned_src_path = try gpa.dupe(u8, build_zig_basename); const owned_src_path = try gpa.dupe(u8, build_zig_basename);
errdefer gpa.free(owned_src_path); errdefer gpa.free(owned_src_path);
const owned_name = try gpa.dupe(u8, fqn);
errdefer gpa.free(owned_name);
const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path}); const build_root = try global_cache_directory.join(gpa, &.{pkg_dir_sub_path});
errdefer gpa.free(build_root); errdefer gpa.free(build_root);
@ -380,7 +446,6 @@ fn fetchAndUnpack(
}, },
.root_src_directory_owned = true, .root_src_directory_owned = true,
.root_src_path = owned_src_path, .root_src_path = owned_src_path,
.name = owned_name,
}; };
return ptr; return ptr;
@ -428,6 +493,11 @@ fn fetchAndUnpack(
// apply those rules directly to the filesystem right here. This ensures that files // apply those rules directly to the filesystem right here. This ensures that files
// not protected by the hash are not present on the file system. // not protected by the hash are not present on the file system.
// TODO: raise an error for files that have illegal paths on some operating systems.
// For example, on Linux a path with a backslash should raise an error here.
// Of course, if the ignore rules above omit the file from the package, then everything
// is fine and no error should be raised.
break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle }); break :a try computePackageHash(thread_pool, .{ .dir = tmp_directory.handle });
}; };
@ -455,7 +525,7 @@ fn fetchAndUnpack(
std.zig.fmtId(fqn), std.zig.fmtEscapes(build_root), std.zig.fmtId(fqn), std.zig.fmtEscapes(build_root),
}); });
return createWithDir(gpa, fqn, global_cache_directory, pkg_dir_sub_path, build_zig_basename); return createWithDir(gpa, global_cache_directory, pkg_dir_sub_path, build_zig_basename);
} }
fn unpackTarball( fn unpackTarball(
@ -481,7 +551,8 @@ fn unpackTarball(
} }
const HashedFile = struct { const HashedFile = struct {
path: []const u8, fs_path: []const u8,
normalized_path: []const u8,
hash: [Manifest.Hash.digest_length]u8, hash: [Manifest.Hash.digest_length]u8,
failure: Error!void, failure: Error!void,
@ -489,7 +560,7 @@ const HashedFile = struct {
fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool { fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
_ = context; _ = context;
return mem.lessThan(u8, lhs.path, rhs.path); return mem.lessThan(u8, lhs.normalized_path, rhs.normalized_path);
} }
}; };
@ -525,8 +596,10 @@ fn computePackageHash(
else => return error.IllegalFileTypeInPackage, else => return error.IllegalFileTypeInPackage,
} }
const hashed_file = try arena.create(HashedFile); const hashed_file = try arena.create(HashedFile);
const fs_path = try arena.dupe(u8, entry.path);
hashed_file.* = .{ hashed_file.* = .{
.path = try arena.dupe(u8, entry.path), .fs_path = fs_path,
.normalized_path = try normalizePath(arena, fs_path),
.hash = undefined, // to be populated by the worker .hash = undefined, // to be populated by the worker
.failure = undefined, // to be populated by the worker .failure = undefined, // to be populated by the worker
}; };
@ -544,7 +617,7 @@ fn computePackageHash(
for (all_files.items) |hashed_file| { for (all_files.items) |hashed_file| {
hashed_file.failure catch |err| { hashed_file.failure catch |err| {
any_failures = true; any_failures = true;
std.log.err("unable to hash '{s}': {s}", .{ hashed_file.path, @errorName(err) }); std.log.err("unable to hash '{s}': {s}", .{ hashed_file.fs_path, @errorName(err) });
}; };
hasher.update(&hashed_file.hash); hasher.update(&hashed_file.hash);
} }
@ -552,6 +625,24 @@ fn computePackageHash(
return hasher.finalResult(); return hasher.finalResult();
} }
/// Make a file system path identical independently of operating system path inconsistencies.
/// This converts backslashes into forward slashes.
fn normalizePath(arena: Allocator, fs_path: []const u8) ![]const u8 {
const canonical_sep = '/';
if (fs.path.sep == canonical_sep)
return fs_path;
const normalized = try arena.dupe(u8, fs_path);
for (normalized) |*byte| {
switch (byte.*) {
fs.path.sep => byte.* = canonical_sep,
else => continue,
}
}
return normalized;
}
fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void { fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
defer wg.finish(); defer wg.finish();
hashed_file.failure = hashFileFallible(dir, hashed_file); hashed_file.failure = hashFileFallible(dir, hashed_file);
@ -559,9 +650,10 @@ fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile, wg: *WaitGroup) void {
fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void { fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
var buf: [8000]u8 = undefined; var buf: [8000]u8 = undefined;
var file = try dir.openFile(hashed_file.path, .{}); var file = try dir.openFile(hashed_file.fs_path, .{});
defer file.close();
var hasher = Manifest.Hash.init(.{}); var hasher = Manifest.Hash.init(.{});
hasher.update(hashed_file.path); hasher.update(hashed_file.normalized_path);
hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) }); hasher.update(&.{ 0, @boolToInt(try isExecutable(file)) });
while (true) { while (true) {
const bytes_read = try file.read(&buf); const bytes_read = try file.read(&buf);

View File

@ -2529,7 +2529,7 @@ fn coerceResultPtr(
_ = try block.addBinOp(.store, new_ptr, null_inst); _ = try block.addBinOp(.store, new_ptr, null_inst);
return Air.Inst.Ref.void_value; return Air.Inst.Ref.void_value;
} }
return sema.bitCast(block, ptr_ty, new_ptr, src); return sema.bitCast(block, ptr_ty, new_ptr, src, null);
} }
const trash_inst = trash_block.instructions.pop(); const trash_inst = trash_block.instructions.pop();
@ -2545,7 +2545,7 @@ fn coerceResultPtr(
if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| { if (try sema.resolveDefinedValue(block, src, new_ptr)) |ptr_val| {
new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val); new_ptr = try sema.addConstant(ptr_operand_ty, ptr_val);
} else { } else {
new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src); new_ptr = try sema.bitCast(block, ptr_operand_ty, new_ptr, src, null);
} }
}, },
.wrap_optional => { .wrap_optional => {
@ -5311,7 +5311,6 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
} }
const c_import_pkg = Package.create( const c_import_pkg = Package.create(
sema.gpa, sema.gpa,
"c_import", // TODO: should we make this unique?
null, null,
c_import_res.out_zig_path, c_import_res.out_zig_path,
) catch |err| switch (err) { ) catch |err| switch (err) {
@ -9655,7 +9654,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
.Vector, .Vector,
=> {}, => {},
} }
return sema.bitCast(block, dest_ty, operand, operand_src); return sema.bitCast(block, dest_ty, operand, inst_data.src(), operand_src);
} }
fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { fn zirFloatCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -9888,7 +9887,7 @@ fn zirSwitchCapture(
switch (operand_ty.zigTypeTag()) { switch (operand_ty.zigTypeTag()) {
.ErrorSet => if (block.switch_else_err_ty) |some| { .ErrorSet => if (block.switch_else_err_ty) |some| {
return sema.bitCast(block, some, operand, operand_src); return sema.bitCast(block, some, operand, operand_src, null);
} else { } else {
try block.addUnreachable(false); try block.addUnreachable(false);
return Air.Inst.Ref.unreachable_value; return Air.Inst.Ref.unreachable_value;
@ -9988,14 +9987,14 @@ fn zirSwitchCapture(
Module.ErrorSet.sortNames(&names); Module.ErrorSet.sortNames(&names);
const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names); const else_error_ty = try Type.Tag.error_set_merged.create(sema.arena, names);
return sema.bitCast(block, else_error_ty, operand, operand_src); return sema.bitCast(block, else_error_ty, operand, operand_src, null);
} else { } else {
const item_ref = try sema.resolveInst(items[0]); const item_ref = try sema.resolveInst(items[0]);
// Previous switch validation ensured this will succeed // Previous switch validation ensured this will succeed
const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable; const item_val = sema.resolveConstValue(block, .unneeded, item_ref, "") catch unreachable;
const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?); const item_ty = try Type.Tag.error_set_single.create(sema.arena, item_val.getError().?);
return sema.bitCast(block, item_ty, operand, operand_src); return sema.bitCast(block, item_ty, operand, operand_src, null);
} }
}, },
else => { else => {
@ -11793,8 +11792,9 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand}); return sema.fail(block, operand_src, "import of file outside package path: '{s}'", .{operand});
}, },
error.PackageNotFound => { error.PackageNotFound => {
const cur_pkg = block.getFileScope().pkg; const name = try block.getFileScope().pkg.getName(sema.gpa, mod.*);
return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, cur_pkg.name }); defer sema.gpa.free(name);
return sema.fail(block, operand_src, "no package named '{s}' available within package '{s}'", .{ operand, name });
}, },
else => { else => {
// TODO: these errors are file system errors; make sure an update() will // TODO: these errors are file system errors; make sure an update() will
@ -19953,7 +19953,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
} else is_aligned; } else is_aligned;
try sema.addSafetyCheck(block, ok, .incorrect_alignment); try sema.addSafetyCheck(block, ok, .incorrect_alignment);
} }
return sema.bitCast(block, dest_ty, ptr, ptr_src); return sema.bitCast(block, dest_ty, ptr, ptr_src, null);
} }
fn zirBitCount( fn zirBitCount(
@ -21482,24 +21482,32 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
const struct_ty = try sema.resolveType(block, ty_src, extra.parent_type); const parent_ty = try sema.resolveType(block, ty_src, extra.parent_type);
const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known"); const field_name = try sema.resolveConstString(block, name_src, extra.field_name, "field name must be comptime-known");
const field_ptr = try sema.resolveInst(extra.field_ptr); const field_ptr = try sema.resolveInst(extra.field_ptr);
const field_ptr_ty = sema.typeOf(field_ptr); const field_ptr_ty = sema.typeOf(field_ptr);
if (struct_ty.zigTypeTag() != .Struct) { if (parent_ty.zigTypeTag() != .Struct and parent_ty.zigTypeTag() != .Union) {
return sema.fail(block, ty_src, "expected struct type, found '{}'", .{struct_ty.fmt(sema.mod)}); return sema.fail(block, ty_src, "expected struct or union type, found '{}'", .{parent_ty.fmt(sema.mod)});
} }
try sema.resolveTypeLayout(struct_ty); try sema.resolveTypeLayout(parent_ty);
const field_index = if (struct_ty.isTuple()) blk: { const field_index = switch (parent_ty.zigTypeTag()) {
if (mem.eql(u8, field_name, "len")) { .Struct => blk: {
return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{}); if (parent_ty.isTuple()) {
} if (mem.eql(u8, field_name, "len")) {
break :blk try sema.tupleFieldIndex(block, struct_ty, field_name, name_src); return sema.fail(block, src, "cannot get @fieldParentPtr of 'len' field of tuple", .{});
} else try sema.structFieldIndex(block, struct_ty, field_name, name_src); }
break :blk try sema.tupleFieldIndex(block, parent_ty, field_name, name_src);
} else {
break :blk try sema.structFieldIndex(block, parent_ty, field_name, name_src);
}
},
.Union => try sema.unionFieldIndex(block, parent_ty, field_name, name_src),
else => unreachable,
};
if (struct_ty.structFieldIsComptime(field_index)) { if (parent_ty.zigTypeTag() == .Struct and parent_ty.structFieldIsComptime(field_index)) {
return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{}); return sema.fail(block, src, "cannot get @fieldParentPtr of a comptime field", .{});
} }
@ -21507,23 +21515,29 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
const field_ptr_ty_info = field_ptr_ty.ptrInfo().data; const field_ptr_ty_info = field_ptr_ty.ptrInfo().data;
var ptr_ty_data: Type.Payload.Pointer.Data = .{ var ptr_ty_data: Type.Payload.Pointer.Data = .{
.pointee_type = struct_ty.structFieldType(field_index), .pointee_type = parent_ty.structFieldType(field_index),
.mutable = field_ptr_ty_info.mutable, .mutable = field_ptr_ty_info.mutable,
.@"addrspace" = field_ptr_ty_info.@"addrspace", .@"addrspace" = field_ptr_ty_info.@"addrspace",
}; };
if (struct_ty.containerLayout() == .Packed) { if (parent_ty.containerLayout() == .Packed) {
return sema.fail(block, src, "TODO handle packed structs with @fieldParentPtr", .{}); return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
} else { } else {
ptr_ty_data.@"align" = if (struct_ty.castTag(.@"struct")) |struct_obj| b: { ptr_ty_data.@"align" = blk: {
break :b struct_obj.data.fields.values()[field_index].abi_align; if (parent_ty.castTag(.@"struct")) |struct_obj| {
} else 0; break :blk struct_obj.data.fields.values()[field_index].abi_align;
} else if (parent_ty.cast(Type.Payload.Union)) |union_obj| {
break :blk union_obj.data.fields.values()[field_index].abi_align;
} else {
break :blk 0;
}
};
} }
const actual_field_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); const actual_field_ptr_ty = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src); const casted_field_ptr = try sema.coerce(block, actual_field_ptr_ty, field_ptr, ptr_src);
ptr_ty_data.pointee_type = struct_ty; ptr_ty_data.pointee_type = parent_ty;
const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data); const result_ptr = try Type.ptr(sema.arena, sema.mod, ptr_ty_data);
if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| { if (try sema.resolveDefinedValue(block, src, casted_field_ptr)) |field_ptr_val| {
@ -21540,11 +21554,11 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
field_name, field_name,
field_index, field_index,
payload.data.field_index, payload.data.field_index,
struct_ty.fmt(sema.mod), parent_ty.fmt(sema.mod),
}, },
); );
errdefer msg.destroy(sema.gpa); errdefer msg.destroy(sema.gpa);
try sema.addDeclaredHereNote(msg, struct_ty); try sema.addDeclaredHereNote(msg, parent_ty);
break :msg msg; break :msg msg;
}; };
return sema.failWithOwnedErrorMsg(msg); return sema.failWithOwnedErrorMsg(msg);
@ -24141,8 +24155,9 @@ fn unionFieldVal(
return sema.addConstant(field.ty, tag_and_val.val); return sema.addConstant(field.ty, tag_and_val.val);
} else { } else {
const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod); const old_ty = union_ty.unionFieldType(tag_and_val.tag, sema.mod);
const new_val = try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0); if (try sema.bitCastVal(block, src, tag_and_val.val, old_ty, field.ty, 0)) |new_val| {
return sema.addConstant(field.ty, new_val); return sema.addConstant(field.ty, new_val);
}
} }
}, },
} }
@ -26514,8 +26529,12 @@ fn storePtrVal(
const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target)); const abi_size = try sema.usizeCast(block, src, mut_kit.ty.abiSize(target));
const buffer = try sema.gpa.alloc(u8, abi_size); const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer); defer sema.gpa.free(buffer);
reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer); reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) {
operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]); error.ReinterpretDeclRef => unreachable,
};
operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
error.ReinterpretDeclRef => unreachable,
};
const arena = mut_kit.beginArena(sema.mod); const arena = mut_kit.beginArena(sema.mod);
defer mut_kit.finishArena(sema.mod); defer mut_kit.finishArena(sema.mod);
@ -27398,6 +27417,7 @@ fn bitCast(
dest_ty_unresolved: Type, dest_ty_unresolved: Type,
inst: Air.Inst.Ref, inst: Air.Inst.Ref,
inst_src: LazySrcLoc, inst_src: LazySrcLoc,
operand_src: ?LazySrcLoc,
) CompileError!Air.Inst.Ref { ) CompileError!Air.Inst.Ref {
const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved); const dest_ty = try sema.resolveTypeFields(dest_ty_unresolved);
try sema.resolveTypeLayout(dest_ty); try sema.resolveTypeLayout(dest_ty);
@ -27419,10 +27439,11 @@ fn bitCast(
} }
if (try sema.resolveMaybeUndefVal(inst)) |val| { if (try sema.resolveMaybeUndefVal(inst)) |val| {
const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0); if (try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty, 0)) |result_val| {
return sema.addConstant(dest_ty, result_val); return sema.addConstant(dest_ty, result_val);
}
} }
try sema.requireRuntimeBlock(block, inst_src, null); try sema.requireRuntimeBlock(block, inst_src, operand_src);
return block.addBitCast(dest_ty, inst); return block.addBitCast(dest_ty, inst);
} }
@ -27434,7 +27455,7 @@ fn bitCastVal(
old_ty: Type, old_ty: Type,
new_ty: Type, new_ty: Type,
buffer_offset: usize, buffer_offset: usize,
) !Value { ) !?Value {
const target = sema.mod.getTarget(); const target = sema.mod.getTarget();
if (old_ty.eql(new_ty, sema.mod)) return val; if (old_ty.eql(new_ty, sema.mod)) return val;
@ -27443,8 +27464,10 @@ fn bitCastVal(
const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target)); const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
const buffer = try sema.gpa.alloc(u8, abi_size); const buffer = try sema.gpa.alloc(u8, abi_size);
defer sema.gpa.free(buffer); defer sema.gpa.free(buffer);
val.writeToMemory(old_ty, sema.mod, buffer); val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) {
return Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena); error.ReinterpretDeclRef => return null,
};
return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
} }
fn coerceArrayPtrToSlice( fn coerceArrayPtrToSlice(
@ -27551,7 +27574,7 @@ fn coerceCompatiblePtrs(
} else is_non_zero; } else is_non_zero;
try sema.addSafetyCheck(block, ok, .cast_to_null); try sema.addSafetyCheck(block, ok, .cast_to_null);
} }
return sema.bitCast(block, dest_ty, inst, inst_src); return sema.bitCast(block, dest_ty, inst, inst_src, null);
} }
fn coerceEnumToUnion( fn coerceEnumToUnion(
@ -28291,7 +28314,7 @@ fn analyzeRef(
try sema.storePtr(block, src, alloc, operand); try sema.storePtr(block, src, alloc, operand);
// TODO: Replace with sema.coerce when that supports adding pointer constness. // TODO: Replace with sema.coerce when that supports adding pointer constness.
return sema.bitCast(block, ptr_type, alloc, src); return sema.bitCast(block, ptr_type, alloc, src, null);
} }
fn analyzeLoad( fn analyzeLoad(
@ -32327,11 +32350,11 @@ fn pointerDerefExtra(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value
// Try the smaller bit-cast first, since that's more efficient than using the larger `parent` // Try the smaller bit-cast first, since that's more efficient than using the larger `parent`
if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty)) if (deref.pointee) |tv| if (load_sz <= try sema.typeAbiSize(tv.ty))
return DerefResult{ .val = try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0) }; return DerefResult{ .val = (try sema.bitCastVal(block, src, tv.val, tv.ty, load_ty, 0)) orelse return .runtime_load };
// If that fails, try to bit-cast from the largest parent value with a well-defined layout // If that fails, try to bit-cast from the largest parent value with a well-defined layout
if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty)) if (deref.parent) |parent| if (load_sz + parent.byte_offset <= try sema.typeAbiSize(parent.tv.ty))
return DerefResult{ .val = try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset) }; return DerefResult{ .val = (try sema.bitCastVal(block, src, parent.tv.val, parent.tv.ty, load_ty, parent.byte_offset)) orelse return .runtime_load };
if (deref.ty_without_well_defined_layout) |bad_ty| { if (deref.ty_without_well_defined_layout) |bad_ty| {
// We got no parent for bit-casting, or the parent we got was too small. Either way, the problem // We got no parent for bit-casting, or the parent we got was too small. Either way, the problem

View File

@ -3958,7 +3958,9 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
switch (value) { switch (value) {
.dead => unreachable, .dead => unreachable,
.undef => unreachable, .undef => {
try self.genSetReg(value_ty, addr_reg, value);
},
.register => |value_reg| { .register => |value_reg| {
log.debug("store: register {} to {}", .{ value_reg, addr_reg }); log.debug("store: register {} to {}", .{ value_reg, addr_reg });
try self.genStrRegister(value_reg, addr_reg, value_ty); try self.genStrRegister(value_reg, addr_reg, value_ty);
@ -5870,7 +5872,22 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = try self.resolveInst(ty_op.operand); const result = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand;
const operand_lock = switch (operand) {
.register => |reg| self.register_manager.lockReg(reg),
.register_with_overflow => |rwo| self.register_manager.lockReg(rwo.reg),
else => null,
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
const dest_ty = self.air.typeOfIndex(inst);
const dest = try self.allocRegOrMem(dest_ty, true, inst);
try self.setRegOrMem(dest_ty, dest, operand);
break :result dest;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
} }

View File

@ -2765,7 +2765,9 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
switch (value) { switch (value) {
.dead => unreachable, .dead => unreachable,
.undef => unreachable, .undef => {
try self.genSetReg(value_ty, addr_reg, value);
},
.register => |value_reg| { .register => |value_reg| {
try self.genStrRegister(value_reg, addr_reg, value_ty); try self.genStrRegister(value_reg, addr_reg, value_ty);
}, },
@ -2971,6 +2973,11 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const field_ptr = try self.resolveInst(extra.field_ptr); const field_ptr = try self.resolveInst(extra.field_ptr);
const struct_ty = self.air.getRefType(ty_pl.ty).childType(); const struct_ty = self.air.getRefType(ty_pl.ty).childType();
if (struct_ty.zigTypeTag() == .Union) {
return self.fail("TODO implement @fieldParentPtr codegen for unions", .{});
}
const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*));
switch (field_ptr) { switch (field_ptr) {
.ptr_stack_offset => |off| { .ptr_stack_offset => |off| {
@ -5816,7 +5823,24 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = try self.resolveInst(ty_op.operand); const result = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand;
const operand_lock = switch (operand) {
.register,
.register_c_flag,
.register_v_flag,
=> |reg| self.register_manager.lockReg(reg),
else => null,
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
const dest_ty = self.air.typeOfIndex(inst);
const dest = try self.allocRegOrMem(dest_ty, true, inst);
try self.setRegOrMem(dest_ty, dest, operand);
break :result dest;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
} }

View File

@ -2338,7 +2338,20 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = try self.resolveInst(ty_op.operand); const result = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand;
const operand_lock = switch (operand) {
.register => |reg| self.register_manager.lockReg(reg),
else => null,
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
const dest = try self.allocRegOrMem(inst, true);
try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
break :result dest;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
} }

View File

@ -1091,7 +1091,21 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = try self.resolveInst(ty_op.operand); const result = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand;
const operand_lock = switch (operand) {
.register => |reg| self.register_manager.lockReg(reg),
.register_with_overflow => |rwo| self.register_manager.lockReg(rwo.reg),
else => null,
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
const dest = try self.allocRegOrMem(inst, true);
try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
break :result dest;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
} }

View File

@ -2896,7 +2896,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
const struct_obj = ty.castTag(.@"struct").?.data; const struct_obj = ty.castTag(.@"struct").?.data;
assert(struct_obj.layout == .Packed); assert(struct_obj.layout == .Packed);
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0); val.writeToPackedMemory(ty, func.bin_file.base.options.module.?, &buf, 0) catch unreachable;
var payload: Value.Payload.U64 = .{ var payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 }, .base = .{ .tag = .int_u64 },
.data = std.mem.readIntLittle(u64, &buf), .data = std.mem.readIntLittle(u64, &buf),
@ -2907,7 +2907,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
.Vector => { .Vector => {
assert(determineSimdStoreStrategy(ty, target) == .direct); assert(determineSimdStoreStrategy(ty, target) == .direct);
var buf: [16]u8 = undefined; var buf: [16]u8 = undefined;
val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf); val.writeToMemory(ty, func.bin_file.base.options.module.?, &buf) catch unreachable;
return func.storeSimdImmd(buf); return func.storeSimdImmd(buf);
}, },
else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}), else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
@ -4944,8 +4944,8 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.field_ptr}); if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.field_ptr});
const field_ptr = try func.resolveInst(extra.field_ptr); const field_ptr = try func.resolveInst(extra.field_ptr);
const struct_ty = func.air.getRefType(ty_pl.ty).childType(); const parent_ty = func.air.getRefType(ty_pl.ty).childType();
const field_offset = struct_ty.structFieldOffset(extra.field_index, func.target); const field_offset = parent_ty.structFieldOffset(extra.field_index, func.target);
const result = if (field_offset != 0) result: { const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new); const base = try func.buildPointerOffset(field_ptr, 0, .new);

View File

@ -920,9 +920,6 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const mod = self.bin_file.options.module.?; const mod = self.bin_file.options.module.?;
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
}; };
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
self.stack_align = abi_align;
if (reg_ok) { if (reg_ok) {
switch (elem_ty.zigTypeTag()) { switch (elem_ty.zigTypeTag()) {
@ -951,6 +948,10 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
}, },
} }
} }
const abi_align = elem_ty.abiAlignment(self.target.*);
if (abi_align > self.stack_align)
self.stack_align = abi_align;
const stack_offset = try self.allocMem(inst, abi_size, abi_align); const stack_offset = try self.allocMem(inst, abi_size, abi_align);
return MCValue{ .stack_offset = @intCast(i32, stack_offset) }; return MCValue{ .stack_offset = @intCast(i32, stack_offset) };
} }
@ -990,7 +991,7 @@ fn revertState(self: *Self, state: State) void {
pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
const stack_mcv = try self.allocRegOrMem(inst, false); const stack_mcv = try self.allocRegOrMem(inst, false);
log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); log.debug("spilling %{d} to stack mcv {any}", .{ inst, stack_mcv });
const reg_mcv = self.getResolvedInstValue(inst); const reg_mcv = self.getResolvedInstValue(inst);
switch (reg_mcv) { switch (reg_mcv) {
.register => |other| { .register => |other| {
@ -1016,7 +1017,7 @@ pub fn spillEflagsIfOccupied(self: *Self) !void {
}; };
try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv); try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv }); log.debug("spilling %{d} to mcv {any}", .{ inst_to_save, new_mcv });
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst_to_save, new_mcv); try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
@ -2114,6 +2115,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
}; };
break :result dst_mcv; break :result dst_mcv;
}; };
log.debug("airSliceLen(%{d}): {}", .{ inst, result });
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
} }
@ -2641,6 +2643,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst); const elem_ty = self.air.typeOfIndex(inst);
const elem_size = elem_ty.abiSize(self.target.*);
const result: MCValue = result: { const result: MCValue = result: {
if (!elem_ty.hasRuntimeBitsIgnoreComptime()) if (!elem_ty.hasRuntimeBitsIgnoreComptime())
break :result MCValue.none; break :result MCValue.none;
@ -2651,13 +2654,14 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
break :result MCValue.dead; break :result MCValue.dead;
const dst_mcv: MCValue = blk: { const dst_mcv: MCValue = blk: {
if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { if (elem_size <= 8 and self.reuseOperand(inst, ty_op.operand, 0, ptr)) {
// The MCValue that holds the pointer can be re-used as the value. // The MCValue that holds the pointer can be re-used as the value.
break :blk ptr; break :blk ptr;
} else { } else {
break :blk try self.allocRegOrMem(inst, true); break :blk try self.allocRegOrMem(inst, true);
} }
}; };
log.debug("airLoad(%{d}): {} <- {}", .{ inst, dst_mcv, ptr });
try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
break :result dst_mcv; break :result dst_mcv;
}; };
@ -2728,10 +2732,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
switch (value) { switch (value) {
.none => unreachable, .none => unreachable,
.undef => unreachable,
.dead => unreachable, .dead => unreachable,
.unreach => unreachable, .unreach => unreachable,
.eflags => unreachable, .eflags => unreachable,
.undef => {
try self.genSetReg(value_ty, reg, value);
},
.immediate => |imm| { .immediate => |imm| {
switch (abi_size) { switch (abi_size) {
1, 2, 4 => { 1, 2, 4 => {
@ -2773,6 +2779,30 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.register => |src_reg| { .register => |src_reg| {
try self.genInlineMemcpyRegisterRegister(value_ty, reg, src_reg, 0); try self.genInlineMemcpyRegisterRegister(value_ty, reg, src_reg, 0);
}, },
.register_overflow => |ro| {
const ro_reg_lock = self.register_manager.lockReg(ro.reg);
defer if (ro_reg_lock) |lock| self.register_manager.unlockReg(lock);
const wrapped_ty = value_ty.structFieldType(0);
try self.genInlineMemcpyRegisterRegister(wrapped_ty, reg, ro.reg, 0);
const overflow_bit_ty = value_ty.structFieldType(1);
const overflow_bit_offset = value_ty.structFieldOffset(1, self.target.*);
const tmp_reg = try self.register_manager.allocReg(null, gp);
_ = try self.addInst(.{
.tag = .cond_set_byte,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = tmp_reg.to8(),
}),
.data = .{ .cc = ro.eflags },
});
try self.genInlineMemcpyRegisterRegister(
overflow_bit_ty,
reg,
tmp_reg,
-@intCast(i32, overflow_bit_offset),
);
},
.linker_load, .linker_load,
.memory, .memory,
.stack_offset, .stack_offset,
@ -2787,8 +2817,9 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.dest_stack_base = reg.to64(), .dest_stack_base = reg.to64(),
}); });
}, },
else => |other| { .ptr_stack_offset => {
return self.fail("TODO implement set pointee with {}", .{other}); const tmp_reg = try self.copyToTmpRegister(value_ty, value);
return self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
}, },
} }
}, },
@ -2902,6 +2933,7 @@ fn airStore(self: *Self, inst: Air.Inst.Index) !void {
const ptr_ty = self.air.typeOf(bin_op.lhs); const ptr_ty = self.air.typeOf(bin_op.lhs);
const value = try self.resolveInst(bin_op.rhs); const value = try self.resolveInst(bin_op.rhs);
const value_ty = self.air.typeOf(bin_op.rhs); const value_ty = self.air.typeOf(bin_op.rhs);
log.debug("airStore(%{d}): {} <- {}", .{ inst, ptr, value });
try self.store(ptr, value, ptr_ty, value_ty); try self.store(ptr, value, ptr_ty, value_ty);
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
} }
@ -6321,7 +6353,22 @@ fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void {
fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op; const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result = try self.resolveInst(ty_op.operand); const result = if (self.liveness.isUnused(inst)) .dead else result: {
const operand = try self.resolveInst(ty_op.operand);
if (self.reuseOperand(inst, ty_op.operand, 0, operand)) break :result operand;
const operand_lock = switch (operand) {
.register => |reg| self.register_manager.lockReg(reg),
.register_overflow => |ro| self.register_manager.lockReg(ro.reg),
else => null,
};
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
const dest = try self.allocRegOrMem(inst, true);
try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
break :result dest;
};
log.debug("airBitCast(%{d}): {}", .{ inst, result });
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
} }

View File

@ -527,7 +527,7 @@ pub fn generateSymbol(
.fail => |em| return Result{ .fail = em }, .fail => |em| return Result{ .fail = em },
} }
} else { } else {
field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits); field_val.writeToPackedMemory(field_ty, mod, code.items[current_pos..], bits) catch unreachable;
} }
bits += @intCast(u16, field_ty.bitSize(target)); bits += @intCast(u16, field_ty.bitSize(target));
} }

File diff suppressed because it is too large Load Diff

1896
src/codegen/c/type.zig Normal file

File diff suppressed because it is too large Load Diff

View File

@ -6025,8 +6025,8 @@ pub const FuncGen = struct {
const field_ptr = try self.resolveInst(extra.field_ptr); const field_ptr = try self.resolveInst(extra.field_ptr);
const target = self.dg.module.getTarget(); const target = self.dg.module.getTarget();
const struct_ty = self.air.getRefType(ty_pl.ty).childType(); const parent_ty = self.air.getRefType(ty_pl.ty).childType();
const field_offset = struct_ty.structFieldOffset(extra.field_index, target); const field_offset = parent_ty.structFieldOffset(extra.field_index, target);
const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty));
if (field_offset == 0) { if (field_offset == 0) {

View File

@ -22,27 +22,22 @@ base: link.File,
/// Instead, it tracks all declarations in this table, and iterates over it /// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together. /// in the flush function, stitching pre-rendered pieces of C code together.
decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclBlock) = .{}, decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclBlock) = .{},
/// Stores Type/Value data for `typedefs` to reference.
/// Accumulates allocations and then there is a periodic garbage collection after flush().
arena: std.heap.ArenaAllocator,
/// Per-declaration data. /// Per-declaration data.
const DeclBlock = struct { const DeclBlock = struct {
code: std.ArrayListUnmanaged(u8) = .{}, code: std.ArrayListUnmanaged(u8) = .{},
fwd_decl: std.ArrayListUnmanaged(u8) = .{}, fwd_decl: std.ArrayListUnmanaged(u8) = .{},
/// Each Decl stores a mapping of Zig Types to corresponding C types, for every /// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate
/// Zig Type used by the Decl. In flush(), we iterate over each Decl /// over each `Decl` and generate the definition for each used `CType` once.
/// and emit the typedef code for all types, making sure to not emit the same thing twice. ctypes: codegen.CType.Store = .{},
/// Any arena memory the Type points to lives in the `arena` field of `C`. /// Key and Value storage use the ctype arena.
typedefs: codegen.TypedefMap.Unmanaged = .{}, lazy_fns: codegen.LazyFnMap = .{},
fn deinit(db: *DeclBlock, gpa: Allocator) void { fn deinit(db: *DeclBlock, gpa: Allocator) void {
db.code.deinit(gpa); db.lazy_fns.deinit(gpa);
db.ctypes.deinit(gpa);
db.fwd_decl.deinit(gpa); db.fwd_decl.deinit(gpa);
for (db.typedefs.values()) |typedef| { db.code.deinit(gpa);
gpa.free(typedef.rendered);
}
db.typedefs.deinit(gpa);
db.* = undefined; db.* = undefined;
} }
}; };
@ -64,7 +59,6 @@ pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C
errdefer gpa.destroy(c_file); errdefer gpa.destroy(c_file);
c_file.* = C{ c_file.* = C{
.arena = std.heap.ArenaAllocator.init(gpa),
.base = .{ .base = .{
.tag = .c, .tag = .c,
.options = options, .options = options,
@ -83,8 +77,6 @@ pub fn deinit(self: *C) void {
db.deinit(gpa); db.deinit(gpa);
} }
self.decl_table.deinit(gpa); self.decl_table.deinit(gpa);
self.arena.deinit();
} }
pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void { pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void {
@ -99,124 +91,122 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
const tracy = trace(@src()); const tracy = trace(@src());
defer tracy.end(); defer tracy.end();
const gpa = self.base.allocator;
const decl_index = func.owner_decl; const decl_index = func.owner_decl;
const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); const gop = try self.decl_table.getOrPut(gpa, decl_index);
if (!gop.found_existing) { if (!gop.found_existing) {
gop.value_ptr.* = .{}; gop.value_ptr.* = .{};
} }
const ctypes = &gop.value_ptr.ctypes;
const lazy_fns = &gop.value_ptr.lazy_fns;
const fwd_decl = &gop.value_ptr.fwd_decl; const fwd_decl = &gop.value_ptr.fwd_decl;
const typedefs = &gop.value_ptr.typedefs;
const code = &gop.value_ptr.code; const code = &gop.value_ptr.code;
ctypes.clearRetainingCapacity(gpa);
lazy_fns.clearRetainingCapacity();
fwd_decl.shrinkRetainingCapacity(0); fwd_decl.shrinkRetainingCapacity(0);
for (typedefs.values()) |typedef| {
module.gpa.free(typedef.rendered);
}
typedefs.clearRetainingCapacity();
code.shrinkRetainingCapacity(0); code.shrinkRetainingCapacity(0);
var function: codegen.Function = .{ var function: codegen.Function = .{
.value_map = codegen.CValueMap.init(module.gpa), .value_map = codegen.CValueMap.init(gpa),
.air = air, .air = air,
.liveness = liveness, .liveness = liveness,
.func = func, .func = func,
.object = .{ .object = .{
.dg = .{ .dg = .{
.gpa = module.gpa, .gpa = gpa,
.module = module, .module = module,
.error_msg = null, .error_msg = null,
.decl_index = decl_index, .decl_index = decl_index.toOptional(),
.decl = module.declPtr(decl_index), .decl = module.declPtr(decl_index),
.fwd_decl = fwd_decl.toManaged(module.gpa), .fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), .ctypes = ctypes.*,
.typedefs_arena = self.arena.allocator(),
}, },
.code = code.toManaged(module.gpa), .code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code .indent_writer = undefined, // set later so we can get a pointer to object.code
}, },
.arena = std.heap.ArenaAllocator.init(module.gpa), .lazy_fns = lazy_fns.*,
.arena = std.heap.ArenaAllocator.init(gpa),
}; };
function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() }; function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };
defer function.deinit(module.gpa); defer function.deinit();
codegen.genFunc(&function) catch |err| switch (err) { codegen.genFunc(&function) catch |err| switch (err) {
error.AnalysisFail => { error.AnalysisFail => {
try module.failed_decls.put(module.gpa, decl_index, function.object.dg.error_msg.?); try module.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?);
return; return;
}, },
else => |e| return e, else => |e| return e,
}; };
ctypes.* = function.object.dg.ctypes.move();
lazy_fns.* = function.lazy_fns.move();
fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged(); fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged();
typedefs.* = function.object.dg.typedefs.unmanaged;
function.object.dg.typedefs.unmanaged = .{};
code.* = function.object.code.moveToUnmanaged(); code.* = function.object.code.moveToUnmanaged();
// Free excess allocated memory for this Decl. // Free excess allocated memory for this Decl.
fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); ctypes.shrinkAndFree(gpa, ctypes.count());
code.shrinkAndFree(module.gpa, code.items.len); lazy_fns.shrinkAndFree(gpa, lazy_fns.count());
fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
code.shrinkAndFree(gpa, code.items.len);
} }
pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !void { pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src()); const tracy = trace(@src());
defer tracy.end(); defer tracy.end();
const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index); const gpa = self.base.allocator;
const gop = try self.decl_table.getOrPut(gpa, decl_index);
if (!gop.found_existing) { if (!gop.found_existing) {
gop.value_ptr.* = .{}; gop.value_ptr.* = .{};
} }
const ctypes = &gop.value_ptr.ctypes;
const fwd_decl = &gop.value_ptr.fwd_decl; const fwd_decl = &gop.value_ptr.fwd_decl;
const typedefs = &gop.value_ptr.typedefs;
const code = &gop.value_ptr.code; const code = &gop.value_ptr.code;
ctypes.clearRetainingCapacity(gpa);
fwd_decl.shrinkRetainingCapacity(0); fwd_decl.shrinkRetainingCapacity(0);
for (typedefs.values()) |value| {
module.gpa.free(value.rendered);
}
typedefs.clearRetainingCapacity();
code.shrinkRetainingCapacity(0); code.shrinkRetainingCapacity(0);
const decl = module.declPtr(decl_index); const decl = module.declPtr(decl_index);
var object: codegen.Object = .{ var object: codegen.Object = .{
.dg = .{ .dg = .{
.gpa = module.gpa, .gpa = gpa,
.module = module, .module = module,
.error_msg = null, .error_msg = null,
.decl_index = decl_index, .decl_index = decl_index.toOptional(),
.decl = decl, .decl = decl,
.fwd_decl = fwd_decl.toManaged(module.gpa), .fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), .ctypes = ctypes.*,
.typedefs_arena = self.arena.allocator(),
}, },
.code = code.toManaged(module.gpa), .code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code .indent_writer = undefined, // set later so we can get a pointer to object.code
}; };
object.indent_writer = .{ .underlying_writer = object.code.writer() }; object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer { defer {
object.code.deinit(); object.code.deinit();
for (object.dg.typedefs.values()) |typedef| { object.dg.ctypes.deinit(object.dg.gpa);
module.gpa.free(typedef.rendered);
}
object.dg.typedefs.deinit();
object.dg.fwd_decl.deinit(); object.dg.fwd_decl.deinit();
} }
codegen.genDecl(&object) catch |err| switch (err) { codegen.genDecl(&object) catch |err| switch (err) {
error.AnalysisFail => { error.AnalysisFail => {
try module.failed_decls.put(module.gpa, decl_index, object.dg.error_msg.?); try module.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
return; return;
}, },
else => |e| return e, else => |e| return e,
}; };
ctypes.* = object.dg.ctypes.move();
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
typedefs.* = object.dg.typedefs.unmanaged;
object.dg.typedefs.unmanaged = .{};
code.* = object.code.moveToUnmanaged(); code.* = object.code.moveToUnmanaged();
// Free excess allocated memory for this Decl. // Free excess allocated memory for this Decl.
fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); ctypes.shrinkAndFree(gpa, ctypes.count());
code.shrinkAndFree(module.gpa, code.items.len); fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
code.shrinkAndFree(gpa, code.items.len);
} }
pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void { pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
@ -246,7 +236,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
sub_prog_node.activate(); sub_prog_node.activate();
defer sub_prog_node.end(); defer sub_prog_node.end();
const gpa = comp.gpa; const gpa = self.base.allocator;
const module = self.base.options.module.?; const module = self.base.options.module.?;
// This code path happens exclusively with -ofmt=c. The flush logic for // This code path happens exclusively with -ofmt=c. The flush logic for
@ -257,30 +247,28 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
const abi_define = abiDefine(comp); const abi_define = abiDefine(comp);
// Covers defines, zig.h, typedef, and asm. // Covers defines, zig.h, ctypes, asm, lazy fwd.
var buf_count: usize = 2; try f.all_buffers.ensureUnusedCapacity(gpa, 5);
if (abi_define != null) buf_count += 1;
try f.all_buffers.ensureUnusedCapacity(gpa, buf_count);
if (abi_define) |buf| f.appendBufAssumeCapacity(buf); if (abi_define) |buf| f.appendBufAssumeCapacity(buf);
f.appendBufAssumeCapacity(zig_h); f.appendBufAssumeCapacity(zig_h);
const typedef_index = f.all_buffers.items.len; const ctypes_index = f.all_buffers.items.len;
f.all_buffers.items.len += 1; f.all_buffers.items.len += 1;
{ {
var asm_buf = f.asm_buf.toManaged(module.gpa); var asm_buf = f.asm_buf.toManaged(gpa);
defer asm_buf.deinit(); defer f.asm_buf = asm_buf.moveToUnmanaged();
try codegen.genGlobalAsm(module, asm_buf.writer());
try codegen.genGlobalAsm(module, &asm_buf); f.appendBufAssumeCapacity(asm_buf.items);
f.asm_buf = asm_buf.moveToUnmanaged();
f.appendBufAssumeCapacity(f.asm_buf.items);
} }
try self.flushErrDecls(&f); const lazy_index = f.all_buffers.items.len;
f.all_buffers.items.len += 1;
// Typedefs, forward decls, and non-functions first. try self.flushErrDecls(&f.lazy_db);
// `CType`s, forward decls, and non-functions first.
// Unlike other backends, the .c code we are emitting is order-dependent. Therefore // Unlike other backends, the .c code we are emitting is order-dependent. Therefore
// we must traverse the set of Decls that we are emitting according to their dependencies. // we must traverse the set of Decls that we are emitting according to their dependencies.
// Our strategy is to populate a set of remaining decls, pop Decls one by one, // Our strategy is to populate a set of remaining decls, pop Decls one by one,
@ -307,16 +295,33 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
} }
} }
f.all_buffers.items[typedef_index] = .{ {
.iov_base = if (f.typedef_buf.items.len > 0) f.typedef_buf.items.ptr else "", // We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes.
.iov_len = f.typedef_buf.items.len, // This ensures that every lazy CType.Index exactly matches the global CType.Index.
assert(f.ctypes.count() == 0);
try self.flushCTypes(&f, .none, f.lazy_db.ctypes);
var it = self.decl_table.iterator();
while (it.next()) |entry|
try self.flushCTypes(&f, entry.key_ptr.toOptional(), entry.value_ptr.ctypes);
}
f.all_buffers.items[ctypes_index] = .{
.iov_base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "",
.iov_len = f.ctypes_buf.items.len,
}; };
f.file_size += f.typedef_buf.items.len; f.file_size += f.ctypes_buf.items.len;
f.all_buffers.items[lazy_index] = .{
.iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "",
.iov_len = f.lazy_db.fwd_decl.items.len,
};
f.file_size += f.lazy_db.fwd_decl.items.len;
// Now the code. // Now the code.
try f.all_buffers.ensureUnusedCapacity(gpa, decl_values.len); try f.all_buffers.ensureUnusedCapacity(gpa, 1 + decl_values.len);
for (decl_values) |decl| f.appendBufAssumeCapacity(f.lazy_db.code.items);
f.appendBufAssumeCapacity(decl.code.items); for (decl_values) |decl| f.appendBufAssumeCapacity(decl.code.items);
const file = self.base.file.?; const file = self.base.file.?;
try file.setEndPos(f.file_size); try file.setEndPos(f.file_size);
@ -324,22 +329,23 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
} }
const Flush = struct { const Flush = struct {
err_decls: DeclBlock = .{},
remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{}, remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{},
typedefs: Typedefs = .{},
typedef_buf: std.ArrayListUnmanaged(u8) = .{}, ctypes: codegen.CType.Store = .{},
ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{},
ctypes_buf: std.ArrayListUnmanaged(u8) = .{},
lazy_db: DeclBlock = .{},
lazy_fns: LazyFns = .{},
asm_buf: std.ArrayListUnmanaged(u8) = .{}, asm_buf: std.ArrayListUnmanaged(u8) = .{},
/// We collect a list of buffers to write, and write them all at once with pwritev 😎 /// We collect a list of buffers to write, and write them all at once with pwritev 😎
all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{}, all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{},
/// Keeps track of the total bytes of `all_buffers`. /// Keeps track of the total bytes of `all_buffers`.
file_size: u64 = 0, file_size: u64 = 0,
const Typedefs = std.HashMapUnmanaged( const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, void);
Type,
void,
Type.HashContext64,
std.hash_map.default_max_load_percentage,
);
fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void { fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void {
if (buf.len == 0) return; if (buf.len == 0) return;
@ -349,10 +355,13 @@ const Flush = struct {
fn deinit(f: *Flush, gpa: Allocator) void { fn deinit(f: *Flush, gpa: Allocator) void {
f.all_buffers.deinit(gpa); f.all_buffers.deinit(gpa);
f.typedef_buf.deinit(gpa); f.asm_buf.deinit(gpa);
f.typedefs.deinit(gpa); f.lazy_fns.deinit(gpa);
f.lazy_db.deinit(gpa);
f.ctypes_buf.deinit(gpa);
f.ctypes_map.deinit(gpa);
f.ctypes.deinit(gpa);
f.remaining_decls.deinit(gpa); f.remaining_decls.deinit(gpa);
f.err_decls.deinit(gpa);
} }
}; };
@ -360,53 +369,116 @@ const FlushDeclError = error{
OutOfMemory, OutOfMemory,
}; };
fn flushTypedefs(self: *C, f: *Flush, typedefs: codegen.TypedefMap.Unmanaged) FlushDeclError!void { fn flushCTypes(
if (typedefs.count() == 0) return; self: *C,
f: *Flush,
decl_index: Module.Decl.OptionalIndex,
decl_ctypes: codegen.CType.Store,
) FlushDeclError!void {
const gpa = self.base.allocator; const gpa = self.base.allocator;
const module = self.base.options.module.?; const mod = self.base.options.module.?;
try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, typedefs.count()), .{ const decl_ctypes_len = decl_ctypes.count();
.mod = module, f.ctypes_map.clearRetainingCapacity();
}); try f.ctypes_map.ensureTotalCapacity(gpa, decl_ctypes_len);
var it = typedefs.iterator();
while (it.next()) |new| { var global_ctypes = f.ctypes.promote(gpa);
const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{ defer f.ctypes.demote(global_ctypes);
.mod = module,
var ctypes_buf = f.ctypes_buf.toManaged(gpa);
defer f.ctypes_buf = ctypes_buf.moveToUnmanaged();
const writer = ctypes_buf.writer();
const slice = decl_ctypes.set.map.entries.slice();
for (slice.items(.key), 0..) |decl_cty, decl_i| {
const Context = struct {
arena: Allocator,
ctypes_map: []codegen.CType.Index,
cached_hash: codegen.CType.Store.Set.Map.Hash,
idx: codegen.CType.Index,
pub fn hash(ctx: @This(), _: codegen.CType) codegen.CType.Store.Set.Map.Hash {
return ctx.cached_hash;
}
pub fn eql(ctx: @This(), lhs: codegen.CType, rhs: codegen.CType, _: usize) bool {
return lhs.eqlContext(rhs, ctx);
}
pub fn eqlIndex(
ctx: @This(),
lhs_idx: codegen.CType.Index,
rhs_idx: codegen.CType.Index,
) bool {
if (lhs_idx < codegen.CType.Tag.no_payload_count or
rhs_idx < codegen.CType.Tag.no_payload_count) return lhs_idx == rhs_idx;
const lhs_i = lhs_idx - codegen.CType.Tag.no_payload_count;
if (lhs_i >= ctx.ctypes_map.len) return false;
return ctx.ctypes_map[lhs_i] == rhs_idx;
}
pub fn copyIndex(ctx: @This(), idx: codegen.CType.Index) codegen.CType.Index {
if (idx < codegen.CType.Tag.no_payload_count) return idx;
return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count];
}
};
const decl_idx = @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + decl_i);
const ctx = Context{
.arena = global_ctypes.arena.allocator(),
.ctypes_map = f.ctypes_map.items,
.cached_hash = decl_ctypes.indexToHash(decl_idx),
.idx = decl_idx,
};
const gop = try global_ctypes.set.map.getOrPutContextAdapted(gpa, decl_cty, ctx, .{
.store = &global_ctypes.set,
}); });
const global_idx =
@intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index);
f.ctypes_map.appendAssumeCapacity(global_idx);
if (!gop.found_existing) { if (!gop.found_existing) {
try f.typedef_buf.appendSlice(gpa, new.value_ptr.rendered); errdefer _ = global_ctypes.set.map.pop();
gop.key_ptr.* = try decl_cty.copyContext(ctx);
} }
if (std.debug.runtime_safety) {
const global_cty = &global_ctypes.set.map.entries.items(.key)[gop.index];
assert(global_cty == gop.key_ptr);
assert(decl_cty.eqlContext(global_cty.*, ctx));
assert(decl_cty.hash(decl_ctypes.set) == global_cty.hash(global_ctypes.set));
}
try codegen.genTypeDecl(
mod,
writer,
global_ctypes.set,
global_idx,
decl_index,
decl_ctypes.set,
decl_idx,
gop.found_existing,
);
} }
} }
fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void { fn flushErrDecls(self: *C, db: *DeclBlock) FlushDeclError!void {
const module = self.base.options.module.?; const gpa = self.base.allocator;
const fwd_decl = &f.err_decls.fwd_decl; const fwd_decl = &db.fwd_decl;
const typedefs = &f.err_decls.typedefs; const ctypes = &db.ctypes;
const code = &f.err_decls.code; const code = &db.code;
var object = codegen.Object{ var object = codegen.Object{
.dg = .{ .dg = .{
.gpa = module.gpa, .gpa = gpa,
.module = module, .module = self.base.options.module.?,
.error_msg = null, .error_msg = null,
.decl_index = undefined, .decl_index = .none,
.decl = undefined, .decl = null,
.fwd_decl = fwd_decl.toManaged(module.gpa), .fwd_decl = fwd_decl.toManaged(gpa),
.typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }), .ctypes = ctypes.*,
.typedefs_arena = self.arena.allocator(),
}, },
.code = code.toManaged(module.gpa), .code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code .indent_writer = undefined, // set later so we can get a pointer to object.code
}; };
object.indent_writer = .{ .underlying_writer = object.code.writer() }; object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer { defer {
object.code.deinit(); object.code.deinit();
for (object.dg.typedefs.values()) |typedef| { object.dg.ctypes.deinit(gpa);
module.gpa.free(typedef.rendered);
}
object.dg.typedefs.deinit();
object.dg.fwd_decl.deinit(); object.dg.fwd_decl.deinit();
} }
@ -416,14 +488,58 @@ fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void {
}; };
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
typedefs.* = object.dg.typedefs.unmanaged; ctypes.* = object.dg.ctypes.move();
object.dg.typedefs.unmanaged = .{};
code.* = object.code.moveToUnmanaged(); code.* = object.code.moveToUnmanaged();
}
try self.flushTypedefs(f, typedefs.*); fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) FlushDeclError!void {
try f.all_buffers.ensureUnusedCapacity(self.base.allocator, 1); const gpa = self.base.allocator;
f.appendBufAssumeCapacity(fwd_decl.items);
f.appendBufAssumeCapacity(code.items); const fwd_decl = &db.fwd_decl;
const ctypes = &db.ctypes;
const code = &db.code;
var object = codegen.Object{
.dg = .{
.gpa = gpa,
.module = self.base.options.module.?,
.error_msg = null,
.decl_index = .none,
.decl = null,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctypes = ctypes.*,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer {
object.code.deinit();
object.dg.ctypes.deinit(gpa);
object.dg.fwd_decl.deinit();
}
codegen.genLazyFn(&object, lazy_fn) catch |err| switch (err) {
error.AnalysisFail => unreachable,
else => |e| return e,
};
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
ctypes.* = object.dg.ctypes.move();
code.* = object.code.moveToUnmanaged();
}
fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void {
const gpa = self.base.allocator;
try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count()));
var it = lazy_fns.iterator();
while (it.next()) |entry| {
const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
if (gop.found_existing) continue;
gop.value_ptr.* = {};
try self.flushLazyFn(&f.lazy_db, entry);
}
} }
/// Assumes `decl` was in the `remaining_decls` set, and has already been removed. /// Assumes `decl` was in the `remaining_decls` set, and has already been removed.
@ -433,8 +549,8 @@ fn flushDecl(
decl_index: Module.Decl.Index, decl_index: Module.Decl.Index,
export_names: std.StringHashMapUnmanaged(void), export_names: std.StringHashMapUnmanaged(void),
) FlushDeclError!void { ) FlushDeclError!void {
const module = self.base.options.module.?; const gpa = self.base.allocator;
const decl = module.declPtr(decl_index); const decl = self.base.options.module.?.declPtr(decl_index);
// Before flushing any particular Decl we must ensure its // Before flushing any particular Decl we must ensure its
// dependencies are already flushed, so that the order in the .c // dependencies are already flushed, so that the order in the .c
// file comes out correctly. // file comes out correctly.
@ -445,10 +561,9 @@ fn flushDecl(
} }
const decl_block = self.decl_table.getPtr(decl_index).?; const decl_block = self.decl_table.getPtr(decl_index).?;
const gpa = self.base.allocator;
try self.flushTypedefs(f, decl_block.typedefs); try self.flushLazyFns(f, decl_block.lazy_fns);
try f.all_buffers.ensureUnusedCapacity(gpa, 2); try f.all_buffers.ensureUnusedCapacity(gpa, 1);
if (!(decl.isExtern() and export_names.contains(mem.span(decl.name)))) if (!(decl.isExtern() and export_names.contains(mem.span(decl.name))))
f.appendBufAssumeCapacity(decl_block.fwd_decl.items); f.appendBufAssumeCapacity(decl_block.fwd_decl.items);
} }

View File

@ -403,8 +403,11 @@ const usage_build_generic =
\\ ReleaseFast Optimizations on, safety off \\ ReleaseFast Optimizations on, safety off
\\ ReleaseSafe Optimizations on, safety on \\ ReleaseSafe Optimizations on, safety on
\\ ReleaseSmall Optimize for small binary, safety off \\ ReleaseSmall Optimize for small binary, safety off
\\ --pkg-begin [name] [path] Make pkg available to import and push current pkg \\ --mod [name]:[deps]:[src] Make a module available for dependency under the given name
\\ --pkg-end Pop current pkg \\ deps: [dep],[dep],...
\\ dep: [[import=]name]
\\ --deps [dep],[dep],... Set dependency names for the root package
\\ dep: [[import=]name]
\\ --main-pkg-path Set the directory of the root package \\ --main-pkg-path Set the directory of the root package
\\ -fPIC Force-enable Position Independent Code \\ -fPIC Force-enable Position Independent Code
\\ -fno-PIC Force-disable Position Independent Code \\ -fno-PIC Force-disable Position Independent Code
@ -858,15 +861,21 @@ fn buildOutputType(
var linker_export_symbol_names = std.ArrayList([]const u8).init(gpa); var linker_export_symbol_names = std.ArrayList([]const u8).init(gpa);
defer linker_export_symbol_names.deinit(); defer linker_export_symbol_names.deinit();
// This package only exists to clean up the code parsing --pkg-begin and // Contains every module specified via --mod. The dependencies are added
// --pkg-end flags. Use dummy values that are safe for the destroy call. // after argument parsing is completed. We use a StringArrayHashMap to make
var pkg_tree_root: Package = .{ // error output consistent.
.root_src_directory = .{ .path = null, .handle = fs.cwd() }, var modules = std.StringArrayHashMap(struct {
.root_src_path = &[0]u8{}, mod: *Package,
.name = &[0]u8{}, deps_str: []const u8, // still in CLI arg format
}; }).init(gpa);
defer freePkgTree(gpa, &pkg_tree_root, false); defer {
var cur_pkg: *Package = &pkg_tree_root; var it = modules.iterator();
while (it.next()) |kv| kv.value_ptr.mod.destroy(gpa);
modules.deinit();
}
// The dependency string for the root package
var root_deps_str: ?[]const u8 = null;
// before arg parsing, check for the NO_COLOR environment variable // before arg parsing, check for the NO_COLOR environment variable
// if it exists, default the color setting to .off // if it exists, default the color setting to .off
@ -943,34 +952,44 @@ fn buildOutputType(
} else { } else {
fatal("unexpected end-of-parameter mark: --", .{}); fatal("unexpected end-of-parameter mark: --", .{});
} }
} else if (mem.eql(u8, arg, "--pkg-begin")) { } else if (mem.eql(u8, arg, "--mod")) {
const opt_pkg_name = args_iter.next(); const info = args_iter.nextOrFatal();
const opt_pkg_path = args_iter.next(); var info_it = mem.split(u8, info, ":");
if (opt_pkg_name == null or opt_pkg_path == null) const mod_name = info_it.next() orelse fatal("expected non-empty argument after {s}", .{arg});
fatal("Expected 2 arguments after {s}", .{arg}); const deps_str = info_it.next() orelse fatal("expected 'name:deps:path' after {s}", .{arg});
const root_src_orig = info_it.rest();
if (root_src_orig.len == 0) fatal("expected 'name:deps:path' after {s}", .{arg});
if (mod_name.len == 0) fatal("empty name for module at '{s}'", .{root_src_orig});
const pkg_name = opt_pkg_name.?; const root_src = try introspect.resolvePath(arena, root_src_orig);
const pkg_path = try introspect.resolvePath(arena, opt_pkg_path.?);
const new_cur_pkg = Package.create( for ([_][]const u8{ "std", "root", "builtin" }) |name| {
gpa, if (mem.eql(u8, mod_name, name)) {
pkg_name, fatal("unable to add module '{s}' -> '{s}': conflicts with builtin module", .{ mod_name, root_src });
fs.path.dirname(pkg_path), }
fs.path.basename(pkg_path),
) catch |err| {
fatal("Failed to add package at path {s}: {s}", .{ pkg_path, @errorName(err) });
};
if (mem.eql(u8, pkg_name, "std") or mem.eql(u8, pkg_name, "root") or mem.eql(u8, pkg_name, "builtin")) {
fatal("unable to add package '{s}' -> '{s}': conflicts with builtin package", .{ pkg_name, pkg_path });
} else if (cur_pkg.table.get(pkg_name)) |prev| {
fatal("unable to add package '{s}' -> '{s}': already exists as '{s}", .{ pkg_name, pkg_path, prev.root_src_path });
} }
try cur_pkg.addAndAdopt(gpa, new_cur_pkg);
cur_pkg = new_cur_pkg; var mod_it = modules.iterator();
} else if (mem.eql(u8, arg, "--pkg-end")) { while (mod_it.next()) |kv| {
cur_pkg = cur_pkg.parent orelse if (std.mem.eql(u8, mod_name, kv.key_ptr.*)) {
fatal("encountered --pkg-end with no matching --pkg-begin", .{}); fatal("unable to add module '{s}' -> '{s}': already exists as '{s}'", .{ mod_name, root_src, kv.value_ptr.mod.root_src_path });
}
}
try modules.ensureUnusedCapacity(1);
modules.put(mod_name, .{
.mod = try Package.create(
gpa,
fs.path.dirname(root_src),
fs.path.basename(root_src),
),
.deps_str = deps_str,
}) catch unreachable;
} else if (mem.eql(u8, arg, "--deps")) {
if (root_deps_str != null) {
fatal("only one --deps argument is allowed", .{});
}
root_deps_str = args_iter.nextOrFatal();
} else if (mem.eql(u8, arg, "--main-pkg-path")) { } else if (mem.eql(u8, arg, "--main-pkg-path")) {
main_pkg_path = args_iter.nextOrFatal(); main_pkg_path = args_iter.nextOrFatal();
} else if (mem.eql(u8, arg, "-cflags")) { } else if (mem.eql(u8, arg, "-cflags")) {
@ -1955,12 +1974,15 @@ fn buildOutputType(
linker_compress_debug_sections = std.meta.stringToEnum(link.CompressDebugSections, arg1) orelse { linker_compress_debug_sections = std.meta.stringToEnum(link.CompressDebugSections, arg1) orelse {
fatal("expected [none|zlib] after --compress-debug-sections, found '{s}'", .{arg1}); fatal("expected [none|zlib] after --compress-debug-sections, found '{s}'", .{arg1});
}; };
} else if (mem.eql(u8, arg, "-z")) { } else if (mem.startsWith(u8, arg, "-z")) {
i += 1; var z_arg = arg[2..];
if (i >= linker_args.items.len) { if (z_arg.len == 0) {
fatal("expected linker extension flag after '{s}'", .{arg}); i += 1;
if (i >= linker_args.items.len) {
fatal("expected linker extension flag after '{s}'", .{arg});
}
z_arg = linker_args.items[i];
} }
const z_arg = linker_args.items[i];
if (mem.eql(u8, z_arg, "nodelete")) { if (mem.eql(u8, z_arg, "nodelete")) {
linker_z_nodelete = true; linker_z_nodelete = true;
} else if (mem.eql(u8, z_arg, "notext")) { } else if (mem.eql(u8, z_arg, "notext")) {
@ -2304,6 +2326,31 @@ fn buildOutputType(
}, },
} }
{
// Resolve module dependencies
var it = modules.iterator();
while (it.next()) |kv| {
const deps_str = kv.value_ptr.deps_str;
var deps_it = ModuleDepIterator.init(deps_str);
while (deps_it.next()) |dep| {
if (dep.expose.len == 0) {
fatal("module '{s}' depends on '{s}' with a blank name", .{ kv.key_ptr.*, dep.name });
}
for ([_][]const u8{ "std", "root", "builtin" }) |name| {
if (mem.eql(u8, dep.expose, name)) {
fatal("unable to add module '{s}' under name '{s}': conflicts with builtin module", .{ dep.name, dep.expose });
}
}
const dep_mod = modules.get(dep.name) orelse
fatal("module '{s}' depends on module '{s}' which does not exist", .{ kv.key_ptr.*, dep.name });
try kv.value_ptr.mod.add(gpa, dep.expose, dep_mod.mod);
}
}
}
if (arg_mode == .build and optimize_mode == .ReleaseSmall and strip == null) if (arg_mode == .build and optimize_mode == .ReleaseSmall and strip == null)
strip = true; strip = true;
@ -2883,14 +2930,14 @@ fn buildOutputType(
if (main_pkg_path) |unresolved_main_pkg_path| { if (main_pkg_path) |unresolved_main_pkg_path| {
const p = try introspect.resolvePath(arena, unresolved_main_pkg_path); const p = try introspect.resolvePath(arena, unresolved_main_pkg_path);
if (p.len == 0) { if (p.len == 0) {
break :blk try Package.create(gpa, "root", null, src_path); break :blk try Package.create(gpa, null, src_path);
} else { } else {
const rel_src_path = try fs.path.relative(arena, p, src_path); const rel_src_path = try fs.path.relative(arena, p, src_path);
break :blk try Package.create(gpa, "root", p, rel_src_path); break :blk try Package.create(gpa, p, rel_src_path);
} }
} else { } else {
const root_src_dir_path = fs.path.dirname(src_path); const root_src_dir_path = fs.path.dirname(src_path);
break :blk Package.create(gpa, "root", root_src_dir_path, fs.path.basename(src_path)) catch |err| { break :blk Package.create(gpa, root_src_dir_path, fs.path.basename(src_path)) catch |err| {
if (root_src_dir_path) |p| { if (root_src_dir_path) |p| {
fatal("unable to open '{s}': {s}", .{ p, @errorName(err) }); fatal("unable to open '{s}': {s}", .{ p, @errorName(err) });
} else { } else {
@ -2901,23 +2948,24 @@ fn buildOutputType(
} else null; } else null;
defer if (main_pkg) |p| p.destroy(gpa); defer if (main_pkg) |p| p.destroy(gpa);
// Transfer packages added with --pkg-begin/--pkg-end to the root package // Transfer packages added with --deps to the root package
if (main_pkg) |pkg| { if (main_pkg) |mod| {
var it = pkg_tree_root.table.valueIterator(); var it = ModuleDepIterator.init(root_deps_str orelse "");
while (it.next()) |p| { while (it.next()) |dep| {
if (p.*.parent == &pkg_tree_root) { if (dep.expose.len == 0) {
p.*.parent = pkg; fatal("root module depends on '{s}' with a blank name", .{dep.name});
} }
}
pkg.table = pkg_tree_root.table; for ([_][]const u8{ "std", "root", "builtin" }) |name| {
pkg_tree_root.table = .{}; if (mem.eql(u8, dep.expose, name)) {
} else { fatal("unable to add module '{s}' under name '{s}': conflicts with builtin module", .{ dep.name, dep.expose });
// Remove any dangling pointers just in case. }
var it = pkg_tree_root.table.valueIterator();
while (it.next()) |p| {
if (p.*.parent == &pkg_tree_root) {
p.*.parent = null;
} }
const dep_mod = modules.get(dep.name) orelse
fatal("root module depends on module '{s}' which does not exist", .{dep.name});
try mod.add(gpa, dep.expose, dep_mod.mod);
} }
} }
@ -3397,6 +3445,32 @@ fn buildOutputType(
return cleanExit(); return cleanExit();
} }
const ModuleDepIterator = struct {
split: mem.SplitIterator(u8),
fn init(deps_str: []const u8) ModuleDepIterator {
return .{ .split = mem.split(u8, deps_str, ",") };
}
const Dependency = struct {
expose: []const u8,
name: []const u8,
};
fn next(it: *ModuleDepIterator) ?Dependency {
if (it.split.buffer.len == 0) return null; // don't return "" for the first iteration on ""
const str = it.split.next() orelse return null;
if (mem.indexOfScalar(u8, str, '=')) |i| {
return .{
.expose = str[0..i],
.name = str[i + 1 ..],
};
} else {
return .{ .expose = str, .name = str };
}
}
};
fn parseCrossTargetOrReportFatalError( fn parseCrossTargetOrReportFatalError(
allocator: Allocator, allocator: Allocator,
opts: std.zig.CrossTarget.ParseOptions, opts: std.zig.CrossTarget.ParseOptions,
@ -3623,18 +3697,6 @@ fn updateModule(gpa: Allocator, comp: *Compilation, hook: AfterUpdateHook) !void
} }
} }
fn freePkgTree(gpa: Allocator, pkg: *Package, free_parent: bool) void {
{
var it = pkg.table.valueIterator();
while (it.next()) |value| {
freePkgTree(gpa, value.*, true);
}
}
if (free_parent) {
pkg.destroy(gpa);
}
}
fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void { fn cmdTranslateC(comp: *Compilation, arena: Allocator, enable_cache: bool) !void {
if (!build_options.have_llvm) if (!build_options.have_llvm)
fatal("cannot translate-c: compiler built without LLVM extensions", .{}); fatal("cannot translate-c: compiler built without LLVM extensions", .{});
@ -4138,7 +4200,6 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
var main_pkg: Package = .{ var main_pkg: Package = .{
.root_src_directory = zig_lib_directory, .root_src_directory = zig_lib_directory,
.root_src_path = "build_runner.zig", .root_src_path = "build_runner.zig",
.name = "root",
}; };
if (!build_options.omit_pkg_fetching_code) { if (!build_options.omit_pkg_fetching_code) {
@ -4181,22 +4242,20 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
const deps_pkg = try Package.createFilePkg( const deps_pkg = try Package.createFilePkg(
gpa, gpa,
"@dependencies",
local_cache_directory, local_cache_directory,
"dependencies.zig", "dependencies.zig",
dependencies_source.items, dependencies_source.items,
); );
mem.swap(Package.Table, &main_pkg.table, &deps_pkg.table); mem.swap(Package.Table, &main_pkg.table, &deps_pkg.table);
try main_pkg.addAndAdopt(gpa, deps_pkg); try main_pkg.add(gpa, "@dependencies", deps_pkg);
} }
var build_pkg: Package = .{ var build_pkg: Package = .{
.root_src_directory = build_directory, .root_src_directory = build_directory,
.root_src_path = build_zig_basename, .root_src_path = build_zig_basename,
.name = "@build",
}; };
try main_pkg.addAndAdopt(gpa, &build_pkg); try main_pkg.add(gpa, "@build", &build_pkg);
const comp = Compilation.create(gpa, .{ const comp = Compilation.create(gpa, .{
.zig_lib_directory = zig_lib_directory, .zig_lib_directory = zig_lib_directory,
@ -4431,7 +4490,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void
.root_decl = .none, .root_decl = .none,
}; };
file.pkg = try Package.create(gpa, "root", null, file.sub_file_path); file.pkg = try Package.create(gpa, null, file.sub_file_path);
defer file.pkg.destroy(gpa); defer file.pkg.destroy(gpa);
file.zir = try AstGen.generate(gpa, file.tree); file.zir = try AstGen.generate(gpa, file.tree);
@ -4642,7 +4701,7 @@ fn fmtPathFile(
.root_decl = .none, .root_decl = .none,
}; };
file.pkg = try Package.create(fmt.gpa, "root", null, file.sub_file_path); file.pkg = try Package.create(fmt.gpa, null, file.sub_file_path);
defer file.pkg.destroy(fmt.gpa); defer file.pkg.destroy(fmt.gpa);
if (stat.size > max_src_size) if (stat.size > max_src_size)
@ -5354,7 +5413,7 @@ pub fn cmdAstCheck(
file.stat.size = source.len; file.stat.size = source.len;
} }
file.pkg = try Package.create(gpa, "root", null, file.sub_file_path); file.pkg = try Package.create(gpa, null, file.sub_file_path);
defer file.pkg.destroy(gpa); defer file.pkg.destroy(gpa);
file.tree = try Ast.parse(gpa, file.source, .zig); file.tree = try Ast.parse(gpa, file.source, .zig);
@ -5473,7 +5532,7 @@ pub fn cmdChangelist(
.root_decl = .none, .root_decl = .none,
}; };
file.pkg = try Package.create(gpa, "root", null, file.sub_file_path); file.pkg = try Package.create(gpa, null, file.sub_file_path);
defer file.pkg.destroy(gpa); defer file.pkg.destroy(gpa);
const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0); const source = try arena.allocSentinel(u8, @intCast(usize, stat.size), 0);

View File

@ -727,6 +727,7 @@ pub fn supportsFunctionAlignment(target: std.Target) bool {
pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool { pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool {
switch (backend) { switch (backend) {
.stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target), .stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
.stage2_c => return true,
else => return false, else => return false,
} }
} }

View File

@ -583,6 +583,11 @@ pub const TestContext = struct {
path: []const u8, path: []const u8,
}; };
pub const DepModule = struct {
name: []const u8,
path: []const u8,
};
pub const Backend = enum { pub const Backend = enum {
stage1, stage1,
stage2, stage2,
@ -611,6 +616,7 @@ pub const TestContext = struct {
link_libc: bool = false, link_libc: bool = false,
files: std.ArrayList(File), files: std.ArrayList(File),
deps: std.ArrayList(DepModule),
result: anyerror!void = {}, result: anyerror!void = {},
@ -618,6 +624,13 @@ pub const TestContext = struct {
case.files.append(.{ .path = name, .src = src }) catch @panic("out of memory"); case.files.append(.{ .path = name, .src = src }) catch @panic("out of memory");
} }
pub fn addDepModule(case: *Case, name: []const u8, path: []const u8) void {
case.deps.append(.{
.name = name,
.path = path,
}) catch @panic("out of memory");
}
/// Adds a subcase in which the module is updated with `src`, and a C /// Adds a subcase in which the module is updated with `src`, and a C
/// header is generated. /// header is generated.
pub fn addHeader(self: *Case, src: [:0]const u8, result: [:0]const u8) void { pub fn addHeader(self: *Case, src: [:0]const u8, result: [:0]const u8) void {
@ -767,6 +780,7 @@ pub const TestContext = struct {
.updates = std.ArrayList(Update).init(ctx.cases.allocator), .updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Exe, .output_mode = .Exe,
.files = std.ArrayList(File).init(ctx.arena), .files = std.ArrayList(File).init(ctx.arena),
.deps = std.ArrayList(DepModule).init(ctx.arena),
}) catch @panic("out of memory"); }) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1]; return &ctx.cases.items[ctx.cases.items.len - 1];
} }
@ -787,6 +801,7 @@ pub const TestContext = struct {
.updates = std.ArrayList(Update).init(ctx.cases.allocator), .updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Exe, .output_mode = .Exe,
.files = std.ArrayList(File).init(ctx.arena), .files = std.ArrayList(File).init(ctx.arena),
.deps = std.ArrayList(DepModule).init(ctx.arena),
.link_libc = true, .link_libc = true,
}) catch @panic("out of memory"); }) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1]; return &ctx.cases.items[ctx.cases.items.len - 1];
@ -801,6 +816,7 @@ pub const TestContext = struct {
.updates = std.ArrayList(Update).init(ctx.cases.allocator), .updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Exe, .output_mode = .Exe,
.files = std.ArrayList(File).init(ctx.arena), .files = std.ArrayList(File).init(ctx.arena),
.deps = std.ArrayList(DepModule).init(ctx.arena),
.backend = .llvm, .backend = .llvm,
.link_libc = true, .link_libc = true,
}) catch @panic("out of memory"); }) catch @panic("out of memory");
@ -818,6 +834,7 @@ pub const TestContext = struct {
.updates = std.ArrayList(Update).init(ctx.cases.allocator), .updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Obj, .output_mode = .Obj,
.files = std.ArrayList(File).init(ctx.arena), .files = std.ArrayList(File).init(ctx.arena),
.deps = std.ArrayList(DepModule).init(ctx.arena),
}) catch @panic("out of memory"); }) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1]; return &ctx.cases.items[ctx.cases.items.len - 1];
} }
@ -834,6 +851,7 @@ pub const TestContext = struct {
.output_mode = .Exe, .output_mode = .Exe,
.is_test = true, .is_test = true,
.files = std.ArrayList(File).init(ctx.arena), .files = std.ArrayList(File).init(ctx.arena),
.deps = std.ArrayList(DepModule).init(ctx.arena),
}) catch @panic("out of memory"); }) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1]; return &ctx.cases.items[ctx.cases.items.len - 1];
} }
@ -858,6 +876,7 @@ pub const TestContext = struct {
.updates = std.ArrayList(Update).init(ctx.cases.allocator), .updates = std.ArrayList(Update).init(ctx.cases.allocator),
.output_mode = .Obj, .output_mode = .Obj,
.files = std.ArrayList(File).init(ctx.arena), .files = std.ArrayList(File).init(ctx.arena),
.deps = std.ArrayList(DepModule).init(ctx.arena),
}) catch @panic("out of memory"); }) catch @panic("out of memory");
return &ctx.cases.items[ctx.cases.items.len - 1]; return &ctx.cases.items[ctx.cases.items.len - 1];
} }
@ -1145,6 +1164,7 @@ pub const TestContext = struct {
.output_mode = output_mode, .output_mode = output_mode,
.link_libc = backend == .llvm, .link_libc = backend == .llvm,
.files = std.ArrayList(TestContext.File).init(ctx.cases.allocator), .files = std.ArrayList(TestContext.File).init(ctx.cases.allocator),
.deps = std.ArrayList(DepModule).init(ctx.cases.allocator),
}); });
try cases.append(next); try cases.append(next);
} }
@ -1497,9 +1517,25 @@ pub const TestContext = struct {
var main_pkg: Package = .{ var main_pkg: Package = .{
.root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir }, .root_src_directory = .{ .path = tmp_dir_path, .handle = tmp.dir },
.root_src_path = tmp_src_path, .root_src_path = tmp_src_path,
.name = "root",
}; };
defer main_pkg.table.deinit(allocator); defer {
var it = main_pkg.table.iterator();
while (it.next()) |kv| {
allocator.free(kv.key_ptr.*);
kv.value_ptr.*.destroy(allocator);
}
main_pkg.table.deinit(allocator);
}
for (case.deps.items) |dep| {
var pkg = try Package.create(
allocator,
tmp_dir_path,
dep.path,
);
errdefer pkg.destroy(allocator);
try main_pkg.add(allocator, dep.name, pkg);
}
const bin_name = try std.zig.binNameAlloc(arena, .{ const bin_name = try std.zig.binNameAlloc(arena, .{
.root_name = "test_case", .root_name = "test_case",

View File

@ -1249,11 +1249,22 @@ pub const Value = extern union {
}; };
} }
fn isDeclRef(val: Value) bool {
var check = val;
while (true) switch (check.tag()) {
.variable, .decl_ref, .decl_ref_mut, .comptime_field_ptr => return true,
.field_ptr => check = check.castTag(.field_ptr).?.data.container_ptr,
.elem_ptr => check = check.castTag(.elem_ptr).?.data.array_ptr,
.eu_payload_ptr, .opt_payload_ptr => check = check.cast(Value.Payload.PayloadPtr).?.data.container_ptr,
else => return false,
};
}
/// Write a Value's contents to `buffer`. /// Write a Value's contents to `buffer`.
/// ///
/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
/// the end of the value in memory. /// the end of the value in memory.
pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) void { pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ReinterpretDeclRef}!void {
const target = mod.getTarget(); const target = mod.getTarget();
const endian = target.cpu.arch.endian(); const endian = target.cpu.arch.endian();
if (val.isUndef()) { if (val.isUndef()) {
@ -1309,7 +1320,7 @@ pub const Value = extern union {
var buf_off: usize = 0; var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) { while (elem_i < len) : (elem_i += 1) {
const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf); const elem_val = val.elemValueBuffer(mod, elem_i, &elem_value_buf);
elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]); try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]);
buf_off += elem_size; buf_off += elem_size;
} }
}, },
@ -1317,7 +1328,7 @@ pub const Value = extern union {
// We use byte_count instead of abi_size here, so that any padding bytes // We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems. // follow the data bytes, on both big- and little-endian systems.
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
}, },
.Struct => switch (ty.containerLayout()) { .Struct => switch (ty.containerLayout()) {
.Auto => unreachable, // Sema is supposed to have emitted a compile error already .Auto => unreachable, // Sema is supposed to have emitted a compile error already
@ -1326,12 +1337,12 @@ pub const Value = extern union {
const field_vals = val.castTag(.aggregate).?.data; const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| { for (fields, 0..) |field, i| {
const off = @intCast(usize, ty.structFieldOffset(i, target)); const off = @intCast(usize, ty.structFieldOffset(i, target));
writeToMemory(field_vals[i], field.ty, mod, buffer[off..]); try writeToMemory(field_vals[i], field.ty, mod, buffer[off..]);
} }
}, },
.Packed => { .Packed => {
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
}, },
}, },
.ErrorSet => { .ErrorSet => {
@ -1345,9 +1356,14 @@ pub const Value = extern union {
.Extern => @panic("TODO implement writeToMemory for extern unions"), .Extern => @panic("TODO implement writeToMemory for extern unions"),
.Packed => { .Packed => {
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8; const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
}, },
}, },
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
if (val.isDeclRef()) return error.ReinterpretDeclRef;
return val.writeToMemory(Type.usize, mod, buffer);
},
else => @panic("TODO implement writeToMemory for more types"), else => @panic("TODO implement writeToMemory for more types"),
} }
} }
@ -1356,7 +1372,7 @@ pub const Value = extern union {
/// ///
/// Both the start and the end of the provided buffer must be tight, since /// Both the start and the end of the provided buffer must be tight, since
/// big-endian packed memory layouts start at the end of the buffer. /// big-endian packed memory layouts start at the end of the buffer.
pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) void { pub fn writeToPackedMemory(val: Value, ty: Type, mod: *Module, buffer: []u8, bit_offset: usize) error{ReinterpretDeclRef}!void {
const target = mod.getTarget(); const target = mod.getTarget();
const endian = target.cpu.arch.endian(); const endian = target.cpu.arch.endian();
if (val.isUndef()) { if (val.isUndef()) {
@ -1420,7 +1436,7 @@ pub const Value = extern union {
// On big-endian systems, LLVM reverses the element order of vectors by default // On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i; const tgt_elem_i = if (endian == .Big) len - elem_i - 1 else elem_i;
const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf); const elem_val = val.elemValueBuffer(mod, tgt_elem_i, &elem_value_buf);
elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits); try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits);
bits += elem_bit_size; bits += elem_bit_size;
} }
}, },
@ -1433,7 +1449,7 @@ pub const Value = extern union {
const field_vals = val.castTag(.aggregate).?.data; const field_vals = val.castTag(.aggregate).?.data;
for (fields, 0..) |field, i| { for (fields, 0..) |field, i| {
const field_bits = @intCast(u16, field.ty.bitSize(target)); const field_bits = @intCast(u16, field.ty.bitSize(target));
field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits); try field_vals[i].writeToPackedMemory(field.ty, mod, buffer, bit_offset + bits);
bits += field_bits; bits += field_bits;
} }
}, },
@ -1446,9 +1462,14 @@ pub const Value = extern union {
const field_type = ty.unionFields().values()[field_index.?].ty; const field_type = ty.unionFields().values()[field_index.?].ty;
const field_val = val.fieldValue(field_type, field_index.?); const field_val = val.fieldValue(field_type, field_index.?);
field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset);
}, },
}, },
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
if (val.isDeclRef()) return error.ReinterpretDeclRef;
return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset);
},
else => @panic("TODO implement writeToPackedMemory for more types"), else => @panic("TODO implement writeToPackedMemory for more types"),
} }
} }
@ -1553,6 +1574,10 @@ pub const Value = extern union {
}; };
return Value.initPayload(&payload.base); return Value.initPayload(&payload.base);
}, },
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
return readFromMemory(Type.usize, mod, buffer, arena);
},
else => @panic("TODO implement readFromMemory for more types"), else => @panic("TODO implement readFromMemory for more types"),
} }
} }
@ -1640,6 +1665,10 @@ pub const Value = extern union {
return Tag.aggregate.create(arena, field_vals); return Tag.aggregate.create(arena, field_vals);
}, },
}, },
.Pointer => {
assert(!ty.isSlice()); // No well defined layout.
return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena);
},
else => @panic("TODO implement readFromPackedMemory for more types"), else => @panic("TODO implement readFromPackedMemory for more types"),
} }
} }

2486
stage1/zig.h Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@ -551,7 +551,11 @@ test "align(N) on functions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO this is not supported on MSVC
// This is not supported on MSVC
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) {
return error.SkipZigTest;
}
// function alignment is a compile error on wasm32/wasm64 // function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;

View File

@ -7,6 +7,7 @@ const is_x86_64_linux = builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux
comptime { comptime {
if (builtin.zig_backend != .stage2_arm and if (builtin.zig_backend != .stage2_arm and
builtin.zig_backend != .stage2_aarch64 and builtin.zig_backend != .stage2_aarch64 and
!(builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) and // MSVC doesn't support inline assembly
is_x86_64_linux) is_x86_64_linux)
{ {
asm ( asm (
@ -23,7 +24,8 @@ test "module level assembly" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
if (is_x86_64_linux) { if (is_x86_64_linux) {
try expect(this_is_my_alias() == 1234); try expect(this_is_my_alias() == 1234);
@ -36,7 +38,8 @@ test "output constraint modifiers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
// This is only testing compilation. // This is only testing compilation.
var a: u32 = 3; var a: u32 = 3;
@ -58,7 +61,8 @@ test "alternative constraints" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
// Make sure we allow commas as a separator for alternative constraints. // Make sure we allow commas as a separator for alternative constraints.
var a: u32 = 3; var a: u32 = 3;
@ -75,7 +79,8 @@ test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
asm volatile ("" asm volatile (""
: :
@ -125,7 +130,8 @@ test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
asm volatile ("" asm volatile (""
: :
@ -151,6 +157,8 @@ test "asm modifiers (AArch64)" {
if (builtin.target.cpu.arch != .aarch64) return error.SkipZigTest; if (builtin.target.cpu.arch != .aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
var x: u32 = 15; var x: u32 = 15;
const double = asm ("add %[ret:w], %[in:w], %[in:w]" const double = asm ("add %[ret:w], %[in:w], %[in:w]"
: [ret] "=r" (-> u32), : [ret] "=r" (-> u32),

View File

@ -387,6 +387,7 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
} }
test "take address of parameter" { test "take address of parameter" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1143,3 +1144,17 @@ test "orelse coercion as function argument" {
var foo = Container.init(optional orelse .{}); var foo = Container.init(optional orelse .{});
try expect(foo.a.?.start == -1); try expect(foo.a.?.start == -1);
} }
test "runtime-known globals initialized with undefined" {
const S = struct {
var array: [10]u32 = [_]u32{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
var vp: [*]u32 = undefined;
var s: []u32 = undefined;
};
S.vp = &S.array;
S.s = S.vp[0..5];
try expect(S.s[0] == 1);
try expect(S.s[4] == 5);
}

View File

@ -1568,3 +1568,12 @@ test "@volatileCast without a result location" {
try expect(@TypeOf(z) == *i32); try expect(@TypeOf(z) == *i32);
try expect(z.* == 1234); try expect(z.* == 1234);
} }
test "coercion from single-item pointer to @as to slice" {
var x: u32 = 1;
// Why the following line gets a compile error?
const t: []u32 = @as(*[1]u32, &x);
try expect(t[0] == 1);
}

View File

@ -9,6 +9,7 @@ var argv: [*]const [*]const u8 = undefined;
test "const slice child" { test "const slice child" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const strs = [_][*]const u8{ "one", "two", "three" }; const strs = [_][*]const u8{ "one", "two", "three" };

View File

@ -1338,6 +1338,7 @@ test "lazy sizeof is resolved in division" {
test "lazy value is resolved as slice operand" { test "lazy value is resolved as slice operand" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const A = struct { a: u32 }; const A = struct { a: u32 };

View File

@ -44,3 +44,81 @@ fn testParentFieldPtrFirst(a: *const bool) !void {
try expect(base == &foo); try expect(base == &foo);
try expect(&base.a == a); try expect(&base.a == a);
} }
test "@fieldParentPtr untagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testFieldParentPtrUnion(&bar.c);
comptime try testFieldParentPtrUnion(&bar.c);
}
const Bar = union(enum) {
a: bool,
b: f32,
c: i32,
d: i32,
};
const bar = Bar{ .c = 42 };
fn testFieldParentPtrUnion(c: *const i32) !void {
try expect(c == &bar.c);
const base = @fieldParentPtr(Bar, "c", c);
try expect(base == &bar);
try expect(&base.c == c);
}
test "@fieldParentPtr tagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testFieldParentPtrTaggedUnion(&bar_tagged.c);
comptime try testFieldParentPtrTaggedUnion(&bar_tagged.c);
}
const BarTagged = union(enum) {
a: bool,
b: f32,
c: i32,
d: i32,
};
const bar_tagged = BarTagged{ .c = 42 };
fn testFieldParentPtrTaggedUnion(c: *const i32) !void {
try expect(c == &bar_tagged.c);
const base = @fieldParentPtr(BarTagged, "c", c);
try expect(base == &bar_tagged);
try expect(&base.c == c);
}
test "@fieldParentPtr extern union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testFieldParentPtrExternUnion(&bar_extern.c);
comptime try testFieldParentPtrExternUnion(&bar_extern.c);
}
const BarExtern = extern union {
a: bool,
b: f32,
c: i32,
d: i32,
};
const bar_extern = BarExtern{ .c = 42 };
fn testFieldParentPtrExternUnion(c: *const i32) !void {
try expect(c == &bar_extern.c);
const base = @fieldParentPtr(BarExtern, "c", c);
try expect(base == &bar_extern);
try expect(&base.c == c);
}

View File

@ -13,7 +13,6 @@ test "int comparison elision" {
// TODO: support int types > 128 bits wide in other backends // TODO: support int types > 128 bits wide in other backends
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

View File

@ -7,7 +7,6 @@ test "strlit to vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const strlit = "0123456789abcdef0123456789ABCDEF"; const strlit = "0123456789abcdef0123456789ABCDEF";
const vec_from_strlit: @Vector(32, u8) = strlit.*; const vec_from_strlit: @Vector(32, u8) = strlit.*;

View File

@ -1463,7 +1463,6 @@ test "vector integer addition" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct { const S = struct {

View File

@ -603,7 +603,6 @@ test "packed struct initialized in bitcast" {
test "pointer to container level packed struct field" { test "pointer to container level packed struct field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -507,7 +507,6 @@ test "ptrCast comptime known slice to C pointer" {
} }
test "ptrToInt on a generic function" { test "ptrToInt on a generic function" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

View File

@ -1330,7 +1330,6 @@ test "struct field init value is size of the struct" {
} }
test "under-aligned struct field" { test "under-aligned struct field" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -1578,3 +1577,38 @@ test "directly initiating tuple like struct" {
const a = struct { u8 }{8}; const a = struct { u8 }{8};
try expect(a[0] == 8); try expect(a[0] == 8);
} }
test "instantiate struct with comptime field" {
{
var things = struct {
comptime foo: i8 = 1,
}{};
comptime std.debug.assert(things.foo == 1);
}
{
const T = struct {
comptime foo: i8 = 1,
};
var things = T{};
comptime std.debug.assert(things.foo == 1);
}
{
var things: struct {
comptime foo: i8 = 1,
} = .{};
comptime std.debug.assert(things.foo == 1);
}
{
var things: struct {
comptime foo: i8 = 1,
} = undefined; // Segmentation fault at address 0x0
comptime std.debug.assert(things.foo == 1);
}
}

View File

@ -603,3 +603,9 @@ test "@typeInfo decls ignore dependency loops" {
}; };
_ = S.foo; _ = S.foo;
} }
test "type info of tuple of string literal default value" {
const struct_field = @typeInfo(@TypeOf(.{"hi"})).Struct.fields[0];
const value = @ptrCast(*align(1) const *const [2:0]u8, struct_field.default_value.?).*;
comptime std.debug.assert(value[0] == 'h');
}

View File

@ -96,10 +96,9 @@ fn doNothingWithFirstArg(args: anytype) void {
test "simple variadic function" { test "simple variadic function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) { if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }
@ -112,6 +111,12 @@ test "simple variadic function" {
return @cVaArg(&ap, c_int); return @cVaArg(&ap, c_int);
} }
fn compatible(_: c_int, ...) callconv(.C) c_int {
var ap = @cVaStart();
defer @cVaEnd(&ap);
return @cVaArg(&ap, c_int);
}
fn add(count: c_int, ...) callconv(.C) c_int { fn add(count: c_int, ...) callconv(.C) c_int {
var ap = @cVaStart(); var ap = @cVaStart();
defer @cVaEnd(&ap); defer @cVaEnd(&ap);
@ -124,8 +129,13 @@ test "simple variadic function" {
} }
}; };
try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0))); if (builtin.zig_backend != .stage2_c) {
try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024))); // pre C23 doesn't support varargs without a preceding runtime arg.
try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0)));
try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024)));
}
try std.testing.expectEqual(@as(c_int, 0), S.compatible(undefined, @as(c_int, 0)));
try std.testing.expectEqual(@as(c_int, 1024), S.compatible(undefined, @as(c_int, 1024)));
try std.testing.expectEqual(@as(c_int, 0), S.add(0)); try std.testing.expectEqual(@as(c_int, 0), S.add(0));
try std.testing.expectEqual(@as(c_int, 1), S.add(1, @as(c_int, 1))); try std.testing.expectEqual(@as(c_int, 1), S.add(1, @as(c_int, 1)));
try std.testing.expectEqual(@as(c_int, 3), S.add(2, @as(c_int, 1), @as(c_int, 2))); try std.testing.expectEqual(@as(c_int, 3), S.add(2, @as(c_int, 1), @as(c_int, 2)));
@ -134,10 +144,9 @@ test "simple variadic function" {
test "variadic functions" { test "variadic functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) { if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }
@ -178,10 +187,9 @@ test "variadic functions" {
test "copy VaList" { test "copy VaList" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) { if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) {
// https://github.com/ziglang/zig/issues/14096 // https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest; return error.SkipZigTest;
} }

View File

@ -75,7 +75,6 @@ test "vector int operators" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct { const S = struct {
@ -178,7 +177,6 @@ test "tuple to vector" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) { if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
@ -943,7 +941,6 @@ test "multiplication-assignment operator with an array operand" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct { const S = struct {
@ -1247,10 +1244,10 @@ test "array operands to shuffle are coerced to vectors" {
test "load packed vector element" { test "load packed vector element" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var x: @Vector(2, u15) = .{ 1, 4 }; var x: @Vector(2, u15) = .{ 1, 4 };
try expect((&x[0]).* == 1); try expect((&x[0]).* == 1);
@ -1260,10 +1257,10 @@ test "load packed vector element" {
test "store packed vector element" { test "store packed vector element" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var v = @Vector(4, u1){ 1, 1, 1, 1 }; var v = @Vector(4, u1){ 1, 1, 1, 1 };
try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v); try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v);

View File

@ -7,4 +7,4 @@ export fn entry(byte: u8) void {
// backend=stage2 // backend=stage2
// target=native // target=native
// //
// :2:29: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits // :2:16: error: @bitCast size mismatch: destination type 'u7' has 7 bits but source type 'u8' has 8 bits

View File

@ -7,4 +7,4 @@ export fn entry() void {
// backend=stage2 // backend=stage2
// target=native // target=native
// //
// :2:29: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits // :2:16: error: @bitCast size mismatch: destination type 'u8' has 8 bits but source type 'f32' has 32 bits

View File

@ -0,0 +1,31 @@
const std = @import("std");
const Error = error{InvalidCharacter};
const Direction = enum { upside_down };
const Barrrr = union(enum) {
float: f64,
direction: Direction,
};
fn fooey(bar: std.meta.Tag(Barrrr), args: []const []const u8) !Barrrr {
return switch (bar) {
.float => .{ .float = try std.fmt.parseFloat(f64, args[0]) },
.direction => if (std.mem.eql(u8, args[0], "upside_down"))
Barrrr{ .direction = .upside_down }
else
error.InvalidDirection,
};
}
pub fn main() Error!void {
std.debug.print("{}", .{try fooey(.direction, &[_][]const u8{ "one", "two", "three" })});
}
// error
// backend=llvm
// target=native
//
// :23:29: error: expected type 'error{InvalidCharacter}', found '@typeInfo(@typeInfo(@TypeOf(tmp.fooey)).Fn.return_type.?).ErrorUnion.error_set'
// :23:29: note: 'error.InvalidDirection' not a member of destination error set

View File

@ -7,4 +7,4 @@ export fn foo(a: *i32) *Foo {
// backend=llvm // backend=llvm
// target=native // target=native
// //
// :3:28: error: expected struct type, found 'i32' // :3:28: error: expected struct or union type, found 'i32'

View File

@ -288,4 +288,26 @@ pub fn addCases(ctx: *TestContext) !void {
//, &[_][]const u8{ //, &[_][]const u8{
// "tmp.zig:4:1: error: unable to inline function", // "tmp.zig:4:1: error: unable to inline function",
//}); //});
{
const case = ctx.obj("file in multiple modules", .{});
case.backend = .stage2;
case.addSourceFile("foo.zig",
\\const dummy = 0;
);
case.addDepModule("foo", "foo.zig");
case.addError(
\\comptime {
\\ _ = @import("foo");
\\ _ = @import("foo.zig");
\\}
, &[_][]const u8{
":1:1: error: file exists in multiple modules",
":1:1: note: root of module root.foo",
":3:17: note: imported from module root",
});
}
} }

View File

@ -29,7 +29,7 @@ pub fn build(b: *std.Build) void {
exe.dead_strip_dylibs = true; exe.dead_strip_dylibs = true;
const run_cmd = exe.run(); const run_cmd = exe.run();
run_cmd.expected_exit_code = @bitCast(u8, @as(i8, -2)); // should fail run_cmd.expected_term = .{ .Exited = @bitCast(u8, @as(i8, -2)) }; // should fail
test_step.dependOn(&run_cmd.step); test_step.dependOn(&run_cmd.step);
} }
} }

View File

@ -168,7 +168,7 @@ pub const CompareOutputContext = struct {
run.addArgs(case.cli_args); run.addArgs(case.cli_args);
run.stderr_action = .ignore; run.stderr_action = .ignore;
run.stdout_action = .ignore; run.stdout_action = .ignore;
run.expected_exit_code = 126; run.expected_term = .{ .Exited = 126 };
self.step.dependOn(&run.step); self.step.dependOn(&run.step);
}, },

View File

@ -959,7 +959,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = a; \\ _ = a;
\\} \\}
, ,
\\zig_extern void start(zig_u8 const a0); \\zig_extern void start(uint8_t const a0);
\\ \\
); );
ctx.h("header with multiple param function", linux_x64, ctx.h("header with multiple param function", linux_x64,
@ -967,19 +967,19 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = a; _ = b; _ = c; \\ _ = a; _ = b; _ = c;
\\} \\}
, ,
\\zig_extern void start(zig_u8 const a0, zig_u8 const a1, zig_u8 const a2); \\zig_extern void start(uint8_t const a0, uint8_t const a1, uint8_t const a2);
\\ \\
); );
ctx.h("header with u32 param function", linux_x64, ctx.h("header with u32 param function", linux_x64,
\\export fn start(a: u32) void{ _ = a; } \\export fn start(a: u32) void{ _ = a; }
, ,
\\zig_extern void start(zig_u32 const a0); \\zig_extern void start(uint32_t const a0);
\\ \\
); );
ctx.h("header with usize param function", linux_x64, ctx.h("header with usize param function", linux_x64,
\\export fn start(a: usize) void{ _ = a; } \\export fn start(a: usize) void{ _ = a; }
, ,
\\zig_extern void start(zig_usize const a0); \\zig_extern void start(uintptr_t const a0);
\\ \\
); );
ctx.h("header with bool param function", linux_x64, ctx.h("header with bool param function", linux_x64,
@ -993,7 +993,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ unreachable; \\ unreachable;
\\} \\}
, ,
\\zig_extern zig_noreturn start(void); \\zig_extern zig_noreturn void start(void);
\\ \\
); );
ctx.h("header with multiple functions", linux_x64, ctx.h("header with multiple functions", linux_x64,
@ -1009,7 +1009,7 @@ pub fn addCases(ctx: *TestContext) !void {
ctx.h("header with multiple includes", linux_x64, ctx.h("header with multiple includes", linux_x64,
\\export fn start(a: u32, b: usize) void{ _ = a; _ = b; } \\export fn start(a: u32, b: usize) void{ _ = a; _ = b; }
, ,
\\zig_extern void start(zig_u32 const a0, zig_usize const a1); \\zig_extern void start(uint32_t const a0, uintptr_t const a1);
\\ \\
); );
} }

View File

@ -97,6 +97,7 @@ pub fn addPtx(
.updates = std.ArrayList(TestContext.Update).init(ctx.cases.allocator), .updates = std.ArrayList(TestContext.Update).init(ctx.cases.allocator),
.output_mode = .Obj, .output_mode = .Obj,
.files = std.ArrayList(TestContext.File).init(ctx.cases.allocator), .files = std.ArrayList(TestContext.File).init(ctx.cases.allocator),
.deps = std.ArrayList(TestContext.DepModule).init(ctx.cases.allocator),
.link_libc = false, .link_libc = false,
.backend = .llvm, .backend = .llvm,
// Bug in Debug mode // Bug in Debug mode

View File

@ -84,6 +84,9 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/standalone/pie/build.zig", .{}); cases.addBuildFile("test/standalone/pie/build.zig", .{});
} }
cases.addBuildFile("test/standalone/issue_12706/build.zig", .{}); cases.addBuildFile("test/standalone/issue_12706/build.zig", .{});
if (std.os.have_sigpipe_support) {
cases.addBuildFile("test/standalone/sigpipe/build.zig", .{});
}
// Ensure the development tools are buildable. Alphabetically sorted. // Ensure the development tools are buildable. Alphabetically sorted.
// No need to build `tools/spirv/grammar.zig`. // No need to build `tools/spirv/grammar.zig`.
@ -104,4 +107,10 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
cases.addBuildFile("test/standalone/emit_asm_and_bin/build.zig", .{}); cases.addBuildFile("test/standalone/emit_asm_and_bin/build.zig", .{});
cases.addBuildFile("test/standalone/issue_12588/build.zig", .{}); cases.addBuildFile("test/standalone/issue_12588/build.zig", .{});
cases.addBuildFile("test/standalone/embed_generated_file/build.zig", .{}); cases.addBuildFile("test/standalone/embed_generated_file/build.zig", .{});
cases.addBuildFile("test/standalone/dep_diamond/build.zig", .{});
cases.addBuildFile("test/standalone/dep_triangle/build.zig", .{});
cases.addBuildFile("test/standalone/dep_recursive/build.zig", .{});
cases.addBuildFile("test/standalone/dep_mutually_recursive/build.zig", .{});
cases.addBuildFile("test/standalone/dep_shared_builtin/build.zig", .{});
} }

View File

@ -0,0 +1 @@
pub const shared = @import("shared");

Some files were not shown because too many files have changed in this diff Show More