Merge remote-tracking branch 'origin/master' into wrangle-writer-buffering

This commit is contained in:
Andrew Kelley 2025-07-14 23:49:34 -07:00
commit ac8f757cb3
125 changed files with 17264 additions and 13809 deletions

View File

@ -50,6 +50,24 @@ jobs:
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/aarch64-linux-release.sh
riscv64-linux-debug:
if: ${{ github.event_name == 'push' }}
timeout-minutes: 1020
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-debug.sh
riscv64-linux-release:
if: ${{ github.event_name == 'push' }}
timeout-minutes: 900
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-release.sh
x86_64-macos-release:
runs-on: "macos-13"
env:

58
ci/riscv64-linux-debug.sh Executable file
View File

@ -0,0 +1,58 @@
#!/bin/sh
# CI entry point for the riscv64-linux Debug job: cross-builds a Debug stage3
# zig compiler with a prebuilt zig+llvm+lld+clang toolchain, then runs the
# test suites with the freshly built compiler.
# Requires cmake ninja-build
# Echo every command and abort on the first failure.
set -x
set -e
# Build for the runner's native architecture with musl libc.
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
# CPU model passed via -mcpu to zig cc/c++ and to the cmake build below.
MCPU="spacemit_x60"
# Prebuilt toolchain bundle, expected to be unpacked under $HOME/deps.
CACHE_BASENAME="zig+llvm+lld+clang-riscv64-linux-musl-0.15.0-dev.929+31e46be74"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
export PATH="$HOME/local/bin:$PATH"
# Make the `zig version` number consistent.
# This will affect the cmake command below.
git fetch --unshallow || true
git fetch --tags
# Override the cache directories because they won't actually help other CI runs
# which will be testing alternate versions of zig, and ultimately would just
# fill up space on the hard drive for no reason.
export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"
# Out-of-tree configure/build. Plain mkdir (no -p) fails if build-debug
# already exists, surfacing a dirty workspace instead of reusing it.
mkdir build-debug
cd build-debug
# Use the prebuilt zig itself as the C/C++ cross compiler for the cmake build.
export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"
cmake .. \
-DCMAKE_INSTALL_PREFIX="stage3-debug" \
-DCMAKE_PREFIX_PATH="$PREFIX" \
-DCMAKE_BUILD_TYPE=Debug \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
-DZIG_STATIC=ON \
-DZIG_NO_LIB=ON \
-GNinja
# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.
unset CC
unset CXX
ninja install
# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
# Run the test suites with the stage3 Debug compiler; --maxrss caps peak
# memory at 32 GiB (34359738368 bytes).
stage3-debug/bin/zig build test-cases test-modules test-unit test-standalone test-c-abi test-link test-stack-traces test-asm-link test-llvm-ir docs \
--maxrss 34359738368 \
-Dstatic-llvm \
-Dskip-non-native \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"

58
ci/riscv64-linux-release.sh Executable file
View File

@ -0,0 +1,58 @@
#!/bin/sh
# CI entry point for the riscv64-linux Release job: cross-builds a Release
# stage3 zig compiler with a prebuilt zig+llvm+lld+clang toolchain, then runs
# the test suites with the freshly built compiler.
# Requires cmake ninja-build
# Echo every command and abort on the first failure.
set -x
set -e
# Build for the runner's native architecture with musl libc.
ARCH="$(uname -m)"
TARGET="$ARCH-linux-musl"
# CPU model passed via -mcpu to zig cc/c++ and to the cmake build below.
MCPU="spacemit_x60"
# Prebuilt toolchain bundle, expected to be unpacked under $HOME/deps.
CACHE_BASENAME="zig+llvm+lld+clang-riscv64-linux-musl-0.15.0-dev.929+31e46be74"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
export PATH="$HOME/local/bin:$PATH"
# Make the `zig version` number consistent.
# This will affect the cmake command below.
git fetch --unshallow || true
git fetch --tags
# Override the cache directories because they won't actually help other CI runs
# which will be testing alternate versions of zig, and ultimately would just
# fill up space on the hard drive for no reason.
export ZIG_GLOBAL_CACHE_DIR="$PWD/zig-global-cache"
export ZIG_LOCAL_CACHE_DIR="$PWD/zig-local-cache"
# Out-of-tree configure/build. Plain mkdir (no -p) fails if build-release
# already exists, surfacing a dirty workspace instead of reusing it.
mkdir build-release
cd build-release
# Use the prebuilt zig itself as the C/C++ cross compiler for the cmake build.
export CC="$ZIG cc -target $TARGET -mcpu=$MCPU"
export CXX="$ZIG c++ -target $TARGET -mcpu=$MCPU"
cmake .. \
-DCMAKE_INSTALL_PREFIX="stage3-release" \
-DCMAKE_PREFIX_PATH="$PREFIX" \
-DCMAKE_BUILD_TYPE=Release \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
-DZIG_STATIC=ON \
-DZIG_NO_LIB=ON \
-GNinja
# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.
unset CC
unset CXX
ninja install
# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
# Run the test suites with the stage3 Release compiler; --maxrss caps peak
# memory at 32 GiB (34359738368 bytes).
stage3-release/bin/zig build test-cases test-modules test-unit test-standalone test-c-abi test-link test-stack-traces test-asm-link test-llvm-ir docs \
--maxrss 34359738368 \
-Dstatic-llvm \
-Dskip-non-native \
-Dtarget=native-native-musl \
--search-prefix "$PREFIX" \
--zig-lib-dir "$PWD/../lib"

View File

@ -1,7 +1,8 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
const lib = b.addSharedLibrary(.{
const lib = b.addLibrary(.{
.linkage = .dynamic,
.name = "mathtest",
.root_source_file = b.path("mathtest.zig"),
.version = .{ .major = 1, .minor = 0, .patch = 0 },

View File

@ -3,7 +3,7 @@ const builtin = @import("builtin");
const native_arch = builtin.cpu.arch;
const expect = std.testing.expect;
const WINAPI: std.builtin.CallingConvention = if (native_arch == .x86) .Stdcall else .C;
const WINAPI: std.builtin.CallingConvention = if (native_arch == .x86) .{ .x86_stdcall = .{} } else .c;
extern "kernel32" fn ExitProcess(exit_code: c_uint) callconv(WINAPI) noreturn;
test "foo" {

View File

@ -8259,7 +8259,7 @@ fn charLiteral(p: *Parser) Error!Result {
const slice = char_kind.contentSlice(p.tokSlice(p.tok_i));
var is_multichar = false;
if (slice.len == 1 and std.ascii.isASCII(slice[0])) {
if (slice.len == 1 and std.ascii.isAscii(slice[0])) {
// fast path: single unescaped ASCII char
val = slice[0];
} else {

View File

@ -1820,7 +1820,7 @@ pub fn main() !void {
var tree = translate(gpa, &aro_comp, args) catch |err| switch (err) {
error.ParsingFailed, error.FatalError => renderErrorsAndExit(&aro_comp),
error.OutOfMemory => return error.OutOfMemory,
error.StreamTooLong => std.zig.fatal("An input file was larger than 4GiB", .{}),
error.StreamTooLong => std.process.fatal("An input file was larger than 4GiB", .{}),
};
defer tree.deinit(gpa);

View File

@ -7,7 +7,7 @@ const Allocator = std.mem.Allocator;
const File = std.fs.File;
const assert = std.debug.assert;
const fatal = std.zig.fatal;
const fatal = std.process.fatal;
const Server = std.zig.Server;
pub fn main() !void {

View File

@ -693,6 +693,7 @@ pub fn addOptions(b: *Build) *Step.Options {
pub const ExecutableOptions = struct {
name: []const u8,
root_module: *Module,
version: ?std.SemanticVersion = null,
linkage: ?std.builtin.LinkMode = null,
max_rss: usize = 0,
@ -705,58 +706,12 @@ pub const ExecutableOptions = struct {
/// Can be set regardless of target. The `.manifest` file will be ignored
/// if the target object format does not support embedded manifests.
win32_manifest: ?LazyPath = null,
/// Prefer populating this field (using e.g. `createModule`) instead of populating
/// the following fields (`root_source_file` etc). In a future release, those fields
/// will be removed, and this field will become non-optional.
root_module: ?*Module = null,
/// Deprecated; prefer populating `root_module`.
root_source_file: ?LazyPath = null,
/// Deprecated; prefer populating `root_module`.
target: ?ResolvedTarget = null,
/// Deprecated; prefer populating `root_module`.
optimize: std.builtin.OptimizeMode = .Debug,
/// Deprecated; prefer populating `root_module`.
code_model: std.builtin.CodeModel = .default,
/// Deprecated; prefer populating `root_module`.
link_libc: ?bool = null,
/// Deprecated; prefer populating `root_module`.
single_threaded: ?bool = null,
/// Deprecated; prefer populating `root_module`.
pic: ?bool = null,
/// Deprecated; prefer populating `root_module`.
strip: ?bool = null,
/// Deprecated; prefer populating `root_module`.
unwind_tables: ?std.builtin.UnwindTables = null,
/// Deprecated; prefer populating `root_module`.
omit_frame_pointer: ?bool = null,
/// Deprecated; prefer populating `root_module`.
sanitize_thread: ?bool = null,
/// Deprecated; prefer populating `root_module`.
error_tracing: ?bool = null,
};
pub fn addExecutable(b: *Build, options: ExecutableOptions) *Step.Compile {
if (options.root_module != null and options.target != null) {
@panic("`root_module` and `target` cannot both be populated");
}
return .create(b, .{
.name = options.name,
.root_module = options.root_module orelse b.createModule(.{
.root_source_file = options.root_source_file,
.target = options.target orelse @panic("`root_module` and `target` cannot both be null"),
.optimize = options.optimize,
.link_libc = options.link_libc,
.single_threaded = options.single_threaded,
.pic = options.pic,
.strip = options.strip,
.unwind_tables = options.unwind_tables,
.omit_frame_pointer = options.omit_frame_pointer,
.sanitize_thread = options.sanitize_thread,
.error_tracing = options.error_tracing,
.code_model = options.code_model,
}),
.root_module = options.root_module,
.version = options.version,
.kind = .exe,
.linkage = options.linkage,
@ -770,62 +725,17 @@ pub fn addExecutable(b: *Build, options: ExecutableOptions) *Step.Compile {
pub const ObjectOptions = struct {
name: []const u8,
root_module: *Module,
max_rss: usize = 0,
use_llvm: ?bool = null,
use_lld: ?bool = null,
zig_lib_dir: ?LazyPath = null,
/// Prefer populating this field (using e.g. `createModule`) instead of populating
/// the following fields (`root_source_file` etc). In a future release, those fields
/// will be removed, and this field will become non-optional.
root_module: ?*Module = null,
/// Deprecated; prefer populating `root_module`.
root_source_file: ?LazyPath = null,
/// Deprecated; prefer populating `root_module`.
target: ?ResolvedTarget = null,
/// Deprecated; prefer populating `root_module`.
optimize: std.builtin.OptimizeMode = .Debug,
/// Deprecated; prefer populating `root_module`.
code_model: std.builtin.CodeModel = .default,
/// Deprecated; prefer populating `root_module`.
link_libc: ?bool = null,
/// Deprecated; prefer populating `root_module`.
single_threaded: ?bool = null,
/// Deprecated; prefer populating `root_module`.
pic: ?bool = null,
/// Deprecated; prefer populating `root_module`.
strip: ?bool = null,
/// Deprecated; prefer populating `root_module`.
unwind_tables: ?std.builtin.UnwindTables = null,
/// Deprecated; prefer populating `root_module`.
omit_frame_pointer: ?bool = null,
/// Deprecated; prefer populating `root_module`.
sanitize_thread: ?bool = null,
/// Deprecated; prefer populating `root_module`.
error_tracing: ?bool = null,
};
pub fn addObject(b: *Build, options: ObjectOptions) *Step.Compile {
if (options.root_module != null and options.target != null) {
@panic("`root_module` and `target` cannot both be populated");
}
return .create(b, .{
.name = options.name,
.root_module = options.root_module orelse b.createModule(.{
.root_source_file = options.root_source_file,
.target = options.target orelse @panic("`root_module` and `target` cannot both be null"),
.optimize = options.optimize,
.link_libc = options.link_libc,
.single_threaded = options.single_threaded,
.pic = options.pic,
.strip = options.strip,
.unwind_tables = options.unwind_tables,
.omit_frame_pointer = options.omit_frame_pointer,
.sanitize_thread = options.sanitize_thread,
.error_tracing = options.error_tracing,
.code_model = options.code_model,
}),
.root_module = options.root_module,
.kind = .obj,
.max_rss = options.max_rss,
.use_llvm = options.use_llvm,
@ -834,153 +744,6 @@ pub fn addObject(b: *Build, options: ObjectOptions) *Step.Compile {
});
}
pub const SharedLibraryOptions = struct {
name: []const u8,
version: ?std.SemanticVersion = null,
max_rss: usize = 0,
use_llvm: ?bool = null,
use_lld: ?bool = null,
zig_lib_dir: ?LazyPath = null,
/// Embed a `.manifest` file in the compilation if the object format supports it.
/// https://learn.microsoft.com/en-us/windows/win32/sbscs/manifest-files-reference
/// Manifest files must have the extension `.manifest`.
/// Can be set regardless of target. The `.manifest` file will be ignored
/// if the target object format does not support embedded manifests.
win32_manifest: ?LazyPath = null,
/// Prefer populating this field (using e.g. `createModule`) instead of populating
/// the following fields (`root_source_file` etc). In a future release, those fields
/// will be removed, and this field will become non-optional.
root_module: ?*Module = null,
/// Deprecated; prefer populating `root_module`.
root_source_file: ?LazyPath = null,
/// Deprecated; prefer populating `root_module`.
target: ?ResolvedTarget = null,
/// Deprecated; prefer populating `root_module`.
optimize: std.builtin.OptimizeMode = .Debug,
/// Deprecated; prefer populating `root_module`.
code_model: std.builtin.CodeModel = .default,
/// Deprecated; prefer populating `root_module`.
link_libc: ?bool = null,
/// Deprecated; prefer populating `root_module`.
single_threaded: ?bool = null,
/// Deprecated; prefer populating `root_module`.
pic: ?bool = null,
/// Deprecated; prefer populating `root_module`.
strip: ?bool = null,
/// Deprecated; prefer populating `root_module`.
unwind_tables: ?std.builtin.UnwindTables = null,
/// Deprecated; prefer populating `root_module`.
omit_frame_pointer: ?bool = null,
/// Deprecated; prefer populating `root_module`.
sanitize_thread: ?bool = null,
/// Deprecated; prefer populating `root_module`.
error_tracing: ?bool = null,
};
/// Deprecated: use `b.addLibrary(.{ ..., .linkage = .dynamic })` instead.
pub fn addSharedLibrary(b: *Build, options: SharedLibraryOptions) *Step.Compile {
if (options.root_module != null and options.target != null) {
@panic("`root_module` and `target` cannot both be populated");
}
return .create(b, .{
.name = options.name,
.root_module = options.root_module orelse b.createModule(.{
.target = options.target orelse @panic("`root_module` and `target` cannot both be null"),
.optimize = options.optimize,
.root_source_file = options.root_source_file,
.link_libc = options.link_libc,
.single_threaded = options.single_threaded,
.pic = options.pic,
.strip = options.strip,
.unwind_tables = options.unwind_tables,
.omit_frame_pointer = options.omit_frame_pointer,
.sanitize_thread = options.sanitize_thread,
.error_tracing = options.error_tracing,
.code_model = options.code_model,
}),
.kind = .lib,
.linkage = .dynamic,
.version = options.version,
.max_rss = options.max_rss,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.zig_lib_dir = options.zig_lib_dir,
.win32_manifest = options.win32_manifest,
});
}
pub const StaticLibraryOptions = struct {
name: []const u8,
version: ?std.SemanticVersion = null,
max_rss: usize = 0,
use_llvm: ?bool = null,
use_lld: ?bool = null,
zig_lib_dir: ?LazyPath = null,
/// Prefer populating this field (using e.g. `createModule`) instead of populating
/// the following fields (`root_source_file` etc). In a future release, those fields
/// will be removed, and this field will become non-optional.
root_module: ?*Module = null,
/// Deprecated; prefer populating `root_module`.
root_source_file: ?LazyPath = null,
/// Deprecated; prefer populating `root_module`.
target: ?ResolvedTarget = null,
/// Deprecated; prefer populating `root_module`.
optimize: std.builtin.OptimizeMode = .Debug,
/// Deprecated; prefer populating `root_module`.
code_model: std.builtin.CodeModel = .default,
/// Deprecated; prefer populating `root_module`.
link_libc: ?bool = null,
/// Deprecated; prefer populating `root_module`.
single_threaded: ?bool = null,
/// Deprecated; prefer populating `root_module`.
pic: ?bool = null,
/// Deprecated; prefer populating `root_module`.
strip: ?bool = null,
/// Deprecated; prefer populating `root_module`.
unwind_tables: ?std.builtin.UnwindTables = null,
/// Deprecated; prefer populating `root_module`.
omit_frame_pointer: ?bool = null,
/// Deprecated; prefer populating `root_module`.
sanitize_thread: ?bool = null,
/// Deprecated; prefer populating `root_module`.
error_tracing: ?bool = null,
};
/// Deprecated: use `b.addLibrary(.{ ..., .linkage = .static })` instead.
pub fn addStaticLibrary(b: *Build, options: StaticLibraryOptions) *Step.Compile {
if (options.root_module != null and options.target != null) {
@panic("`root_module` and `target` cannot both be populated");
}
return .create(b, .{
.name = options.name,
.root_module = options.root_module orelse b.createModule(.{
.target = options.target orelse @panic("`root_module` and `target` cannot both be null"),
.optimize = options.optimize,
.root_source_file = options.root_source_file,
.link_libc = options.link_libc,
.single_threaded = options.single_threaded,
.pic = options.pic,
.strip = options.strip,
.unwind_tables = options.unwind_tables,
.omit_frame_pointer = options.omit_frame_pointer,
.sanitize_thread = options.sanitize_thread,
.error_tracing = options.error_tracing,
.code_model = options.code_model,
}),
.kind = .lib,
.linkage = .static,
.version = options.version,
.max_rss = options.max_rss,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
.zig_lib_dir = options.zig_lib_dir,
});
}
pub const LibraryOptions = struct {
linkage: std.builtin.LinkMode = .static,
name: []const u8,
@ -1015,9 +778,8 @@ pub fn addLibrary(b: *Build, options: LibraryOptions) *Step.Compile {
pub const TestOptions = struct {
name: []const u8 = "test",
root_module: *Module,
max_rss: usize = 0,
/// Deprecated; use `.filters = &.{filter}` instead of `.filter = filter`.
filter: ?[]const u8 = null,
filters: []const []const u8 = &.{},
test_runner: ?Step.Compile.TestRunner = null,
use_llvm: ?bool = null,
@ -1027,38 +789,6 @@ pub const TestOptions = struct {
/// The object must be linked separately.
/// Usually used in conjunction with a custom `test_runner`.
emit_object: bool = false,
/// Prefer populating this field (using e.g. `createModule`) instead of populating
/// the following fields (`root_source_file` etc). In a future release, those fields
/// will be removed, and this field will become non-optional.
root_module: ?*Module = null,
/// Deprecated; prefer populating `root_module`.
root_source_file: ?LazyPath = null,
/// Deprecated; prefer populating `root_module`.
target: ?ResolvedTarget = null,
/// Deprecated; prefer populating `root_module`.
optimize: std.builtin.OptimizeMode = .Debug,
/// Deprecated; prefer populating `root_module`.
version: ?std.SemanticVersion = null,
/// Deprecated; prefer populating `root_module`.
link_libc: ?bool = null,
/// Deprecated; prefer populating `root_module`.
link_libcpp: ?bool = null,
/// Deprecated; prefer populating `root_module`.
single_threaded: ?bool = null,
/// Deprecated; prefer populating `root_module`.
pic: ?bool = null,
/// Deprecated; prefer populating `root_module`.
strip: ?bool = null,
/// Deprecated; prefer populating `root_module`.
unwind_tables: ?std.builtin.UnwindTables = null,
/// Deprecated; prefer populating `root_module`.
omit_frame_pointer: ?bool = null,
/// Deprecated; prefer populating `root_module`.
sanitize_thread: ?bool = null,
/// Deprecated; prefer populating `root_module`.
error_tracing: ?bool = null,
};
/// Creates an executable containing unit tests.
@ -1070,33 +800,12 @@ pub const TestOptions = struct {
/// two steps are separated because they are independently configured and
/// cached.
pub fn addTest(b: *Build, options: TestOptions) *Step.Compile {
if (options.root_module != null and options.root_source_file != null) {
@panic("`root_module` and `root_source_file` cannot both be populated");
}
return .create(b, .{
.name = options.name,
.kind = if (options.emit_object) .test_obj else .@"test",
.root_module = options.root_module orelse b.createModule(.{
.root_source_file = options.root_source_file orelse @panic("`root_module` and `root_source_file` cannot both be null"),
.target = options.target orelse b.graph.host,
.optimize = options.optimize,
.link_libc = options.link_libc,
.link_libcpp = options.link_libcpp,
.single_threaded = options.single_threaded,
.pic = options.pic,
.strip = options.strip,
.unwind_tables = options.unwind_tables,
.omit_frame_pointer = options.omit_frame_pointer,
.sanitize_thread = options.sanitize_thread,
.error_tracing = options.error_tracing,
}),
.root_module = options.root_module,
.max_rss = options.max_rss,
.filters = if (options.filter != null and options.filters.len > 0) filters: {
const filters = b.allocator.alloc([]const u8, 1 + options.filters.len) catch @panic("OOM");
filters[0] = b.dupe(options.filter.?);
for (filters[1..], options.filters) |*dest, source| dest.* = b.dupe(source);
break :filters filters;
} else b.dupeStrings(if (options.filter) |filter| &.{filter} else options.filters),
.filters = b.dupeStrings(options.filters),
.test_runner = options.test_runner,
.use_llvm = options.use_llvm,
.use_lld = options.use_lld,
@ -1115,22 +824,6 @@ pub const AssemblyOptions = struct {
zig_lib_dir: ?LazyPath = null,
};
/// Deprecated; prefer using `addObject` where the `root_module` has an empty
/// `root_source_file` and contains an assembly file via `Module.addAssemblyFile`.
pub fn addAssembly(b: *Build, options: AssemblyOptions) *Step.Compile {
const root_module = b.createModule(.{
.target = options.target,
.optimize = options.optimize,
});
root_module.addAssemblyFile(options.source_file);
return b.addObject(.{
.name = options.name,
.max_rss = options.max_rss,
.zig_lib_dir = options.zig_lib_dir,
.root_module = root_module,
});
}
/// This function creates a module and adds it to the package's module set, making
/// it available to other packages which depend on this one.
/// `createModule` can be used instead to create a private module.

View File

@ -661,7 +661,8 @@ pub const Manifest = struct {
} {
const gpa = self.cache.gpa;
const input_file_count = self.files.entries.len;
var manifest_reader = self.manifest_file.?.reader(&.{}); // Reads positionally from zero.
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var manifest_reader = self.manifest_file.?.reader(&tiny_buffer); // Reads positionally from zero.
const limit: std.io.Limit = .limited(manifest_file_size_max);
const file_contents = manifest_reader.interface.allocRemaining(gpa, limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@ -1326,7 +1327,7 @@ test "cache file and then recall it" {
// Wait for file timestamps to tick
const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
std.Thread.sleep(1);
}
var digest1: HexDigest = undefined;
@ -1389,7 +1390,7 @@ test "check that changing a file makes cache fail" {
// Wait for file timestamps to tick
const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
std.Thread.sleep(1);
}
var digest1: HexDigest = undefined;
@ -1501,7 +1502,7 @@ test "Manifest with files added after initial hash work" {
// Wait for file timestamps to tick
const initial_time = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time) {
std.time.sleep(1);
std.Thread.sleep(1);
}
var digest1: HexDigest = undefined;
@ -1551,7 +1552,7 @@ test "Manifest with files added after initial hash work" {
// Wait for file timestamps to tick
const initial_time2 = try testGetCurrentFileTimestamp(tmp.dir);
while ((try testGetCurrentFileTimestamp(tmp.dir)) == initial_time2) {
std.time.sleep(1);
std.Thread.sleep(1);
}
{

View File

@ -8,9 +8,6 @@ pub const Style = union(enum) {
/// A configure format supported by autotools that uses `#undef foo` to
/// mark lines that can be substituted with different values.
autoconf_undef: std.Build.LazyPath,
/// Deprecated. Renamed to `autoconf_undef`.
/// To be removed after 0.14.0 is tagged.
autoconf: std.Build.LazyPath,
/// A configure format supported by autotools that uses `@FOO@` output variables.
autoconf_at: std.Build.LazyPath,
/// The configure format supported by CMake. It uses `@FOO@`, `${}` and
@ -23,7 +20,7 @@ pub const Style = union(enum) {
pub fn getPath(style: Style) ?std.Build.LazyPath {
switch (style) {
.autoconf_undef, .autoconf, .autoconf_at, .cmake => |s| return s,
.autoconf_undef, .autoconf_at, .cmake => |s| return s,
.blank, .nasm => return null,
}
}
@ -205,7 +202,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const asm_generated_line = "; " ++ header_text ++ "\n";
switch (config_header.style) {
.autoconf_undef, .autoconf, .autoconf_at => |file_source| {
.autoconf_undef, .autoconf_at => |file_source| {
try bw.writeAll(c_generated_line);
const src_path = file_source.getPath2(b, step);
const contents = std.fs.cwd().readFileAlloc(src_path, arena, .limited(config_header.max_bytes)) catch |err| {
@ -214,7 +211,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
});
};
switch (config_header.style) {
.autoconf_undef, .autoconf => try render_autoconf_undef(step, contents, bw, config_header.values, src_path),
.autoconf_undef => try render_autoconf_undef(step, contents, bw, config_header.values, src_path),
.autoconf_at => try render_autoconf_at(step, contents, &aw, config_header.values, src_path),
else => unreachable,
}

View File

@ -1083,7 +1083,9 @@ fn runCommand(
var interp_argv = std.ArrayList([]const u8).init(b.allocator);
defer interp_argv.deinit();
const result = spawnChildAndCollect(run, argv, has_side_effects, prog_node, fuzz_context) catch |err| term: {
var env_map = run.env_map orelse &b.graph.env_map;
const result = spawnChildAndCollect(run, argv, env_map, has_side_effects, prog_node, fuzz_context) catch |err| term: {
// InvalidExe: cpu arch mismatch
// FileNotFound: can happen with a wrong dynamic linker path
if (err == error.InvalidExe or err == error.FileNotFound) interpret: {
@ -1116,6 +1118,15 @@ fn runCommand(
if (b.enable_wine) {
try interp_argv.append(bin_name);
try interp_argv.appendSlice(argv);
// Wine's excessive stderr logging is only situationally helpful. Disable it by default, but
// allow the user to override it (e.g. with `WINEDEBUG=err+all`) if desired.
if (env_map.get("WINEDEBUG") == null) {
// We don't own `env_map` at this point, so turn it into a copy before modifying it.
env_map = arena.create(EnvMap) catch @panic("OOM");
env_map.hash_map = try env_map.hash_map.cloneWithAllocator(arena);
try env_map.put("WINEDEBUG", "-all");
}
} else {
return failForeign(run, "-fwine", argv[0], exe);
}
@ -1211,7 +1222,7 @@ fn runCommand(
try Step.handleVerbose2(step.owner, cwd, run.env_map, interp_argv.items);
break :term spawnChildAndCollect(run, interp_argv.items, has_side_effects, prog_node, fuzz_context) catch |e| {
break :term spawnChildAndCollect(run, interp_argv.items, env_map, has_side_effects, prog_node, fuzz_context) catch |e| {
if (!run.failing_to_execute_foreign_is_an_error) return error.MakeSkipped;
return step.fail("unable to spawn interpreter {s}: {s}", .{
@ -1394,6 +1405,7 @@ const ChildProcResult = struct {
fn spawnChildAndCollect(
run: *Run,
argv: []const []const u8,
env_map: *EnvMap,
has_side_effects: bool,
prog_node: std.Progress.Node,
fuzz_context: ?FuzzContext,
@ -1410,7 +1422,7 @@ fn spawnChildAndCollect(
if (run.cwd) |lazy_cwd| {
child.cwd = lazy_cwd.getPath2(b, &run.step);
}
child.env_map = run.env_map orelse &b.graph.env_map;
child.env_map = env_map;
child.request_resource_usage_statistics = true;
child.stdin_behavior = switch (run.stdio) {

View File

@ -63,19 +63,6 @@ pub fn getOutput(translate_c: *TranslateC) std.Build.LazyPath {
return .{ .generated = .{ .file = &translate_c.output_file } };
}
/// Deprecated: use `createModule` or `addModule` with `std.Build.addExecutable` instead.
/// Creates a step to build an executable from the translated source.
pub fn addExecutable(translate_c: *TranslateC, options: AddExecutableOptions) *Step.Compile {
return translate_c.step.owner.addExecutable(.{
.root_source_file = translate_c.getOutput(),
.name = options.name orelse "translated_c",
.version = options.version,
.target = options.target orelse translate_c.target,
.optimize = options.optimize orelse translate_c.optimize,
.linkage = options.linkage,
});
}
/// Creates a module from the translated source and adds it to the package's
/// module set making it available to other packages which depend on this one.
/// `createModule` can be used instead to create a private module.

View File

@ -4,7 +4,7 @@ const Watch = @This();
const Step = std.Build.Step;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.zig.fatal;
const fatal = std.process.fatal;
dir_table: DirTable,
os: Os,

View File

@ -494,6 +494,7 @@ pub fn PollFiles(comptime StreamEnum: type) type {
test {
_ = Reader;
_ = Reader.Limited;
_ = Writer;
_ = @import("Io/test.zig");
}

View File

@ -99,7 +99,7 @@ pub const ShortError = error{
pub const failing: Reader = .{
.vtable = &.{
.read = failingStream,
.stream = failingStream,
.discard = failingDiscard,
},
.buffer = &.{},
@ -245,6 +245,7 @@ pub fn appendRemaining(
list: *std.ArrayListAlignedUnmanaged(u8, alignment),
limit: Limit,
) LimitedAllocError!void {
assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data.
const buffer = r.buffer;
const buffer_contents = buffer[r.seek..r.end];
const copy_len = limit.minInt(buffer_contents.len);
@ -858,8 +859,10 @@ pub fn streamDelimiter(r: *Reader, w: *Writer, delimiter: u8) StreamError!usize
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Returns number of bytes streamed, which may be zero. End of stream can be
/// detected by checking if the next byte in the stream is the delimiter.
/// Returns number of bytes streamed, which may be zero. If the stream reaches
/// the end, the reader buffer will be empty when this function returns.
/// Otherwise, it will have at least one byte buffered, starting with the
/// delimiter.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
@ -1000,6 +1003,18 @@ pub fn fill(r: *Reader, n: usize) Error!void {
@branchHint(.likely);
return;
}
return fillUnbuffered(r, n);
}
/// This internal function is separated from `fill` to encourage optimizers to inline `fill`, hence
/// propagating its `@branchHint` to usage sites. If these functions are combined, `fill` is large
/// enough that LLVM is reluctant to inline it, forcing usages of APIs like `takeInt` to go through
/// an expensive runtime function call just to figure out that the data is, in fact, already in the
/// buffer.
///
/// Missing this optimization can result in wall-clock time for the most affected benchmarks
/// increasing by a factor of 5 or more.
fn fillUnbuffered(r: *Reader, n: usize) Error!void {
if (r.seek + n <= r.buffer.len) while (true) {
const end_cap = r.buffer[r.end..];
var writer: Writer = .fixed(end_cap);
@ -1645,11 +1660,12 @@ test "readAlloc when the backing reader provides one byte at a time" {
}
};
const str = "This is a test";
var tiny_buffer: [1]u8 = undefined;
var one_byte_stream: OneByteReader = .{
.str = str,
.i = 0,
.reader = .{
.buffer = &.{},
.buffer = &tiny_buffer,
.vtable = &.{ .stream = OneByteReader.stream },
.seek = 0,
.end = 0,

View File

@ -25,18 +25,34 @@ pub fn init(reader: *Reader, limit: Limit, buffer: []u8) Limited {
};
}
fn stream(context: ?*anyopaque, w: *Writer, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @alignCast(@ptrCast(context));
fn stream(r: *Reader, w: *Writer, limit: Limit) Reader.StreamError!usize {
const l: *Limited = @fieldParentPtr("interface", r);
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited_reader.read(w, combined_limit);
const n = try l.unlimited.stream(w, combined_limit);
l.remaining = l.remaining.subtract(n).?;
return n;
}
fn discard(context: ?*anyopaque, limit: Limit) Reader.Error!usize {
const l: *Limited = @alignCast(@ptrCast(context));
// Streaming through a Limited reader stops at the limit (4 bytes here) even
// when the caller's own limit (7) would allow more.
test stream {
var source_bytes: [10]u8 = undefined;
@memcpy(&source_bytes, "test bytes");
var source: std.Io.Reader = .fixed(&source_bytes);
var scratch: [1]u8 = undefined;
var limited: std.Io.Reader.Limited = .init(&source, @enumFromInt(4), &scratch);
var out_bytes: [10]u8 = undefined;
var sink: std.Io.Writer = .fixed(&out_bytes);
const n = try limited.interface.stream(&sink, @enumFromInt(7));
try std.testing.expect(n == 4);
try std.testing.expectEqualStrings("test", out_bytes[0..n]);
}
fn discard(r: *Reader, limit: Limit) Reader.Error!usize {
const l: *Limited = @fieldParentPtr("interface", r);
const combined_limit = limit.min(l.remaining);
const n = try l.unlimited_reader.discard(combined_limit);
const n = try l.unlimited.discard(combined_limit);
l.remaining = l.remaining.subtract(n).?;
return n;
}

View File

@ -114,7 +114,10 @@ pub const FileError = error{
/// Writes to `buffer` and returns `error.WriteFailed` when it is full.
pub fn fixed(buffer: []u8) Writer {
return .{
.vtable = &.{ .drain = fixedDrain },
.vtable = &.{
.drain = fixedDrain,
.flush = noopFlush,
},
.buffer = buffer,
};
}
@ -244,6 +247,15 @@ pub fn noopFlush(w: *Writer) Error!void {
_ = w;
}
test "fixed buffer flush" {
    // For a fixed-buffer writer, flush must be a successful no-op once the
    // written byte already resides in the backing buffer.
    var storage: [1]u8 = undefined;
    var w: std.io.Writer = .fixed(&storage);
    try w.writeByte(10);
    try w.flush();
    try testing.expectEqual(10, storage[0]);
}
/// Calls `VTable.drain` but hides the last `preserve_length` bytes from the
/// implementation, keeping them buffered.
pub fn drainPreserve(w: *Writer, preserve_length: usize) Error!void {
@ -381,6 +393,32 @@ pub fn writableVectorPosix(w: *Writer, buffer: []std.posix.iovec, limit: Limit)
return buffer[0..i];
}
/// Fills `buffer` with WSABUF entries describing the writer's writable
/// memory, capped by `limit`, and returns the populated prefix of `buffer`.
///
/// A chunk whose length does not fit in the WSABUF `u32` length field is
/// emitted truncated to `maxInt(u32)` and iteration stops at that point.
pub fn writableVectorWsa(
    w: *Writer,
    buffer: []std.os.windows.ws2_32.WSABUF,
    limit: Limit,
) Error![]std.os.windows.ws2_32.WSABUF {
    var it = try writableVectorIterator(w);
    var count: usize = 0;
    var budget = limit;
    while (it.next()) |full_buffer| {
        // Stop once either the byte budget or the output vector is exhausted.
        if (!budget.nonzero()) break;
        if (count == buffer.len) break;
        const chunk = budget.slice(full_buffer);
        // An empty segment consumes no output slot; move on to the next one.
        if (chunk.len == 0) continue;
        if (std.math.cast(u32, chunk.len)) |len| {
            buffer[count] = .{ .buf = chunk.ptr, .len = len };
            count += 1;
            budget = budget.subtract(len).?;
        } else {
            // Chunk exceeds the u32 WSABUF limit: record the largest
            // representable prefix and end the vector here.
            buffer[count] = .{ .buf = chunk.ptr, .len = std.math.maxInt(u32) };
            count += 1;
            break;
        }
    }
    return buffer[0..count];
}
pub fn ensureUnusedCapacity(w: *Writer, n: usize) Error!void {
_ = try writableSliceGreedy(w, n);
}
@ -2150,7 +2188,7 @@ pub const Discarding = struct {
pub fn drain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
const d: *Discarding = @alignCast(@fieldParentPtr("writer", w));
const slice = data[0 .. data.len - 1];
const pattern = data[slice.len..];
const pattern = data[slice.len];
var written: usize = pattern.len * splat;
for (slice) |bytes| written += bytes.len;
d.count += w.end + written;

View File

@ -58,7 +58,7 @@ pub fn sleep(nanoseconds: u64) void {
const boot_services = std.os.uefi.system_table.boot_services.?;
const us_from_ns = nanoseconds / std.time.ns_per_us;
const us = math.cast(usize, us_from_ns) orelse math.maxInt(usize);
_ = boot_services.stall(us);
boot_services.stall(us) catch unreachable;
return;
}

View File

@ -130,7 +130,7 @@ const SingleThreadedImpl = struct {
unreachable; // deadlock detected
};
std.time.sleep(timeout_ns);
std.Thread.sleep(timeout_ns);
return error.Timeout;
}
@ -348,7 +348,7 @@ test "wait and signal" {
}
while (true) {
std.time.sleep(100 * std.time.ns_per_ms);
std.Thread.sleep(100 * std.time.ns_per_ms);
multi_wait.mutex.lock();
defer multi_wait.mutex.unlock();
@ -405,7 +405,7 @@ test signal {
}
while (true) {
std.time.sleep(10 * std.time.ns_per_ms);
std.Thread.sleep(10 * std.time.ns_per_ms);
signal_test.mutex.lock();
defer signal_test.mutex.unlock();

View File

@ -116,7 +116,7 @@ const SingleThreadedImpl = struct {
unreachable; // deadlock detected
};
std.time.sleep(delay);
std.Thread.sleep(delay);
return error.Timeout;
}

View File

@ -74,7 +74,7 @@ const SingleThreadedImpl = struct {
unreachable; // deadlock detected
};
std.time.sleep(timeout_ns);
std.Thread.sleep(timeout_ns);
return error.Timeout;
}

View File

@ -181,9 +181,6 @@ pub fn isAscii(c: u8) bool {
return c < 128;
}
/// Deprecated: use `isAscii`
pub const isASCII = isAscii;
/// Uppercases the character and returns it as-is if already uppercase or not a letter.
pub fn toUpper(c: u8) u8 {
const mask = @as(u8, @intFromBool(isLower(c))) << 5;

View File

@ -10,8 +10,6 @@ pub fn Value(comptime T: type) type {
return .{ .raw = value };
}
pub const fence = @compileError("@fence is deprecated, use other atomics to establish ordering");
pub inline fn load(self: *const Self, comptime order: AtomicOrder) T {
return @atomicLoad(T, &self.raw, order);
}

View File

@ -156,9 +156,6 @@ pub const OptimizeMode = enum {
ReleaseSmall,
};
/// Deprecated; use OptimizeMode.
pub const Mode = OptimizeMode;
/// The calling convention of a function defines how arguments and return values are passed, as well
/// as any other requirements which callers and callees must respect, such as register preservation
/// and stack alignment.
@ -187,51 +184,6 @@ pub const CallingConvention = union(enum(u8)) {
else => unreachable,
};
/// Deprecated; use `.auto`.
pub const Unspecified: CallingConvention = .auto;
/// Deprecated; use `.c`.
pub const C: CallingConvention = .c;
/// Deprecated; use `.naked`.
pub const Naked: CallingConvention = .naked;
/// Deprecated; use `.@"inline"`.
pub const Inline: CallingConvention = .@"inline";
/// Deprecated; use `.x86_64_interrupt`, `.x86_interrupt`, or `.avr_interrupt`.
pub const Interrupt: CallingConvention = switch (builtin.target.cpu.arch) {
.x86_64 => .{ .x86_64_interrupt = .{} },
.x86 => .{ .x86_interrupt = .{} },
.avr => .avr_interrupt,
else => unreachable,
};
/// Deprecated; use `.avr_signal`.
pub const Signal: CallingConvention = .avr_signal;
/// Deprecated; use `.x86_stdcall`.
pub const Stdcall: CallingConvention = .{ .x86_stdcall = .{} };
/// Deprecated; use `.x86_fastcall`.
pub const Fastcall: CallingConvention = .{ .x86_fastcall = .{} };
/// Deprecated; use `.x86_64_vectorcall`, `.x86_vectorcall`, or `aarch64_vfabi`.
pub const Vectorcall: CallingConvention = switch (builtin.target.cpu.arch) {
.x86_64 => .{ .x86_64_vectorcall = .{} },
.x86 => .{ .x86_vectorcall = .{} },
.aarch64, .aarch64_be => .{ .aarch64_vfabi = .{} },
else => unreachable,
};
/// Deprecated; use `.x86_thiscall`.
pub const Thiscall: CallingConvention = .{ .x86_thiscall = .{} };
/// Deprecated; use `.arm_aapcs`.
pub const AAPCS: CallingConvention = .{ .arm_aapcs = .{} };
/// Deprecated; use `.arm_aapcs_vfp`.
pub const AAPCSVFP: CallingConvention = .{ .arm_aapcs_vfp = .{} };
/// Deprecated; use `.x86_64_sysv`.
pub const SysV: CallingConvention = .{ .x86_64_sysv = .{} };
/// Deprecated; use `.x86_64_win`.
pub const Win64: CallingConvention = .{ .x86_64_win = .{} };
/// Deprecated; use `.kernel`.
pub const Kernel: CallingConvention = .kernel;
/// Deprecated; use `.spirv_fragment`.
pub const Fragment: CallingConvention = .spirv_fragment;
/// Deprecated; use `.spirv_vertex`.
pub const Vertex: CallingConvention = .spirv_vertex;
/// The default Zig calling convention when neither `export` nor `inline` is specified.
/// This calling convention makes no guarantees about stack alignment, registers, etc.
/// It can only be used within this Zig compilation unit.
@ -1119,10 +1071,6 @@ pub const TestFn = struct {
func: *const fn () anyerror!void,
};
/// Deprecated, use the `Panic` namespace instead.
/// To be deleted after 0.14.0 is released.
pub const PanicFn = fn ([]const u8, ?*StackTrace, ?usize) noreturn;
/// This namespace is used by the Zig compiler to emit various kinds of safety
/// panics. These can be overridden by making a public `panic` namespace in the
/// root source file.
@ -1138,9 +1086,6 @@ pub const panic: type = p: {
}
break :p root.panic;
}
if (@hasDecl(root, "Panic")) {
break :p root.Panic; // Deprecated; use `panic` instead.
}
break :p switch (builtin.zig_backend) {
.stage2_powerpc,
.stage2_riscv64,

View File

@ -4110,7 +4110,7 @@ pub const msghdr_const = switch (native_os) {
/// scatter/gather array
iov: [*]const iovec_const,
/// # elements in iov
iovlen: i32,
iovlen: u32,
/// ancillary data
control: ?*const anyopaque,
/// ancillary data buffer len
@ -4122,7 +4122,7 @@ pub const msghdr_const = switch (native_os) {
name: ?*const anyopaque,
namelen: socklen_t,
iov: [*]const iovec,
iovlen: c_int,
iovlen: c_uint,
control: ?*const anyopaque,
controllen: socklen_t,
flags: c_int,
@ -5625,6 +5625,43 @@ pub const MSG = switch (native_os) {
pub const NOSIGNAL = 0x80;
pub const EOR = 0x100;
},
.freebsd => struct {
pub const OOB = 0x00000001;
pub const PEEK = 0x00000002;
pub const DONTROUTE = 0x00000004;
pub const EOR = 0x00000008;
pub const TRUNC = 0x00000010;
pub const CTRUNC = 0x00000020;
pub const WAITALL = 0x00000040;
pub const DONTWAIT = 0x00000080;
pub const EOF = 0x00000100;
pub const NOTIFICATION = 0x00002000;
pub const NBIO = 0x00004000;
pub const COMPAT = 0x00008000;
pub const SOCALLBCK = 0x00010000;
pub const NOSIGNAL = 0x00020000;
pub const CMSG_CLOEXEC = 0x00040000;
pub const WAITFORONE = 0x00080000;
pub const MORETOCOME = 0x00100000;
pub const TLSAPPDATA = 0x00200000;
},
.netbsd => struct {
pub const OOB = 0x0001;
pub const PEEK = 0x0002;
pub const DONTROUTE = 0x0004;
pub const EOR = 0x0008;
pub const TRUNC = 0x0010;
pub const CTRUNC = 0x0020;
pub const WAITALL = 0x0040;
pub const DONTWAIT = 0x0080;
pub const BCAST = 0x0100;
pub const MCAST = 0x0200;
pub const NOSIGNAL = 0x0400;
pub const CMSG_CLOEXEC = 0x0800;
pub const NBIO = 0x1000;
pub const WAITFORONE = 0x2000;
pub const NOTIFICATION = 0x4000;
},
else => void,
};
pub const SOCK = switch (native_os) {
@ -10808,6 +10845,7 @@ pub extern "c" fn if_nametoindex([*:0]const u8) c_int;
pub extern "c" fn getpid() pid_t;
pub extern "c" fn getppid() pid_t;
pub extern "c" fn setsid() pid_t;
/// These are implementation defined but share identical values in at least musl and glibc:
/// - https://git.musl-libc.org/cgit/musl/tree/include/locale.h?id=ab31e9d6a0fa7c5c408856c89df2dfb12c344039#n18

View File

@ -103,7 +103,6 @@ pub const dh = struct {
pub const kem = struct {
pub const kyber_d00 = @import("crypto/ml_kem.zig").d00;
pub const ml_kem = @import("crypto/ml_kem.zig").nist;
pub const ml_kem_01 = @compileError("deprecated: final version of the specification has been published, use ml_kem instead");
};
/// Elliptic-curve arithmetic.
@ -387,7 +386,7 @@ test "issue #4532: no index out of bounds" {
/// Sets a slice to zeroes.
/// Prevents the store from being optimized out.
pub inline fn secureZero(comptime T: type, s: []volatile T) void {
pub fn secureZero(comptime T: type, s: []volatile T) void {
@memset(s, 0);
}
@ -400,20 +399,3 @@ test secureZero {
try std.testing.expectEqualSlices(u8, &a, &b);
}
/// Deprecated in favor of `std.crypto`. To be removed after Zig 0.14.0 is released.
///
/// As a reminder, never use "utils" in a namespace (in any programming language).
/// https://ziglang.org/documentation/0.13.0/#Avoid-Redundancy-in-Names
pub const utils = struct {
/// Deprecated in favor of `std.crypto.secureZero`.
pub const secureZero = std.crypto.secureZero;
/// Deprecated in favor of `std.crypto.timing_safe.eql`.
pub const timingSafeEql = timing_safe.eql;
/// Deprecated in favor of `std.crypto.timing_safe.compare`.
pub const timingSafeCompare = timing_safe.compare;
/// Deprecated in favor of `std.crypto.timing_safe.add`.
pub const timingSafeAdd = timing_safe.add;
/// Deprecated in favor of `std.crypto.timing_safe.sub`.
pub const timingSafeSub = timing_safe.sub;
};

View File

@ -15,12 +15,12 @@ pub const Curve25519 = struct {
x: Fe,
/// Decode a Curve25519 point from its compressed (X) coordinates.
pub inline fn fromBytes(s: [32]u8) Curve25519 {
pub fn fromBytes(s: [32]u8) Curve25519 {
return .{ .x = Fe.fromBytes(s) };
}
/// Encode a Curve25519 point.
pub inline fn toBytes(p: Curve25519) [32]u8 {
pub fn toBytes(p: Curve25519) [32]u8 {
return p.x.toBytes();
}

View File

@ -138,7 +138,7 @@ pub const Edwards25519 = struct {
}
/// Flip the sign of the X coordinate.
pub inline fn neg(p: Edwards25519) Edwards25519 {
pub fn neg(p: Edwards25519) Edwards25519 {
return .{ .x = p.x.neg(), .y = p.y, .z = p.z, .t = p.t.neg() };
}
@ -190,14 +190,14 @@ pub const Edwards25519 = struct {
return q;
}
inline fn cMov(p: *Edwards25519, a: Edwards25519, c: u64) void {
fn cMov(p: *Edwards25519, a: Edwards25519, c: u64) void {
p.x.cMov(a.x, c);
p.y.cMov(a.y, c);
p.z.cMov(a.z, c);
p.t.cMov(a.t, c);
}
inline fn pcSelect(comptime n: usize, pc: *const [n]Edwards25519, b: u8) Edwards25519 {
fn pcSelect(comptime n: usize, pc: *const [n]Edwards25519, b: u8) Edwards25519 {
var t = Edwards25519.identityElement;
comptime var i: u8 = 1;
inline while (i < pc.len) : (i += 1) {

View File

@ -56,7 +56,7 @@ pub const Fe = struct {
pub const edwards25519sqrtam2 = Fe{ .limbs = .{ 1693982333959686, 608509411481997, 2235573344831311, 947681270984193, 266558006233600 } };
/// Return true if the field element is zero
pub inline fn isZero(fe: Fe) bool {
pub fn isZero(fe: Fe) bool {
var reduced = fe;
reduced.reduce();
const limbs = reduced.limbs;
@ -64,7 +64,7 @@ pub const Fe = struct {
}
/// Return true if both field elements are equivalent
pub inline fn equivalent(a: Fe, b: Fe) bool {
pub fn equivalent(a: Fe, b: Fe) bool {
return a.sub(b).isZero();
}
@ -168,7 +168,7 @@ pub const Fe = struct {
}
/// Add a field element
pub inline fn add(a: Fe, b: Fe) Fe {
pub fn add(a: Fe, b: Fe) Fe {
var fe: Fe = undefined;
comptime var i = 0;
inline while (i < 5) : (i += 1) {
@ -178,7 +178,7 @@ pub const Fe = struct {
}
/// Subtract a field element
pub inline fn sub(a: Fe, b: Fe) Fe {
pub fn sub(a: Fe, b: Fe) Fe {
var fe = b;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -197,17 +197,17 @@ pub const Fe = struct {
}
/// Negate a field element
pub inline fn neg(a: Fe) Fe {
pub fn neg(a: Fe) Fe {
return zero.sub(a);
}
/// Return true if a field element is negative
pub inline fn isNegative(a: Fe) bool {
pub fn isNegative(a: Fe) bool {
return (a.toBytes()[0] & 1) != 0;
}
/// Conditonally replace a field element with `a` if `c` is positive
pub inline fn cMov(fe: *Fe, a: Fe, c: u64) void {
pub fn cMov(fe: *Fe, a: Fe, c: u64) void {
const mask: u64 = 0 -% c;
var x = fe.*;
comptime var i = 0;
@ -248,7 +248,7 @@ pub const Fe = struct {
}
}
inline fn _carry128(r: *[5]u128) Fe {
fn _carry128(r: *[5]u128) Fe {
var rs: [5]u64 = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -321,17 +321,17 @@ pub const Fe = struct {
}
/// Square a field element
pub inline fn sq(a: Fe) Fe {
pub fn sq(a: Fe) Fe {
return _sq(a, false);
}
/// Square and double a field element
pub inline fn sq2(a: Fe) Fe {
pub fn sq2(a: Fe) Fe {
return _sq(a, true);
}
/// Multiply a field element with a small (32-bit) integer
pub inline fn mul32(a: Fe, comptime n: u32) Fe {
pub fn mul32(a: Fe, comptime n: u32) Fe {
const sn = @as(u128, @intCast(n));
var fe: Fe = undefined;
var x: u128 = 0;

View File

@ -42,7 +42,7 @@ pub const Ristretto255 = struct {
}
/// Reject the neutral element.
pub inline fn rejectIdentity(p: Ristretto255) IdentityElementError!void {
pub fn rejectIdentity(p: Ristretto255) IdentityElementError!void {
return p.p.rejectIdentity();
}
@ -141,24 +141,24 @@ pub const Ristretto255 = struct {
}
/// Double a Ristretto255 element.
pub inline fn dbl(p: Ristretto255) Ristretto255 {
pub fn dbl(p: Ristretto255) Ristretto255 {
return .{ .p = p.p.dbl() };
}
/// Add two Ristretto255 elements.
pub inline fn add(p: Ristretto255, q: Ristretto255) Ristretto255 {
pub fn add(p: Ristretto255, q: Ristretto255) Ristretto255 {
return .{ .p = p.p.add(q.p) };
}
/// Subtract two Ristretto255 elements.
pub inline fn sub(p: Ristretto255, q: Ristretto255) Ristretto255 {
pub fn sub(p: Ristretto255, q: Ristretto255) Ristretto255 {
return .{ .p = p.p.sub(q.p) };
}
/// Multiply a Ristretto255 element with a scalar.
/// Return error.WeakPublicKey if the resulting element is
/// the identity element.
pub inline fn mul(p: Ristretto255, s: [encoded_length]u8) (IdentityElementError || WeakPublicKeyError)!Ristretto255 {
pub fn mul(p: Ristretto255, s: [encoded_length]u8) (IdentityElementError || WeakPublicKeyError)!Ristretto255 {
return .{ .p = try p.p.mul(s) };
}

View File

@ -50,7 +50,7 @@ pub fn reduce64(s: [64]u8) CompressedScalar {
/// Perform the X25519 "clamping" operation.
/// The scalar is then guaranteed to be a multiple of the cofactor.
pub inline fn clamp(s: *CompressedScalar) void {
pub fn clamp(s: *CompressedScalar) void {
s[0] &= 248;
s[31] = (s[31] & 127) | 64;
}
@ -514,7 +514,7 @@ pub const Scalar = struct {
}
/// Square a scalar `n` times
inline fn sqn(x: Scalar, comptime n: comptime_int) Scalar {
fn sqn(x: Scalar, comptime n: comptime_int) Scalar {
var i: usize = 0;
var t = x;
while (i < n) : (i += 1) {

View File

@ -104,7 +104,7 @@ fn State128X(comptime degree: u7) type {
return state;
}
inline fn update(state: *State, d1: AesBlockVec, d2: AesBlockVec) void {
fn update(state: *State, d1: AesBlockVec, d2: AesBlockVec) void {
const blocks = &state.blocks;
const tmp = blocks[7];
comptime var i: usize = 7;
@ -413,7 +413,7 @@ fn State256X(comptime degree: u7) type {
return state;
}
inline fn update(state: *State, d: AesBlockVec) void {
fn update(state: *State, d: AesBlockVec) void {
const blocks = &state.blocks;
const tmp = blocks[5].encrypt(blocks[0]);
comptime var i: usize = 5;

View File

@ -17,24 +17,24 @@ pub const Block = struct {
repr: Repr,
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
pub fn fromBytes(bytes: *const [16]u8) Block {
const repr = mem.bytesToValue(Repr, bytes);
return Block{ .repr = repr };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
pub fn toBytes(block: Block) [16]u8 {
return mem.toBytes(block.repr);
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
pub fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
const x = block.repr ^ fromBytes(bytes).repr;
return mem.toBytes(x);
}
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
pub fn encrypt(block: Block, round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesenc %[rk], %[in], %[out]
@ -46,7 +46,7 @@ pub const Block = struct {
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
pub fn encryptLast(block: Block, round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesenclast %[rk], %[in], %[out]
@ -58,7 +58,7 @@ pub const Block = struct {
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, inv_round_key: Block) Block {
pub fn decrypt(block: Block, inv_round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesdec %[rk], %[in], %[out]
@ -70,7 +70,7 @@ pub const Block = struct {
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, inv_round_key: Block) Block {
pub fn decryptLast(block: Block, inv_round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesdeclast %[rk], %[in], %[out]
@ -82,17 +82,17 @@ pub const Block = struct {
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
pub fn xorBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr ^ block2.repr };
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
pub fn andBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr & block2.repr };
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
pub fn orBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr | block2.repr };
}
@ -112,7 +112,7 @@ pub const Block = struct {
};
/// Encrypt multiple blocks in parallel, each their own round key.
pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -122,7 +122,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel, each their own round key.
pub inline fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -132,7 +132,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same round key.
pub inline fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -142,7 +142,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same round key.
pub inline fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -152,7 +152,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same last round key.
pub inline fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -162,7 +162,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same last round key.
pub inline fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -200,7 +200,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
pub const block_length: usize = blocks_count * 16;
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [blocks_count * 16]u8) Self {
pub fn fromBytes(bytes: *const [blocks_count * 16]u8) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = mem.bytesToValue(Repr, bytes[i * native_word_size ..][0..native_word_size]);
@ -209,7 +209,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Convert the internal representation of a block vector into a byte sequence.
pub inline fn toBytes(block_vec: Self) [blocks_count * 16]u8 {
pub fn toBytes(block_vec: Self) [blocks_count * 16]u8 {
var out: [blocks_count * 16]u8 = undefined;
inline for (0..native_words) |i| {
out[i * native_word_size ..][0..native_word_size].* = mem.toBytes(block_vec.repr[i]);
@ -218,7 +218,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// XOR the block vector with a byte sequence.
pub inline fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [blocks_count * 16]u8 {
pub fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [blocks_count * 16]u8 {
var x: Self = undefined;
inline for (0..native_words) |i| {
x.repr[i] = block_vec.repr[i] ^ mem.bytesToValue(Repr, bytes[i * native_word_size ..][0..native_word_size]);
@ -227,7 +227,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the forward AES operation to the block vector with a vector of round keys.
pub inline fn encrypt(block_vec: Self, round_key_vec: Self) Self {
pub fn encrypt(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
@ -241,7 +241,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the forward AES operation to the block vector with a vector of last round keys.
pub inline fn encryptLast(block_vec: Self, round_key_vec: Self) Self {
pub fn encryptLast(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
@ -255,7 +255,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the inverse AES operation to the block vector with a vector of round keys.
pub inline fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self {
pub fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
@ -269,7 +269,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the inverse AES operation to the block vector with a vector of last round keys.
pub inline fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self {
pub fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
@ -283,7 +283,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise XOR operation to the content of two block vectors.
pub inline fn xorBlocks(block_vec1: Self, block_vec2: Self) Self {
pub fn xorBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i] ^ block_vec2.repr[i];
@ -292,7 +292,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise AND operation to the content of two block vectors.
pub inline fn andBlocks(block_vec1: Self, block_vec2: Self) Self {
pub fn andBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i] & block_vec2.repr[i];
@ -301,7 +301,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise OR operation to the content of two block vectors.
pub inline fn orBlocks(block_vec1: Self, block_vec2: Block) Self {
pub fn orBlocks(block_vec1: Self, block_vec2: Block) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i] | block_vec2.repr[i];

View File

@ -12,18 +12,18 @@ pub const Block = struct {
repr: Repr,
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
pub fn fromBytes(bytes: *const [16]u8) Block {
const repr = mem.bytesToValue(Repr, bytes);
return Block{ .repr = repr };
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
pub fn toBytes(block: Block) [16]u8 {
return mem.toBytes(block.repr);
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
pub fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
const x = block.repr ^ fromBytes(bytes).repr;
return mem.toBytes(x);
}
@ -31,7 +31,7 @@ pub const Block = struct {
const zero = @Vector(2, u64){ 0, 0 };
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
pub fn encrypt(block: Block, round_key: Block) Block {
return Block{
.repr = (asm (
\\ mov %[out].16b, %[in].16b
@ -45,7 +45,7 @@ pub const Block = struct {
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
pub fn encryptLast(block: Block, round_key: Block) Block {
return Block{
.repr = (asm (
\\ mov %[out].16b, %[in].16b
@ -58,7 +58,7 @@ pub const Block = struct {
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, inv_round_key: Block) Block {
pub fn decrypt(block: Block, inv_round_key: Block) Block {
return Block{
.repr = (asm (
\\ mov %[out].16b, %[in].16b
@ -72,7 +72,7 @@ pub const Block = struct {
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, inv_round_key: Block) Block {
pub fn decryptLast(block: Block, inv_round_key: Block) Block {
return Block{
.repr = (asm (
\\ mov %[out].16b, %[in].16b
@ -85,17 +85,17 @@ pub const Block = struct {
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
pub fn xorBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr ^ block2.repr };
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
pub fn andBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr & block2.repr };
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
pub fn orBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr | block2.repr };
}
@ -105,7 +105,7 @@ pub const Block = struct {
pub const optimal_parallel_blocks = 6;
/// Encrypt multiple blocks in parallel, each their own round key.
pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -115,7 +115,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel, each their own round key.
pub inline fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -125,7 +125,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same round key.
pub inline fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -135,7 +135,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same round key.
pub inline fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -145,7 +145,7 @@ pub const Block = struct {
}
/// Encrypt multiple blocks in parallel with the same last round key.
pub inline fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -155,7 +155,7 @@ pub const Block = struct {
}
/// Decrypt multiple blocks in parallel with the same last round key.
pub inline fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
@ -187,7 +187,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
pub const block_length: usize = blocks_count * 16;
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [blocks_count * 16]u8) Self {
pub fn fromBytes(bytes: *const [blocks_count * 16]u8) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = Block.fromBytes(bytes[i * native_word_size ..][0..native_word_size]);
@ -196,7 +196,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Convert the internal representation of a block vector into a byte sequence.
pub inline fn toBytes(block_vec: Self) [blocks_count * 16]u8 {
pub fn toBytes(block_vec: Self) [blocks_count * 16]u8 {
var out: [blocks_count * 16]u8 = undefined;
inline for (0..native_words) |i| {
out[i * native_word_size ..][0..native_word_size].* = block_vec.repr[i].toBytes();
@ -205,7 +205,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// XOR the block vector with a byte sequence.
pub inline fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 {
pub fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].xorBytes(bytes[i * native_word_size ..][0..native_word_size]);
@ -214,7 +214,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the forward AES operation to the block vector with a vector of round keys.
pub inline fn encrypt(block_vec: Self, round_key_vec: Self) Self {
pub fn encrypt(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].encrypt(round_key_vec.repr[i]);
@ -223,7 +223,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the forward AES operation to the block vector with a vector of last round keys.
pub inline fn encryptLast(block_vec: Self, round_key_vec: Self) Self {
pub fn encryptLast(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].encryptLast(round_key_vec.repr[i]);
@ -232,7 +232,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the inverse AES operation to the block vector with a vector of round keys.
pub inline fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self {
pub fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].decrypt(inv_round_key_vec.repr[i]);
@ -241,7 +241,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the inverse AES operation to the block vector with a vector of last round keys.
pub inline fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self {
pub fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].decryptLast(inv_round_key_vec.repr[i]);
@ -250,7 +250,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise XOR operation to the content of two block vectors.
pub inline fn xorBlocks(block_vec1: Self, block_vec2: Self) Self {
pub fn xorBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i].xorBlocks(block_vec2.repr[i]);
@ -259,7 +259,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise AND operation to the content of two block vectors.
pub inline fn andBlocks(block_vec1: Self, block_vec2: Self) Self {
pub fn andBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i].andBlocks(block_vec2.repr[i]);
@ -268,7 +268,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise OR operation to the content of two block vectors.
pub inline fn orBlocks(block_vec1: Self, block_vec2: Block) Self {
pub fn orBlocks(block_vec1: Self, block_vec2: Block) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i].orBlocks(block_vec2.repr[i]);

View File

@ -14,7 +14,7 @@ pub const Block = struct {
repr: Repr align(16),
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [16]u8) Block {
pub fn fromBytes(bytes: *const [16]u8) Block {
const s0 = mem.readInt(u32, bytes[0..4], .little);
const s1 = mem.readInt(u32, bytes[4..8], .little);
const s2 = mem.readInt(u32, bytes[8..12], .little);
@ -23,7 +23,7 @@ pub const Block = struct {
}
/// Convert the internal representation of a block into a byte sequence.
pub inline fn toBytes(block: Block) [16]u8 {
pub fn toBytes(block: Block) [16]u8 {
var bytes: [16]u8 = undefined;
mem.writeInt(u32, bytes[0..4], block.repr[0], .little);
mem.writeInt(u32, bytes[4..8], block.repr[1], .little);
@ -33,7 +33,7 @@ pub const Block = struct {
}
/// XOR the block with a byte sequence.
pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
pub fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
const block_bytes = block.toBytes();
var x: [16]u8 = undefined;
comptime var i: usize = 0;
@ -44,7 +44,7 @@ pub const Block = struct {
}
/// Encrypt a block with a round key.
pub inline fn encrypt(block: Block, round_key: Block) Block {
pub fn encrypt(block: Block, round_key: Block) Block {
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
@ -69,7 +69,7 @@ pub const Block = struct {
}
/// Encrypt a block with a round key *WITHOUT ANY PROTECTION AGAINST SIDE CHANNELS*
pub inline fn encryptUnprotected(block: Block, round_key: Block) Block {
pub fn encryptUnprotected(block: Block, round_key: Block) Block {
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
@ -114,7 +114,7 @@ pub const Block = struct {
}
/// Encrypt a block with the last round key.
pub inline fn encryptLast(block: Block, round_key: Block) Block {
pub fn encryptLast(block: Block, round_key: Block) Block {
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
@ -140,7 +140,7 @@ pub const Block = struct {
}
/// Decrypt a block with a round key.
pub inline fn decrypt(block: Block, round_key: Block) Block {
pub fn decrypt(block: Block, round_key: Block) Block {
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
@ -165,7 +165,7 @@ pub const Block = struct {
}
/// Decrypt a block with a round key *WITHOUT ANY PROTECTION AGAINST SIDE CHANNELS*
pub inline fn decryptUnprotected(block: Block, round_key: Block) Block {
pub fn decryptUnprotected(block: Block, round_key: Block) Block {
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
@ -210,7 +210,7 @@ pub const Block = struct {
}
/// Decrypt a block with the last round key.
pub inline fn decryptLast(block: Block, round_key: Block) Block {
pub fn decryptLast(block: Block, round_key: Block) Block {
const s0 = block.repr[0];
const s1 = block.repr[1];
const s2 = block.repr[2];
@ -236,7 +236,7 @@ pub const Block = struct {
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub inline fn xorBlocks(block1: Block, block2: Block) Block {
pub fn xorBlocks(block1: Block, block2: Block) Block {
var x: Repr = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -246,7 +246,7 @@ pub const Block = struct {
}
/// Apply the bitwise AND operation to the content of two blocks.
pub inline fn andBlocks(block1: Block, block2: Block) Block {
pub fn andBlocks(block1: Block, block2: Block) Block {
var x: Repr = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -256,7 +256,7 @@ pub const Block = struct {
}
/// Apply the bitwise OR operation to the content of two blocks.
pub inline fn orBlocks(block1: Block, block2: Block) Block {
pub fn orBlocks(block1: Block, block2: Block) Block {
var x: Repr = undefined;
comptime var i = 0;
inline while (i < 4) : (i += 1) {
@ -353,7 +353,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
pub const block_length: usize = blocks_count * 16;
/// Convert a byte sequence into an internal representation.
pub inline fn fromBytes(bytes: *const [blocks_count * 16]u8) Self {
pub fn fromBytes(bytes: *const [blocks_count * 16]u8) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = Block.fromBytes(bytes[i * native_word_size ..][0..native_word_size]);
@ -362,7 +362,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Convert the internal representation of a block vector into a byte sequence.
pub inline fn toBytes(block_vec: Self) [blocks_count * 16]u8 {
pub fn toBytes(block_vec: Self) [blocks_count * 16]u8 {
var out: [blocks_count * 16]u8 = undefined;
for (0..native_words) |i| {
out[i * native_word_size ..][0..native_word_size].* = block_vec.repr[i].toBytes();
@ -371,7 +371,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// XOR the block vector with a byte sequence.
pub inline fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 {
pub fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].xorBytes(bytes[i * native_word_size ..][0..native_word_size]);
@ -380,7 +380,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the forward AES operation to the block vector with a vector of round keys.
pub inline fn encrypt(block_vec: Self, round_key_vec: Self) Self {
pub fn encrypt(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].encrypt(round_key_vec.repr[i]);
@ -389,7 +389,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the forward AES operation to the block vector with a vector of last round keys.
pub inline fn encryptLast(block_vec: Self, round_key_vec: Self) Self {
pub fn encryptLast(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].encryptLast(round_key_vec.repr[i]);
@ -398,7 +398,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the inverse AES operation to the block vector with a vector of round keys.
pub inline fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self {
pub fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].decrypt(inv_round_key_vec.repr[i]);
@ -407,7 +407,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the inverse AES operation to the block vector with a vector of last round keys.
pub inline fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self {
pub fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec.repr[i].decryptLast(inv_round_key_vec.repr[i]);
@ -416,7 +416,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise XOR operation to the content of two block vectors.
pub inline fn xorBlocks(block_vec1: Self, block_vec2: Self) Self {
pub fn xorBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i].xorBlocks(block_vec2.repr[i]);
@ -425,7 +425,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise AND operation to the content of two block vectors.
pub inline fn andBlocks(block_vec1: Self, block_vec2: Self) Self {
pub fn andBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i].andBlocks(block_vec2.repr[i]);
@ -434,7 +434,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type {
}
/// Apply the bitwise OR operation to the content of two block vectors.
pub inline fn orBlocks(block_vec1: Self, block_vec2: Block) Self {
pub fn orBlocks(block_vec1: Self, block_vec2: Block) Self {
var out: Self = undefined;
for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i].orBlocks(block_vec2.repr[i]);

View File

@ -21,6 +21,12 @@ fn AesGcm(comptime Aes: anytype) type {
const zeros = [_]u8{0} ** 16;
/// `c`: The ciphertext buffer to write the encrypted data to.
/// `tag`: The authentication tag buffer to write the computed tag to.
/// `m`: The plaintext message to encrypt.
/// `ad`: The associated data to authenticate.
/// `npub`: The nonce to use for encryption.
/// `key`: The encryption key.
pub fn encrypt(c: []u8, tag: *[tag_length]u8, m: []const u8, ad: []const u8, npub: [nonce_length]u8, key: [key_length]u8) void {
debug.assert(c.len == m.len);
debug.assert(m.len <= 16 * ((1 << 32) - 2));

View File

@ -28,7 +28,7 @@ fn AesOcb(comptime Aes: anytype) type {
table: [56]Block align(16) = undefined,
upto: usize,
inline fn double(l: Block) Block {
fn double(l: Block) Block {
const l_ = mem.readInt(u128, &l, .big);
const l_2 = (l_ << 1) ^ (0x87 & -%(l_ >> 127));
var l2: Block = undefined;
@ -244,7 +244,7 @@ fn AesOcb(comptime Aes: anytype) type {
};
}
inline fn xorBlocks(x: Block, y: Block) Block {
fn xorBlocks(x: Block, y: Block) Block {
var z: Block = x;
for (&z, 0..) |*v, i| {
v.* = x[i] ^ y[i];
@ -252,7 +252,7 @@ inline fn xorBlocks(x: Block, y: Block) Block {
return z;
}
inline fn xorWith(x: *Block, y: Block) void {
fn xorWith(x: *Block, y: Block) void {
for (x, 0..) |*v, i| {
v.* ^= y[i];
}

View File

@ -157,7 +157,7 @@ pub fn State(comptime endian: std.builtin.Endian) type {
}
/// Apply a reduced-round permutation to the state.
pub inline fn permuteR(state: *Self, comptime rounds: u4) void {
pub fn permuteR(state: *Self, comptime rounds: u4) void {
const rks = [16]u64{ 0x3c, 0x2d, 0x1e, 0x0f, 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, 0x78, 0x69, 0x5a, 0x4b };
inline for (rks[rks.len - rounds ..]) |rk| {
state.round(rk);
@ -165,13 +165,13 @@ pub fn State(comptime endian: std.builtin.Endian) type {
}
/// Apply a full-round permutation to the state.
pub inline fn permute(state: *Self) void {
pub fn permute(state: *Self) void {
state.permuteR(12);
}
/// Apply a permutation to the state and prevent backtracking.
/// The rate is expressed in bytes and must be a multiple of the word size (8).
pub inline fn permuteRatchet(state: *Self, comptime rounds: u4, comptime rate: u6) void {
pub fn permuteRatchet(state: *Self, comptime rounds: u4, comptime rate: u6) void {
const capacity = block_bytes - rate;
debug.assert(capacity > 0 and capacity % 8 == 0); // capacity must be a multiple of 64 bits
var mask: [capacity / 8]u64 = undefined;
@ -181,7 +181,7 @@ pub fn State(comptime endian: std.builtin.Endian) type {
}
// Core Ascon permutation.
inline fn round(state: *Self, rk: u64) void {
fn round(state: *Self, rk: u64) void {
const x = &state.st;
x[2] ^= rk;

View File

@ -61,7 +61,7 @@ const CompressVectorized = struct {
const Lane = @Vector(4, u32);
const Rows = [4]Lane;
inline fn g(comptime even: bool, rows: *Rows, m: Lane) void {
fn g(comptime even: bool, rows: *Rows, m: Lane) void {
rows[0] +%= rows[1] +% m;
rows[3] ^= rows[0];
rows[3] = math.rotr(Lane, rows[3], if (even) 8 else 16);
@ -70,13 +70,13 @@ const CompressVectorized = struct {
rows[1] = math.rotr(Lane, rows[1], if (even) 7 else 12);
}
inline fn diagonalize(rows: *Rows) void {
fn diagonalize(rows: *Rows) void {
rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 3, 0, 1, 2 });
rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 });
rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 1, 2, 3, 0 });
}
inline fn undiagonalize(rows: *Rows) void {
fn undiagonalize(rows: *Rows) void {
rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 1, 2, 3, 0 });
rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 });
rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 3, 0, 1, 2 });

View File

@ -151,7 +151,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
}
}
inline fn chacha20Core(x: *BlockVec, input: BlockVec) void {
fn chacha20Core(x: *BlockVec, input: BlockVec) void {
x.* = input;
const m0 = switch (degree) {
@ -215,7 +215,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
}
}
inline fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: BlockVec) void {
fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: BlockVec) void {
for (0..dm) |d| {
for (0..4) |i| {
mem.writeInt(u32, out[64 * d + 16 * i + 0 ..][0..4], x[i][0 + 4 * d], .little);
@ -226,7 +226,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
}
}
inline fn contextFeedback(x: *BlockVec, ctx: BlockVec) void {
fn contextFeedback(x: *BlockVec, ctx: BlockVec) void {
x[0] +%= ctx[0];
x[1] +%= ctx[1];
x[2] +%= ctx[2];
@ -365,7 +365,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
};
}
inline fn chacha20Core(x: *BlockVec, input: BlockVec) void {
fn chacha20Core(x: *BlockVec, input: BlockVec) void {
x.* = input;
const rounds = comptime [_]QuarterRound{
@ -394,7 +394,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
}
}
inline fn hashToBytes(out: *[64]u8, x: BlockVec) void {
fn hashToBytes(out: *[64]u8, x: BlockVec) void {
for (0..4) |i| {
mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i * 4 + 0], .little);
mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i * 4 + 1], .little);
@ -403,7 +403,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
}
}
inline fn contextFeedback(x: *BlockVec, ctx: BlockVec) void {
fn contextFeedback(x: *BlockVec, ctx: BlockVec) void {
for (0..16) |i| {
x[i] +%= ctx[i];
}

View File

@ -280,34 +280,34 @@ pub const base64 = struct {
return DecoderWithIgnore{ .ignored_chars = ignored_chars };
}
inline fn eq(x: u8, y: u8) u8 {
fn eq(x: u8, y: u8) u8 {
return ~@as(u8, @truncate((0 -% (@as(u16, x) ^ @as(u16, y))) >> 8));
}
inline fn gt(x: u8, y: u8) u8 {
fn gt(x: u8, y: u8) u8 {
return @truncate((@as(u16, y) -% @as(u16, x)) >> 8);
}
inline fn ge(x: u8, y: u8) u8 {
fn ge(x: u8, y: u8) u8 {
return ~gt(y, x);
}
inline fn lt(x: u8, y: u8) u8 {
fn lt(x: u8, y: u8) u8 {
return gt(y, x);
}
inline fn le(x: u8, y: u8) u8 {
fn le(x: u8, y: u8) u8 {
return ge(y, x);
}
inline fn charFromByte(x: u8, comptime urlsafe: bool) u8 {
fn charFromByte(x: u8, comptime urlsafe: bool) u8 {
return (lt(x, 26) & (x +% 'A')) |
(ge(x, 26) & lt(x, 52) & (x +% 'a' -% 26)) |
(ge(x, 52) & lt(x, 62) & (x +% '0' -% 52)) |
(eq(x, 62) & '+') | (eq(x, 63) & if (urlsafe) '_' else '/');
}
inline fn byteFromChar(c: u8, comptime urlsafe: bool) u8 {
fn byteFromChar(c: u8, comptime urlsafe: bool) u8 {
const x =
(ge(c, 'A') & le(c, 'Z') & (c -% 'A')) |
(ge(c, 'a') & le(c, 'z') & (c -% 'a' +% 26)) |

View File

@ -89,7 +89,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
const Selector = enum { lo, hi, hi_lo };
// Carryless multiplication of two 64-bit integers for x86_64.
inline fn clmulPclmul(x: u128, y: u128, comptime half: Selector) u128 {
fn clmulPclmul(x: u128, y: u128, comptime half: Selector) u128 {
switch (half) {
.hi => {
const product = asm (
@ -122,7 +122,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
}
// Carryless multiplication of two 64-bit integers for ARM crypto.
inline fn clmulPmull(x: u128, y: u128, comptime half: Selector) u128 {
fn clmulPmull(x: u128, y: u128, comptime half: Selector) u128 {
switch (half) {
.hi => {
const product = asm (
@ -231,7 +231,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
mid: u128,
};
inline fn xor256(x: *I256, y: I256) void {
fn xor256(x: *I256, y: I256) void {
x.* = I256{
.hi = x.hi ^ y.hi,
.lo = x.lo ^ y.lo,
@ -249,7 +249,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
}
// Multiply two 128-bit integers in GF(2^128).
inline fn clmul128(x: u128, y: u128) I256 {
fn clmul128(x: u128, y: u128) I256 {
if (mul_algorithm == .karatsuba) {
const x_hi = @as(u64, @truncate(x >> 64));
const y_hi = @as(u64, @truncate(y >> 64));
@ -273,7 +273,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type {
// Reduce a 256-bit representative of a polynomial modulo the irreducible polynomial x^128 + x^127 + x^126 + x^121 + 1.
// This is done using Shay Gueron's black magic demysticated here:
// https://blog.quarkslab.com/reversing-a-finite-field-multiplication-optimization.html
inline fn reduce(x: I256) u128 {
fn reduce(x: I256) u128 {
const hi = x.hi ^ (x.mid >> 64);
const lo = x.lo ^ (x.mid << 64);
const p64 = (((1 << 121) | (1 << 126) | (1 << 127)) >> 64);

View File

@ -72,7 +72,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) +% arg3 +% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -91,7 +91,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) -% arg3 -% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -109,7 +109,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0xffffffffffffffff]
inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
@ -128,7 +128,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
/// arg3: [0x0 ~> 0xffffffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
const mask = 0 -% @as(u64, arg1);

View File

@ -72,7 +72,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) +% arg3 +% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -91,7 +91,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) -% arg3 -% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -109,7 +109,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0xffffffffffffffff]
inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
@ -128,7 +128,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
/// arg3: [0x0 ~> 0xffffffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
const mask = 0 -% @as(u64, arg1);

View File

@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [6]u64;
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) +% arg3 +% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) -% arg3 -% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0xffffffffffffffff]
inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
/// arg3: [0x0 ~> 0xffffffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
const mask = 0 -% @as(u64, arg1);

View File

@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [6]u64;
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) +% arg3 +% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) -% arg3 -% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0xffffffffffffffff]
inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
/// arg3: [0x0 ~> 0xffffffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
const mask = 0 -% @as(u64, arg1);

View File

@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) +% arg3 +% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) -% arg3 -% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0xffffffffffffffff]
inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
/// arg3: [0x0 ~> 0xffffffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
const mask = 0 -% @as(u64, arg1);

View File

@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64;
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) +% arg3 +% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0x1]
inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void {
const x = @as(u128, arg2) -% arg3 -% arg1;
out1.* = @truncate(x);
out2.* = @truncate(x >> 64);
@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
/// out2: [0x0 ~> 0xffffffffffffffff]
inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
@setRuntimeSafety(mode == .Debug);
const x = @as(u128, arg1) * @as(u128, arg2);
@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void {
/// arg3: [0x0 ~> 0xffffffffffffffff]
/// Output Bounds:
/// out1: [0x0 ~> 0xffffffffffffffff]
inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void {
@setRuntimeSafety(mode == .Debug);
const mask = 0 -% @as(u64, arg1);

View File

@ -31,13 +31,13 @@ pub const Poly1305 = struct {
};
}
inline fn add(a: u64, b: u64, c: u1) struct { u64, u1 } {
fn add(a: u64, b: u64, c: u1) struct { u64, u1 } {
const v1 = @addWithOverflow(a, b);
const v2 = @addWithOverflow(v1[0], c);
return .{ v2[0], v1[1] | v2[1] };
}
inline fn sub(a: u64, b: u64, c: u1) struct { u64, u1 } {
fn sub(a: u64, b: u64, c: u1) struct { u64, u1 } {
const v1 = @subWithOverflow(a, b);
const v2 = @subWithOverflow(v1[0], c);
return .{ v2[0], v1[1] | v2[1] };

View File

@ -41,7 +41,7 @@ fn SalsaVecImpl(comptime rounds: comptime_int) type {
};
}
inline fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
const n1n2n3n0 = Lane{ input[3][1], input[3][2], input[3][3], input[3][0] };
const n1n2 = Half{ n1n2n3n0[0], n1n2n3n0[1] };
const n3n0 = Half{ n1n2n3n0[2], n1n2n3n0[3] };
@ -203,7 +203,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
d: u6,
};
inline fn Rp(a: usize, b: usize, c: usize, d: u6) QuarterRound {
fn Rp(a: usize, b: usize, c: usize, d: u6) QuarterRound {
return QuarterRound{
.a = a,
.b = b,
@ -212,7 +212,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
};
}
inline fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void {
const arx_steps = comptime [_]QuarterRound{
Rp(4, 0, 12, 7), Rp(8, 4, 0, 9), Rp(12, 8, 4, 13), Rp(0, 12, 8, 18),
Rp(9, 5, 1, 7), Rp(13, 9, 5, 9), Rp(1, 13, 9, 13), Rp(5, 1, 13, 18),

View File

@ -265,7 +265,7 @@ test classify {
// Comparing secret data must be done in constant time. The result
// is going to be considered as secret as well.
var res = std.crypto.utils.timingSafeEql([32]u8, out, secret);
var res = std.crypto.timing_safe.eql([32]u8, out, secret);
// If we want to make a conditional jump based on a secret,
// it has to be declassified.

View File

@ -593,11 +593,11 @@ pub fn hmac(comptime Hmac: type, message: []const u8, key: [Hmac.key_length]u8)
return result;
}
pub inline fn extension(et: ExtensionType, bytes: anytype) [2 + 2 + bytes.len]u8 {
pub fn extension(et: ExtensionType, bytes: anytype) [2 + 2 + bytes.len]u8 {
return int(u16, @intFromEnum(et)) ++ array(u16, u8, bytes);
}
pub inline fn array(
pub fn array(
comptime Len: type,
comptime Elem: type,
elems: anytype,
@ -622,7 +622,7 @@ pub inline fn array(
return arr;
}
pub inline fn int(comptime Int: type, val: Int) [@divExact(@bitSizeOf(Int), 8)]u8 {
pub fn int(comptime Int: type, val: Int) [@divExact(@bitSizeOf(Int), 8)]u8 {
var arr: [@divExact(@bitSizeOf(Int), 8)]u8 = undefined;
std.mem.writeInt(Int, &arr, val, .big);
return arr;

View File

@ -1235,7 +1235,7 @@ fn logSecrets(w: *Writer, context: anytype, secrets: anytype) void {
}) catch {};
}
inline fn big(x: anytype) @TypeOf(x) {
fn big(x: anytype) @TypeOf(x) {
return switch (native_endian) {
.big => x,
.little => @byteSwap(x),

View File

@ -231,10 +231,6 @@ pub fn print(comptime fmt: []const u8, args: anytype) void {
nosuspend w.print(fmt, args) catch return;
}
pub fn getStderrMutex() *std.Thread.Mutex {
@compileError("deprecated. call std.debug.lockStdErr() and std.debug.unlockStdErr() instead which will integrate properly with std.Progress");
}
/// TODO multithreaded awareness
var self_debug_info: ?SelfInfo = null;
@ -658,9 +654,8 @@ pub fn defaultPanic(
if (uefi.system_table.boot_services) |bs| {
// ExitData buffer must be allocated using boot_services.allocatePool (spec: page 220)
const exit_data: []u16 = uefi.raw_pool_allocator.alloc(u16, exit_msg.len + 1) catch @trap();
@memcpy(exit_data, exit_msg[0..exit_data.len]); // Includes null terminator.
_ = bs.exit(uefi.handle, .aborted, exit_data.len, exit_data.ptr);
const exit_data = uefi.raw_pool_allocator.dupeZ(u16, exit_msg) catch @trap();
bs.exit(uefi.handle, .aborted, exit_data) catch {};
}
@trap();
},

View File

@ -35,8 +35,6 @@ pub const realpathW = posix.realpathW;
pub const getAppDataDir = @import("fs/get_app_data_dir.zig").getAppDataDir;
pub const GetAppDataDirError = @import("fs/get_app_data_dir.zig").GetAppDataDirError;
pub const MAX_PATH_BYTES = @compileError("deprecated; renamed to max_path_bytes");
/// The maximum length of a file path that the operating system will accept.
///
/// Paths, including those returned from file system operations, may be longer
@ -90,9 +88,6 @@ pub const max_name_bytes = switch (native_os) {
@compileError("NAME_MAX not implemented for " ++ @tagName(native_os)),
};
/// Deprecated: use `max_name_bytes`
pub const MAX_NAME_BYTES = max_name_bytes;
pub const base64_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_".*;
/// Base64 encoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
@ -101,11 +96,6 @@ pub const base64_encoder = base64.Base64Encoder.init(base64_alphabet, null);
/// Base64 decoder, replacing the standard `+/` with `-_` so that it can be used in a file name on any filesystem.
pub const base64_decoder = base64.Base64Decoder.init(base64_alphabet, null);
/// Deprecated. Use `cwd().atomicSymLink()` instead.
pub fn atomicSymLink(_: Allocator, existing_path: []const u8, new_path: []const u8) !void {
try cwd().atomicSymLink(existing_path, new_path, .{});
}
/// Same as `Dir.updateFile`, except asserts that both `source_path` and `dest_path`
/// are absolute. See `Dir.updateFile` for a function that operates on both
/// absolute and relative paths.

View File

@ -1402,9 +1402,6 @@ pub fn setAsCwd(self: Dir) !void {
try posix.fchdir(self.fd);
}
/// Deprecated: use `OpenOptions`
pub const OpenDirOptions = OpenOptions;
pub const OpenOptions = struct {
/// `true` means the opened directory can be used as the `Dir` parameter
/// for functions which operate based on an open directory handle. When `false`,
@ -2511,8 +2508,6 @@ pub fn writeFile(self: Dir, options: WriteFileOptions) WriteFileError!void {
try file.writeAll(options.data);
}
pub const writeFile2 = @compileError("deprecated; renamed to writeFile");
pub const AccessError = posix.AccessError;
/// Test accessing `sub_path`.

View File

@ -1045,7 +1045,7 @@ pub const Reader = struct {
r.pos = offset;
},
.streaming, .streaming_reading => {
if (offset >= r.pos) return Reader.seekBy(r, offset - r.pos);
if (offset >= r.pos) return Reader.seekBy(r, @intCast(offset - r.pos));
if (r.seek_err) |err| return err;
posix.lseek_SET(r.file.handle, offset) catch |err| {
r.seek_err = err;
@ -1333,6 +1333,7 @@ pub const Writer = struct {
.file = w.file,
.mode = w.mode,
.pos = w.pos,
.interface = Reader.initInterface(w.interface.buffer),
.seek_err = w.seek_err,
};
}

View File

@ -80,9 +80,8 @@ fn uint16(input: u16) u16 {
return x;
}
/// DEPRECATED: use std.hash.int()
/// Source: https://github.com/skeeto/hash-prospector
pub fn uint32(input: u32) u32 {
fn uint32(input: u32) u32 {
var x: u32 = input;
x = (x ^ (x >> 17)) *% 0xed5ad4bb;
x = (x ^ (x >> 11)) *% 0xac4c1b51;

View File

@ -150,7 +150,8 @@ test "HTTP server handles a chunked transfer coding request" {
"content-type: text/plain\r\n" ++
"\r\n" ++
"message from server!\n";
var stream_reader = stream.reader(&.{});
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var stream_reader = stream.reader(&tiny_buffer);
const response = try stream_reader.interface().allocRemaining(gpa, .limited(expected_response.len));
defer gpa.free(response);
try expectEqualStrings(expected_response, response);
@ -288,7 +289,8 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
var stream_writer = stream.writer(&.{});
try stream_writer.interface.writeAll(request_bytes);
var stream_reader = stream.reader(&.{});
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var stream_reader = stream.reader(&tiny_buffer);
const response = try stream_reader.interface().allocRemaining(gpa, .limited(8192));
defer gpa.free(response);
@ -358,7 +360,8 @@ test "receiving arbitrary http headers from the client" {
var stream_writer = stream.writer(&.{});
try stream_writer.interface.writeAll(request_bytes);
var stream_reader = stream.reader(&.{});
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var stream_reader = stream.reader(&tiny_buffer);
const response = try stream_reader.interface().allocRemaining(gpa, .limited(8192));
defer gpa.free(response);

View File

@ -2222,9 +2222,6 @@ pub const Const = struct {
TargetTooSmall,
};
/// Deprecated; use `toInt`.
pub const to = toInt;
/// Convert `self` to `Int`.
///
/// Returns an error if self cannot be narrowed into the requested type without truncation.
@ -2855,9 +2852,6 @@ pub const Managed = struct {
pub const ConvertError = Const.ConvertError;
/// Deprecated; use `toInt`.
pub const to = toInt;
/// Convert `self` to `Int`.
///
/// Returns an error if self cannot be narrowed into the requested type without truncation.

View File

@ -688,7 +688,7 @@ test "string set base 36" {
defer a.deinit();
try a.setString(36, "fifvthrv1mzt79ez9");
try testing.expectEqual(123456789123456789123456789, try a.to(u128));
try testing.expectEqual(123456789123456789123456789, try a.toInt(u128));
}
test "string set bad char error" {

View File

@ -2258,8 +2258,6 @@ test byteSwapAllFields {
}, k);
}
pub const tokenize = @compileError("deprecated; use tokenizeAny, tokenizeSequence, or tokenizeScalar");
/// Returns an iterator that iterates over the slices of `buffer` that are not
/// any of the items in `delimiters`.
///
@ -2458,8 +2456,6 @@ test "tokenize (reset)" {
}
}
pub const split = @compileError("deprecated; use splitSequence, splitAny, or splitScalar");
/// Returns an iterator that iterates over the slices of `buffer` that
/// are separated by the byte sequence in `delimiter`.
///
@ -2659,8 +2655,6 @@ test "split (reset)" {
}
}
pub const splitBackwards = @compileError("deprecated; use splitBackwardsSequence, splitBackwardsAny, or splitBackwardsScalar");
/// Returns an iterator that iterates backwards over the slices of `buffer` that
/// are separated by the sequence in `delimiter`.
///

View File

@ -418,29 +418,6 @@ test fieldInfo {
try testing.expect(comptime uf.type == u8);
}
/// Deprecated: use @FieldType
pub fn FieldType(comptime T: type, comptime field: FieldEnum(T)) type {
return @FieldType(T, @tagName(field));
}
test FieldType {
const S = struct {
a: u8,
b: u16,
};
const U = union {
c: u32,
d: *const u8,
};
try testing.expect(FieldType(S, .a) == u8);
try testing.expect(FieldType(S, .b) == u16);
try testing.expect(FieldType(U, .c) == u32);
try testing.expect(FieldType(U, .d) == *const u8);
}
pub fn fieldNames(comptime T: type) *const [fields(T).len][:0]const u8 {
return comptime blk: {
const fieldInfos = fields(T);

View File

@ -19,6 +19,7 @@ const File = std.fs.File;
// first release to support them.
pub const has_unix_sockets = switch (native_os) {
.windows => builtin.os.version_range.windows.isAtLeast(.win10_rs4) orelse false,
.wasi => false,
else => true,
};
@ -217,8 +218,6 @@ pub const Address = extern union {
/// Sets SO_REUSEADDR and SO_REUSEPORT on POSIX.
/// Sets SO_REUSEADDR on Windows, which is roughly equivalent.
reuse_address: bool = false,
/// Deprecated. Does the same thing as reuse_address.
reuse_port: bool = false,
force_nonblocking: bool = false,
};
@ -235,7 +234,7 @@ pub const Address = extern union {
};
errdefer s.stream.close();
if (options.reuse_address or options.reuse_port) {
if (options.reuse_address) {
try posix.setsockopt(
sockfd,
posix.SOL.SOCKET,
@ -869,7 +868,7 @@ pub fn getAddressList(gpa: Allocator, name: []const u8, port: u16) GetAddressLis
const name_c = try gpa.dupeZ(u8, name);
defer gpa.free(name_c);
const port_c = try std.fmt.allocPrintSentinel(gpa, "{}", .{port}, 0);
const port_c = try std.fmt.allocPrintSentinel(gpa, "{d}", .{port}, 0);
defer gpa.free(port_c);
const ws2_32 = windows.ws2_32;
@ -1080,7 +1079,7 @@ fn linuxLookupName(
}
} else {
try canon.resize(gpa, 0);
try addrs.ensureUnusedCapacity(gpa, 1);
try addrs.ensureUnusedCapacity(gpa, 2);
linuxLookupNameFromNull(addrs, family, flags, port);
}
if (addrs.items.len == 0) return error.UnknownHostName;
@ -1355,7 +1354,7 @@ fn parseHosts(
const line = br.takeDelimiterExclusive('\n') catch |err| switch (err) {
error.StreamTooLong => {
// Skip lines that are too long.
br.discardDelimiterInclusive('\n') catch |e| switch (e) {
_ = br.discardDelimiterInclusive('\n') catch |e| switch (e) {
error.EndOfStream => break,
error.ReadFailed => return error.ReadFailed,
};
@ -1398,6 +1397,25 @@ fn parseHosts(
}
}
test parseHosts {
if (builtin.os.tag == .wasi) {
// TODO parsing addresses should not have OS dependencies
return error.SkipZigTest;
}
var reader: std.io.Reader = .fixed(
\\127.0.0.1 localhost
\\::1 localhost
\\127.0.0.2 abcd
);
var addrs: ArrayList(LookupAddr) = .empty;
defer addrs.deinit(std.testing.allocator);
var canon: ArrayList(u8) = .empty;
defer canon.deinit(std.testing.allocator);
try parseHosts(std.testing.allocator, &addrs, &canon, "abcd", posix.AF.UNSPEC, 1234, &reader);
try std.testing.expectEqual(1, addrs.items.len);
try std.testing.expectFmt("127.0.0.2:1234", "{f}", .{addrs.items[0].addr});
}
pub fn isValidHostName(hostname: []const u8) bool {
if (hostname.len >= 254) return false;
if (!std.unicode.utf8ValidateSlice(hostname)) return false;
@ -1562,9 +1580,12 @@ const ResolvConf = struct {
};
}
fn parse(rc: *ResolvConf, br: *io.Reader) !void {
const Directive = enum { options, nameserver, domain, search };
const Option = enum { ndots, attempts, timeout };
fn parse(rc: *ResolvConf, reader: *io.Reader) !void {
const gpa = rc.gpa;
while (br.takeSentinel('\n')) |line_with_comment| {
while (reader.takeSentinel('\n')) |line_with_comment| {
const line = line: {
var split = mem.splitScalar(u8, line_with_comment, '#');
break :line split.first();
@ -1572,8 +1593,8 @@ const ResolvConf = struct {
var line_it = mem.tokenizeAny(u8, line, " \t");
const token = line_it.next() orelse continue;
if (mem.eql(u8, token, "options")) {
while (line_it.next()) |sub_tok| {
switch (std.meta.stringToEnum(Directive, token) orelse continue) {
.options => while (line_it.next()) |sub_tok| {
var colon_it = mem.splitScalar(u8, sub_tok, ':');
const name = colon_it.first();
const value_txt = colon_it.next() orelse continue;
@ -1581,22 +1602,25 @@ const ResolvConf = struct {
error.Overflow => 255,
error.InvalidCharacter => continue,
};
if (mem.eql(u8, name, "ndots")) {
rc.ndots = @min(value, 15);
} else if (mem.eql(u8, name, "attempts")) {
rc.attempts = @min(value, 10);
} else if (mem.eql(u8, name, "timeout")) {
rc.timeout = @min(value, 60);
switch (std.meta.stringToEnum(Option, name) orelse continue) {
.ndots => rc.ndots = @min(value, 15),
.attempts => rc.attempts = @min(value, 10),
.timeout => rc.timeout = @min(value, 60),
}
}
} else if (mem.eql(u8, token, "nameserver")) {
const ip_txt = line_it.next() orelse continue;
try linuxLookupNameFromNumericUnspec(gpa, &rc.ns, ip_txt, 53);
} else if (mem.eql(u8, token, "domain") or mem.eql(u8, token, "search")) {
rc.search.items.len = 0;
try rc.search.appendSlice(gpa, line_it.rest());
},
.nameserver => {
const ip_txt = line_it.next() orelse continue;
try linuxLookupNameFromNumericUnspec(gpa, &rc.ns, ip_txt, 53);
},
.domain, .search => {
rc.search.items.len = 0;
try rc.search.appendSlice(gpa, line_it.rest());
},
}
} else |err| return err;
} else |err| switch (err) {
error.EndOfStream => if (reader.bufferedLen() != 0) return error.EndOfStream,
else => |e| return e,
}
if (rc.ns.items.len == 0) {
return linuxLookupNameFromNumericUnspec(gpa, &rc.ns, "127.0.0.1", 53);
@ -1849,9 +1873,15 @@ pub const Stream = struct {
}
}
const ReadError = posix.ReadError;
pub const ReadError = posix.ReadError || error{
SocketNotBound,
MessageTooBig,
NetworkSubsystemFailed,
ConnectionResetByPeer,
SocketNotConnected,
};
const WriteError = posix.SendMsgError || error{
pub const WriteError = posix.SendMsgError || error{
ConnectionResetByPeer,
SocketNotBound,
MessageTooBig,
@ -1863,11 +1893,12 @@ pub const Stream = struct {
pub const Reader = switch (native_os) {
.windows => struct {
/// Use `interface` to access portably.
/// Use `interface` for portable code.
interface_state: io.Reader,
/// Use `getStream` to access portably.
/// Use `getStream` for portable code.
net_stream: Stream,
err: ?Error = null,
/// Use `getError` for portable code.
error_state: ?Error,
pub const Error = ReadError;
@ -1875,6 +1906,10 @@ pub const Stream = struct {
return r.stream;
}
pub fn getError(r: *const Reader) ?Error {
return r.error_state;
}
pub fn interface(r: *Reader) *io.Reader {
return &r.interface_state;
}
@ -1882,22 +1917,33 @@ pub const Stream = struct {
pub fn init(net_stream: Stream, buffer: []u8) Reader {
return .{
.interface_state = .{
.context = undefined,
.vtable = &.{ .stream = stream },
.buffer = buffer,
.seek = 0,
.end = 0,
},
.net_stream = net_stream,
.error_state = null,
};
}
fn stream(io_r: *io.Reader, io_w: *io.Writer, limit: io.Limit) io.Reader.StreamError!usize {
const r: *Reader = @fieldParentPtr("interface", io_r);
var iovecs: [max_buffers_len]windows.WSABUF = undefined;
const bufs = io_w.writableVectorWsa(&iovecs, limit);
assert(bufs[0].len > 0);
const r: *Reader = @alignCast(@fieldParentPtr("interface_state", io_r));
var iovecs: [max_buffers_len]windows.ws2_32.WSABUF = undefined;
const bufs = try io_w.writableVectorWsa(&iovecs, limit);
assert(bufs[0].len != 0);
const n = streamBufs(r, bufs) catch |err| {
r.error_state = err;
return error.ReadFailed;
};
if (n == 0) return error.EndOfStream;
return n;
}
fn streamBufs(r: *Reader, bufs: []windows.ws2_32.WSABUF) Error!u32 {
var n: u32 = undefined;
var flags: u32 = 0;
const rc = windows.ws2_32.WSARecvFrom(r.net_stream.handle, bufs.ptr, bufs.len, &n, &flags, null, null, null, null);
const rc = windows.ws2_32.WSARecvFrom(r.net_stream.handle, bufs.ptr, @intCast(bufs.len), &n, &flags, null, null, null, null);
if (rc != 0) switch (windows.ws2_32.WSAGetLastError()) {
.WSAECONNRESET => return error.ConnectionResetByPeer,
.WSAEFAULT => unreachable, // a pointer is not completely contained in user address space.
@ -1913,11 +1959,11 @@ pub const Stream = struct {
.WSA_OPERATION_ABORTED => unreachable, // not using overlapped I/O
else => |err| return windows.unexpectedWSAError(err),
};
if (n == 0) return error.EndOfStream;
return n;
}
},
else => struct {
/// Use `getStream`, `interface`, and `getError` for portable code.
file_reader: File.Reader,
pub const Error = ReadError;
@ -1940,6 +1986,10 @@ pub const Stream = struct {
pub fn getStream(r: *const Reader) Stream {
return .{ .handle = r.file_reader.file.handle };
}
pub fn getError(r: *const Reader) ?Error {
return r.file_reader.err;
}
},
};
@ -1949,6 +1999,8 @@ pub const Stream = struct {
interface: io.Writer,
/// Use `getStream` for cross-platform support.
stream: Stream,
/// This field is present on all systems.
err: ?Error = null,
pub const Error = WriteError;
@ -1966,43 +2018,39 @@ pub const Stream = struct {
return w.stream;
}
fn addWsaBuf(v: []windows.ws2_32.WSABUF, i: *u32, bytes: []const u8) void {
const cap = std.math.maxInt(u32);
var remaining = bytes;
while (remaining.len > cap) {
if (v.len - i.* == 0) return;
v[i.*] = .{ .buf = @constCast(remaining.ptr), .len = cap };
i.* += 1;
remaining = remaining[cap..];
} else {
@branchHint(.likely);
if (v.len - i.* == 0) return;
v[i.*] = .{ .buf = @constCast(remaining.ptr), .len = @intCast(remaining.len) };
i.* += 1;
}
}
fn drain(io_w: *io.Writer, data: []const []const u8, splat: usize) io.Writer.Error!usize {
const w: *Writer = @fieldParentPtr("interface", io_w);
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
const buffered = io_w.buffered();
comptime assert(native_os == .windows);
var iovecs: [max_buffers_len]windows.WSABUF = undefined;
var iovecs: [max_buffers_len]windows.ws2_32.WSABUF = undefined;
var len: u32 = 0;
if (buffered.len != 0) {
iovecs[len] = .{
.buf = buffered.ptr,
.len = buffered.len,
};
len += 1;
}
for (data) |bytes| {
if (bytes.len == 0) continue;
iovecs[len] = .{
.buf = bytes.ptr,
.len = bytes.len,
};
len += 1;
if (iovecs.len - len == 0) break;
}
if (len == 0) return 0;
addWsaBuf(&iovecs, &len, buffered);
for (data[0 .. data.len - 1]) |bytes| addWsaBuf(&iovecs, &len, bytes);
const pattern = data[data.len - 1];
switch (splat) {
0 => if (iovecs[len - 1].buf == data[data.len - 1].ptr) {
len -= 1;
},
1 => {},
if (iovecs.len - len != 0) switch (splat) {
0 => {},
1 => addWsaBuf(&iovecs, &len, pattern),
else => switch (pattern.len) {
0 => {},
1 => memset: {
// Replace the 1-byte buffer with a bigger one.
if (iovecs[len - 1].buf == data[data.len - 1].ptr) len -= 1;
if (iovecs.len - len == 0) break :memset;
1 => {
const splat_buffer_candidate = io_w.buffer[io_w.end..];
var backup_buffer: [32]u8 = undefined;
var backup_buffer: [64]u8 = undefined;
const splat_buffer = if (splat_buffer_candidate.len >= backup_buffer.len)
splat_buffer_candidate
else
@ -2010,31 +2058,29 @@ pub const Stream = struct {
const memset_len = @min(splat_buffer.len, splat);
const buf = splat_buffer[0..memset_len];
@memset(buf, pattern[0]);
iovecs[len] = .{ .buf = buf.ptr, .len = buf.len };
len += 1;
addWsaBuf(&iovecs, &len, buf);
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and len < iovecs.len) {
iovecs[len] = .{ .buf = splat_buffer.ptr, .len = splat_buffer.len };
addWsaBuf(&iovecs, &len, splat_buffer);
remaining_splat -= splat_buffer.len;
len += 1;
}
if (remaining_splat > 0 and iovecs.len - len != 0) {
iovecs[len] = .{ .buf = splat_buffer.ptr, .len = remaining_splat };
len += 1;
}
addWsaBuf(&iovecs, &len, splat_buffer[0..remaining_splat]);
},
else => for (0..splat - 1) |_| {
if (iovecs.len - len == 0) break;
iovecs[len] = .{
.buf = pattern.ptr,
.len = pattern.len,
};
len += 1;
else => for (0..@min(splat, iovecs.len - len)) |_| {
addWsaBuf(&iovecs, &len, pattern);
},
},
}
};
const n = sendBufs(w.stream.handle, iovecs[0..len]) catch |err| {
w.err = err;
return error.WriteFailed;
};
return io_w.consume(n);
}
fn sendBufs(handle: Stream.Handle, bufs: []windows.ws2_32.WSABUF) Error!u32 {
var n: u32 = undefined;
const rc = windows.ws2_32.WSASend(w.stream.handle, &iovecs, len, &n, 0, null, null);
const rc = windows.ws2_32.WSASend(handle, bufs.ptr, @intCast(bufs.len), &n, 0, null, null);
if (rc == windows.ws2_32.SOCKET_ERROR) switch (windows.ws2_32.WSAGetLastError()) {
.WSAECONNABORTED => return error.ConnectionResetByPeer,
.WSAECONNRESET => return error.ConnectionResetByPeer,
@ -2055,7 +2101,7 @@ pub const Stream = struct {
.WSA_OPERATION_ABORTED => unreachable, // not using overlapped I/O
else => |err| return windows.unexpectedWSAError(err),
};
return io_w.consume(n);
return n;
}
},
else => struct {
@ -2084,95 +2130,68 @@ pub const Stream = struct {
return .{ .handle = w.file_writer.file.handle };
}
fn addBuf(v: []posix.iovec_const, i: *@FieldType(posix.msghdr_const, "iovlen"), bytes: []const u8) void {
// OS checks ptr addr before length so zero length vectors must be omitted.
if (bytes.len == 0) return;
if (v.len - i.* == 0) return;
v[i.*] = .{ .base = bytes.ptr, .len = bytes.len };
i.* += 1;
}
fn drain(io_w: *io.Writer, data: []const []const u8, splat: usize) io.Writer.Error!usize {
const w: *Writer = @fieldParentPtr("interface", io_w);
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
const buffered = io_w.buffered();
var iovecs: [max_buffers_len]std.posix.iovec_const = undefined;
var msg: posix.msghdr_const = msg: {
var i: usize = 0;
if (buffered.len != 0) {
iovecs[i] = .{
.base = buffered.ptr,
.len = buffered.len,
};
i += 1;
}
for (data) |bytes| {
// OS checks ptr addr before length so zero length vectors must be omitted.
if (bytes.len == 0) continue;
iovecs[i] = .{
.base = bytes.ptr,
.len = bytes.len,
};
i += 1;
if (iovecs.len - i == 0) break;
}
break :msg .{
.name = null,
.namelen = 0,
.iov = &iovecs,
.iovlen = i,
.control = null,
.controllen = 0,
.flags = 0,
};
var iovecs: [max_buffers_len]posix.iovec_const = undefined;
var msg: posix.msghdr_const = .{
.name = null,
.namelen = 0,
.iov = &iovecs,
.iovlen = 0,
.control = null,
.controllen = 0,
.flags = 0,
};
if (msg.iovlen == 0) return 0;
addBuf(&iovecs, &msg.iovlen, buffered);
for (data[0 .. data.len - 1]) |bytes| addBuf(&iovecs, &msg.iovlen, bytes);
const pattern = data[data.len - 1];
switch (splat) {
0 => if (iovecs[msg.iovlen - 1].base == data[data.len - 1].ptr) {
msg.iovlen -= 1;
},
1 => {},
if (iovecs.len - msg.iovlen != 0) switch (splat) {
0 => {},
1 => addBuf(&iovecs, &msg.iovlen, pattern),
else => switch (pattern.len) {
0 => {},
1 => memset: {
// Replace the 1-byte buffer with a bigger one.
if (iovecs[msg.iovlen - 1].base == data[data.len - 1].ptr) msg.iovlen -= 1;
if (iovecs.len - msg.iovlen == 0) break :memset;
1 => {
const splat_buffer_candidate = io_w.buffer[io_w.end..];
var backup_buffer: [32]u8 = undefined;
var backup_buffer: [64]u8 = undefined;
const splat_buffer = if (splat_buffer_candidate.len >= backup_buffer.len)
splat_buffer_candidate
else
&backup_buffer;
if (splat_buffer.len == 0) break :memset;
const memset_len = @min(splat_buffer.len, splat);
const buf = splat_buffer[0..memset_len];
@memset(buf, pattern[0]);
iovecs[msg.iovlen] = .{ .base = buf.ptr, .len = buf.len };
msg.iovlen += 1;
addBuf(&iovecs, &msg.iovlen, buf);
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and iovecs.len - msg.iovlen != 0) {
assert(buf.len == splat_buffer.len);
iovecs[msg.iovlen] = .{ .base = splat_buffer.ptr, .len = splat_buffer.len };
msg.iovlen += 1;
addBuf(&iovecs, &msg.iovlen, splat_buffer);
remaining_splat -= splat_buffer.len;
}
if (remaining_splat > 0 and iovecs.len - msg.iovlen != 0) {
iovecs[msg.iovlen] = .{ .base = splat_buffer.ptr, .len = remaining_splat };
msg.iovlen += 1;
}
addBuf(&iovecs, &msg.iovlen, splat_buffer[0..remaining_splat]);
},
else => for (0..splat - 1) |_| {
if (iovecs.len - msg.iovlen == 0) break;
iovecs[msg.iovlen] = .{
.base = pattern.ptr,
.len = pattern.len,
};
msg.iovlen += 1;
else => for (0..@min(splat, iovecs.len - msg.iovlen)) |_| {
addBuf(&iovecs, &msg.iovlen, pattern);
},
},
}
};
const flags = posix.MSG.NOSIGNAL;
return io_w.consume(std.posix.sendmsg(w.file_writer.file.handle, &msg, flags) catch |err| {
return io_w.consume(posix.sendmsg(w.file_writer.file.handle, &msg, flags) catch |err| {
w.err = err;
return error.WriteFailed;
});
}
fn sendFile(io_w: *io.Writer, file_reader: *File.Reader, limit: io.Limit) io.Writer.FileError!usize {
const w: *Writer = @fieldParentPtr("interface", io_w);
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
const n = try w.file_writer.interface.sendFileHeader(io_w.buffered(), file_reader, limit);
return io_w.consume(n);
}
@ -2188,7 +2207,6 @@ pub const Stream = struct {
}
const max_buffers_len = 8;
const splat_buffer_len = 256;
};
pub const Server = struct {

View File

@ -208,7 +208,8 @@ test "listen on a port, send bytes, receive bytes" {
const socket = try net.tcpConnectToAddress(server_address);
defer socket.close();
_ = try socket.writer().writeAll("Hello world!");
var stream_writer = socket.writer(&.{});
try stream_writer.interface.writeAll("Hello world!");
}
};
@ -218,7 +219,8 @@ test "listen on a port, send bytes, receive bytes" {
var client = try server.accept();
defer client.stream.close();
var buf: [16]u8 = undefined;
const n = try client.stream.reader().read(&buf);
var stream_reader = client.stream.reader(&.{});
const n = try stream_reader.interface().readSliceShort(&buf);
try testing.expectEqual(@as(usize, 12), n);
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
@ -232,10 +234,10 @@ test "listen on an in use port" {
const localhost = try net.Address.parseIp("127.0.0.1", 0);
var server1 = try localhost.listen(.{ .reuse_port = true });
var server1 = try localhost.listen(.{ .reuse_address = true });
defer server1.deinit();
var server2 = try server1.listen_address.listen(.{ .reuse_port = true });
var server2 = try server1.listen_address.listen(.{ .reuse_address = true });
defer server2.deinit();
}
@ -299,7 +301,8 @@ test "listen on a unix socket, send bytes, receive bytes" {
const socket = try net.connectUnixSocket(path);
defer socket.close();
_ = try socket.writer().writeAll("Hello world!");
var stream_writer = socket.writer(&.{});
try stream_writer.interface.writeAll("Hello world!");
}
};
@ -309,13 +312,14 @@ test "listen on a unix socket, send bytes, receive bytes" {
var client = try server.accept();
defer client.stream.close();
var buf: [16]u8 = undefined;
const n = try client.stream.reader().read(&buf);
var stream_reader = client.stream.reader(&.{});
const n = try stream_reader.interface().readSliceShort(&buf);
try testing.expectEqual(@as(usize, 12), n);
try testing.expectEqualSlices(u8, "Hello world!", buf[0..n]);
}
test "listen on a unix socket with reuse_port option" {
test "listen on a unix socket with reuse_address option" {
if (!net.has_unix_sockets) return error.SkipZigTest;
// Windows doesn't implement reuse port option.
if (builtin.os.tag == .windows) return error.SkipZigTest;
@ -326,7 +330,7 @@ test "listen on a unix socket with reuse_port option" {
const socket_addr = try net.Address.initUnix(socket_path);
defer std.fs.cwd().deleteFile(socket_path) catch {};
var server = try socket_addr.listen(.{ .reuse_port = true });
var server = try socket_addr.listen(.{ .reuse_address = true });
server.deinit();
}

View File

@ -24,9 +24,51 @@ pub var handle: Handle = undefined;
/// A pointer to the EFI System Table that is passed to the EFI image's entry point.
pub var system_table: *tables.SystemTable = undefined;
/// UEFI's memory interfaces exclusively act on 4096-byte pages.
pub const Page = [4096]u8;
/// A handle to an event structure.
pub const Event = *opaque {};
pub const EventRegistration = *const opaque {};
pub const EventType = packed struct(u32) {
lo_context: u8 = 0,
/// If an event of this type is not already in the signaled state, then
/// the events NotificationFunction will be queued at the events NotifyTpl
/// whenever the event is being waited on via EFI_BOOT_SERVICES.WaitForEvent()
/// or EFI_BOOT_SERVICES.CheckEvent() .
wait: bool = false,
/// The events NotifyFunction is queued whenever the event is signaled.
signal: bool = false,
hi_context: u20 = 0,
/// The event is allocated from runtime memory. If an event is to be signaled
/// after the call to EFI_BOOT_SERVICES.ExitBootServices() the events data
/// structure and notification function need to be allocated from runtime
/// memory.
runtime: bool = false,
timer: bool = false,
/// This event should not be combined with any other event types. This event
/// type is functionally equivalent to the EFI_EVENT_GROUP_EXIT_BOOT_SERVICES
/// event group.
pub const signal_exit_boot_services: EventType = .{
.signal = true,
.lo_context = 1,
};
/// The event is to be notified by the system when SetVirtualAddressMap()
/// is performed. This event type is a composite of EVT_NOTIFY_SIGNAL,
/// EVT_RUNTIME, and EVT_RUNTIME_CONTEXT and should not be combined with
/// any other event types.
pub const signal_virtual_address_change: EventType = .{
.runtime = true,
.hi_context = 0x20000,
.signal = true,
.lo_context = 2,
};
};
/// The calling convention used for all external functions part of the UEFI API.
pub const cc: std.builtin.CallingConvention = switch (@import("builtin").target.cpu.arch) {
.x86_64 => .{ .x86_64_win = .{} },
@ -52,7 +94,11 @@ pub const IpAddress = extern union {
/// GUIDs are align(8) unless otherwise specified.
pub const Guid = extern struct {
time_low: u32,
comptime {
std.debug.assert(std.mem.Alignment.of(Guid) == .@"8");
}
time_low: u32 align(8),
time_mid: u16,
time_high_and_version: u16,
clock_seq_high_and_reserved: u8,
@ -60,7 +106,7 @@ pub const Guid = extern struct {
node: [6]u8,
/// Format GUID into hexadecimal lowercase xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
pub fn format(self: @This(), writer: *std.io.Writer) std.io.Writer.Error!void {
pub fn format(self: Guid, writer: *std.io.Writer) std.io.Writer.Error!void {
const time_low = @byteSwap(self.time_low);
const time_mid = @byteSwap(self.time_mid);
const time_high_and_version = @byteSwap(self.time_high_and_version);
@ -75,7 +121,7 @@ pub const Guid = extern struct {
});
}
pub fn eql(a: std.os.uefi.Guid, b: std.os.uefi.Guid) bool {
pub fn eql(a: Guid, b: Guid) bool {
return a.time_low == b.time_low and
a.time_mid == b.time_mid and
a.time_high_and_version == b.time_high_and_version and

View File

@ -28,14 +28,16 @@ const UefiPoolAllocator = struct {
const full_len = metadata_len + len;
var unaligned_ptr: [*]align(8) u8 = undefined;
if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, full_len, &unaligned_ptr) != .success) return null;
const unaligned_slice = uefi.system_table.boot_services.?.allocatePool(
uefi.efi_pool_memory_type,
full_len,
) catch return null;
const unaligned_addr = @intFromPtr(unaligned_ptr);
const unaligned_addr = @intFromPtr(unaligned_slice.ptr);
const aligned_addr = mem.alignForward(usize, unaligned_addr + @sizeOf(usize), ptr_align);
const aligned_ptr = unaligned_ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_ptr;
const aligned_ptr = unaligned_slice.ptr + (aligned_addr - unaligned_addr);
getHeader(aligned_ptr).* = unaligned_slice.ptr;
return aligned_ptr;
}
@ -76,7 +78,7 @@ const UefiPoolAllocator = struct {
) void {
_ = alignment;
_ = ret_addr;
_ = uefi.system_table.boot_services.?.freePool(getHeader(buf.ptr).*);
uefi.system_table.boot_services.?.freePool(getHeader(buf.ptr).*) catch unreachable;
}
};
@ -117,10 +119,12 @@ fn uefi_alloc(
std.debug.assert(@intFromEnum(alignment) <= 3);
var ptr: [*]align(8) u8 = undefined;
if (uefi.system_table.boot_services.?.allocatePool(uefi.efi_pool_memory_type, len, &ptr) != .success) return null;
const slice = uefi.system_table.boot_services.?.allocatePool(
uefi.efi_pool_memory_type,
len,
) catch return null;
return ptr;
return slice.ptr;
}
fn uefi_resize(
@ -161,5 +165,5 @@ fn uefi_free(
) void {
_ = alignment;
_ = ret_addr;
_ = uefi.system_table.boot_services.?.freePool(@alignCast(buf.ptr));
uefi.system_table.boot_services.?.freePool(@alignCast(buf.ptr)) catch unreachable;
}

View File

@ -1,3 +1,12 @@
const std = @import("std");
const uefi = std.os.uefi;
const Handle = uefi.Handle;
const Event = uefi.Event;
const Guid = uefi.Guid;
const cc = uefi.cc;
const math = std.math;
const assert = std.debug.assert;
pub const BootServices = @import("tables/boot_services.zig").BootServices;
pub const RuntimeServices = @import("tables/runtime_services.zig").RuntimeServices;
pub const ConfigurationTable = @import("tables/configuration_table.zig").ConfigurationTable;
@ -7,29 +16,90 @@ pub const TableHeader = @import("tables/table_header.zig").TableHeader;
pub const EventNotify = *const fn (event: Event, ctx: *anyopaque) callconv(cc) void;
pub const TimerDelay = enum(u32) {
timer_cancel,
timer_periodic,
timer_relative,
cancel,
periodic,
relative,
};
pub const MemoryType = enum(u32) {
pub const Oem = math.IntFittingRange(
0,
@intFromEnum(MemoryType.oem_end) - @intFromEnum(MemoryType.oem_start),
);
pub const Vendor = math.IntFittingRange(
0,
@intFromEnum(MemoryType.vendor_end) - @intFromEnum(MemoryType.vendor_start),
);
/// can only be allocated using .allocate_any_pages mode unless you are explicitly targeting an interface that states otherwise
reserved_memory_type,
loader_code,
loader_data,
boot_services_code,
boot_services_data,
/// can only be allocated using .allocate_any_pages mode unless you are explicitly targeting an interface that states otherwise
runtime_services_code,
/// can only be allocated using .allocate_any_pages mode unless you are explicitly targeting an interface that states otherwise
runtime_services_data,
conventional_memory,
unusable_memory,
/// can only be allocated using .allocate_any_pages mode unless you are explicitly targeting an interface that states otherwise
acpi_reclaim_memory,
/// can only be allocated using .allocate_any_pages mode unless you are explicitly targeting an interface that states otherwise
acpi_memory_nvs,
memory_mapped_io,
memory_mapped_io_port_space,
pal_code,
persistent_memory,
unaccepted_memory,
max_memory_type,
invalid_start,
invalid_end = 0x6FFFFFFF,
/// MemoryType values in the range 0x70000000..0x7FFFFFFF are reserved for OEM use.
oem_start = 0x70000000,
oem_end = 0x7FFFFFFF,
/// MemoryType values in the range 0x80000000..0xFFFFFFFF are reserved for use by UEFI
/// OS loaders that are provided by operating system vendors.
vendor_start = 0x80000000,
vendor_end = 0xFFFFFFFF,
_,
pub fn fromOem(value: Oem) MemoryType {
const oem_start = @intFromEnum(MemoryType.oem_start);
return @enumFromInt(oem_start + value);
}
pub fn toOem(memtype: MemoryType) ?Oem {
const as_int = @intFromEnum(memtype);
const oem_start = @intFromEnum(MemoryType.oem_start);
if (as_int < oem_start) return null;
if (as_int > @intFromEnum(MemoryType.oem_end)) return null;
return @truncate(as_int - oem_start);
}
pub fn fromVendor(value: Vendor) MemoryType {
const vendor_start = @intFromEnum(MemoryType.vendor_start);
return @enumFromInt(vendor_start + value);
}
pub fn toVendor(memtype: MemoryType) ?Vendor {
const as_int = @intFromEnum(memtype);
const vendor_start = @intFromEnum(MemoryType.vendor_start);
if (as_int < @intFromEnum(MemoryType.vendor_end)) return null;
if (as_int > @intFromEnum(MemoryType.vendor_end)) return null;
return @truncate(as_int - vendor_start);
}
pub fn format(self: MemoryType, w: *std.io.Writer) std.io.WriteError!void {
if (self.toOem()) |oemval|
try w.print("OEM({X})", .{oemval})
else if (self.toVendor()) |vendorval|
try w.print("Vendor({X})", .{vendorval})
else if (std.enums.tagName(MemoryType, self)) |name|
try w.print("{s}", .{name})
else
try w.print("INVALID({X})", .{@intFromEnum(self)});
}
};
pub const MemoryDescriptorAttribute = packed struct(u64) {
@ -51,6 +121,8 @@ pub const MemoryDescriptorAttribute = packed struct(u64) {
memory_runtime: bool,
};
pub const MemoryMapKey = enum(usize) { _ };
pub const MemoryDescriptor = extern struct {
type: MemoryType,
physical_start: u64,
@ -59,20 +131,121 @@ pub const MemoryDescriptor = extern struct {
attribute: MemoryDescriptorAttribute,
};
pub const MemoryMapInfo = struct {
key: MemoryMapKey,
descriptor_size: usize,
descriptor_version: u32,
/// The number of descriptors in the map.
len: usize,
};
pub const MemoryMapSlice = struct {
info: MemoryMapInfo,
ptr: [*]align(@alignOf(MemoryDescriptor)) u8,
pub fn iterator(self: MemoryMapSlice) MemoryDescriptorIterator {
return .{ .ctx = self };
}
pub fn get(self: MemoryMapSlice, index: usize) ?*MemoryDescriptor {
if (index >= self.info.len) return null;
return self.getUnchecked(index);
}
pub fn getUnchecked(self: MemoryMapSlice, index: usize) *MemoryDescriptor {
const offset: usize = index * self.info.descriptor_size;
return @alignCast(@ptrCast(self.ptr[offset..]));
}
};
pub const MemoryDescriptorIterator = struct {
ctx: MemoryMapSlice,
index: usize = 0,
pub fn next(self: *MemoryDescriptorIterator) ?*MemoryDescriptor {
const md = self.ctx.get(self.index) orelse return null;
self.index += 1;
return md;
}
};
pub const LocateSearchType = enum(u32) {
all_handles,
by_register_notify,
by_protocol,
};
pub const OpenProtocolAttributes = packed struct(u32) {
by_handle_protocol: bool = false,
get_protocol: bool = false,
test_protocol: bool = false,
by_child_controller: bool = false,
by_driver: bool = false,
exclusive: bool = false,
reserved: u26 = 0,
pub const LocateSearch = union(LocateSearchType) {
all_handles,
by_register_notify: uefi.EventRegistration,
by_protocol: *const Guid,
};
pub const OpenProtocolAttributes = enum(u32) {
pub const Bits = packed struct(u32) {
by_handle_protocol: bool = false,
get_protocol: bool = false,
test_protocol: bool = false,
by_child_controller: bool = false,
by_driver: bool = false,
exclusive: bool = false,
reserved: u26 = 0,
};
by_handle_protocol = @bitCast(Bits{ .by_handle_protocol = true }),
get_protocol = @bitCast(Bits{ .get_protocol = true }),
test_protocol = @bitCast(Bits{ .test_protocol = true }),
by_child_controller = @bitCast(Bits{ .by_child_controller = true }),
by_driver = @bitCast(Bits{ .by_driver = true }),
by_driver_exclusive = @bitCast(Bits{ .by_driver = true, .exclusive = true }),
exclusive = @bitCast(Bits{ .exclusive = true }),
_,
pub fn fromBits(bits: Bits) OpenProtocolAttributes {
return @bitCast(bits);
}
pub fn toBits(self: OpenProtocolAttributes) Bits {
return @bitCast(self);
}
};
pub const OpenProtocolArgs = union(OpenProtocolAttributes) {
/// Used in the implementation of `handleProtocol`.
by_handle_protocol: struct { agent: ?Handle = null, controller: ?Handle = null },
/// Used by a driver to get a protocol interface from a handle. Care must be
/// taken when using this open mode because the driver that opens a protocol
/// interface in this manner will not be informed if the protocol interface
/// is uninstalled or reinstalled. The caller is also not required to close
/// the protocol interface with `closeProtocol`.
get_protocol: struct { agent: ?Handle = null, controller: ?Handle = null },
/// Used by a driver to test for the existence of a protocol interface on a
/// handle. The caller only use the return status code. The caller is also
/// not required to close the protocol interface with `closeProtocol`.
test_protocol: struct { agent: ?Handle = null, controller: ?Handle = null },
/// Used by bus drivers to show that a protocol interface is being used by one
/// of the child controllers of a bus. This information is used by
/// `BootServices.connectController` to recursively connect all child controllers
/// and by `BootServices.disconnectController` to get the list of child
/// controllers that a bus driver created.
by_child_controller: struct { agent: Handle, controller: Handle },
/// Used by a driver to gain access to a protocol interface. When this mode
/// is used, the drivers Stop() function will be called by
/// `BootServices.disconnectController` if the protocol interface is reinstalled
/// or uninstalled. Once a protocol interface is opened by a driver with this
/// attribute, no other drivers will be allowed to open the same protocol interface
/// with the `.by_driver` attribute.
by_driver: struct { agent: Handle, controller: Handle },
/// Used by a driver to gain exclusive access to a protocol interface. If any
/// other drivers have the protocol interface opened with an attribute of
/// `.by_driver`, then an attempt will be made to remove them with
/// `BootServices.disconnectController`.
by_driver_exclusive: struct { agent: Handle, controller: Handle },
/// Used by applications to gain exclusive access to a protocol interface. If
/// any drivers have the protocol interface opened with an attribute of
/// `.by_driver`, then an attempt will be made to remove them by calling the
/// drivers Stop() function.
exclusive: struct { agent: Handle, controller: ?Handle = null },
};
pub const ProtocolInformationEntry = extern struct {
@ -83,19 +256,25 @@ pub const ProtocolInformationEntry = extern struct {
};
pub const InterfaceType = enum(u32) {
efi_native_interface,
native,
};
pub const AllocateLocation = union(AllocateType) {
any,
max_address: [*]align(4096) uefi.Page,
address: [*]align(4096) uefi.Page,
};
pub const AllocateType = enum(u32) {
allocate_any_pages,
allocate_max_address,
allocate_address,
any,
max_address,
address,
};
pub const PhysicalAddress = u64;
pub const CapsuleHeader = extern struct {
capsule_guid: Guid align(8),
capsule_guid: Guid,
header_size: u32,
flags: u32,
capsule_image_size: u32,
@ -110,13 +289,13 @@ pub const UefiCapsuleBlockDescriptor = extern struct {
};
pub const ResetType = enum(u32) {
reset_cold,
reset_warm,
reset_shutdown,
reset_platform_specific,
cold,
warm,
shutdown,
platform_specific,
};
pub const global_variable align(8) = Guid{
pub const global_variable = Guid{
.time_low = 0x8be4df61,
.time_mid = 0x93ca,
.time_high_and_version = 0x11d2,
@ -128,10 +307,3 @@ pub const global_variable align(8) = Guid{
test {
std.testing.refAllDeclsRecursive(@This());
}
const std = @import("std");
const uefi = std.os.uefi;
const Handle = uefi.Handle;
const Event = uefi.Event;
const Guid = uefi.Guid;
const cc = uefi.cc;

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,7 @@ pub const ConfigurationTable = extern struct {
vendor_guid: Guid,
vendor_table: *anyopaque,
pub const acpi_20_table_guid align(8) = Guid{
pub const acpi_20_table_guid: Guid = .{
.time_low = 0x8868e871,
.time_mid = 0xe4f1,
.time_high_and_version = 0x11d3,
@ -13,7 +13,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0x22,
.node = [_]u8{ 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81 },
};
pub const acpi_10_table_guid align(8) = Guid{
pub const acpi_10_table_guid: Guid = .{
.time_low = 0xeb9d2d30,
.time_mid = 0x2d88,
.time_high_and_version = 0x11d3,
@ -21,7 +21,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0x16,
.node = [_]u8{ 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d },
};
pub const sal_system_table_guid align(8) = Guid{
pub const sal_system_table_guid: Guid = .{
.time_low = 0xeb9d2d32,
.time_mid = 0x2d88,
.time_high_and_version = 0x113d,
@ -29,7 +29,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0x16,
.node = [_]u8{ 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d },
};
pub const smbios_table_guid align(8) = Guid{
pub const smbios_table_guid: Guid = .{
.time_low = 0xeb9d2d31,
.time_mid = 0x2d88,
.time_high_and_version = 0x11d3,
@ -37,7 +37,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0x16,
.node = [_]u8{ 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d },
};
pub const smbios3_table_guid align(8) = Guid{
pub const smbios3_table_guid: Guid = .{
.time_low = 0xf2fd1544,
.time_mid = 0x9794,
.time_high_and_version = 0x4a2c,
@ -45,7 +45,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0x2e,
.node = [_]u8{ 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 },
};
pub const mps_table_guid align(8) = Guid{
pub const mps_table_guid: Guid = .{
.time_low = 0xeb9d2d2f,
.time_mid = 0x2d88,
.time_high_and_version = 0x11d3,
@ -53,7 +53,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0x16,
.node = [_]u8{ 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d },
};
pub const json_config_data_table_guid align(8) = Guid{
pub const json_config_data_table_guid: Guid = .{
.time_low = 0x87367f87,
.time_mid = 0x1119,
.time_high_and_version = 0x41ce,
@ -61,7 +61,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0xec,
.node = [_]u8{ 0x8b, 0xe0, 0x11, 0x1f, 0x55, 0x8a },
};
pub const json_capsule_data_table_guid align(8) = Guid{
pub const json_capsule_data_table_guid: Guid = .{
.time_low = 0x35e7a725,
.time_mid = 0x8dd2,
.time_high_and_version = 0x4cac,
@ -69,7 +69,7 @@ pub const ConfigurationTable = extern struct {
.clock_seq_low = 0x11,
.node = [_]u8{ 0x33, 0xcd, 0xa8, 0x10, 0x90, 0x56 },
};
pub const json_capsule_result_table_guid align(8) = Guid{
pub const json_capsule_result_table_guid: Guid = .{
.time_low = 0xdbc461c3,
.time_mid = 0xb3de,
.time_high_and_version = 0x422a,

View File

@ -6,10 +6,12 @@ const Time = uefi.Time;
const TimeCapabilities = uefi.TimeCapabilities;
const Status = uefi.Status;
const MemoryDescriptor = uefi.tables.MemoryDescriptor;
const MemoryMapSlice = uefi.tables.MemoryMapSlice;
const ResetType = uefi.tables.ResetType;
const CapsuleHeader = uefi.tables.CapsuleHeader;
const PhysicalAddress = uefi.tables.PhysicalAddress;
const cc = uefi.cc;
const Error = Status.Error;
/// Runtime services are provided by the firmware before and after exitBootServices has been called.
///
@ -23,50 +25,511 @@ pub const RuntimeServices = extern struct {
hdr: TableHeader,
/// Returns the current time and date information, and the time-keeping capabilities of the hardware platform.
getTime: *const fn (time: *uefi.Time, capabilities: ?*TimeCapabilities) callconv(cc) Status,
_getTime: *const fn (time: *Time, capabilities: ?*TimeCapabilities) callconv(cc) Status,
/// Sets the current local time and date information
setTime: *const fn (time: *uefi.Time) callconv(cc) Status,
_setTime: *const fn (time: *const Time) callconv(cc) Status,
/// Returns the current wakeup alarm clock setting
getWakeupTime: *const fn (enabled: *bool, pending: *bool, time: *uefi.Time) callconv(cc) Status,
_getWakeupTime: *const fn (enabled: *bool, pending: *bool, time: *Time) callconv(cc) Status,
/// Sets the system wakeup alarm clock time
setWakeupTime: *const fn (enable: *bool, time: ?*uefi.Time) callconv(cc) Status,
_setWakeupTime: *const fn (enable: bool, time: ?*const Time) callconv(cc) Status,
/// Changes the runtime addressing mode of EFI firmware from physical to virtual.
setVirtualAddressMap: *const fn (mmap_size: usize, descriptor_size: usize, descriptor_version: u32, virtual_map: [*]MemoryDescriptor) callconv(cc) Status,
_setVirtualAddressMap: *const fn (mmap_size: usize, descriptor_size: usize, descriptor_version: u32, virtual_map: [*]align(@alignOf(MemoryDescriptor)) u8) callconv(cc) Status,
/// Determines the new virtual address that is to be used on subsequent memory accesses.
convertPointer: *const fn (debug_disposition: usize, address: **anyopaque) callconv(cc) Status,
_convertPointer: *const fn (debug_disposition: DebugDisposition, address: *?*anyopaque) callconv(cc) Status,
/// Returns the value of a variable.
getVariable: *const fn (var_name: [*:0]const u16, vendor_guid: *align(8) const Guid, attributes: ?*u32, data_size: *usize, data: ?*anyopaque) callconv(cc) Status,
_getVariable: *const fn (var_name: [*:0]const u16, vendor_guid: *const Guid, attributes: ?*VariableAttributes, data_size: *usize, data: ?*anyopaque) callconv(cc) Status,
/// Enumerates the current variable names.
getNextVariableName: *const fn (var_name_size: *usize, var_name: [*:0]u16, vendor_guid: *align(8) Guid) callconv(cc) Status,
_getNextVariableName: *const fn (var_name_size: *usize, var_name: ?[*:0]const u16, vendor_guid: *Guid) callconv(cc) Status,
/// Sets the value of a variable.
setVariable: *const fn (var_name: [*:0]const u16, vendor_guid: *align(8) const Guid, attributes: u32, data_size: usize, data: *anyopaque) callconv(cc) Status,
_setVariable: *const fn (var_name: [*:0]const u16, vendor_guid: *const Guid, attributes: VariableAttributes, data_size: usize, data: [*]const u8) callconv(cc) Status,
/// Return the next high 32 bits of the platform's monotonic counter
getNextHighMonotonicCount: *const fn (high_count: *u32) callconv(cc) Status,
_getNextHighMonotonicCount: *const fn (high_count: *u32) callconv(cc) Status,
/// Resets the entire platform.
resetSystem: *const fn (reset_type: ResetType, reset_status: Status, data_size: usize, reset_data: ?*const anyopaque) callconv(cc) noreturn,
_resetSystem: *const fn (reset_type: ResetType, reset_status: Status, data_size: usize, reset_data: ?[*]const u16) callconv(cc) noreturn,
/// Passes capsules to the firmware with both virtual and physical mapping.
/// Depending on the intended consumption, the firmware may process the capsule immediately.
/// If the payload should persist across a system reset, the reset value returned from
/// `queryCapsuleCapabilities` must be passed into resetSystem and will cause the capsule
/// to be processed by the firmware as part of the reset process.
updateCapsule: *const fn (capsule_header_array: **CapsuleHeader, capsule_count: usize, scatter_gather_list: PhysicalAddress) callconv(cc) Status,
_updateCapsule: *const fn (capsule_header_array: [*]*const CapsuleHeader, capsule_count: usize, scatter_gather_list: PhysicalAddress) callconv(cc) Status,
/// Returns if the capsule can be supported via `updateCapsule`
queryCapsuleCapabilities: *const fn (capsule_header_array: **CapsuleHeader, capsule_count: usize, maximum_capsule_size: *usize, reset_type: ResetType) callconv(cc) Status,
_queryCapsuleCapabilities: *const fn (capsule_header_array: [*]*const CapsuleHeader, capsule_count: usize, maximum_capsule_size: *usize, reset_type: *ResetType) callconv(cc) Status,
/// Returns information about the EFI variables
queryVariableInfo: *const fn (attributes: *u32, maximum_variable_storage_size: *u64, remaining_variable_storage_size: *u64, maximum_variable_size: *u64) callconv(cc) Status,
_queryVariableInfo: *const fn (attributes: VariableAttributes, maximum_variable_storage_size: *u64, remaining_variable_storage_size: *u64, maximum_variable_size: *u64) callconv(cc) Status,
pub const GetTimeError = uefi.UnexpectedError || error{
DeviceError,
Unsupported,
};
pub const SetTimeError = uefi.UnexpectedError || error{
DeviceError,
Unsupported,
};
pub const GetWakeupTimeError = uefi.UnexpectedError || error{
DeviceError,
Unsupported,
};
pub const SetWakeupTimeError = uefi.UnexpectedError || error{
InvalidParameter,
DeviceError,
Unsupported,
};
pub const SetVirtualAddressMapError = uefi.UnexpectedError || error{
Unsupported,
NoMapping,
NotFound,
};
pub const ConvertPointerError = uefi.UnexpectedError || error{
InvalidParameter,
Unsupported,
};
pub const GetVariableSizeError = uefi.UnexpectedError || error{
DeviceError,
Unsupported,
};
pub const GetVariableError = GetVariableSizeError || error{
BufferTooSmall,
};
pub const SetVariableError = uefi.UnexpectedError || error{
InvalidParameter,
OutOfResources,
DeviceError,
WriteProtected,
SecurityViolation,
NotFound,
Unsupported,
};
pub const GetNextHighMonotonicCountError = uefi.UnexpectedError || error{
DeviceError,
Unsupported,
};
pub const UpdateCapsuleError = uefi.UnexpectedError || error{
InvalidParameter,
DeviceError,
Unsupported,
OutOfResources,
};
pub const QueryCapsuleCapabilitiesError = uefi.UnexpectedError || error{
Unsupported,
OutOfResources,
};
pub const QueryVariableInfoError = uefi.UnexpectedError || error{
InvalidParameter,
Unsupported,
};
/// Returns the current time and the time capabilities of the platform.
pub fn getTime(
self: *const RuntimeServices,
) GetTimeError!struct { Time, TimeCapabilities } {
var time: Time = undefined;
var capabilities: TimeCapabilities = undefined;
switch (self._getTime(&time, &capabilities)) {
.success => return .{ time, capabilities },
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn setTime(self: *RuntimeServices, time: *const Time) SetTimeError!void {
switch (self._setTime(time)) {
.success => {},
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub const GetWakeupTime = struct {
enabled: bool,
pending: bool,
time: Time,
};
pub fn getWakeupTime(
self: *const RuntimeServices,
) GetWakeupTimeError!GetWakeupTime {
var result: GetWakeupTime = undefined;
switch (self._getWakeupTime(
&result.enabled,
&result.pending,
&result.time,
)) {
.success => return result,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub const SetWakeupTime = union(enum) {
enabled: *const Time,
disabled,
};
pub fn setWakeupTime(
self: *RuntimeServices,
set: SetWakeupTime,
) SetWakeupTimeError!void {
switch (self._setWakeupTime(
set != .disabled,
if (set == .enabled) set.enabled else null,
)) {
.success => {},
.invalid_parameter => return error.InvalidParameter,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn setVirtualAddressMap(
self: *RuntimeServices,
map: MemoryMapSlice,
) SetVirtualAddressMapError!void {
switch (self._setVirtualAddressMap(
map.info.len * map.info.descriptor_size,
map.info.descriptor_size,
map.info.descriptor_version,
@ptrCast(map.ptr),
)) {
.success => {},
.unsupported => return error.Unsupported,
.no_mapping => return error.NoMapping,
.not_found => return error.NotFound,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn convertPointer(
self: *const RuntimeServices,
comptime disposition: DebugDisposition,
cvt: @FieldType(PointerConversion, @tagName(disposition)),
) ConvertPointerError!?@FieldType(PointerConversion, @tagName(disposition)) {
var pointer = cvt;
switch (self._convertPointer(disposition, @ptrCast(&pointer))) {
.success => return pointer,
.not_found => return null,
.invalid_parameter => return error.InvalidParameter,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
/// Returns the length of the variable's data and its attributes.
pub fn getVariableSize(
self: *const RuntimeServices,
name: [*:0]const u16,
guid: *const Guid,
) GetVariableSizeError!?struct { usize, VariableAttributes } {
var size: usize = 0;
var attrs: VariableAttributes = undefined;
switch (self._getVariable(
name,
guid,
&attrs,
&size,
null,
)) {
.buffer_too_small => return .{ size, attrs },
.not_found => return null,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
/// To determine the minimum necessary buffer size for the variable, call
/// `getVariableSize` first.
pub fn getVariable(
self: *const RuntimeServices,
name: [*:0]const u16,
guid: *const Guid,
buffer: []u8,
) GetVariableError!?struct { []u8, VariableAttributes } {
var attrs: VariableAttributes = undefined;
var len = buffer.len;
switch (self._getVariable(
name,
guid,
&attrs,
&len,
buffer.ptr,
)) {
.success => return .{ buffer[0..len], attrs },
.not_found => return null,
.buffer_too_small => return error.BufferTooSmall,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn variableNameIterator(
self: *const RuntimeServices,
buffer: []u16,
) VariableNameIterator {
buffer[0] = 0;
return .{
.services = self,
.buffer = buffer,
.guid = undefined,
};
}
pub fn setVariable(
self: *RuntimeServices,
name: [*:0]const u16,
guid: *const Guid,
attributes: VariableAttributes,
data: []const u8,
) SetVariableError!void {
switch (self._setVariable(
name,
guid,
attributes,
data.len,
data.ptr,
)) {
.success => {},
.invalid_parameter => return error.InvalidParameter,
.out_of_resources => return error.OutOfResources,
.device_error => return error.DeviceError,
.write_protected => return error.WriteProtected,
.security_violation => return error.SecurityViolation,
.not_found => return error.NotFound,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn getNextHighMonotonicCount(self: *const RuntimeServices) GetNextHighMonotonicCountError!u32 {
var cnt: u32 = undefined;
switch (self._getNextHighMonotonicCount(&cnt)) {
.success => return cnt,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn resetSystem(
self: *RuntimeServices,
reset_type: ResetType,
reset_status: Status,
data: ?[]align(2) const u8,
) noreturn {
self._resetSystem(
reset_type,
reset_status,
if (data) |d| d.len else 0,
if (data) |d| @alignCast(@ptrCast(d.ptr)) else null,
);
}
pub fn updateCapsule(
self: *RuntimeServices,
capsules: []*const CapsuleHeader,
scatter_gather_list: PhysicalAddress,
) UpdateCapsuleError!void {
switch (self._updateCapsule(
capsules.ptr,
capsules.len,
scatter_gather_list,
)) {
.success => {},
.invalid_parameter => return error.InvalidParameter,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
.out_of_resources => return error.OutOfResources,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn queryCapsuleCapabilities(
self: *const RuntimeServices,
capsules: []*const CapsuleHeader,
) QueryCapsuleCapabilitiesError!struct { u64, ResetType } {
var max_capsule_size: u64 = undefined;
var reset_type: ResetType = undefined;
switch (self._queryCapsuleCapabilities(
capsules.ptr,
capsules.len,
&max_capsule_size,
&reset_type,
)) {
.success => return .{ max_capsule_size, reset_type },
.unsupported => return error.Unsupported,
.out_of_resources => return error.OutOfResources,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub fn queryVariableInfo(
self: *const RuntimeServices,
// Note: .append_write is ignored
attributes: VariableAttributes,
) QueryVariableInfoError!VariableInfo {
var res: VariableInfo = undefined;
switch (self._queryVariableInfo(
attributes,
&res.max_variable_storage_size,
&res.remaining_variable_storage_size,
&res.max_variable_size,
)) {
.success => return res,
.invalid_parameter => return error.InvalidParameter,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
pub const DebugDisposition = enum(usize) {
const Bits = packed struct(usize) {
optional_ptr: bool = false,
_pad: std.meta.Int(.unsigned, @bitSizeOf(usize) - 1) = 0,
};
pointer = @bitCast(Bits{}),
optional = @bitCast(Bits{ .optional_ptr = true }),
_,
};
pub const PointerConversion = union(DebugDisposition) {
pointer: *anyopaque,
optional: ?*anyopaque,
};
pub const VariableAttributes = packed struct(u32) {
non_volatile: bool = false,
bootservice_access: bool = false,
runtime_access: bool = false,
hardware_error_record: bool = false,
/// Note: deprecated and should be considered reserved.
authenticated_write_access: bool = false,
time_based_authenticated_write_access: bool = false,
append_write: bool = false,
/// Indicates that the variable payload begins with a EFI_VARIABLE_AUTHENTICATION_3
/// structure, and potentially more structures as indicated by fields of
/// this structure.
enhanced_authenticated_access: bool = false,
_pad: u24 = 0,
};
pub const VariableAuthentication3 = extern struct {
version: u8 = 1,
type: Type,
metadata_size: u32,
flags: Flags,
pub fn payloadConst(self: *const VariableAuthentication3) []const u8 {
return @constCast(self).payload();
}
pub fn payload(self: *VariableAuthentication3) []u8 {
var ptr: [*]u8 = @ptrCast(self);
return ptr[@sizeOf(VariableAuthentication3)..self.metadata_size];
}
pub const Flags = packed struct(u32) {
update_cert: bool = false,
_pad: u31 = 0,
};
pub const Type = enum(u8) {
timestamp = 1,
nonce = 2,
_,
};
};
pub const VariableInfo = struct {
max_variable_storage_size: u64,
remaining_variable_storage_size: u64,
max_variable_size: u64,
};
pub const VariableNameIterator = struct {
pub const NextSizeError = uefi.UnexpectedError || error{
DeviceError,
Unsupported,
};
pub const IterateVariableNameError = NextSizeError || error{
BufferTooSmall,
};
services: *const RuntimeServices,
buffer: []u16,
guid: Guid,
pub fn nextSize(self: *VariableNameIterator) NextSizeError!?usize {
var len: usize = 0;
switch (self.services._getNextVariableName(
&len,
null,
&self.guid,
)) {
.buffer_too_small => return len,
.not_found => return null,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
/// Call `nextSize` to get the length of the next variable name and check
/// if `buffer` is large enough to hold the name.
pub fn next(
self: *VariableNameIterator,
) IterateVariableNameError!?[:0]const u16 {
var len = self.buffer.len;
switch (self.services._getNextVariableName(
&len,
@ptrCast(self.buffer.ptr),
&self.guid,
)) {
.success => return self.buffer[0 .. len - 1 :0],
.not_found => return null,
.buffer_too_small => return error.BufferTooSmall,
.device_error => return error.DeviceError,
.unsupported => return error.Unsupported,
else => |status| return uefi.unexpectedStatus(status),
}
}
};
pub const signature: u64 = 0x56524553544e5552;
};

View File

@ -146,7 +146,7 @@ pub fn OpenFile(sub_path_w: []const u16, options: OpenFileOptions) OpenError!HAN
// call has failed. There is not really a sane way to handle
// this other than retrying the creation after the OS finishes
// the deletion.
std.time.sleep(std.time.ns_per_ms);
std.Thread.sleep(std.time.ns_per_ms);
continue;
},
.VIRUS_INFECTED, .VIRUS_DELETED => return error.AntivirusInterference,
@ -2848,9 +2848,6 @@ pub const STD_OUTPUT_HANDLE = maxInt(DWORD) - 11 + 1;
/// The standard error device. Initially, this is the active console screen buffer, CONOUT$.
pub const STD_ERROR_HANDLE = maxInt(DWORD) - 12 + 1;
/// Deprecated; use `std.builtin.CallingConvention.winapi` instead.
pub const WINAPI: std.builtin.CallingConvention = .winapi;
pub const BOOL = c_int;
pub const BOOLEAN = BYTE;
pub const BYTE = u8;

View File

@ -772,12 +772,12 @@ pub fn exit(status: u8) noreturn {
if (native_os == .uefi) {
const uefi = std.os.uefi;
// exit() is only available if exitBootServices() has not been called yet.
// This call to exit should not fail, so we don't care about its return value.
// This call to exit should not fail, so we catch-ignore errors.
if (uefi.system_table.boot_services) |bs| {
_ = bs.exit(uefi.handle, @enumFromInt(status), 0, null);
bs.exit(uefi.handle, @enumFromInt(status), null) catch {};
}
// If we can't exit, reboot the system instead.
uefi.system_table.runtime_services.resetSystem(.reset_cold, @enumFromInt(status), 0, null);
uefi.system_table.runtime_services.resetSystem(.cold, @enumFromInt(status), null);
}
system.exit(status);
}
@ -6088,6 +6088,9 @@ pub const SendError = error{
/// The local network interface used to reach the destination is down.
NetworkSubsystemFailed,
/// The destination address is not listening.
ConnectionRefused,
} || UnexpectedError;
pub const SendMsgError = SendError || error{
@ -6315,7 +6318,6 @@ pub fn send(
error.AddressNotAvailable => unreachable,
error.SocketNotConnected => unreachable,
error.UnreachableAddress => unreachable,
error.ConnectionRefused => unreachable,
else => |e| return e,
};
}
@ -6986,6 +6988,20 @@ pub fn tcsetpgrp(handle: fd_t, pgrp: pid_t) TermioSetPgrpError!void {
}
}
pub const SetSidError = error{
/// The calling process is already a process group leader, or the process group ID of a process other than the calling process matches the process ID of the calling process.
PermissionDenied,
} || UnexpectedError;
pub fn setsid() SetSidError!pid_t {
const rc = system.setsid();
switch (errno(rc)) {
.SUCCESS => return rc,
.PERM => return error.PermissionDenied,
else => |err| return unexpectedErrno(err),
}
}
pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
const rc = system.signalfd(fd, mask, flags);
switch (errno(rc)) {

View File

@ -1161,7 +1161,7 @@ test "POSIX file locking with fcntl" {
posix.exit(0);
} else {
// parent waits for child to get shared lock:
std.time.sleep(1 * std.time.ns_per_ms);
std.Thread.sleep(1 * std.time.ns_per_ms);
// parent expects deadlock when attempting to upgrade the shared lock to exclusive:
struct_flock.start = 1;
struct_flock.type = posix.F.WRLCK;

View File

@ -8,9 +8,6 @@ const posix = std.posix;
pub const epoch = @import("time/epoch.zig");
/// Deprecated: moved to std.Thread.sleep
pub const sleep = std.Thread.sleep;
/// Get a calendar timestamp, in seconds, relative to UTC 1970-01-01.
/// Precision of timing depends on the hardware and operating system.
/// The return value is signed because it is possible to have a date that is
@ -59,9 +56,7 @@ pub fn nanoTimestamp() i128 {
return ns;
},
.uefi => {
var value: std.os.uefi.Time = undefined;
const status = std.os.uefi.system_table.runtime_services.getTime(&value, null);
assert(status == .success);
const value, _ = std.os.uefi.system_table.runtime_services.getTime() catch return 0;
return value.toEpoch();
},
else => {
@ -144,9 +139,7 @@ pub const Instant = struct {
return .{ .timestamp = ns };
},
.uefi => {
var value: std.os.uefi.Time = undefined;
const status = std.os.uefi.system_table.runtime_services.getTime(&value, null);
if (status != .success) return error.Unsupported;
const value, _ = std.os.uefi.system_table.runtime_services.getTime() catch return error.Unsupported;
return .{ .timestamp = value.toEpoch() };
},
// On darwin, use UPTIME_RAW instead of MONOTONIC as it ticks while

View File

@ -972,8 +972,6 @@ pub fn utf16LeToUtf8ArrayList(result: *std.ArrayList(u8), utf16le: []const u16)
return utf16LeToUtf8ArrayListImpl(result, utf16le, .cannot_encode_surrogate_half);
}
pub const utf16leToUtf8Alloc = @compileError("deprecated; renamed to utf16LeToUtf8Alloc");
/// Caller must free returned memory.
pub fn utf16LeToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) Utf16LeToUtf8AllocError![]u8 {
// optimistically guess that it will all be ascii.
@ -984,8 +982,6 @@ pub fn utf16LeToUtf8Alloc(allocator: mem.Allocator, utf16le: []const u16) Utf16L
return result.toOwnedSlice();
}
pub const utf16leToUtf8AllocZ = @compileError("deprecated; renamed to utf16LeToUtf8AllocZ");
/// Caller must free returned memory.
pub fn utf16LeToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) Utf16LeToUtf8AllocError![:0]u8 {
// optimistically guess that it will all be ascii (and allocate space for the null terminator)
@ -1054,8 +1050,6 @@ fn utf16LeToUtf8Impl(utf8: []u8, utf16le: []const u16, comptime surrogates: Surr
return dest_index;
}
pub const utf16leToUtf8 = @compileError("deprecated; renamed to utf16LeToUtf8");
pub fn utf16LeToUtf8(utf8: []u8, utf16le: []const u16) Utf16LeToUtf8Error!usize {
return utf16LeToUtf8Impl(utf8, utf16le, .cannot_encode_surrogate_half);
}
@ -1175,8 +1169,6 @@ pub fn utf8ToUtf16LeAlloc(allocator: mem.Allocator, utf8: []const u8) error{ Inv
return result.toOwnedSlice();
}
pub const utf8ToUtf16LeWithNull = @compileError("deprecated; renamed to utf8ToUtf16LeAllocZ");
pub fn utf8ToUtf16LeAllocZ(allocator: mem.Allocator, utf8: []const u8) error{ InvalidUtf8, OutOfMemory }![:0]u16 {
// optimistically guess that it will not require surrogate pairs
var result = try std.ArrayList(u16).initCapacity(allocator, utf8.len + 1);
@ -1487,8 +1479,6 @@ fn formatUtf16Le(utf16le: []const u16, writer: *std.io.Writer) std.io.Writer.Err
try writer.writeAll(buf[0..u8len]);
}
pub const fmtUtf16le = @compileError("deprecated; renamed to fmtUtf16Le");
/// Return a Formatter for a (potentially ill-formed) UTF-16 LE string,
/// which will be converted to UTF-8 during formatting.
/// Unpaired surrogates are replaced by the replacement character (U+FFFD).

View File

@ -200,18 +200,6 @@ pub fn nonSimdCall3(func: fn (usize, usize, usize, usize) usize, a1: usize, a2:
return doClientRequestExpr(0, .ClientCall3, @intFromPtr(func), a1, a2, a3, 0);
}
/// Deprecated: use `nonSimdCall0`
pub const nonSIMDCall0 = nonSimdCall0;
/// Deprecated: use `nonSimdCall1`
pub const nonSIMDCall1 = nonSimdCall1;
/// Deprecated: use `nonSimdCall2`
pub const nonSIMDCall2 = nonSimdCall2;
/// Deprecated: use `nonSimdCall3`
pub const nonSIMDCall3 = nonSimdCall3;
/// Counts the number of errors that have been recorded by a tool. Nb:
/// the tool must record the errors with VG_(maybe_record_error)() or
/// VG_(unique_error)() for them to be counted.

View File

@ -10,8 +10,6 @@ pub const ClientRequest = enum(usize) {
StopInstrumentation,
};
pub const CallgrindClientRequest = @compileError("std.valgrind.callgrind.CallgrindClientRequest renamed to std.valgrind.callgrind.ClientRequest");
fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
}

View File

@ -20,8 +20,6 @@ pub const ClientRequest = enum(usize) {
DisableAddrErrorReportingInRange,
};
pub const MemCheckClientRequest = @compileError("std.valgrind.memcheck.MemCheckClientRequest renamed to std.valgrind.memcheck.ClientRequest");
fn doClientRequestExpr(default: usize, request: ClientRequest, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
return valgrind.doClientRequest(default, @as(usize, @intCast(@intFromEnum(request))), a1, a2, a3, a4, a5);
}

View File

@ -23,7 +23,6 @@ pub const Zir = @import("zig/Zir.zig");
pub const Zoir = @import("zig/Zoir.zig");
pub const ZonGen = @import("zig/ZonGen.zig");
pub const system = @import("zig/system.zig");
pub const CrossTarget = @compileError("deprecated; use std.Target.Query");
pub const BuiltinFn = @import("zig/BuiltinFn.zig");
pub const AstRlAnnotate = @import("zig/AstRlAnnotate.zig");
pub const LibCInstallation = @import("zig/LibCInstallation.zig");
@ -596,7 +595,7 @@ pub fn putAstErrorsIntoBundle(
pub fn resolveTargetQueryOrFatal(target_query: std.Target.Query) std.Target {
return std.zig.system.resolveTargetQuery(target_query) catch |err|
fatal("unable to resolve target: {s}", .{@errorName(err)});
std.process.fatal("unable to resolve target: {s}", .{@errorName(err)});
}
pub fn parseTargetQueryOrReportFatalError(
@ -620,7 +619,7 @@ pub fn parseTargetQueryOrReportFatalError(
@tagName(diags.arch.?), help_text.items,
});
}
fatal("unknown CPU: '{s}'", .{diags.cpu_name.?});
std.process.fatal("unknown CPU: '{s}'", .{diags.cpu_name.?});
},
error.UnknownCpuFeature => {
help: {
@ -633,7 +632,7 @@ pub fn parseTargetQueryOrReportFatalError(
@tagName(diags.arch.?), help_text.items,
});
}
fatal("unknown CPU feature: '{s}'", .{diags.unknown_feature_name.?});
std.process.fatal("unknown CPU feature: '{s}'", .{diags.unknown_feature_name.?});
},
error.UnknownObjectFormat => {
help: {
@ -644,7 +643,7 @@ pub fn parseTargetQueryOrReportFatalError(
}
std.log.info("available object formats:\n{s}", .{help_text.items});
}
fatal("unknown object format: '{s}'", .{opts.object_format.?});
std.process.fatal("unknown object format: '{s}'", .{opts.object_format.?});
},
error.UnknownArchitecture => {
help: {
@ -655,17 +654,14 @@ pub fn parseTargetQueryOrReportFatalError(
}
std.log.info("available architectures:\n{s} native\n", .{help_text.items});
}
fatal("unknown architecture: '{s}'", .{diags.unknown_architecture_name.?});
std.process.fatal("unknown architecture: '{s}'", .{diags.unknown_architecture_name.?});
},
else => |e| fatal("unable to parse target query '{s}': {s}", .{
else => |e| std.process.fatal("unable to parse target query '{s}': {s}", .{
opts.arch_os_abi, @errorName(e),
}),
};
}
/// Deprecated; see `std.process.fatal`.
pub const fatal = std.process.fatal;
/// Collects all the environment variables that Zig could possibly inspect, so
/// that we can do reflection on this and print them with `zig env`.
pub const EnvVar = enum {

View File

@ -254,9 +254,6 @@ test "sizeof" {
pub const CIntLiteralBase = enum { decimal, octal, hex };
/// Deprecated: use `CIntLiteralBase`
pub const CIntLiteralRadix = CIntLiteralBase;
fn PromoteIntLiteralReturnType(comptime SuffixType: type, comptime number: comptime_int, comptime base: CIntLiteralBase) type {
const signed_decimal = [_]type{ c_int, c_long, c_longlong, c_ulonglong };
const signed_oct_hex = [_]type{ c_int, c_uint, c_long, c_ulong, c_longlong, c_ulonglong };

View File

@ -2037,7 +2037,7 @@ pub const Alignment = enum(u6) {
pub fn format(p: Prefixed, w: *Writer) Writer.Error!void {
const byte_units = p.alignment.toByteUnits() orelse return;
return w.print("{s}align ({d})", .{ p.prefix, byte_units });
return w.print("{s}align {d}", .{ p.prefix, byte_units });
}
};
@ -2384,7 +2384,7 @@ pub const Global = struct {
};
fn format(data: FormatData, w: *Writer) Writer.Error!void {
try w.print("@{f}", .{
data.global.unwrap(data.builder).name(data.builder).fmt(data.builder, null),
data.global.unwrap(data.builder).name(data.builder).fmt(data.builder, .quote_unless_valid_identifier),
});
}
pub fn fmt(self: Index, builder: *const Builder) std.fmt.Formatter(FormatData, format) {
@ -8401,7 +8401,7 @@ pub const Metadata = enum(u32) {
}, w);
},
.string => |node| try w.print("{s}{f}", .{
@as([]const u8, if (is_specialized) "" else "!"), node.fmt(builder),
@as([]const u8, if (is_specialized) "!" else ""), node.fmt(builder),
}),
inline .bool, .u32, .u64 => |node| try w.print("{}", .{node}),
inline .di_flags, .sp_flags => |node| try w.print("{f}", .{node}),
@ -9782,7 +9782,7 @@ pub fn print(self: *Builder, w: *Writer) (Writer.Error || Allocator.Error)!void
instruction_index.name(&function).fmt(self),
@tagName(tag),
extra.lhs.fmt(function_index, self, .{ .percent = true }),
extra.rhs.fmt(function_index, self, .{ .percent = true }),
extra.rhs.fmt(function_index, self, .{}),
});
},
.addrspacecast,
@ -10638,7 +10638,7 @@ fn fnTypeAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(comptime std.hash.uint32(@intFromEnum(tag)));
var hasher = std.hash.Wyhash.init(comptime std.hash.int(@intFromEnum(tag)));
hasher.update(std.mem.asBytes(&key.ret));
hasher.update(std.mem.sliceAsBytes(key.params));
return @truncate(hasher.final());
@ -10698,7 +10698,7 @@ fn vectorTypeAssumeCapacity(
builder: *const Builder,
pub fn hash(_: @This(), key: Type.Vector) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(tag)),
comptime std.hash.int(@intFromEnum(tag)),
std.mem.asBytes(&key),
));
}
@ -10727,7 +10727,7 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type {
builder: *const Builder,
pub fn hash(_: @This(), key: Type.Vector) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Type.Tag.small_array)),
comptime std.hash.int(@intFromEnum(Type.Tag.small_array)),
std.mem.asBytes(&key),
));
}
@ -10753,7 +10753,7 @@ fn arrayTypeAssumeCapacity(self: *Builder, len: u64, child: Type) Type {
builder: *const Builder,
pub fn hash(_: @This(), key: Type.Array) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Type.Tag.array)),
comptime std.hash.int(@intFromEnum(Type.Tag.array)),
std.mem.asBytes(&key),
));
}
@ -10794,7 +10794,7 @@ fn structTypeAssumeCapacity(
builder: *const Builder,
pub fn hash(_: @This(), key: []const Type) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(tag)),
comptime std.hash.int(@intFromEnum(tag)),
std.mem.sliceAsBytes(key),
));
}
@ -10826,7 +10826,7 @@ fn opaqueTypeAssumeCapacity(self: *Builder, name: String) Type {
builder: *const Builder,
pub fn hash(_: @This(), key: String) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Type.Tag.named_structure)),
comptime std.hash.int(@intFromEnum(Type.Tag.named_structure)),
std.mem.asBytes(&key),
));
}
@ -10887,7 +10887,7 @@ fn getOrPutTypeNoExtraAssumeCapacity(self: *Builder, item: Type.Item) struct { n
builder: *const Builder,
pub fn hash(_: @This(), key: Type.Item) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Type.Tag.simple)),
comptime std.hash.int(@intFromEnum(Type.Tag.simple)),
std.mem.asBytes(&key),
));
}
@ -11021,7 +11021,7 @@ fn bigIntConstAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
var hasher = std.hash.Wyhash.init(std.hash.int(@intFromEnum(key.tag)));
hasher.update(std.mem.asBytes(&key.type));
hasher.update(std.mem.sliceAsBytes(key.limbs));
return @truncate(hasher.final());
@ -11084,7 +11084,7 @@ fn doubleConstAssumeCapacity(self: *Builder, val: f64) Constant {
builder: *const Builder,
pub fn hash(_: @This(), key: f64) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Constant.Tag.double)),
comptime std.hash.int(@intFromEnum(Constant.Tag.double)),
std.mem.asBytes(&key),
));
}
@ -11115,7 +11115,7 @@ fn fp128ConstAssumeCapacity(self: *Builder, val: f128) Constant {
builder: *const Builder,
pub fn hash(_: @This(), key: f128) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Constant.Tag.fp128)),
comptime std.hash.int(@intFromEnum(Constant.Tag.fp128)),
std.mem.asBytes(&key),
));
}
@ -11149,7 +11149,7 @@ fn x86_fp80ConstAssumeCapacity(self: *Builder, val: f80) Constant {
builder: *const Builder,
pub fn hash(_: @This(), key: f80) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Constant.Tag.x86_fp80)),
comptime std.hash.int(@intFromEnum(Constant.Tag.x86_fp80)),
std.mem.asBytes(&key)[0..10],
));
}
@ -11182,7 +11182,7 @@ fn ppc_fp128ConstAssumeCapacity(self: *Builder, val: [2]f64) Constant {
builder: *const Builder,
pub fn hash(_: @This(), key: [2]f64) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Constant.Tag.ppc_fp128)),
comptime std.hash.int(@intFromEnum(Constant.Tag.ppc_fp128)),
std.mem.asBytes(&key),
));
}
@ -11317,7 +11317,7 @@ fn splatConstAssumeCapacity(self: *Builder, ty: Type, val: Constant) Constant {
builder: *const Builder,
pub fn hash(_: @This(), key: Constant.Splat) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Constant.Tag.splat)),
comptime std.hash.int(@intFromEnum(Constant.Tag.splat)),
std.mem.asBytes(&key),
));
}
@ -11420,7 +11420,7 @@ fn blockAddrConstAssumeCapacity(
builder: *const Builder,
pub fn hash(_: @This(), key: Constant.BlockAddress) u32 {
return @truncate(std.hash.Wyhash.hash(
comptime std.hash.uint32(@intFromEnum(Constant.Tag.blockaddress)),
comptime std.hash.int(@intFromEnum(Constant.Tag.blockaddress)),
std.mem.asBytes(&key),
));
}
@ -11546,7 +11546,7 @@ fn castConstAssumeCapacity(self: *Builder, tag: Constant.Tag, val: Constant, ty:
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
return @truncate(std.hash.Wyhash.hash(
std.hash.uint32(@intFromEnum(key.tag)),
std.hash.int(@intFromEnum(key.tag)),
std.mem.asBytes(&key.cast),
));
}
@ -11621,7 +11621,7 @@ fn gepConstAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(comptime std.hash.uint32(@intFromEnum(tag)));
var hasher = std.hash.Wyhash.init(comptime std.hash.int(@intFromEnum(tag)));
hasher.update(std.mem.asBytes(&key.type));
hasher.update(std.mem.asBytes(&key.base));
hasher.update(std.mem.asBytes(&key.inrange));
@ -11685,7 +11685,7 @@ fn binConstAssumeCapacity(
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
return @truncate(std.hash.Wyhash.hash(
std.hash.uint32(@intFromEnum(key.tag)),
std.hash.int(@intFromEnum(key.tag)),
std.mem.asBytes(&key.extra),
));
}
@ -11723,7 +11723,7 @@ fn asmConstAssumeCapacity(
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
return @truncate(std.hash.Wyhash.hash(
std.hash.uint32(@intFromEnum(key.tag)),
std.hash.int(@intFromEnum(key.tag)),
std.mem.asBytes(&key.extra),
));
}
@ -11773,7 +11773,7 @@ fn getOrPutConstantNoExtraAssumeCapacity(
builder: *const Builder,
pub fn hash(_: @This(), key: Constant.Item) u32 {
return @truncate(std.hash.Wyhash.hash(
std.hash.uint32(@intFromEnum(key.tag)),
std.hash.int(@intFromEnum(key.tag)),
std.mem.asBytes(&key.data),
));
}
@ -11804,7 +11804,7 @@ fn getOrPutConstantAggregateAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
var hasher = std.hash.Wyhash.init(std.hash.int(@intFromEnum(key.tag)));
hasher.update(std.mem.asBytes(&key.type));
hasher.update(std.mem.sliceAsBytes(key.vals));
return @truncate(hasher.final());
@ -12421,7 +12421,7 @@ fn metadataSimpleAssumeCapacity(self: *Builder, tag: Metadata.Tag, value: anytyp
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
var hasher = std.hash.Wyhash.init(std.hash.int(@intFromEnum(key.tag)));
inline for (std.meta.fields(@TypeOf(value))) |field| {
hasher.update(std.mem.asBytes(&@field(key.value, field.name)));
}
@ -12457,7 +12457,7 @@ fn metadataDistinctAssumeCapacity(self: *Builder, tag: Metadata.Tag, value: anyt
const Adapter = struct {
pub fn hash(_: @This(), key: Key) u32 {
return @truncate(std.hash.Wyhash.hash(
std.hash.uint32(@intFromEnum(key.tag)),
std.hash.int(@intFromEnum(key.tag)),
std.mem.asBytes(&key.index),
));
}
@ -12853,7 +12853,7 @@ fn debugEnumeratorAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(key.tag)));
var hasher = std.hash.Wyhash.init(std.hash.int(@intFromEnum(key.tag)));
hasher.update(std.mem.asBytes(&key.name));
hasher.update(std.mem.asBytes(&key.bit_width));
hasher.update(std.mem.sliceAsBytes(key.value.limbs));
@ -12935,7 +12935,7 @@ fn debugExpressionAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = comptime std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(Metadata.Tag.expression)));
var hasher = comptime std.hash.Wyhash.init(std.hash.int(@intFromEnum(Metadata.Tag.expression)));
hasher.update(std.mem.sliceAsBytes(key.elements));
return @truncate(hasher.final());
}
@ -12981,7 +12981,7 @@ fn metadataTupleAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = comptime std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(Metadata.Tag.tuple)));
var hasher = comptime std.hash.Wyhash.init(std.hash.int(@intFromEnum(Metadata.Tag.tuple)));
hasher.update(std.mem.sliceAsBytes(key.elements));
return @truncate(hasher.final());
}
@ -13029,7 +13029,7 @@ fn strTupleAssumeCapacity(
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Key) u32 {
var hasher = comptime std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(Metadata.Tag.tuple)));
var hasher = comptime std.hash.Wyhash.init(std.hash.int(@intFromEnum(Metadata.Tag.tuple)));
hasher.update(std.mem.sliceAsBytes(key.elements));
return @truncate(hasher.final());
}
@ -13159,7 +13159,7 @@ fn metadataConstantAssumeCapacity(self: *Builder, constant: Constant) Metadata {
const Adapter = struct {
builder: *const Builder,
pub fn hash(_: @This(), key: Constant) u32 {
var hasher = comptime std.hash.Wyhash.init(std.hash.uint32(@intFromEnum(Metadata.Tag.constant)));
var hasher = comptime std.hash.Wyhash.init(std.hash.int(@intFromEnum(Metadata.Tag.constant)));
hasher.update(std.mem.asBytes(&key));
return @truncate(hasher.final());
}

View File

@ -1029,7 +1029,9 @@ pub const CObject = struct {
pub fn destroy(bundle: *Bundle, gpa: Allocator) void {
for (bundle.file_names.values()) |file_name| gpa.free(file_name);
bundle.file_names.deinit(gpa);
for (bundle.category_names.values()) |category_name| gpa.free(category_name);
bundle.category_names.deinit(gpa);
for (bundle.diags) |*diag| diag.deinit(gpa);
gpa.free(bundle.diags);
gpa.destroy(bundle);
@ -3040,7 +3042,7 @@ fn flush(
// If there's an output file, it wants to decide where the LLVM object goes!
const sub_prog_node = comp.link_prog_node.start("LLVM Emit Object", 0);
defer sub_prog_node.end();
try llvm_object.emit(.{
try llvm_object.emit(.{ .zcu = zcu, .tid = tid }, .{
.pre_ir_path = comp.verbose_llvm_ir,
.pre_bc_path = comp.verbose_llvm_bc,

View File

@ -55,14 +55,15 @@ fn runThread(ids: *IncrementalDebugServer) void {
const conn = server.accept() catch @panic("IncrementalDebugServer: failed to accept");
defer conn.stream.close();
var stream_reader = conn.stream.reader(&cmd_buf);
while (ids.running.load(.monotonic)) {
conn.stream.writeAll("zig> ") catch @panic("IncrementalDebugServer: failed to write");
var fbs = std.io.fixedBufferStream(&cmd_buf);
conn.stream.reader().streamUntilDelimiter(fbs.writer(), '\n', cmd_buf.len) catch |err| switch (err) {
const untrimmed = stream_reader.interface().takeSentinel('\n') catch |err| switch (err) {
error.EndOfStream => break,
else => @panic("IncrementalDebugServer: failed to read command"),
};
const cmd_and_arg = std.mem.trim(u8, fbs.getWritten(), " \t\r\n");
const cmd_and_arg = std.mem.trim(u8, untrimmed, " \t\r\n");
const cmd: []const u8, const arg: []const u8 = if (std.mem.indexOfScalar(u8, cmd_and_arg, ' ')) |i|
.{ cmd_and_arg[0..i], cmd_and_arg[i + 1 ..] }
else

View File

@ -1861,7 +1861,7 @@ pub const NullTerminatedString = enum(u32) {
pub fn hash(ctx: @This(), a: NullTerminatedString) u32 {
_ = ctx;
return std.hash.uint32(@intFromEnum(a));
return std.hash.int(@intFromEnum(a));
}
};
@ -4740,7 +4740,7 @@ pub const Index = enum(u32) {
pub fn hash(ctx: @This(), a: Index) u32 {
_ = ctx;
return std.hash.uint32(@intFromEnum(a));
return std.hash.int(@intFromEnum(a));
}
};
@ -12725,7 +12725,7 @@ const GlobalErrorSet = struct {
name: NullTerminatedString,
) Allocator.Error!GlobalErrorSet.Index {
if (name == .empty) return .none;
const hash = std.hash.uint32(@intFromEnum(name));
const hash = std.hash.int(@intFromEnum(name));
var map = ges.shared.map.acquire();
const Map = @TypeOf(map);
var map_mask = map.header().mask();
@ -12818,7 +12818,7 @@ const GlobalErrorSet = struct {
name: NullTerminatedString,
) ?GlobalErrorSet.Index {
if (name == .empty) return .none;
const hash = std.hash.uint32(@intFromEnum(name));
const hash = std.hash.int(@intFromEnum(name));
const map = ges.shared.map.acquire();
const map_mask = map.header().mask();
const names_items = ges.shared.names.acquire().view().items(.@"0");

View File

@ -9705,7 +9705,6 @@ fn funcCommon(
func_inst,
cc_src,
is_noinline,
is_generic,
);
}
@ -9745,7 +9744,6 @@ fn funcCommon(
func_inst,
cc_src,
is_noinline,
is_generic,
);
}
@ -9762,7 +9760,6 @@ fn funcCommon(
func_inst,
cc_src,
is_noinline,
is_generic,
);
}
@ -9779,7 +9776,6 @@ fn finishFunc(
func_inst: Zir.Inst.Index,
cc_src: LazySrcLoc,
is_noinline: bool,
is_generic: bool,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
@ -9911,13 +9907,6 @@ fn finishFunc(
}),
}
if (!is_generic and sema.wantErrorReturnTracing(return_type)) {
// Make sure that StackTrace's fields are resolved so that the backend can
// lower this fn type.
const unresolved_stack_trace_ty = try sema.getBuiltinType(block.nodeOffset(.zero), .StackTrace);
try unresolved_stack_trace_ty.resolveFields(pt);
}
return Air.internedToRef(if (opt_func_index != .none) opt_func_index else func_ty);
}
@ -13052,8 +13041,10 @@ fn analyzeSwitchRuntimeBlock(
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cases_extra.items));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_body));
const has_any_continues = spa.operand == .loop and child_block.label.?.merges.extra_insts.items.len > 0;
return try child_block.addInst(.{
.tag = if (spa.operand == .loop) .loop_switch_br else .switch_br,
.tag = if (has_any_continues) .loop_switch_br else .switch_br,
.data = .{ .pl_op = .{
.operand = operand,
.payload = payload_index,

View File

@ -808,7 +808,7 @@ pub const Namespace = struct {
pub fn hash(ctx: NavNameContext, nav: InternPool.Nav.Index) u32 {
const name = ctx.zcu.intern_pool.getNav(nav).name;
return std.hash.uint32(@intFromEnum(name));
return std.hash.int(@intFromEnum(name));
}
pub fn eql(ctx: NavNameContext, a_nav: InternPool.Nav.Index, b_nav: InternPool.Nav.Index, b_index: usize) bool {
@ -824,7 +824,7 @@ pub const Namespace = struct {
pub fn hash(ctx: NameAdapter, s: InternPool.NullTerminatedString) u32 {
_ = ctx;
return std.hash.uint32(@intFromEnum(s));
return std.hash.int(@intFromEnum(s));
}
pub fn eql(ctx: NameAdapter, a: InternPool.NullTerminatedString, b_nav: InternPool.Nav.Index, b_index: usize) bool {

View File

@ -3975,7 +3975,7 @@ fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Inner
var width_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
width_bigint.sub(max_bigint, min_bigint);
width_bigint.addScalar(width_bigint.toConst(), 1);
break :width width_bigint.toConst().to(u32) catch null;
break :width width_bigint.toConst().toInt(u32) catch null;
};
try cg.startBlock(.block, .empty); // whole switch block start
@ -4016,7 +4016,7 @@ fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Inner
const val_bigint = val.toBigInt(&val_space, zcu);
var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
index_bigint.sub(val_bigint, min_bigint);
branch_list[index_bigint.toConst().to(u32) catch unreachable] = case.idx;
branch_list[index_bigint.toConst().toInt(u32) catch unreachable] = case.idx;
}
for (case.ranges) |range| {
var low_space: Value.BigIntSpace = undefined;
@ -4025,9 +4025,9 @@ fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index, is_dispatch_loop: bool) Inner
const high_bigint = Value.fromInterned(range[1].toInterned().?).toBigInt(&high_space, zcu);
var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
index_bigint.sub(low_bigint, min_bigint);
const start = index_bigint.toConst().to(u32) catch unreachable;
const start = index_bigint.toConst().toInt(u32) catch unreachable;
index_bigint.sub(high_bigint, min_bigint);
const end = (index_bigint.toConst().to(u32) catch unreachable) + 1;
const end = (index_bigint.toConst().toInt(u32) catch unreachable) + 1;
@memset(branch_list[start..end], case.idx);
}
}

View File

@ -687,7 +687,7 @@ pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) st
const sp_global: Wasm.GlobalIndex = .stack_pointer;
// load stack pointer
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_get));
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
// store stack pointer so we can restore it when we return from the function
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.local_tee));
leb.writeUleb128(code.fixedWriter(), mir.prologue.sp_local) catch unreachable;
@ -710,7 +710,7 @@ pub fn lower(mir: *const Mir, wasm: *Wasm, code: *std.ArrayListUnmanaged(u8)) st
// Store the current stack pointer value into the global stack pointer so other function calls will
// start from this value instead and not overwrite the current stack.
code.appendAssumeCapacity(@intFromEnum(std.wasm.Opcode.global_set));
std.leb.writeULEB128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
std.leb.writeUleb128(code.fixedWriter(), @intFromEnum(sp_global)) catch unreachable;
}
var emit: Emit = .{

View File

@ -179277,7 +179277,7 @@ fn lowerSwitchBr(
var table_len_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
table_len_bigint.sub(max_bigint, min_bigint);
assert(table_len_bigint.positive); // min <= max
break :table_len @as(u11, table_len_bigint.toConst().to(u10) catch break :table) + 1; // no more than a 1024 entry table
break :table_len @as(u11, table_len_bigint.toConst().toInt(u10) catch break :table) + 1; // no more than a 1024 entry table
};
assert(prong_items <= table_len); // each prong item introduces at least one unique integer to the range
if (prong_items < table_len >> 2) break :table; // no more than 75% waste
@ -179353,7 +179353,7 @@ fn lowerSwitchBr(
const val_bigint = val.toBigInt(&val_space, zcu);
var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
index_bigint.sub(val_bigint, min_bigint);
table[index_bigint.toConst().to(u10) catch unreachable] = @intCast(cg.mir_instructions.len);
table[index_bigint.toConst().toInt(u10) catch unreachable] = @intCast(cg.mir_instructions.len);
}
for (case.ranges) |range| {
var low_space: Value.BigIntSpace = undefined;
@ -179362,9 +179362,9 @@ fn lowerSwitchBr(
const high_bigint = Value.fromInterned(range[1].toInterned().?).toBigInt(&high_space, zcu);
var index_bigint: std.math.big.int.Mutable = .{ .limbs = limbs, .positive = undefined, .len = undefined };
index_bigint.sub(low_bigint, min_bigint);
const start = index_bigint.toConst().to(u10) catch unreachable;
const start = index_bigint.toConst().toInt(u10) catch unreachable;
index_bigint.sub(high_bigint, min_bigint);
const end = @as(u11, index_bigint.toConst().to(u10) catch unreachable) + 1;
const end = @as(u11, index_bigint.toConst().toInt(u10) catch unreachable) + 1;
@memset(table[start..end], @intCast(cg.mir_instructions.len));
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -7,8 +7,7 @@ const assert = std.debug.assert;
const spec = @import("spec.zig");
const Opcode = spec.Opcode;
const Word = spec.Word;
const IdRef = spec.IdRef;
const IdResult = spec.IdResult;
const Id = spec.Id;
const StorageClass = spec.StorageClass;
const SpvModule = @import("Module.zig");
@ -127,10 +126,10 @@ const AsmValue = union(enum) {
unresolved_forward_reference,
/// This result-value is a normal result produced by a different instruction.
value: IdRef,
value: Id,
/// This result-value represents a type registered into the module's type system.
ty: IdRef,
ty: Id,
/// This is a pre-supplied constant integer value.
constant: u32,
@ -141,7 +140,7 @@ const AsmValue = union(enum) {
/// Retrieve the result-id of this AsmValue. Asserts that this AsmValue
/// is of a variant that allows the result to be obtained (not an unresolved
/// forward declaration, not in the process of being declared, etc).
pub fn resultId(self: AsmValue) IdRef {
pub fn resultId(self: AsmValue) Id {
return switch (self) {
.just_declared,
.unresolved_forward_reference,
@ -314,7 +313,7 @@ fn processInstruction(self: *Assembler) !void {
return;
},
else => switch (self.inst.opcode.class()) {
.TypeDeclaration => try self.processTypeInstruction(),
.type_declaration => try self.processTypeInstruction(),
else => (try self.processGenericInstruction()) orelse return,
},
};
@ -392,7 +391,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
break :blk result_id;
},
.OpTypeStruct => blk: {
const ids = try self.gpa.alloc(IdRef, operands[1..].len);
const ids = try self.gpa.alloc(Id, operands[1..].len);
defer self.gpa.free(ids);
for (operands[1..], ids) |op, *id| id.* = try self.resolveRefId(op.ref_id);
const result_id = self.spv.allocId();
@ -429,7 +428,7 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
const param_operands = operands[2..];
const return_type = try self.resolveRefId(operands[1].ref_id);
const param_types = try self.spv.gpa.alloc(IdRef, param_operands.len);
const param_types = try self.spv.gpa.alloc(Id, param_operands.len);
defer self.spv.gpa.free(param_types);
for (param_types, param_operands) |*param, operand| {
param.* = try self.resolveRefId(operand.ref_id);
@ -457,17 +456,17 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
const operands = self.inst.operands.items;
var maybe_spv_decl_index: ?SpvModule.Decl.Index = null;
const section = switch (self.inst.opcode.class()) {
.ConstantCreation => &self.spv.sections.types_globals_constants,
.Annotation => &self.spv.sections.annotations,
.TypeDeclaration => unreachable, // Handled elsewhere.
.constant_creation => &self.spv.sections.types_globals_constants,
.annotation => &self.spv.sections.annotations,
.type_declaration => unreachable, // Handled elsewhere.
else => switch (self.inst.opcode) {
.OpEntryPoint => unreachable,
.OpExecutionMode, .OpExecutionModeId => &self.spv.sections.execution_modes,
.OpVariable => section: {
const storage_class: spec.StorageClass = @enumFromInt(operands[2].value);
if (storage_class == .Function) break :section &self.func.prologue;
if (storage_class == .function) break :section &self.func.prologue;
maybe_spv_decl_index = try self.spv.allocDecl(.global);
if (self.spv.version.minor < 4 and storage_class != .Input and storage_class != .Output) {
if (self.spv.version.minor < 4 and storage_class != .input and storage_class != .output) {
// Before version 1.4, the interfaces storage classes are limited to the Input and Output
break :section &self.spv.sections.types_globals_constants;
}
@ -481,7 +480,7 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
},
};
var maybe_result_id: ?IdResult = null;
var maybe_result_id: ?Id = null;
const first_word = section.instructions.items.len;
// At this point we're not quite sure how many operands this instruction is going to have,
// so insert 0 and patch up the actual opcode word later.
@ -504,12 +503,12 @@ fn processGenericInstruction(self: *Assembler) !?AsmValue {
else
self.spv.allocId();
try section.ensureUnusedCapacity(self.spv.gpa, 1);
section.writeOperand(IdResult, maybe_result_id.?);
section.writeOperand(Id, maybe_result_id.?);
},
.ref_id => |index| {
const result = try self.resolveRef(index);
try section.ensureUnusedCapacity(self.spv.gpa, 1);
section.writeOperand(spec.IdRef, result.resultId());
section.writeOperand(spec.Id, result.resultId());
},
.string => |offset| {
const text = std.mem.sliceTo(self.inst.string_bytes.items[offset..], 0);
@ -558,7 +557,7 @@ fn resolveRef(self: *Assembler, ref: AsmValue.Ref) !AsmValue {
}
}
fn resolveRefId(self: *Assembler, ref: AsmValue.Ref) !IdRef {
fn resolveRefId(self: *Assembler, ref: AsmValue.Ref) !Id {
const value = try self.resolveRef(ref);
return value.resultId();
}
@ -600,7 +599,7 @@ fn parseInstruction(self: *Assembler) !void {
const expected_operands = inst.operands;
// This is a loop because the result-id is not always the first operand.
const requires_lhs_result = for (expected_operands) |op| {
if (op.kind == .IdResult) break true;
if (op.kind == .id_result) break true;
} else false;
if (requires_lhs_result and maybe_lhs_result == null) {
@ -614,7 +613,7 @@ fn parseInstruction(self: *Assembler) !void {
}
for (expected_operands) |operand| {
if (operand.kind == .IdResult) {
if (operand.kind == .id_result) {
try self.inst.operands.append(self.gpa, .{ .result_id = maybe_lhs_result.? });
continue;
}
@ -646,11 +645,11 @@ fn parseOperand(self: *Assembler, kind: spec.OperandKind) Error!void {
.value_enum => try self.parseValueEnum(kind),
.id => try self.parseRefId(),
else => switch (kind) {
.LiteralInteger => try self.parseLiteralInteger(),
.LiteralString => try self.parseString(),
.LiteralContextDependentNumber => try self.parseContextDependentNumber(),
.LiteralExtInstInteger => try self.parseLiteralExtInstInteger(),
.PairIdRefIdRef => try self.parsePhiSource(),
.literal_integer => try self.parseLiteralInteger(),
.literal_string => try self.parseString(),
.literal_context_dependent_number => try self.parseContextDependentNumber(),
.literal_ext_inst_integer => try self.parseLiteralExtInstInteger(),
.pair_id_ref_id_ref => try self.parsePhiSource(),
else => return self.todo("parse operand of type {s}", .{@tagName(kind)}),
},
}

View File

@ -15,9 +15,7 @@ const Wyhash = std.hash.Wyhash;
const spec = @import("spec.zig");
const Word = spec.Word;
const IdRef = spec.IdRef;
const IdResult = spec.IdResult;
const IdResultType = spec.IdResultType;
const Id = spec.Id;
const Section = @import("Section.zig");
@ -82,7 +80,7 @@ pub const Decl = struct {
/// - For `func`, this is the result-id of the associated OpFunction instruction.
/// - For `global`, this is the result-id of the associated OpVariable instruction.
/// - For `invocation_global`, this is the result-id of the associated InvocationGlobal instruction.
result_id: IdRef,
result_id: Id,
/// The offset of the first dependency of this decl in the `decl_deps` array.
begin_dep: u32,
/// The past-end offset of the dependencies of this decl in the `decl_deps` array.
@ -150,7 +148,7 @@ sections: struct {
next_result_id: Word,
/// Cache for results of OpString instructions.
strings: std.StringArrayHashMapUnmanaged(IdRef) = .empty,
strings: std.StringArrayHashMapUnmanaged(Id) = .empty,
/// Some types shouldn't be emitted more than one time, but cannot be caught by
/// the `intern_map` during codegen. Sometimes, IDs are compared to check if
@ -161,20 +159,20 @@ strings: std.StringArrayHashMapUnmanaged(IdRef) = .empty,
/// Additionally, this is used for other values which can be cached, for example,
/// built-in variables.
cache: struct {
bool_type: ?IdRef = null,
void_type: ?IdRef = null,
int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .empty,
float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .empty,
vector_types: std.AutoHashMapUnmanaged(struct { IdRef, u32 }, IdRef) = .empty,
array_types: std.AutoHashMapUnmanaged(struct { IdRef, IdRef }, IdRef) = .empty,
bool_type: ?Id = null,
void_type: ?Id = null,
int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, Id) = .empty,
float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, Id) = .empty,
vector_types: std.AutoHashMapUnmanaged(struct { Id, u32 }, Id) = .empty,
array_types: std.AutoHashMapUnmanaged(struct { Id, Id }, Id) = .empty,
capabilities: std.AutoHashMapUnmanaged(spec.Capability, void) = .empty,
extensions: std.StringHashMapUnmanaged(void) = .empty,
extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,
decorations: std.AutoHashMapUnmanaged(struct { IdRef, spec.Decoration }, void) = .empty,
builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty,
extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, Id) = .empty,
decorations: std.AutoHashMapUnmanaged(struct { Id, spec.Decoration }, void) = .empty,
builtins: std.AutoHashMapUnmanaged(struct { Id, spec.BuiltIn }, Decl.Index) = .empty,
bool_const: [2]?IdRef = .{ null, null },
bool_const: [2]?Id = .{ null, null },
} = .{},
/// Set of Decls, referred to by Decl.Index.
@ -185,7 +183,7 @@ decls: std.ArrayListUnmanaged(Decl) = .empty,
decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,
/// The list of entry points that should be exported from this module.
entry_points: std.AutoArrayHashMapUnmanaged(IdRef, EntryPoint) = .empty,
entry_points: std.AutoArrayHashMapUnmanaged(Id, EntryPoint) = .empty,
pub fn init(gpa: Allocator, target: *const std.Target) Module {
const version_minor: u8 = blk: {
@ -245,7 +243,7 @@ pub const IdRange = struct {
base: u32,
len: u32,
pub fn at(range: IdRange, i: usize) IdResult {
pub fn at(range: IdRange, i: usize) Id {
assert(i < range.len);
return @enumFromInt(range.base + i);
}
@ -259,7 +257,7 @@ pub fn allocIds(self: *Module, n: u32) IdRange {
};
}
pub fn allocId(self: *Module) IdResult {
pub fn allocId(self: *Module) Id {
return self.allocIds(1).at(0);
}
@ -275,7 +273,7 @@ fn addEntryPointDeps(
self: *Module,
decl_index: Decl.Index,
seen: *std.DynamicBitSetUnmanaged,
interface: *std.ArrayList(IdRef),
interface: *std.ArrayList(Id),
) !void {
const decl = self.declPtr(decl_index);
const deps = self.decl_deps.items[decl.begin_dep..decl.end_dep];
@ -299,7 +297,7 @@ fn entryPoints(self: *Module) !Section {
var entry_points = Section{};
errdefer entry_points.deinit(self.gpa);
var interface = std.ArrayList(IdRef).init(self.gpa);
var interface = std.ArrayList(Id).init(self.gpa);
defer interface.deinit();
var seen = try std.DynamicBitSetUnmanaged.initEmpty(self.gpa, self.decls.items.len);
@ -317,12 +315,12 @@ fn entryPoints(self: *Module) !Section {
.interface = interface.items,
});
if (entry_point.exec_mode == null and entry_point.exec_model == .Fragment) {
if (entry_point.exec_mode == null and entry_point.exec_model == .fragment) {
switch (self.target.os.tag) {
.vulkan, .opengl => |tag| {
try self.sections.execution_modes.emit(self.gpa, .OpExecutionMode, .{
.entry_point = entry_point_id,
.mode = if (tag == .vulkan) .OriginUpperLeft else .OriginLowerLeft,
.mode = if (tag == .vulkan) .origin_upper_left else .origin_lower_left,
});
},
.opencl => {},
@ -338,59 +336,59 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word {
// Emit capabilities and extensions
switch (self.target.os.tag) {
.opengl => {
try self.addCapability(.Shader);
try self.addCapability(.Matrix);
try self.addCapability(.shader);
try self.addCapability(.matrix);
},
.vulkan => {
try self.addCapability(.Shader);
try self.addCapability(.Matrix);
try self.addCapability(.shader);
try self.addCapability(.matrix);
if (self.target.cpu.arch == .spirv64) {
try self.addExtension("SPV_KHR_physical_storage_buffer");
try self.addCapability(.PhysicalStorageBufferAddresses);
try self.addCapability(.physical_storage_buffer_addresses);
}
},
.opencl, .amdhsa => {
try self.addCapability(.Kernel);
try self.addCapability(.Addresses);
try self.addCapability(.kernel);
try self.addCapability(.addresses);
},
else => unreachable,
}
if (self.target.cpu.arch == .spirv64) try self.addCapability(.Int64);
if (self.target.cpu.has(.spirv, .int64)) try self.addCapability(.Int64);
if (self.target.cpu.has(.spirv, .float16)) try self.addCapability(.Float16);
if (self.target.cpu.has(.spirv, .float64)) try self.addCapability(.Float64);
if (self.target.cpu.has(.spirv, .generic_pointer)) try self.addCapability(.GenericPointer);
if (self.target.cpu.has(.spirv, .vector16)) try self.addCapability(.Vector16);
if (self.target.cpu.arch == .spirv64) try self.addCapability(.int64);
if (self.target.cpu.has(.spirv, .int64)) try self.addCapability(.int64);
if (self.target.cpu.has(.spirv, .float16)) try self.addCapability(.float16);
if (self.target.cpu.has(.spirv, .float64)) try self.addCapability(.float64);
if (self.target.cpu.has(.spirv, .generic_pointer)) try self.addCapability(.generic_pointer);
if (self.target.cpu.has(.spirv, .vector16)) try self.addCapability(.vector16);
if (self.target.cpu.has(.spirv, .storage_push_constant16)) {
try self.addExtension("SPV_KHR_16bit_storage");
try self.addCapability(.StoragePushConstant16);
try self.addCapability(.storage_push_constant16);
}
if (self.target.cpu.has(.spirv, .arbitrary_precision_integers)) {
try self.addExtension("SPV_INTEL_arbitrary_precision_integers");
try self.addCapability(.ArbitraryPrecisionIntegersINTEL);
try self.addCapability(.arbitrary_precision_integers_intel);
}
if (self.target.cpu.has(.spirv, .variable_pointers)) {
try self.addExtension("SPV_KHR_variable_pointers");
try self.addCapability(.VariablePointersStorageBuffer);
try self.addCapability(.VariablePointers);
try self.addCapability(.variable_pointers_storage_buffer);
try self.addCapability(.variable_pointers);
}
// These are well supported
try self.addCapability(.Int8);
try self.addCapability(.Int16);
try self.addCapability(.int8);
try self.addCapability(.int16);
// Emit memory model
const addressing_model: spec.AddressingModel = switch (self.target.os.tag) {
.opengl => .Logical,
.vulkan => if (self.target.cpu.arch == .spirv32) .Logical else .PhysicalStorageBuffer64,
.opencl => if (self.target.cpu.arch == .spirv32) .Physical32 else .Physical64,
.amdhsa => .Physical64,
.opengl => .logical,
.vulkan => if (self.target.cpu.arch == .spirv32) .logical else .physical_storage_buffer64,
.opencl => if (self.target.cpu.arch == .spirv32) .physical32 else .physical64,
.amdhsa => .physical64,
else => unreachable,
};
try self.sections.memory_model.emit(self.gpa, .OpMemoryModel, .{
.addressing_model = addressing_model,
.memory_model = switch (self.target.os.tag) {
.opencl => .OpenCL,
.vulkan, .opengl => .GLSL450,
.opencl => .open_cl,
.vulkan, .opengl => .glsl450,
else => unreachable,
},
});
@ -411,7 +409,7 @@ pub fn finalize(self: *Module, a: Allocator) ![]Word {
var source = Section{};
defer source.deinit(self.gpa);
try self.sections.debug_strings.emit(self.gpa, .OpSource, .{
.source_language = .Zig,
.source_language = .zig,
.version = 0,
// We cannot emit these because the Khronos translator does not parse this instruction
// correctly.
@ -473,7 +471,7 @@ pub fn addExtension(self: *Module, ext: []const u8) !void {
}
/// Imports or returns the existing id of an extended instruction set
pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !Id {
assert(set != .core);
const gop = try self.cache.extended_instruction_set.getOrPut(self.gpa, set);
@ -490,7 +488,7 @@ pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
}
/// Fetch the result-id of an instruction corresponding to a string.
pub fn resolveString(self: *Module, string: []const u8) !IdRef {
pub fn resolveString(self: *Module, string: []const u8) !Id {
if (self.strings.get(string)) |id| {
return id;
}
@ -506,7 +504,7 @@ pub fn resolveString(self: *Module, string: []const u8) !IdRef {
return id;
}
pub fn structType(self: *Module, result_id: IdResult, types: []const IdRef, maybe_names: ?[]const []const u8) !void {
pub fn structType(self: *Module, result_id: Id, types: []const Id, maybe_names: ?[]const []const u8) !void {
try self.sections.types_globals_constants.emit(self.gpa, .OpTypeStruct, .{
.id_result = result_id,
.id_ref = types,
@ -520,7 +518,7 @@ pub fn structType(self: *Module, result_id: IdResult, types: []const IdRef, mayb
}
}
pub fn boolType(self: *Module) !IdRef {
pub fn boolType(self: *Module) !Id {
if (self.cache.bool_type) |id| return id;
const result_id = self.allocId();
@ -531,7 +529,7 @@ pub fn boolType(self: *Module) !IdRef {
return result_id;
}
pub fn voidType(self: *Module) !IdRef {
pub fn voidType(self: *Module) !Id {
if (self.cache.void_type) |id| return id;
const result_id = self.allocId();
@ -543,7 +541,7 @@ pub fn voidType(self: *Module) !IdRef {
return result_id;
}
pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !IdRef {
pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !Id {
assert(bits > 0);
const entry = try self.cache.int_types.getOrPut(self.gpa, .{ .signedness = signedness, .bits = bits });
if (!entry.found_existing) {
@ -566,7 +564,7 @@ pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !Id
return entry.value_ptr.*;
}
pub fn floatType(self: *Module, bits: u16) !IdRef {
pub fn floatType(self: *Module, bits: u16) !Id {
assert(bits > 0);
const entry = try self.cache.float_types.getOrPut(self.gpa, .{ .bits = bits });
if (!entry.found_existing) {
@ -581,7 +579,7 @@ pub fn floatType(self: *Module, bits: u16) !IdRef {
return entry.value_ptr.*;
}
pub fn vectorType(self: *Module, len: u32, child_ty_id: IdRef) !IdRef {
pub fn vectorType(self: *Module, len: u32, child_ty_id: Id) !Id {
const entry = try self.cache.vector_types.getOrPut(self.gpa, .{ child_ty_id, len });
if (!entry.found_existing) {
const result_id = self.allocId();
@ -595,7 +593,7 @@ pub fn vectorType(self: *Module, len: u32, child_ty_id: IdRef) !IdRef {
return entry.value_ptr.*;
}
pub fn arrayType(self: *Module, len_id: IdRef, child_ty_id: IdRef) !IdRef {
pub fn arrayType(self: *Module, len_id: Id, child_ty_id: Id) !Id {
const entry = try self.cache.array_types.getOrPut(self.gpa, .{ child_ty_id, len_id });
if (!entry.found_existing) {
const result_id = self.allocId();
@ -609,7 +607,7 @@ pub fn arrayType(self: *Module, len_id: IdRef, child_ty_id: IdRef) !IdRef {
return entry.value_ptr.*;
}
pub fn functionType(self: *Module, return_ty_id: IdRef, param_type_ids: []const IdRef) !IdRef {
pub fn functionType(self: *Module, return_ty_id: Id, param_type_ids: []const Id) !Id {
const result_id = self.allocId();
try self.sections.types_globals_constants.emit(self.gpa, .OpTypeFunction, .{
.id_result = result_id,
@ -619,7 +617,7 @@ pub fn functionType(self: *Module, return_ty_id: IdRef, param_type_ids: []const
return result_id;
}
pub fn constant(self: *Module, result_ty_id: IdRef, value: spec.LiteralContextDependentNumber) !IdRef {
pub fn constant(self: *Module, result_ty_id: Id, value: spec.LiteralContextDependentNumber) !Id {
const result_id = self.allocId();
const section = &self.sections.types_globals_constants;
try section.emit(self.gpa, .OpConstant, .{
@ -630,7 +628,7 @@ pub fn constant(self: *Module, result_ty_id: IdRef, value: spec.LiteralContextDe
return result_id;
}
pub fn constBool(self: *Module, value: bool) !IdRef {
pub fn constBool(self: *Module, value: bool) !Id {
if (self.cache.bool_const[@intFromBool(value)]) |b| return b;
const result_ty_id = try self.boolType();
@ -653,7 +651,7 @@ pub fn constBool(self: *Module, value: bool) !IdRef {
/// Return a pointer to a builtin variable. `result_ty_id` must be a **pointer**
/// with storage class `.Input`.
pub fn builtin(self: *Module, result_ty_id: IdRef, spirv_builtin: spec.BuiltIn) !Decl.Index {
pub fn builtin(self: *Module, result_ty_id: Id, spirv_builtin: spec.BuiltIn) !Decl.Index {
const entry = try self.cache.builtins.getOrPut(self.gpa, .{ result_ty_id, spirv_builtin });
if (!entry.found_existing) {
const decl_index = try self.allocDecl(.global);
@ -662,15 +660,15 @@ pub fn builtin(self: *Module, result_ty_id: IdRef, spirv_builtin: spec.BuiltIn)
try self.sections.types_globals_constants.emit(self.gpa, .OpVariable, .{
.id_result_type = result_ty_id,
.id_result = result_id,
.storage_class = .Input,
.storage_class = .input,
});
try self.decorate(result_id, .{ .BuiltIn = .{ .built_in = spirv_builtin } });
try self.decorate(result_id, .{ .built_in = .{ .built_in = spirv_builtin } });
try self.declareDeclDeps(decl_index, &.{});
}
return entry.value_ptr.*;
}
pub fn constUndef(self: *Module, ty_id: IdRef) !IdRef {
pub fn constUndef(self: *Module, ty_id: Id) !Id {
const result_id = self.allocId();
try self.sections.types_globals_constants.emit(self.gpa, .OpUndef, .{
.id_result_type = ty_id,
@ -679,7 +677,7 @@ pub fn constUndef(self: *Module, ty_id: IdRef) !IdRef {
return result_id;
}
pub fn constNull(self: *Module, ty_id: IdRef) !IdRef {
pub fn constNull(self: *Module, ty_id: Id) !Id {
const result_id = self.allocId();
try self.sections.types_globals_constants.emit(self.gpa, .OpConstantNull, .{
.id_result_type = ty_id,
@ -691,7 +689,7 @@ pub fn constNull(self: *Module, ty_id: IdRef) !IdRef {
/// Decorate a result-id.
pub fn decorate(
self: *Module,
target: IdRef,
target: Id,
decoration: spec.Decoration.Extended,
) !void {
const entry = try self.cache.decorations.getOrPut(self.gpa, .{ target, decoration });
@ -707,7 +705,7 @@ pub fn decorate(
/// We really don't have to and shouldn't need to cache this.
pub fn decorateMember(
self: *Module,
structure_type: IdRef,
structure_type: Id,
member: u32,
decoration: spec.Decoration.Extended,
) !void {
@ -762,20 +760,20 @@ pub fn declareEntryPoint(
if (!gop.found_existing) gop.value_ptr.exec_mode = exec_mode;
}
pub fn debugName(self: *Module, target: IdResult, name: []const u8) !void {
pub fn debugName(self: *Module, target: Id, name: []const u8) !void {
try self.sections.debug_names.emit(self.gpa, .OpName, .{
.target = target,
.name = name,
});
}
pub fn debugNameFmt(self: *Module, target: IdResult, comptime fmt: []const u8, args: anytype) !void {
pub fn debugNameFmt(self: *Module, target: Id, comptime fmt: []const u8, args: anytype) !void {
const name = try std.fmt.allocPrint(self.gpa, fmt, args);
defer self.gpa.free(name);
try self.debugName(target, name);
}
pub fn memberDebugName(self: *Module, target: IdResult, member: u32, name: []const u8) !void {
pub fn memberDebugName(self: *Module, target: Id, member: u32, name: []const u8) !void {
try self.sections.debug_names.emit(self.gpa, .OpMemberName, .{
.type = target,
.member = member,

View File

@ -79,7 +79,7 @@ pub fn emit(
pub fn emitBranch(
section: *Section,
allocator: Allocator,
target_label: spec.IdRef,
target_label: spec.Id,
) !void {
try section.emit(allocator, .OpBranch, .{
.target_label = target_label,
@ -94,8 +94,8 @@ pub fn emitSpecConstantOp(
) !void {
const word_count = operandsSize(opcode.Operands(), operands);
try section.emitRaw(allocator, .OpSpecConstantOp, 1 + word_count);
section.writeOperand(spec.IdRef, operands.id_result_type);
section.writeOperand(spec.IdRef, operands.id_result);
section.writeOperand(spec.Id, operands.id_result_type);
section.writeOperand(spec.Id, operands.id_result);
section.writeOperand(Opcode, opcode);
const fields = @typeInfo(opcode.Operands()).@"struct".fields;
@ -134,7 +134,7 @@ fn writeOperands(section: *Section, comptime Operands: type, operands: Operands)
pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand) void {
switch (Operand) {
spec.IdResult => section.writeWord(@intFromEnum(operand)),
spec.Id => section.writeWord(@intFromEnum(operand)),
spec.LiteralInteger => section.writeWord(operand),
@ -266,7 +266,7 @@ fn operandsSize(comptime Operands: type, operands: Operands) usize {
fn operandSize(comptime Operand: type, operand: Operand) usize {
return switch (Operand) {
spec.IdResult,
spec.Id,
spec.LiteralInteger,
spec.LiteralExtInstInteger,
=> 1,

View File

@ -1,13 +1,11 @@
{
"version": 0,
"revision": 0,
"instructions": [
{
"opname": "InvocationGlobal",
"opcode": 0,
"operands": [
{ "kind": "IdRef", "name": "initializer function" }
]
}
]
"version": 0,
"revision": 0,
"instructions": [
{
"opname": "InvocationGlobal",
"opcode": 0,
"operands": [{ "kind": "IdRef", "name": "initializer function" }]
}
]
}

File diff suppressed because it is too large Load Diff

View File

@ -211,6 +211,10 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
try cflags.append("-DLIBCXX_BUILDING_LIBCXXABI");
try cflags.append("-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER");
if (target.os.tag == .wasi) {
try cflags.append("-fno-exceptions");
}
try cflags.append("-fvisibility=hidden");
try cflags.append("-fvisibility-inlines-hidden");
@ -388,6 +392,9 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
for (libcxxabi_files) |cxxabi_src| {
if (!comp.config.any_non_single_threaded and std.mem.startsWith(u8, cxxabi_src, "src/cxa_thread_atexit.cpp"))
continue;
if (target.os.tag == .wasi and
(std.mem.eql(u8, cxxabi_src, "src/cxa_exception.cpp") or std.mem.eql(u8, cxxabi_src, "src/cxa_personality.cpp")))
continue;
var cflags = std.ArrayList([]const u8).init(arena);
@ -403,6 +410,10 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
try cflags.append("-DHAVE___CXA_THREAD_ATEXIT_IMPL");
}
if (target.os.tag == .wasi) {
try cflags.append("-fno-exceptions");
}
try cflags.append("-fvisibility=hidden");
try cflags.append("-fvisibility-inlines-hidden");

View File

@ -106,6 +106,9 @@ pub fn parseCommon(
const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr));
defer gpa.free(header_buffer);
self.header = @as(*align(1) const elf.Elf64_Ehdr, @ptrCast(header_buffer)).*;
if (!mem.eql(u8, self.header.?.e_ident[0..4], elf.MAGIC)) {
return diags.failParse(path, "not an ELF file", .{});
}
const em = target.toElfMachine();
if (em != self.header.?.e_machine) {

Some files were not shown because too many files have changed in this diff Show More