Merge remote-tracking branch 'origin/master' into llvm7

This commit is contained in:
Andrew Kelley 2018-07-02 20:00:13 -04:00
commit 6e1425e312
100 changed files with 5427 additions and 1635 deletions

1
.gitignore vendored
View File

@ -1,3 +1,4 @@
zig-cache/
build/
build-*/
docgen_tmp/

View File

@ -22,7 +22,7 @@ set(ZIG_VERSION "${ZIG_VERSION_MAJOR}.${ZIG_VERSION_MINOR}.${ZIG_VERSION_PATCH}"
find_program(GIT_EXE NAMES git)
if(GIT_EXE)
execute_process(
COMMAND ${GIT_EXE} name-rev HEAD --tags --name-only --no-undefined --always
COMMAND ${GIT_EXE} -C ${CMAKE_SOURCE_DIR} name-rev HEAD --tags --name-only --no-undefined --always
OUTPUT_VARIABLE ZIG_GIT_REV
OUTPUT_STRIP_TRAILING_WHITESPACE)
if(ZIG_GIT_REV MATCHES "\\^0$")
@ -261,12 +261,15 @@ endif()
set(EMBEDDED_SOFTFLOAT_SOURCES
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_add.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_div.c"
@ -293,8 +296,20 @@ set(EMBEDDED_SOFTFLOAT_SOURCES
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_add.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_div.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_eq.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_lt.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_mul.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_rem.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_roundToInt.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sqrt.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sub.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f128M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f64.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f32_to_f128M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f128M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f16.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_add256M.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addCarryM.c"
"${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addComplCarryM.c"
@ -416,7 +431,8 @@ set(ZIG_CPP_SOURCES
set(ZIG_STD_FILES
"array_list.zig"
"atomic/index.zig"
"atomic/queue.zig"
"atomic/queue_mpmc.zig"
"atomic/queue_mpsc.zig"
"atomic/stack.zig"
"base64.zig"
"buf_map.zig"
@ -558,6 +574,7 @@ set(ZIG_STD_FILES
"special/compiler_rt/aullrem.zig"
"special/compiler_rt/comparetf2.zig"
"special/compiler_rt/divti3.zig"
"special/compiler_rt/extendXfYf2.zig"
"special/compiler_rt/fixuint.zig"
"special/compiler_rt/fixunsdfdi.zig"
"special/compiler_rt/fixunsdfsi.zig"
@ -568,8 +585,17 @@ set(ZIG_STD_FILES
"special/compiler_rt/fixunstfdi.zig"
"special/compiler_rt/fixunstfsi.zig"
"special/compiler_rt/fixunstfti.zig"
"special/compiler_rt/floatunditf.zig"
"special/compiler_rt/floatunsitf.zig"
"special/compiler_rt/floatuntidf.zig"
"special/compiler_rt/floatuntisf.zig"
"special/compiler_rt/floatuntitf.zig"
"special/compiler_rt/floattidf.zig"
"special/compiler_rt/floattisf.zig"
"special/compiler_rt/floattitf.zig"
"special/compiler_rt/muloti4.zig"
"special/compiler_rt/index.zig"
"special/compiler_rt/truncXfYf2.zig"
"special/compiler_rt/udivmod.zig"
"special/compiler_rt/udivmoddi4.zig"
"special/compiler_rt/udivmodti4.zig"

File diff suppressed because it is too large Load Diff

View File

@ -168,7 +168,7 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
pub fn many(self: *Args, name: []const u8) ?[]const []const u8 {
pub fn many(self: *Args, name: []const u8) []const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Many => |inner| {
@ -177,7 +177,7 @@ pub const Args = struct {
else => @panic("attempted to retrieve flag with wrong type"),
}
} else {
return null;
return []const []const u8{};
}
}
};

View File

@ -35,7 +35,7 @@ pub fn createFromParseError(
var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
try parse_error.render(&tree.tokens, out_stream);
const msg = try allocator.construct(Msg{
const msg = try allocator.create(Msg{
.tree = tree,
.path = path,
.text = text_buf.toOwnedSlice(),

View File

@ -1,6 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const event = std.event;
const os = std.os;
const io = std.io;
const mem = std.mem;
@ -26,15 +27,11 @@ const usage =
\\
\\Commands:
\\
\\ build Build project from build.zig
\\ build-exe [source] Create executable from source or object files
\\ build-lib [source] Create library from source or object files
\\ build-obj [source] Create object from source or assembly
\\ fmt [source] Parse file and render in canonical zig format
\\ run [source] Create executable and run immediately
\\ targets List available compilation targets
\\ test [source] Create and run a test build
\\ translate-c [source] Convert c code to zig code
\\ version Print version number and exit
\\ zen Print zen of zig and exit
\\
@ -47,7 +44,10 @@ const Command = struct {
};
pub fn main() !void {
var allocator = std.heap.c_allocator;
// This allocator needs to be thread-safe because we use it for the event.Loop
// which multiplexes coroutines onto kernel threads.
// libc allocator is guaranteed to have this property.
const allocator = std.heap.c_allocator;
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
@ -58,18 +58,16 @@ pub fn main() !void {
stderr = &stderr_out_stream.stream;
const args = try os.argsAlloc(allocator);
defer os.argsFree(allocator, args);
// TODO I'm getting unreachable code here, which shouldn't happen
//defer os.argsFree(allocator, args);
if (args.len <= 1) {
try stderr.write("expected command argument\n\n");
try stderr.write(usage);
os.exit(1);
}
const commands = []Command{
Command{
.name = "build",
.exec = cmdBuild,
},
Command{
.name = "build-exe",
.exec = cmdBuildExe,
@ -86,22 +84,10 @@ pub fn main() !void {
.name = "fmt",
.exec = cmdFmt,
},
Command{
.name = "run",
.exec = cmdRun,
},
Command{
.name = "targets",
.exec = cmdTargets,
},
Command{
.name = "test",
.exec = cmdTest,
},
Command{
.name = "translate-c",
.exec = cmdTranslateC,
},
Command{
.name = "version",
.exec = cmdVersion,
@ -124,177 +110,15 @@ pub fn main() !void {
for (commands) |command| {
if (mem.eql(u8, command.name, args[1])) {
try command.exec(allocator, args[2..]);
return;
return command.exec(allocator, args[2..]);
}
}
try stderr.print("unknown command: {}\n\n", args[1]);
try stderr.write(usage);
os.exit(1);
}
// cmd:build ///////////////////////////////////////////////////////////////////////////////////////
const usage_build =
\\usage: zig build <options>
\\
\\General Options:
\\ --help Print this help and exit
\\ --init Generate a build.zig template
\\ --build-file [file] Override path to build.zig
\\ --cache-dir [path] Override path to cache directory
\\ --verbose Print commands before executing them
\\ --prefix [path] Override default install prefix
\\
\\Project-Specific Options:
\\
\\ Project-specific options become available when the build file is found.
\\
\\Advanced Options:
\\ --build-file [file] Override path to build.zig
\\ --cache-dir [path] Override path to cache directory
\\ --verbose-tokenize Enable compiler debug output for tokenization
\\ --verbose-ast Enable compiler debug output for parsing into an AST
\\ --verbose-link Enable compiler debug output for linking
\\ --verbose-ir Enable compiler debug output for Zig IR
\\ --verbose-llvm-ir Enable compiler debug output for LLVM IR
\\ --verbose-cimport Enable compiler debug output for C imports
\\
\\
;
const args_build_spec = []Flag{
Flag.Bool("--help"),
Flag.Bool("--init"),
Flag.Arg1("--build-file"),
Flag.Arg1("--cache-dir"),
Flag.Bool("--verbose"),
Flag.Arg1("--prefix"),
Flag.Arg1("--build-file"),
Flag.Arg1("--cache-dir"),
Flag.Bool("--verbose-tokenize"),
Flag.Bool("--verbose-ast"),
Flag.Bool("--verbose-link"),
Flag.Bool("--verbose-ir"),
Flag.Bool("--verbose-llvm-ir"),
Flag.Bool("--verbose-cimport"),
};
const missing_build_file =
\\No 'build.zig' file found.
\\
\\Initialize a 'build.zig' template file with `zig build --init`,
\\or build an executable directly with `zig build-exe $FILENAME.zig`.
\\
\\See: `zig build --help` or `zig help` for more options.
\\
;
fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
if (flags.present("help")) {
try stderr.write(usage_build);
os.exit(0);
}
const zig_lib_dir = try introspect.resolveZigLibDir(allocator);
defer allocator.free(zig_lib_dir);
const zig_std_dir = try os.path.join(allocator, zig_lib_dir, "std");
defer allocator.free(zig_std_dir);
const special_dir = try os.path.join(allocator, zig_std_dir, "special");
defer allocator.free(special_dir);
const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
defer allocator.free(build_runner_path);
const build_file = flags.single("build-file") orelse "build.zig";
const build_file_abs = try os.path.resolve(allocator, ".", build_file);
defer allocator.free(build_file_abs);
const build_file_exists = os.File.access(allocator, build_file_abs, os.default_file_mode) catch false;
if (flags.present("init")) {
if (build_file_exists) {
try stderr.print("build.zig already exists\n");
os.exit(1);
}
// need a new scope for proper defer scope finalization on exit
{
const build_template_path = try os.path.join(allocator, special_dir, "build_file_template.zig");
defer allocator.free(build_template_path);
try os.copyFile(allocator, build_template_path, build_file_abs);
try stderr.print("wrote build.zig template\n");
}
os.exit(0);
}
if (!build_file_exists) {
try stderr.write(missing_build_file);
os.exit(1);
}
// TODO: Invoke build.zig entrypoint directly?
var zig_exe_path = try os.selfExePath(allocator);
defer allocator.free(zig_exe_path);
var build_args = ArrayList([]const u8).init(allocator);
defer build_args.deinit();
const build_file_basename = os.path.basename(build_file_abs);
const build_file_dirname = os.path.dirname(build_file_abs) orelse ".";
var full_cache_dir: []u8 = undefined;
if (flags.single("cache-dir")) |cache_dir| {
full_cache_dir = try os.path.resolve(allocator, ".", cache_dir, full_cache_dir);
} else {
full_cache_dir = try os.path.join(allocator, build_file_dirname, "zig-cache");
}
defer allocator.free(full_cache_dir);
const path_to_build_exe = try os.path.join(allocator, full_cache_dir, "build");
defer allocator.free(path_to_build_exe);
try build_args.append(path_to_build_exe);
try build_args.append(zig_exe_path);
try build_args.append(build_file_dirname);
try build_args.append(full_cache_dir);
var proc = try os.ChildProcess.init(build_args.toSliceConst(), allocator);
defer proc.deinit();
var term = try proc.spawnAndWait();
switch (term) {
os.ChildProcess.Term.Exited => |status| {
if (status != 0) {
try stderr.print("{} exited with status {}\n", build_args.at(0), status);
os.exit(1);
}
},
os.ChildProcess.Term.Signal => |signal| {
try stderr.print("{} killed by signal {}\n", build_args.at(0), signal);
os.exit(1);
},
os.ChildProcess.Term.Stopped => |signal| {
try stderr.print("{} stopped by signal {}\n", build_args.at(0), signal);
os.exit(1);
},
os.ChildProcess.Term.Unknown => |status| {
try stderr.print("{} encountered unknown failure {}\n", build_args.at(0), status);
os.exit(1);
},
}
}
// cmd:build-exe ///////////////////////////////////////////////////////////////////////////////////
const usage_build_generic =
\\usage: zig build-exe <options> [file]
\\ zig build-lib <options> [file]
@ -315,8 +139,11 @@ const usage_build_generic =
\\ --output-h [file] Override generated header file path
\\ --pkg-begin [name] [path] Make package available to import and push current pkg
\\ --pkg-end Pop current pkg
\\ --release-fast Build with optimizations on and safety off
\\ --release-safe Build with optimizations on and safety on
\\ --mode [mode] Set the build mode
\\ debug (default) optimizations off, safety on
\\ release-fast optimizations on, safety off
\\ release-safe optimizations on, safety on
\\ release-small optimize for small binary, safety off
\\ --static Output will be statically linked
\\ --strip Exclude debug symbols
\\ --target-arch [name] Specify target architecture
@ -367,6 +194,12 @@ const args_build_generic = []Flag{
"off",
"on",
}),
Flag.Option("--mode", []const []const u8{
"debug",
"release-fast",
"release-safe",
"release-small",
}),
Flag.ArgMergeN("--assembly", 1),
Flag.Arg1("--cache-dir"),
@ -383,8 +216,6 @@ const args_build_generic = []Flag{
// NOTE: Parsed manually after initial check
Flag.ArgN("--pkg-begin", 2),
Flag.Bool("--pkg-end"),
Flag.Bool("--release-fast"),
Flag.Bool("--release-safe"),
Flag.Bool("--static"),
Flag.Bool("--strip"),
Flag.Arg1("--target-arch"),
@ -431,16 +262,25 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
defer flags.deinit();
if (flags.present("help")) {
try stderr.write(usage_build_generic);
try stdout.write(usage_build_generic);
os.exit(0);
}
var build_mode = builtin.Mode.Debug;
if (flags.present("release-fast")) {
build_mode = builtin.Mode.ReleaseFast;
} else if (flags.present("release-safe")) {
build_mode = builtin.Mode.ReleaseSafe;
}
const build_mode = blk: {
if (flags.single("mode")) |mode_flag| {
if (mem.eql(u8, mode_flag, "debug")) {
break :blk builtin.Mode.Debug;
} else if (mem.eql(u8, mode_flag, "release-fast")) {
break :blk builtin.Mode.ReleaseFast;
} else if (mem.eql(u8, mode_flag, "release-safe")) {
break :blk builtin.Mode.ReleaseSafe;
} else if (mem.eql(u8, mode_flag, "release-small")) {
break :blk builtin.Mode.ReleaseSmall;
} else unreachable;
} else {
break :blk builtin.Mode.Debug;
}
};
const color = blk: {
if (flags.single("color")) |color_flag| {
@ -456,20 +296,21 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
};
var emit_type = Module.Emit.Binary;
if (flags.single("emit")) |emit_flag| {
if (mem.eql(u8, emit_flag, "asm")) {
emit_type = Module.Emit.Assembly;
} else if (mem.eql(u8, emit_flag, "bin")) {
emit_type = Module.Emit.Binary;
} else if (mem.eql(u8, emit_flag, "llvm-ir")) {
emit_type = Module.Emit.LlvmIr;
const emit_type = blk: {
if (flags.single("emit")) |emit_flag| {
if (mem.eql(u8, emit_flag, "asm")) {
break :blk Module.Emit.Assembly;
} else if (mem.eql(u8, emit_flag, "bin")) {
break :blk Module.Emit.Binary;
} else if (mem.eql(u8, emit_flag, "llvm-ir")) {
break :blk Module.Emit.LlvmIr;
} else unreachable;
} else {
unreachable;
break :blk Module.Emit.Binary;
}
}
};
var cur_pkg = try Module.CliPkg.init(allocator, "", "", null); // TODO: Need a path, name?
var cur_pkg = try CliPkg.init(allocator, "", "", null);
defer cur_pkg.deinit();
var i: usize = 0;
@ -482,15 +323,16 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
i += 1;
const new_pkg_path = args[i];
var new_cur_pkg = try Module.CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
var new_cur_pkg = try CliPkg.init(allocator, new_pkg_name, new_pkg_path, cur_pkg);
try cur_pkg.children.append(new_cur_pkg);
cur_pkg = new_cur_pkg;
} else if (mem.eql(u8, "--pkg-end", arg_name)) {
if (cur_pkg.parent == null) {
if (cur_pkg.parent) |parent| {
cur_pkg = parent;
} else {
try stderr.print("encountered --pkg-end with no matching --pkg-begin\n");
os.exit(1);
}
cur_pkg = cur_pkg.parent.?;
}
}
@ -499,43 +341,42 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
os.exit(1);
}
var in_file: ?[]const u8 = undefined;
switch (flags.positionals.len) {
0 => {
try stderr.write("--name [name] not provided and unable to infer\n");
os.exit(1);
},
1 => {
in_file = flags.positionals.at(0);
},
const provided_name = flags.single("name");
const root_source_file = switch (flags.positionals.len) {
0 => null,
1 => flags.positionals.at(0),
else => {
try stderr.write("only one zig input file is accepted during build\n");
try stderr.print("unexpected extra parameter: {}\n", flags.positionals.at(1));
os.exit(1);
},
}
const basename = os.path.basename(in_file.?);
var it = mem.split(basename, ".");
const root_name = it.next() orelse {
try stderr.write("file name cannot be empty\n");
os.exit(1);
};
const asm_a = flags.many("assembly");
const obj_a = flags.many("object");
if (in_file == null and (obj_a == null or obj_a.?.len == 0) and (asm_a == null or asm_a.?.len == 0)) {
const root_name = if (provided_name) |n| n else blk: {
if (root_source_file) |file| {
const basename = os.path.basename(file);
var it = mem.split(basename, ".");
break :blk it.next() orelse basename;
} else {
try stderr.write("--name [name] not provided and unable to infer\n");
os.exit(1);
}
};
const assembly_files = flags.many("assembly");
const link_objects = flags.many("object");
if (root_source_file == null and link_objects.len == 0 and assembly_files.len == 0) {
try stderr.write("Expected source file argument or at least one --object or --assembly argument\n");
os.exit(1);
}
if (out_type == Module.Kind.Obj and (obj_a != null and obj_a.?.len != 0)) {
if (out_type == Module.Kind.Obj and link_objects.len != 0) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
const zig_root_source_file = in_file;
const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") orelse "zig-cache"[0..]) catch {
const rel_cache_dir = flags.single("cache-dir") orelse "zig-cache"[0..];
const full_cache_dir = os.path.resolve(allocator, ".", rel_cache_dir) catch {
try stderr.print("invalid cache dir: {}\n", rel_cache_dir);
os.exit(1);
};
defer allocator.free(full_cache_dir);
@ -543,10 +384,12 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const zig_lib_dir = introspect.resolveZigLibDir(allocator) catch os.exit(1);
defer allocator.free(zig_lib_dir);
var loop = try event.Loop.init(allocator);
var module = try Module.create(
allocator,
&loop,
root_name,
zig_root_source_file,
root_source_file,
Target.Native,
out_type,
build_mode,
@ -561,24 +404,21 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.is_test = false;
if (flags.single("linker-script")) |linker_script| {
module.linker_script = linker_script;
}
module.linker_script = flags.single("linker-script");
module.each_lib_rpath = flags.present("each-lib-rpath");
var clang_argv_buf = ArrayList([]const u8).init(allocator);
defer clang_argv_buf.deinit();
if (flags.many("mllvm")) |mllvm_flags| {
for (mllvm_flags) |mllvm| {
try clang_argv_buf.append("-mllvm");
try clang_argv_buf.append(mllvm);
}
module.llvm_argv = mllvm_flags;
module.clang_argv = clang_argv_buf.toSliceConst();
const mllvm_flags = flags.many("mllvm");
for (mllvm_flags) |mllvm| {
try clang_argv_buf.append("-mllvm");
try clang_argv_buf.append(mllvm);
}
module.llvm_argv = mllvm_flags;
module.clang_argv = clang_argv_buf.toSliceConst();
module.strip = flags.present("strip");
module.is_static = flags.present("static");
@ -610,18 +450,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
module.verbose_cimport = flags.present("verbose-cimport");
module.err_color = color;
if (flags.many("library-path")) |lib_dirs| {
module.lib_dirs = lib_dirs;
}
if (flags.many("framework")) |frameworks| {
module.darwin_frameworks = frameworks;
}
if (flags.many("rpath")) |rpath_list| {
module.rpath_list = rpath_list;
}
module.lib_dirs = flags.many("library-path");
module.darwin_frameworks = flags.many("framework");
module.rpath_list = flags.many("rpath");
if (flags.single("output-h")) |output_h| {
module.out_h_path = output_h;
@ -644,41 +475,51 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
module.emit_file_type = emit_type;
if (flags.many("object")) |objects| {
module.link_objects = objects;
}
if (flags.many("assembly")) |assembly_files| {
module.assembly_files = assembly_files;
}
module.link_objects = link_objects;
module.assembly_files = assembly_files;
module.link_out_file = flags.single("out-file");
try module.build();
try module.link(flags.single("out-file") orelse null);
const process_build_events_handle = try async<loop.allocator> processBuildEvents(module, true);
defer cancel process_build_events_handle;
loop.run();
}
if (flags.present("print-timing-info")) {
// codegen_print_timing_info(g, stderr);
async fn processBuildEvents(module: *Module, watch: bool) void {
while (watch) {
// TODO directly awaiting async should guarantee memory allocation elision
const build_event = await (async module.events.get() catch unreachable);
switch (build_event) {
Module.Event.Ok => {
std.debug.warn("Build succeeded\n");
// for now we stop after 1
module.loop.stop();
return;
},
Module.Event.Error => |err| {
std.debug.warn("build failed: {}\n", @errorName(err));
@panic("TODO error return trace");
},
Module.Event.Fail => |errs| {
@panic("TODO print compile error messages");
},
}
}
try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
}
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Exe);
return buildOutputType(allocator, args, Module.Kind.Exe);
}
// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Lib);
return buildOutputType(allocator, args, Module.Kind.Lib);
}
// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Obj);
return buildOutputType(allocator, args, Module.Kind.Obj);
}
// cmd:fmt /////////////////////////////////////////////////////////////////////////////////////////
const usage_fmt =
\\usage: zig fmt [file]...
\\
@ -707,7 +548,7 @@ const Fmt = struct {
// file_path must outlive Fmt
fn addToQueue(self: *Fmt, file_path: []const u8) !void {
const new_node = try self.seen.allocator.construct(std.LinkedList([]const u8).Node{
const new_node = try self.seen.allocator.create(std.LinkedList([]const u8).Node{
.prev = undefined,
.next = undefined,
.data = file_path,
@ -735,7 +576,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer flags.deinit();
if (flags.present("help")) {
try stderr.write(usage_fmt);
try stdout.write(usage_fmt);
os.exit(0);
}
@ -863,162 +704,16 @@ fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
}
}
// cmd:version /////////////////////////////////////////////////////////////////////////////////////
fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
// cmd:test ////////////////////////////////////////////////////////////////////////////////////////
const usage_test =
\\usage: zig test [file]...
\\
\\Options:
\\ --help Print this help and exit
\\
\\
;
const args_test_spec = []Flag{Flag.Bool("--help")};
fn cmdTest(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
if (flags.present("help")) {
try stderr.write(usage_test);
os.exit(0);
}
if (flags.positionals.len != 1) {
try stderr.write("expected exactly one zig source file\n");
os.exit(1);
}
// compile the test program into the cache and run
// NOTE: May be overlap with buildOutput, take the shared part out.
try stderr.print("testing file {}\n", flags.positionals.at(0));
}
// cmd:run /////////////////////////////////////////////////////////////////////////////////////////
// Run should be simple and not expose the full set of arguments provided by build-exe. If specific
// build requirements are need, the user should `build-exe` then `run` manually.
const usage_run =
\\usage: zig run [file] -- <runtime args>
\\
\\Options:
\\ --help Print this help and exit
\\
\\
;
const args_run_spec = []Flag{Flag.Bool("--help")};
fn cmdRun(allocator: *Allocator, args: []const []const u8) !void {
var compile_args = args;
var runtime_args: []const []const u8 = []const []const u8{};
for (args) |argv, i| {
if (mem.eql(u8, argv, "--")) {
compile_args = args[0..i];
runtime_args = args[i + 1 ..];
break;
}
}
var flags = try Args.parse(allocator, args_run_spec, compile_args);
defer flags.deinit();
if (flags.present("help")) {
try stderr.write(usage_run);
os.exit(0);
}
if (flags.positionals.len != 1) {
try stderr.write("expected exactly one zig source file\n");
os.exit(1);
}
try stderr.print("runtime args:\n");
for (runtime_args) |cargs| {
try stderr.print("{}\n", cargs);
}
}
// cmd:translate-c /////////////////////////////////////////////////////////////////////////////////
const usage_translate_c =
\\usage: zig translate-c [file]
\\
\\Options:
\\ --help Print this help and exit
\\ --enable-timing-info Print timing diagnostics
\\ --output [path] Output file to write generated zig file (default: stdout)
\\
\\
;
const args_translate_c_spec = []Flag{
Flag.Bool("--help"),
Flag.Bool("--enable-timing-info"),
Flag.Arg1("--libc-include-dir"),
Flag.Arg1("--output"),
};
fn cmdTranslateC(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_translate_c_spec, args);
defer flags.deinit();
if (flags.present("help")) {
try stderr.write(usage_translate_c);
os.exit(0);
}
if (flags.positionals.len != 1) {
try stderr.write("expected exactly one c source file\n");
os.exit(1);
}
// set up codegen
const zig_root_source_file = null;
// NOTE: translate-c shouldn't require setting up the full codegen instance as it does in
// the C++ compiler.
// codegen_create(g);
// codegen_set_out_name(g, null);
// codegen_translate_c(g, flags.positional.at(0))
var output_stream = stdout;
if (flags.single("output")) |output_file| {
var file = try os.File.openWrite(allocator, output_file);
defer file.close();
var file_stream = io.FileOutStream.init(&file);
// TODO: Not being set correctly, still stdout
output_stream = &file_stream.stream;
}
// ast_render(g, output_stream, g->root_import->root, 4);
try output_stream.write("pub const example = 10;\n");
if (flags.present("enable-timing-info")) {
// codegen_print_timing_info(g, stdout);
try stderr.write("printing timing info for translate-c\n");
}
}
// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
try stderr.write(usage);
try stdout.write(usage);
}
// cmd:zen /////////////////////////////////////////////////////////////////////////////////////////
const info_zen =
\\
\\ * Communicate intent precisely.
@ -1040,8 +735,6 @@ fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
// cmd:internal ////////////////////////////////////////////////////////////////////////////////////
const usage_internal =
\\usage: zig internal [subcommand]
\\
@ -1095,3 +788,27 @@ fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
);
}
const CliPkg = struct {
name: []const u8,
path: []const u8,
children: ArrayList(*CliPkg),
parent: ?*CliPkg,
pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
var pkg = try allocator.create(CliPkg{
.name = name,
.path = path,
.children = ArrayList(*CliPkg).init(allocator),
.parent = parent,
});
return pkg;
}
pub fn deinit(self: *CliPkg) void {
for (self.children.toSliceConst()) |child| {
child.deinit();
}
self.children.deinit();
}
};

View File

@ -11,9 +11,11 @@ const warn = std.debug.warn;
const Token = std.zig.Token;
const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
const ast = std.zig.ast;
const event = std.event;
pub const Module = struct {
allocator: *mem.Allocator,
loop: *event.Loop,
name: Buffer,
root_src_path: ?[]const u8,
module: llvm.ModuleRef,
@ -76,6 +78,52 @@ pub const Module = struct {
kind: Kind,
link_out_file: ?[]const u8,
events: *event.Channel(Event),
// TODO handle some of these earlier and report them in a way other than error codes
pub const BuildError = error{
OutOfMemory,
EndOfStream,
BadFd,
Io,
IsDir,
Unexpected,
SystemResources,
SharingViolation,
PathAlreadyExists,
FileNotFound,
AccessDenied,
PipeBusy,
FileTooBig,
SymLinkLoop,
ProcessFdQuotaExceeded,
NameTooLong,
SystemFdQuotaExceeded,
NoDevice,
PathNotFound,
NoSpaceLeft,
NotDir,
FileSystem,
OperationAborted,
IoPending,
BrokenPipe,
WouldBlock,
FileClosed,
DestinationAddressRequired,
DiskQuota,
InputOutput,
NoStdHandles,
Overflow,
NotSupported,
};
pub const Event = union(enum) {
Ok,
Fail: []errmsg.Msg,
Error: BuildError,
};
pub const DarwinVersionMin = union(enum) {
None,
MacOS: []const u8,
@ -103,31 +151,17 @@ pub const Module = struct {
LlvmIr,
};
pub const CliPkg = struct {
pub fn create(
loop: *event.Loop,
name: []const u8,
path: []const u8,
children: ArrayList(*CliPkg),
parent: ?*CliPkg,
pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
var pkg = try allocator.create(CliPkg);
pkg.name = name;
pkg.path = path;
pkg.children = ArrayList(*CliPkg).init(allocator);
pkg.parent = parent;
return pkg;
}
pub fn deinit(self: *CliPkg) void {
for (self.children.toSliceConst()) |child| {
child.deinit();
}
self.children.deinit();
}
};
pub fn create(allocator: *mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: *const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !*Module {
var name_buffer = try Buffer.init(allocator, name);
root_src_path: ?[]const u8,
target: *const Target,
kind: Kind,
build_mode: builtin.Mode,
zig_lib_dir: []const u8,
cache_dir: []const u8,
) !*Module {
var name_buffer = try Buffer.init(loop.allocator, name);
errdefer name_buffer.deinit();
const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
@ -139,11 +173,12 @@ pub const Module = struct {
const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
const module_ptr = try allocator.create(Module);
errdefer allocator.destroy(module_ptr);
const events = try event.Channel(Event).create(loop, 0);
errdefer events.destroy();
module_ptr.* = Module{
.allocator = allocator,
return loop.allocator.create(Module{
.loop = loop,
.events = events,
.name = name_buffer,
.root_src_path = root_src_path,
.module = module,
@ -188,7 +223,7 @@ pub const Module = struct {
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
.link_libs_list = ArrayList(*LinkLib).init(allocator),
.link_libs_list = ArrayList(*LinkLib).init(loop.allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
@ -196,8 +231,8 @@ pub const Module = struct {
.test_filters = [][]const u8{},
.test_name_prefix = null,
.emit_file_type = Emit.Binary,
};
return module_ptr;
.link_out_file = null,
});
}
fn dump(self: *Module) void {
@ -205,58 +240,70 @@ pub const Module = struct {
}
pub fn destroy(self: *Module) void {
self.events.destroy();
c.LLVMDisposeBuilder(self.builder);
c.LLVMDisposeModule(self.module);
c.LLVMContextDispose(self.context);
self.name.deinit();
self.allocator.destroy(self);
self.a().destroy(self);
}
pub fn build(self: *Module) !void {
if (self.llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator, [][]const []const u8{
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.a(), [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
self.llvm_argv,
});
defer c_compatible_args.deinit();
// TODO this sets global state
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
_ = try async<self.a()> self.buildAsync();
}
async fn buildAsync(self: *Module) void {
while (true) {
// TODO directly awaiting async should guarantee memory allocation elision
// TODO also async before suspending should guarantee memory allocation elision
(await (async self.addRootSrc() catch unreachable)) catch |err| {
await (async self.events.put(Event{ .Error = err }) catch unreachable);
return;
};
await (async self.events.put(Event.Ok) catch unreachable);
}
}
async fn addRootSrc(self: *Module) !void {
const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
const root_src_real_path = os.path.real(self.a(), root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
};
errdefer self.allocator.free(root_src_real_path);
errdefer self.a().free(root_src_real_path);
const source_code = io.readFileAlloc(self.allocator, root_src_real_path) catch |err| {
const source_code = io.readFileAlloc(self.a(), root_src_real_path) catch |err| {
try printError("unable to open '{}': {}", root_src_real_path, err);
return err;
};
errdefer self.allocator.free(source_code);
errdefer self.a().free(source_code);
warn("====input:====\n");
warn("{}", source_code);
warn("====parse:====\n");
var tree = try std.zig.parse(self.allocator, source_code);
var tree = try std.zig.parse(self.a(), source_code);
defer tree.deinit();
var stderr_file = try std.io.getStdErr();
var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
const out_stream = &stderr_file_out_stream.stream;
warn("====fmt:====\n");
_ = try std.zig.render(self.allocator, out_stream, &tree);
warn("====ir:====\n");
warn("TODO\n\n");
warn("====llvm ir:====\n");
self.dump();
//var it = tree.root_node.decls.iterator();
//while (it.next()) |decl_ptr| {
// const decl = decl_ptr.*;
// switch (decl.id) {
// ast.Node.Comptime => @panic("TODO"),
// ast.Node.VarDecl => @panic("TODO"),
// ast.Node.UseDecl => @panic("TODO"),
// ast.Node.FnDef => @panic("TODO"),
// ast.Node.TestDecl => @panic("TODO"),
// else => unreachable,
// }
//}
}
pub fn link(self: *Module, out_file: ?[]const u8) !void {
@ -279,19 +326,22 @@ pub const Module = struct {
}
}
const link_lib = try self.allocator.create(LinkLib);
link_lib.* = LinkLib{
const link_lib = try self.a().create(LinkLib{
.name = name,
.path = null,
.provided_explicitly = provided_explicitly,
.symbols = ArrayList([]u8).init(self.allocator),
};
.symbols = ArrayList([]u8).init(self.a()),
});
try self.link_libs_list.append(link_lib);
if (is_libc) {
self.libc_link_lib = link_lib;
}
return link_lib;
}
fn a(self: Module) *mem.Allocator {
return self.loop.allocator;
}
};
fn printError(comptime format: []const u8, args: ...) !void {

View File

@ -234,6 +234,16 @@ enum RuntimeHintPtr {
RuntimeHintPtrNonStack,
};
enum RuntimeHintSliceId {
RuntimeHintSliceIdUnknown,
RuntimeHintSliceIdLen,
};
struct RuntimeHintSlice {
enum RuntimeHintSliceId id;
uint64_t len;
};
struct ConstGlobalRefs {
LLVMValueRef llvm_value;
LLVMValueRef llvm_global;
@ -248,6 +258,7 @@ struct ConstExprValue {
// populated if special == ConstValSpecialStatic
BigInt x_bigint;
BigFloat x_bigfloat;
float16_t x_f16;
float x_f32;
double x_f64;
float128_t x_f128;
@ -270,6 +281,7 @@ struct ConstExprValue {
RuntimeHintErrorUnion rh_error_union;
RuntimeHintOptional rh_maybe;
RuntimeHintPtr rh_ptr;
RuntimeHintSlice rh_slice;
} data;
};
@ -1222,7 +1234,7 @@ struct TypeTableEntry {
// use these fields to make sure we don't duplicate type table entries for the same type
TypeTableEntry *pointer_parent[2]; // [0 - mut, 1 - const]
TypeTableEntry *maybe_parent;
TypeTableEntry *optional_parent;
TypeTableEntry *promise_parent;
TypeTableEntry *promise_frame_parent;
// If we generate a constant name value for this type, we memoize it here.
@ -1359,9 +1371,16 @@ enum BuiltinFnId {
BuiltinFnIdTruncate,
BuiltinFnIdIntCast,
BuiltinFnIdFloatCast,
BuiltinFnIdErrSetCast,
BuiltinFnIdToBytes,
BuiltinFnIdFromBytes,
BuiltinFnIdIntToFloat,
BuiltinFnIdFloatToInt,
BuiltinFnIdBoolToInt,
BuiltinFnIdErrToInt,
BuiltinFnIdIntToErr,
BuiltinFnIdEnumToInt,
BuiltinFnIdIntToEnum,
BuiltinFnIdIntType,
BuiltinFnIdSetCold,
BuiltinFnIdSetRuntimeSafety,
@ -1416,6 +1435,7 @@ enum PanicMsgId {
PanicMsgIdIncorrectAlignment,
PanicMsgIdBadUnionField,
PanicMsgIdBadEnumValue,
PanicMsgIdFloatToInt,
PanicMsgIdCount,
};
@ -1579,6 +1599,7 @@ struct CodeGen {
TypeTableEntry *entry_i128;
TypeTableEntry *entry_isize;
TypeTableEntry *entry_usize;
TypeTableEntry *entry_f16;
TypeTableEntry *entry_f32;
TypeTableEntry *entry_f64;
TypeTableEntry *entry_f128;
@ -2074,6 +2095,7 @@ enum IrInstructionId {
IrInstructionIdIntToPtr,
IrInstructionIdPtrToInt,
IrInstructionIdIntToEnum,
IrInstructionIdEnumToInt,
IrInstructionIdIntToErr,
IrInstructionIdErrToInt,
IrInstructionIdCheckSwitchProngs,
@ -2119,6 +2141,9 @@ enum IrInstructionId {
IrInstructionIdMergeErrRetTraces,
IrInstructionIdMarkErrRetTracePtr,
IrInstructionIdSqrt,
IrInstructionIdErrSetCast,
IrInstructionIdToBytes,
IrInstructionIdFromBytes,
};
struct IrInstruction {
@ -2654,6 +2679,26 @@ struct IrInstructionFloatCast {
IrInstruction *target;
};
struct IrInstructionErrSetCast {
IrInstruction base;
IrInstruction *dest_type;
IrInstruction *target;
};
struct IrInstructionToBytes {
IrInstruction base;
IrInstruction *target;
};
struct IrInstructionFromBytes {
IrInstruction base;
IrInstruction *dest_child_type;
IrInstruction *target;
};
struct IrInstructionIntToFloat {
IrInstruction base;
@ -2864,6 +2909,13 @@ struct IrInstructionIntToPtr {
struct IrInstructionIntToEnum {
IrInstruction base;
IrInstruction *dest_type;
IrInstruction *target;
};
struct IrInstructionEnumToInt {
IrInstruction base;
IrInstruction *target;
};

View File

@ -482,7 +482,7 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
return return_type->promise_frame_parent;
}
TypeTableEntry *awaiter_handle_type = get_maybe_type(g, g->builtin_types.entry_promise);
TypeTableEntry *awaiter_handle_type = get_optional_type(g, g->builtin_types.entry_promise);
TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false);
ZigList<const char *> field_names = {};
@ -513,9 +513,9 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type)
return entry;
}
TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
if (child_type->maybe_parent) {
TypeTableEntry *entry = child_type->maybe_parent;
TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type) {
if (child_type->optional_parent) {
TypeTableEntry *entry = child_type->optional_parent;
return entry;
} else {
ensure_complete_type(g, child_type);
@ -592,7 +592,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
entry->data.maybe.child_type = child_type;
child_type->maybe_parent = entry;
child_type->optional_parent = entry;
return entry;
}
}
@ -1470,6 +1470,17 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
calling_convention_name(fn_type_id.cc)));
return g->builtin_types.entry_invalid;
}
if (param_node->data.param_decl.type != nullptr) {
TypeTableEntry *type_entry = analyze_type_expr(g, child_scope, param_node->data.param_decl.type);
if (type_is_invalid(type_entry)) {
return g->builtin_types.entry_invalid;
}
FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index];
param_info->type = type_entry;
param_info->is_noalias = param_node->data.param_decl.is_noalias;
fn_type_id.next_param_index += 1;
}
return get_generic_fn_type(g, &fn_type_id);
} else if (param_is_var_args) {
if (fn_type_id.cc == CallingConventionC) {
@ -2307,8 +2318,9 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) {
return;
if (enum_type->data.enumeration.zero_bits_loop_flag) {
enum_type->data.enumeration.zero_bits_known = true;
enum_type->data.enumeration.zero_bits_loop_flag = false;
add_node_error(g, enum_type->data.enumeration.decl_node,
buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name)));
enum_type->data.enumeration.is_invalid = true;
return;
}
@ -2984,7 +2996,7 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
TypeTableEntry *optional_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g));
TypeTableEntry *optional_ptr_to_stack_trace_type = get_optional_type(g, get_ptr_to_stack_trace_type(g));
if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
@ -3716,6 +3728,7 @@ TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt
}
TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag) {
assert(enum_type->data.enumeration.zero_bits_known);
for (uint32_t i = 0; i < enum_type->data.enumeration.src_field_count; i += 1) {
TypeEnumField *field = &enum_type->data.enumeration.fields[i];
if (bigint_cmp(&field->value, tag) == CmpEQ) {
@ -4656,6 +4669,13 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
}
case TypeTableEntryIdFloat:
switch (const_val->type->data.floating.bit_count) {
case 16:
{
uint16_t result;
static_assert(sizeof(result) == sizeof(const_val->data.x_f16), "");
memcpy(&result, &const_val->data.x_f16, sizeof(result));
return result * 65537u;
}
case 32:
{
uint32_t result;
@ -5116,6 +5136,9 @@ void init_const_float(ConstExprValue *const_val, TypeTableEntry *type, double va
bigfloat_init_64(&const_val->data.x_bigfloat, value);
} else if (type->id == TypeTableEntryIdFloat) {
switch (type->data.floating.bit_count) {
case 16:
const_val->data.x_f16 = zig_double_to_f16(value);
break;
case 32:
const_val->data.x_f32 = value;
break;
@ -5429,6 +5452,8 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
case TypeTableEntryIdFloat:
assert(a->type->data.floating.bit_count == b->type->data.floating.bit_count);
switch (a->type->data.floating.bit_count) {
case 16:
return f16_eq(a->data.x_f16, b->data.x_f16);
case 32:
return a->data.x_f32 == b->data.x_f32;
case 64:
@ -5446,8 +5471,22 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
return const_values_equal_ptr(a, b);
case TypeTableEntryIdArray:
zig_panic("TODO");
case TypeTableEntryIdArray: {
assert(a->type->data.array.len == b->type->data.array.len);
assert(a->data.x_array.special != ConstArraySpecialUndef);
assert(b->data.x_array.special != ConstArraySpecialUndef);
size_t len = a->type->data.array.len;
ConstExprValue *a_elems = a->data.x_array.s_none.elements;
ConstExprValue *b_elems = b->data.x_array.s_none.elements;
for (size_t i = 0; i < len; ++i) {
if (!const_values_equal(&a_elems[i], &b_elems[i]))
return false;
}
return true;
}
case TypeTableEntryIdStruct:
for (size_t i = 0; i < a->type->data.structure.src_field_count; i += 1) {
ConstExprValue *field_a = &a->data.x_struct.fields[i];
@ -5558,7 +5597,7 @@ void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeT
return;
}
case ConstPtrSpecialHardCodedAddr:
buf_appendf(buf, "(*%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name),
buf_appendf(buf, "(%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->name),
const_val->data.x_ptr.data.hard_coded_addr.addr);
return;
case ConstPtrSpecialDiscard:
@ -5602,6 +5641,9 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
return;
case TypeTableEntryIdFloat:
switch (type_entry->data.floating.bit_count) {
case 16:
buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16));
return;
case 32:
buf_appendf(buf, "%f", const_val->data.x_f32);
return;

View File

@ -24,7 +24,7 @@ TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits);
TypeTableEntry **get_c_int_type_ptr(CodeGen *g, CIntType c_int_type);
TypeTableEntry *get_c_int_type(CodeGen *g, CIntType c_int_type);
TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id);
TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type);
TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type);
TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t array_size);
TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type);
TypeTableEntry *get_partial_container_type(CodeGen *g, Scope *scope, ContainerKind kind,

View File

@ -18,6 +18,10 @@ void bigfloat_init_128(BigFloat *dest, float128_t x) {
dest->value = x;
}
void bigfloat_init_16(BigFloat *dest, float16_t x) {
f16_to_f128M(x, &dest->value);
}
void bigfloat_init_32(BigFloat *dest, float x) {
float32_t f32_val;
memcpy(&f32_val, &x, sizeof(float));
@ -146,6 +150,10 @@ Cmp bigfloat_cmp(const BigFloat *op1, const BigFloat *op2) {
}
}
float16_t bigfloat_to_f16(const BigFloat *bigfloat) {
return f128M_to_f16(&bigfloat->value);
}
float bigfloat_to_f32(const BigFloat *bigfloat) {
float32_t f32_value = f128M_to_f32(&bigfloat->value);
float result;

View File

@ -22,6 +22,7 @@ struct BigFloat {
struct Buf;
void bigfloat_init_16(BigFloat *dest, float16_t x);
void bigfloat_init_32(BigFloat *dest, float x);
void bigfloat_init_64(BigFloat *dest, double x);
void bigfloat_init_128(BigFloat *dest, float128_t x);
@ -29,6 +30,7 @@ void bigfloat_init_bigfloat(BigFloat *dest, const BigFloat *x);
void bigfloat_init_bigint(BigFloat *dest, const BigInt *op);
int bigfloat_init_buf_base10(BigFloat *dest, const uint8_t *buf_ptr, size_t buf_len);
float16_t bigfloat_to_f16(const BigFloat *bigfloat);
float bigfloat_to_f32(const BigFloat *bigfloat);
double bigfloat_to_f64(const BigFloat *bigfloat);
float128_t bigfloat_to_f128(const BigFloat *bigfloat);

View File

@ -1683,10 +1683,15 @@ void bigint_incr(BigInt *x) {
bigint_init_unsigned(x, 1);
return;
}
if (x->digit_count == 1 && x->data.digit != UINT64_MAX) {
x->data.digit += 1;
return;
if (x->digit_count == 1) {
if (x->is_negative && x->data.digit != 0) {
x->data.digit -= 1;
return;
} else if (!x->is_negative && x->data.digit != UINT64_MAX) {
x->data.digit += 1;
return;
}
}
BigInt copy;

View File

@ -17,6 +17,7 @@
#include "os.hpp"
#include "translate_c.hpp"
#include "target.hpp"
#include "util.hpp"
#include "zig_llvm.h"
#include <stdio.h>
@ -865,6 +866,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("access of inactive union field");
case PanicMsgIdBadEnumValue:
return buf_create_from_str("invalid enum value");
case PanicMsgIdFloatToInt:
return buf_create_from_str("integer part of floating point value out of bounds");
}
zig_unreachable();
}
@ -1644,7 +1647,7 @@ static LLVMValueRef gen_widen_or_shorten(CodeGen *g, bool want_runtime_safety, T
return trunc_val;
}
LLVMValueRef orig_val;
if (actual_type->data.integral.is_signed) {
if (wanted_type->data.integral.is_signed) {
orig_val = LLVMBuildSExt(g->builder, trunc_val, actual_type->type_ref, "");
} else {
orig_val = LLVMBuildZExt(g->builder, trunc_val, actual_type->type_ref, "");
@ -2207,12 +2210,12 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable,
} else if (type_entry->id == TypeTableEntryIdInt) {
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, type_entry->data.integral.is_signed);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
} else if (type_entry->id == TypeTableEntryIdEnum) {
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
} else if (type_entry->id == TypeTableEntryIdErrorSet ||
} else if (type_entry->id == TypeTableEntryIdEnum ||
type_entry->id == TypeTableEntryIdErrorSet ||
type_entry->id == TypeTableEntryIdPointer ||
type_entry->id == TypeTableEntryIdBool)
type_entry->id == TypeTableEntryIdBool ||
type_entry->id == TypeTableEntryIdPromise ||
type_entry->id == TypeTableEntryIdFn)
{
LLVMIntPredicate pred = cmp_op_to_int_predicate(op_id, false);
return LLVMBuildICmp(g->builder, pred, op1_value, op2_value, "");
@ -2509,15 +2512,41 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
} else {
return LLVMBuildUIToFP(g->builder, expr_val, wanted_type->type_ref, "");
}
case CastOpFloatToInt:
case CastOpFloatToInt: {
assert(wanted_type->id == TypeTableEntryIdInt);
ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &cast_instruction->base));
bool want_safety = ir_want_runtime_safety(g, &cast_instruction->base);
LLVMValueRef result;
if (wanted_type->data.integral.is_signed) {
return LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, "");
result = LLVMBuildFPToSI(g->builder, expr_val, wanted_type->type_ref, "");
} else {
return LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, "");
result = LLVMBuildFPToUI(g->builder, expr_val, wanted_type->type_ref, "");
}
if (want_safety) {
LLVMValueRef back_to_float;
if (wanted_type->data.integral.is_signed) {
back_to_float = LLVMBuildSIToFP(g->builder, result, LLVMTypeOf(expr_val), "");
} else {
back_to_float = LLVMBuildUIToFP(g->builder, result, LLVMTypeOf(expr_val), "");
}
LLVMValueRef difference = LLVMBuildFSub(g->builder, expr_val, back_to_float, "");
LLVMValueRef one_pos = LLVMConstReal(LLVMTypeOf(expr_val), 1.0f);
LLVMValueRef one_neg = LLVMConstReal(LLVMTypeOf(expr_val), -1.0f);
LLVMValueRef ok_bit_pos = LLVMBuildFCmp(g->builder, LLVMRealOLT, difference, one_pos, "");
LLVMValueRef ok_bit_neg = LLVMBuildFCmp(g->builder, LLVMRealOGT, difference, one_neg, "");
LLVMValueRef ok_bit = LLVMBuildAnd(g->builder, ok_bit_pos, ok_bit_neg, "");
LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckOk");
LLVMBasicBlockRef bad_block = LLVMAppendBasicBlock(g->cur_fn_val, "FloatCheckFail");
LLVMBuildCondBr(g->builder, ok_bit, ok_block, bad_block);
LLVMPositionBuilderAtEnd(g->builder, bad_block);
gen_safety_crash(g, PanicMsgIdFloatToInt);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
return result;
}
case CastOpBoolToInt:
assert(wanted_type->id == TypeTableEntryIdInt);
assert(actual_type->id == TypeTableEntryIdBool);
@ -2607,8 +2636,25 @@ static LLVMValueRef ir_render_int_to_enum(CodeGen *g, IrExecutable *executable,
TypeTableEntry *tag_int_type = wanted_type->data.enumeration.tag_int_type;
LLVMValueRef target_val = ir_llvm_value(g, instruction->target);
return gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
LLVMValueRef tag_int_value = gen_widen_or_shorten(g, ir_want_runtime_safety(g, &instruction->base),
instruction->target->value.type, tag_int_type, target_val);
if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue");
LLVMBasicBlockRef ok_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "OkValue");
size_t field_count = wanted_type->data.enumeration.src_field_count;
LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count);
for (size_t field_i = 0; field_i < field_count; field_i += 1) {
LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref,
&wanted_type->data.enumeration.fields[field_i].value);
LLVMAddCase(switch_instr, this_tag_int_value, ok_value_block);
}
LLVMPositionBuilderAtEnd(g->builder, bad_value_block);
gen_safety_crash(g, PanicMsgIdBadEnumValue);
LLVMPositionBuilderAtEnd(g->builder, ok_value_block);
}
return tag_int_value;
}
static LLVMValueRef ir_render_int_to_err(CodeGen *g, IrExecutable *executable, IrInstructionIntToErr *instruction) {
@ -4638,6 +4684,10 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdIntToFloat:
case IrInstructionIdFloatToInt:
case IrInstructionIdBoolToInt:
case IrInstructionIdErrSetCast:
case IrInstructionIdFromBytes:
case IrInstructionIdToBytes:
case IrInstructionIdEnumToInt:
zig_unreachable();
case IrInstructionIdReturn:
@ -5090,6 +5140,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
const_val->data.x_err_set->value, false);
case TypeTableEntryIdFloat:
switch (type_entry->data.floating.bit_count) {
case 16:
return LLVMConstReal(type_entry->type_ref, zig_f16_to_double(const_val->data.x_f16));
case 32:
return LLVMConstReal(type_entry->type_ref, const_val->data.x_f32);
case 64:
@ -6056,58 +6108,30 @@ static void define_builtin_types(CodeGen *g) {
g->builtin_types.entry_usize = entry;
}
}
{
auto add_fp_entry = [] (CodeGen *g,
const char *name,
uint32_t bit_count,
LLVMTypeRef type_ref,
TypeTableEntry **field) {
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
entry->type_ref = LLVMFloatType();
buf_init_from_str(&entry->name, "f32");
entry->data.floating.bit_count = 32;
entry->type_ref = type_ref;
buf_init_from_str(&entry->name, name);
entry->data.floating.bit_count = bit_count;
uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
debug_size_in_bits,
ZigLLVMEncoding_DW_ATE_float());
g->builtin_types.entry_f32 = entry;
*field = entry;
g->primitive_type_table.put(&entry->name, entry);
}
{
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
entry->type_ref = LLVMDoubleType();
buf_init_from_str(&entry->name, "f64");
entry->data.floating.bit_count = 64;
};
add_fp_entry(g, "f16", 16, LLVMHalfType(), &g->builtin_types.entry_f16);
add_fp_entry(g, "f32", 32, LLVMFloatType(), &g->builtin_types.entry_f32);
add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64);
add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128);
add_fp_entry(g, "c_longdouble", 80, LLVMX86FP80Type(), &g->builtin_types.entry_c_longdouble);
uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
debug_size_in_bits,
ZigLLVMEncoding_DW_ATE_float());
g->builtin_types.entry_f64 = entry;
g->primitive_type_table.put(&entry->name, entry);
}
{
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
entry->type_ref = LLVMFP128Type();
buf_init_from_str(&entry->name, "f128");
entry->data.floating.bit_count = 128;
uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
debug_size_in_bits,
ZigLLVMEncoding_DW_ATE_float());
g->builtin_types.entry_f128 = entry;
g->primitive_type_table.put(&entry->name, entry);
}
{
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdFloat);
entry->type_ref = LLVMX86FP80Type();
buf_init_from_str(&entry->name, "c_longdouble");
entry->data.floating.bit_count = 80;
uint64_t debug_size_in_bits = 8*LLVMStoreSizeOfType(g->target_data_ref, entry->type_ref);
entry->di_type = ZigLLVMCreateDebugBasicType(g->dbuilder, buf_ptr(&entry->name),
debug_size_in_bits,
ZigLLVMEncoding_DW_ATE_float());
g->builtin_types.entry_c_longdouble = entry;
g->primitive_type_table.put(&entry->name, entry);
}
{
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdVoid);
entry->type_ref = LLVMVoidType();
@ -6231,6 +6255,10 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdIntToFloat, "intToFloat", 2);
create_builtin_fn(g, BuiltinFnIdFloatToInt, "floatToInt", 2);
create_builtin_fn(g, BuiltinFnIdBoolToInt, "boolToInt", 1);
create_builtin_fn(g, BuiltinFnIdErrToInt, "errorToInt", 1);
create_builtin_fn(g, BuiltinFnIdIntToErr, "intToError", 1);
create_builtin_fn(g, BuiltinFnIdEnumToInt, "enumToInt", 1);
create_builtin_fn(g, BuiltinFnIdIntToEnum, "intToEnum", 2);
create_builtin_fn(g, BuiltinFnIdCompileErr, "compileError", 1);
create_builtin_fn(g, BuiltinFnIdCompileLog, "compileLog", SIZE_MAX);
create_builtin_fn(g, BuiltinFnIdIntType, "IntType", 2); // TODO rename to Int
@ -6267,6 +6295,9 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
}
static const char *bool_to_str(bool b) {
@ -6538,7 +6569,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
"\n"
" pub const Union = struct {\n"
" layout: ContainerLayout,\n"
" tag_type: type,\n"
" tag_type: ?type,\n"
" fields: []UnionField,\n"
" defs: []Definition,\n"
" };\n"
@ -6555,20 +6586,20 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" pub const FnArg = struct {\n"
" is_generic: bool,\n"
" is_noalias: bool,\n"
" arg_type: type,\n"
" arg_type: ?type,\n"
" };\n"
"\n"
" pub const Fn = struct {\n"
" calling_convention: CallingConvention,\n"
" is_generic: bool,\n"
" is_var_args: bool,\n"
" return_type: type,\n"
" async_allocator_type: type,\n"
" return_type: ?type,\n"
" async_allocator_type: ?type,\n"
" args: []FnArg,\n"
" };\n"
"\n"
" pub const Promise = struct {\n"
" child: type,\n"
" child: ?type,\n"
" };\n"
"\n"
" pub const Definition = struct {\n"

1514
src/ir.cpp

File diff suppressed because it is too large Load Diff

View File

@ -664,6 +664,28 @@ static void ir_print_float_cast(IrPrint *irp, IrInstructionFloatCast *instructio
fprintf(irp->f, ")");
}
static void ir_print_err_set_cast(IrPrint *irp, IrInstructionErrSetCast *instruction) {
fprintf(irp->f, "@errSetCast(");
ir_print_other_instruction(irp, instruction->dest_type);
fprintf(irp->f, ", ");
ir_print_other_instruction(irp, instruction->target);
fprintf(irp->f, ")");
}
static void ir_print_from_bytes(IrPrint *irp, IrInstructionFromBytes *instruction) {
fprintf(irp->f, "@bytesToSlice(");
ir_print_other_instruction(irp, instruction->dest_child_type);
fprintf(irp->f, ", ");
ir_print_other_instruction(irp, instruction->target);
fprintf(irp->f, ")");
}
static void ir_print_to_bytes(IrPrint *irp, IrInstructionToBytes *instruction) {
fprintf(irp->f, "@sliceToBytes(");
ir_print_other_instruction(irp, instruction->target);
fprintf(irp->f, ")");
}
static void ir_print_int_to_float(IrPrint *irp, IrInstructionIntToFloat *instruction) {
fprintf(irp->f, "@intToFloat(");
ir_print_other_instruction(irp, instruction->dest_type);
@ -906,6 +928,17 @@ static void ir_print_int_to_ptr(IrPrint *irp, IrInstructionIntToPtr *instruction
static void ir_print_int_to_enum(IrPrint *irp, IrInstructionIntToEnum *instruction) {
fprintf(irp->f, "@intToEnum(");
if (instruction->dest_type == nullptr) {
fprintf(irp->f, "(null)");
} else {
ir_print_other_instruction(irp, instruction->dest_type);
}
ir_print_other_instruction(irp, instruction->target);
fprintf(irp->f, ")");
}
static void ir_print_enum_to_int(IrPrint *irp, IrInstructionEnumToInt *instruction) {
fprintf(irp->f, "@enumToInt(");
ir_print_other_instruction(irp, instruction->target);
fprintf(irp->f, ")");
}
@ -1461,6 +1494,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdFloatCast:
ir_print_float_cast(irp, (IrInstructionFloatCast *)instruction);
break;
case IrInstructionIdErrSetCast:
ir_print_err_set_cast(irp, (IrInstructionErrSetCast *)instruction);
break;
case IrInstructionIdFromBytes:
ir_print_from_bytes(irp, (IrInstructionFromBytes *)instruction);
break;
case IrInstructionIdToBytes:
ir_print_to_bytes(irp, (IrInstructionToBytes *)instruction);
break;
case IrInstructionIdIntToFloat:
ir_print_int_to_float(irp, (IrInstructionIntToFloat *)instruction);
break;
@ -1686,6 +1728,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdAtomicLoad:
ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction);
break;
case IrInstructionIdEnumToInt:
ir_print_enum_to_int(irp, (IrInstructionEnumToInt *)instruction);
break;
}
fprintf(irp->f, "\n");
}

View File

@ -325,10 +325,13 @@ static void construct_linker_job_elf(LinkJob *lj) {
lj->args.append((const char *)buf_ptr(g->link_objects.at(i)));
}
if (g->libc_link_lib == nullptr && (g->out_type == OutTypeExe || g->out_type == OutTypeLib)) {
Buf *builtin_o_path = build_o(g, "builtin");
lj->args.append(buf_ptr(builtin_o_path));
if (g->out_type == OutTypeExe || g->out_type == OutTypeLib) {
if (g->libc_link_lib == nullptr) {
Buf *builtin_o_path = build_o(g, "builtin");
lj->args.append(buf_ptr(builtin_o_path));
}
// sometimes libgcc is missing stuff, so we still build compiler_rt and rely on weak linkage
Buf *compiler_rt_o_path = build_compiler_rt(g);
lj->args.append(buf_ptr(compiler_rt_o_path));
}
@ -554,7 +557,7 @@ static void construct_linker_job_coff(LinkJob *lj) {
lj->args.append(buf_ptr(builtin_o_path));
}
// msvc compiler_rt is missing some stuff, so we still build it and rely on LinkOnce
// msvc compiler_rt is missing some stuff, so we still build it and rely on weak linkage
Buf *compiler_rt_o_path = build_compiler_rt(g);
lj->args.append(buf_ptr(compiler_rt_o_path));
}

View File

@ -924,6 +924,8 @@ int main(int argc, char **argv) {
codegen_print_timing_report(g, stdout);
return EXIT_SUCCESS;
} else if (cmd == CmdTest) {
codegen_set_emit_file_type(g, emit_file_type);
ZigTarget native;
get_native_target(&native);

View File

@ -225,6 +225,11 @@ void os_path_extname(Buf *full_path, Buf *out_basename, Buf *out_extname) {
}
void os_path_join(Buf *dirname, Buf *basename, Buf *out_full_path) {
if (buf_len(dirname) == 0) {
buf_init_from_buf(out_full_path, basename);
return;
}
buf_init_from_buf(out_full_path, dirname);
uint8_t c = *(buf_ptr(out_full_path) + buf_len(out_full_path) - 1);
if (!os_is_sep(c))

View File

@ -357,12 +357,19 @@ static void end_float_token(Tokenize *t) {
// Mask the sign bit to 0 since always non-negative lex
const uint64_t exp_mask = 0xffffull << exp_shift;
if (shift >= 64) {
// must be special-cased to avoid undefined behavior on shift == 64
if (shift == 128) {
f_bits.repr[0] = 0;
f_bits.repr[1] = sig_bits[0];
} else if (shift == 0) {
f_bits.repr[0] = sig_bits[0];
f_bits.repr[1] = sig_bits[1];
} else if (shift >= 64) {
f_bits.repr[0] = 0;
f_bits.repr[1] = sig_bits[0] << (shift - 64);
} else {
f_bits.repr[0] = sig_bits[0] << shift;
f_bits.repr[1] = ((sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift)));
f_bits.repr[1] = (sig_bits[1] << shift) | (sig_bits[0] >> (64 - shift));
}
f_bits.repr[1] &= ~exp_mask;

View File

@ -31,6 +31,8 @@
#endif
#include "softfloat.hpp"
#define BREAKPOINT __asm("int $0x03")
ATTRIBUTE_COLD
@ -165,4 +167,21 @@ static inline uint8_t log2_u64(uint64_t x) {
return (63 - clzll(x));
}
static inline float16_t zig_double_to_f16(double x) {
float64_t y;
static_assert(sizeof(x) == sizeof(y), "");
memcpy(&y, &x, sizeof(x));
return f64_to_f16(y);
}
// Return value is safe to coerce to float even when |x| is NaN or Infinity.
static inline double zig_f16_to_double(float16_t x) {
float64_t y = f16_to_f64(x);
double z;
static_assert(sizeof(y) == sizeof(z), "");
memcpy(&z, &y, sizeof(y));
return z;
}
#endif

View File

@ -1,7 +1,9 @@
pub const Stack = @import("stack.zig").Stack;
pub const Queue = @import("queue.zig").Queue;
pub const QueueMpsc = @import("queue_mpsc.zig").QueueMpsc;
pub const QueueMpmc = @import("queue_mpmc.zig").QueueMpmc;
test "std.atomic" {
_ = @import("stack.zig").Stack;
_ = @import("queue.zig").Queue;
_ = @import("stack.zig");
_ = @import("queue_mpsc.zig");
_ = @import("queue_mpmc.zig");
}

214
std/atomic/queue_mpmc.zig Normal file
View File

@ -0,0 +1,214 @@
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
/// Many producer, many consumer, non-allocating, thread-safe, lock-free
/// This implementation has a crippling limitation - it hangs onto node
/// memory for 1 extra get() and 1 extra put() operation - when get() returns a node, that
/// node must not be freed until both the next get() and the next put() completes.
pub fn QueueMpmc(comptime T: type) type {
    return struct {
        // head points at the most recently consumed node (initially &root);
        // the next logical element of the queue is head.next.
        head: *Node,
        // tail points at the most recently inserted node (initially &root).
        tail: *Node,
        // Embedded dummy node so head/tail are never null and the empty
        // queue needs no allocation.
        root: Node,

        pub const Self = this;

        pub const Node = struct {
            next: ?*Node,
            data: T,
        };

        /// Initializes the queue in place to the empty state.
        /// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
        pub fn init(self: *Self) void {
            self.root.next = null;
            self.head = &self.root;
            self.tail = &self.root;
        }

        /// Appends node to the queue. Thread-safe with concurrent put() and get().
        pub fn put(self: *Self, node: *Node) void {
            node.next = null;
            // Atomically claim the tail slot, then link the previous tail to us.
            // Between the two RMWs the list is momentarily unlinked; get() tolerates
            // this by observing a null next and reporting empty.
            const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
            _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
        }

        /// Returns the oldest node, or null if the queue is empty.
        /// node must not be freed until both the next get() and the next put() complete
        pub fn get(self: *Self) ?*Node {
            var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
            while (true) {
                // head is the already-consumed dummy/previous node; head.next is
                // the real front of the queue. CAS head forward; on contention the
                // CAS returns the updated head and we retry.
                const node = head.next orelse return null;
                head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
            }
        }

        /// This is a debug function that is not thread-safe.
        pub fn dump(self: *Self) void {
            std.debug.warn("head: ");
            dumpRecursive(self.head, 0);
            std.debug.warn("tail: ");
            dumpRecursive(self.tail, 0);
        }

        // Recursively prints the chain starting at optional_node, indenting one
        // space per level. Debug aid only; not thread-safe.
        fn dumpRecursive(optional_node: ?*Node, indent: usize) void {
            var stderr_file = std.io.getStdErr() catch return;
            const stderr = &std.io.FileOutStream.init(&stderr_file).stream;
            stderr.writeByteNTimes(' ', indent) catch return;
            if (optional_node) |node| {
                std.debug.warn("0x{x}={}\n", @ptrToInt(node), node.data);
                dumpRecursive(node.next, indent + 1);
            } else {
                std.debug.warn("(null)\n");
            }
        }
    };
}
const std = @import("std");
const assert = std.debug.assert;
// Shared state for the multi-threaded fuzz test below. The sums and counts
// are only ever modified via @atomicRmw by the putter/getter threads.
const Context = struct {
    allocator: *std.mem.Allocator,
    queue: *QueueMpmc(i32),
    put_sum: isize, // running sum of every value put into the queue
    get_sum: isize, // running sum of every value taken out of the queue
    get_count: usize, // number of successful get() calls
    puts_done: u8, // TODO make this a bool
};
// TODO add lazy evaluated build options and then put puts_per_thread behind
// some option such as: "AggressiveMultithreadedFuzzTest". In the AppVeyor
// CI we would use a less aggressive setting since at 1 core, while we still
// want this test to pass, we need a smaller value since there is so much thrashing
// we would also use a less aggressive setting when running in valgrind
const puts_per_thread = 500;
const put_thread_count = 3;
// Multi-threaded fuzz test: several putter threads and several getter threads
// hammer one queue; afterwards the sums and counts must balance exactly.
test "std.atomic.queue_mpmc" {
    var direct_allocator = std.heap.DirectAllocator.init();
    defer direct_allocator.deinit();

    // Carve a fixed thread-safe arena out of directly-mapped memory so node
    // allocation inside the threads cannot itself take locks we care about.
    var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 300 * 1024);
    defer direct_allocator.allocator.free(plenty_of_memory);

    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
    var a = &fixed_buffer_allocator.allocator;

    var queue: QueueMpmc(i32) = undefined;
    queue.init();
    var context = Context{
        .allocator = a,
        .queue = &queue,
        .put_sum = 0,
        .get_sum = 0,
        .puts_done = 0,
        .get_count = 0,
    };

    var putters: [put_thread_count]*std.os.Thread = undefined;
    for (putters) |*t| {
        t.* = try std.os.spawnThread(&context, startPuts);
    }
    var getters: [put_thread_count]*std.os.Thread = undefined;
    for (getters) |*t| {
        t.* = try std.os.spawnThread(&context, startGets);
    }

    // Wait for all producers, then signal the consumers (atomically, since
    // they poll puts_done) so they drain the queue and exit.
    for (putters) |t|
        t.wait();
    _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
    for (getters) |t|
        t.wait();

    // Every value put must have been got exactly once.
    if (context.put_sum != context.get_sum) {
        std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
    }

    if (context.get_count != puts_per_thread * put_thread_count) {
        std.debug.panic(
            "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
            context.get_count,
            u32(puts_per_thread),
            u32(put_thread_count),
        );
    }
}
// Producer thread body: puts puts_per_thread pseudo-random values into the
// queue and accumulates their sum atomically into ctx.put_sum.
fn startPuts(ctx: *Context) u8 {
    var put_count: usize = puts_per_thread;
    // Fixed seed keeps the value stream deterministic per thread.
    var r = std.rand.DefaultPrng.init(0xdeadbeef);
    while (put_count != 0) : (put_count -= 1) {
        std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
        const x = @bitCast(i32, r.random.scalar(u32));
        const node = ctx.allocator.create(QueueMpmc(i32).Node{
            .next = undefined,
            .data = x,
        }) catch unreachable;
        ctx.queue.put(node);
        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
    }
    return 0;
}
// Consumer thread body: drains the queue, accumulating sums and counts
// atomically. Exits only after observing puts_done AND completing one more
// full drain, so nothing put before the flag was set can be missed.
fn startGets(ctx: *Context) u8 {
    while (true) {
        // Sample the flag BEFORE draining: if it was already set here, this
        // drain pass is guaranteed to see every remaining node.
        const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;

        while (ctx.queue.get()) |node| {
            std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
            _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
        }

        if (last) return 0;
    }
}
// Single-threaded FIFO-order check using stack-allocated nodes. Nodes are
// deliberately kept alive past their get() because of the queue's documented
// limitation of holding node memory for one extra get() and put().
test "std.atomic.queue_mpmc single-threaded" {
    var queue: QueueMpmc(i32) = undefined;
    queue.init();

    var node_0 = QueueMpmc(i32).Node{
        .data = 0,
        .next = undefined,
    };
    queue.put(&node_0);

    var node_1 = QueueMpmc(i32).Node{
        .data = 1,
        .next = undefined,
    };
    queue.put(&node_1);

    assert(queue.get().?.data == 0);

    var node_2 = QueueMpmc(i32).Node{
        .data = 2,
        .next = undefined,
    };
    queue.put(&node_2);

    var node_3 = QueueMpmc(i32).Node{
        .data = 3,
        .next = undefined,
    };
    queue.put(&node_3);

    assert(queue.get().?.data == 1);

    assert(queue.get().?.data == 2);

    var node_4 = QueueMpmc(i32).Node{
        .data = 4,
        .next = undefined,
    };
    queue.put(&node_4);

    assert(queue.get().?.data == 3);
    // if we were to set node_3.next to null here, it would cause this test
    // to fail. this demonstrates the limitation of hanging on to extra memory.

    assert(queue.get().?.data == 4);

    assert(queue.get() == null);
}

View File

@ -1,49 +1,54 @@
const std = @import("../index.zig");
const assert = std.debug.assert;
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
pub fn Queue(comptime T: type) type {
/// Many producer, single consumer, non-allocating, thread-safe, lock-free
pub fn QueueMpsc(comptime T: type) type {
return struct {
head: *Node,
tail: *Node,
root: Node,
inboxes: [2]std.atomic.Stack(T),
outbox: std.atomic.Stack(T),
inbox_index: usize,
pub const Self = this;
pub const Node = struct {
next: ?*Node,
data: T,
};
pub const Node = std.atomic.Stack(T).Node;
// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
pub fn init(self: *Self) void {
self.root.next = null;
self.head = &self.root;
self.tail = &self.root;
pub fn init() Self {
return Self{
.inboxes = []std.atomic.Stack(T){
std.atomic.Stack(T).init(),
std.atomic.Stack(T).init(),
},
.outbox = std.atomic.Stack(T).init(),
.inbox_index = 0,
};
}
pub fn put(self: *Self, node: *Node) void {
node.next = null;
const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
_ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
const inbox_index = @atomicLoad(usize, &self.inbox_index, AtomicOrder.SeqCst);
const inbox = &self.inboxes[inbox_index];
inbox.push(node);
}
pub fn get(self: *Self) ?*Node {
var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
while (true) {
const node = head.next orelse return null;
head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
if (self.outbox.pop()) |node| {
return node;
}
const prev_inbox_index = @atomicRmw(usize, &self.inbox_index, AtomicRmwOp.Xor, 0x1, AtomicOrder.SeqCst);
const prev_inbox = &self.inboxes[prev_inbox_index];
while (prev_inbox.pop()) |node| {
self.outbox.push(node);
}
return self.outbox.pop();
}
};
}
const std = @import("std");
const Context = struct {
allocator: *std.mem.Allocator,
queue: *Queue(i32),
queue: *QueueMpsc(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@ -58,7 +63,7 @@ const Context = struct {
const puts_per_thread = 500;
const put_thread_count = 3;
test "std.atomic.queue" {
test "std.atomic.queue_mpsc" {
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
@ -68,8 +73,7 @@ test "std.atomic.queue" {
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
var queue: Queue(i32) = undefined;
queue.init();
var queue = QueueMpsc(i32).init();
var context = Context{
.allocator = a,
.queue = &queue,
@ -83,7 +87,7 @@ test "std.atomic.queue" {
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
var getters: [put_thread_count]*std.os.Thread = undefined;
var getters: [1]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@ -114,8 +118,10 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Queue(i32).Node) catch unreachable;
node.data = x;
const node = ctx.allocator.create(QueueMpsc(i32).Node{
.next = undefined,
.data = x,
}) catch unreachable;
ctx.queue.put(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
}

View File

@ -117,8 +117,10 @@ fn startPuts(ctx: *Context) u8 {
while (put_count != 0) : (put_count -= 1) {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
const x = @bitCast(i32, r.random.scalar(u32));
const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
node.data = x;
const node = ctx.allocator.create(Stack(i32).Node{
.next = undefined,
.data = x,
}) catch unreachable;
ctx.stack.push(node);
_ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
}

View File

@ -158,8 +158,7 @@ pub const Builder = struct {
}
pub fn addTest(self: *Builder, root_src: []const u8) *TestStep {
const test_step = self.allocator.create(TestStep) catch unreachable;
test_step.* = TestStep.init(self, root_src);
const test_step = self.allocator.create(TestStep.init(self, root_src)) catch unreachable;
return test_step;
}
@ -191,21 +190,18 @@ pub const Builder = struct {
}
pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
write_file_step.* = WriteFileStep.init(self, file_path, data);
const write_file_step = self.allocator.create(WriteFileStep.init(self, file_path, data)) catch unreachable;
return write_file_step;
}
pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep {
const data = self.fmt(format, args);
const log_step = self.allocator.create(LogStep) catch unreachable;
log_step.* = LogStep.init(self, data);
const log_step = self.allocator.create(LogStep.init(self, data)) catch unreachable;
return log_step;
}
pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
remove_dir_step.* = RemoveDirStep.init(self, dir_path);
const remove_dir_step = self.allocator.create(RemoveDirStep.init(self, dir_path)) catch unreachable;
return remove_dir_step;
}
@ -404,11 +400,10 @@ pub const Builder = struct {
}
pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
const step_info = self.allocator.create(TopLevelStep) catch unreachable;
step_info.* = TopLevelStep{
const step_info = self.allocator.create(TopLevelStep{
.step = Step.initNoOp(name, self.allocator),
.description = description,
};
}) catch unreachable;
self.top_level_steps.append(step_info) catch unreachable;
return &step_info.step;
}
@ -598,8 +593,7 @@ pub const Builder = struct {
const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable;
self.pushInstalledFile(full_dest_path);
const install_step = self.allocator.create(InstallFileStep) catch unreachable;
install_step.* = InstallFileStep.init(self, src_path, full_dest_path);
const install_step = self.allocator.create(InstallFileStep.init(self, src_path, full_dest_path)) catch unreachable;
return install_step;
}
@ -837,51 +831,43 @@ pub const LibExeObjStep = struct {
};
pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver);
const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, false, ver)) catch unreachable;
return self;
}
pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, version, false);
const self = builder.allocator.create(initC(builder, name, Kind.Lib, version, false)) catch unreachable;
return self;
}
pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0));
const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true);
const self = builder.allocator.create(initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true)) catch unreachable;
return self;
}
pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0));
const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false);
const self = builder.allocator.create(initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false)) catch unreachable;
self.object_src = src;
return self;
}
pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0));
const self = builder.allocator.create(initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0))) catch unreachable;
return self;
}
pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false);
const self = builder.allocator.create(initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false)) catch unreachable;
return self;
}
@ -1748,14 +1734,14 @@ pub const CommandStep = struct {
/// ::argv is copied.
pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
const self = builder.allocator.create(CommandStep) catch unreachable;
self.* = CommandStep{
const self = builder.allocator.create(CommandStep{
.builder = builder,
.step = Step.init(argv[0], builder.allocator, make),
.argv = builder.allocator.alloc([]u8, argv.len) catch unreachable,
.cwd = cwd,
.env_map = env_map,
};
}) catch unreachable;
mem.copy([]const u8, self.argv, argv);
self.step.name = self.argv[0];
return self;
@ -1778,18 +1764,17 @@ const InstallArtifactStep = struct {
const Self = this;
pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
const self = builder.allocator.create(Self) catch unreachable;
const dest_dir = switch (artifact.kind) {
LibExeObjStep.Kind.Obj => unreachable,
LibExeObjStep.Kind.Exe => builder.exe_dir,
LibExeObjStep.Kind.Lib => builder.lib_dir,
};
self.* = Self{
const self = builder.allocator.create(Self{
.builder = builder,
.step = Step.init(builder.fmt("install {}", artifact.step.name), builder.allocator, make),
.artifact = artifact,
.dest_file = os.path.join(builder.allocator, dest_dir, artifact.out_filename) catch unreachable,
};
}) catch unreachable;
self.step.dependOn(&artifact.step);
builder.pushInstalledFile(self.dest_file);
if (self.artifact.kind == LibExeObjStep.Kind.Lib and !self.artifact.static) {

View File

@ -79,7 +79,7 @@ pub const NullTerminated2DArray = struct {
errdefer allocator.free(buf);
var write_index = index_size;
const index_buf = ([]?[*]u8)(buf);
const index_buf = @bytesToSlice(?[*]u8, buf);
var i: usize = 0;
for (slices) |slice| {

View File

@ -249,9 +249,7 @@ fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: us
pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
switch (builtin.object_format) {
builtin.ObjectFormat.elf => {
const st = try allocator.create(ElfStackTrace);
errdefer allocator.destroy(st);
st.* = ElfStackTrace{
const st = try allocator.create(ElfStackTrace{
.self_exe_file = undefined,
.elf = undefined,
.debug_info = undefined,
@ -261,7 +259,8 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
.debug_ranges = null,
.abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator),
.compile_unit_list = ArrayList(CompileUnit).init(allocator),
};
});
errdefer allocator.destroy(st);
st.self_exe_file = try os.openSelfExe();
errdefer st.self_exe_file.close();
@ -280,11 +279,8 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
var exe_file = try os.openSelfExe();
defer exe_file.close();
const st = try allocator.create(ElfStackTrace);
const st = try allocator.create(ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) });
errdefer allocator.destroy(st);
st.* = ElfStackTrace{ .symbol_table = try macho.loadSymbols(allocator, &io.FileInStream.init(&exe_file)) };
return st;
},
builtin.ObjectFormat.coff => {
@ -974,8 +970,7 @@ fn scanAllCompileUnits(st: *ElfStackTrace) !void {
try st.self_exe_file.seekTo(compile_unit_pos);
const compile_unit_die = try st.allocator().create(Die);
compile_unit_die.* = try parseDie(st, abbrev_table, is_64);
const compile_unit_die = try st.allocator().create(try parseDie(st, abbrev_table, is_64));
if (compile_unit_die.tag_id != DW.TAG_compile_unit) return error.InvalidDebugInfo;

View File

@ -4,6 +4,8 @@ const assert = std.debug.assert;
const event = this;
const mem = std.mem;
const posix = std.os.posix;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
pub const TcpServer = struct {
handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
@ -93,16 +95,56 @@ pub const TcpServer = struct {
pub const Loop = struct {
allocator: *mem.Allocator,
epollfd: i32,
keep_running: bool,
next_tick_queue: std.atomic.QueueMpsc(promise),
os_data: OsData,
fn init(allocator: *mem.Allocator) !Loop {
const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
return Loop{
const OsData = switch (builtin.os) {
builtin.Os.linux => struct {
epollfd: i32,
},
else => struct {},
};
pub const NextTickNode = std.atomic.QueueMpsc(promise).Node;
/// The allocator must be thread-safe because we use it for multiplexing
/// coroutines onto kernel threads.
pub fn init(allocator: *mem.Allocator) !Loop {
var self = Loop{
.keep_running = true,
.allocator = allocator,
.epollfd = epollfd,
.os_data = undefined,
.next_tick_queue = std.atomic.QueueMpsc(promise).init(),
};
try self.initOsData();
errdefer self.deinitOsData();
return self;
}
/// must call stop before deinit
pub fn deinit(self: *Loop) void {
self.deinitOsData();
}
const InitOsDataError = std.os.LinuxEpollCreateError;
fn initOsData(self: *Loop) InitOsDataError!void {
switch (builtin.os) {
builtin.Os.linux => {
self.os_data.epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
errdefer std.os.close(self.os_data.epollfd);
},
else => {},
}
}
fn deinitOsData(self: *Loop) void {
switch (builtin.os) {
builtin.Os.linux => std.os.close(self.os_data.epollfd),
else => {},
}
}
pub fn addFd(self: *Loop, fd: i32, prom: promise) !void {
@ -110,11 +152,11 @@ pub const Loop = struct {
.events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
.data = std.os.linux.epoll_data{ .ptr = @ptrToInt(prom) },
};
try std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
try std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
}
pub fn removeFd(self: *Loop, fd: i32) void {
std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
std.os.linuxEpollCtl(self.os_data.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
}
async fn waitFd(self: *Loop, fd: i32) !void {
defer self.removeFd(fd);
@ -126,21 +168,250 @@ pub const Loop = struct {
pub fn stop(self: *Loop) void {
// TODO make atomic
self.keep_running = false;
// TODO activate an fd in the epoll set
// TODO activate an fd in the epoll set which should cancel all the promises
}
/// bring your own linked list node. this means it can't fail.
pub fn onNextTick(self: *Loop, node: *NextTickNode) void {
self.next_tick_queue.put(node);
}
pub fn run(self: *Loop) void {
while (self.keep_running) {
var events: [16]std.os.linux.epoll_event = undefined;
const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1);
for (events[0..count]) |ev| {
const p = @intToPtr(promise, ev.data.ptr);
resume p;
// TODO multiplex the next tick queue and the epoll event results onto a thread pool
while (self.next_tick_queue.get()) |node| {
resume node.data;
}
if (!self.keep_running) break;
self.dispatchOsEvents();
}
}
fn dispatchOsEvents(self: *Loop) void {
switch (builtin.os) {
builtin.Os.linux => {
var events: [16]std.os.linux.epoll_event = undefined;
const count = std.os.linuxEpollWait(self.os_data.epollfd, events[0..], -1);
for (events[0..count]) |ev| {
const p = @intToPtr(promise, ev.data.ptr);
resume p;
}
},
else => {},
}
}
};
/// many producer, many consumer, thread-safe, lock-free, runtime configurable buffer size
/// when buffer is empty, consumers suspend and are resumed by producers
/// when buffer is full, producers suspend and are resumed by consumers
pub fn Channel(comptime T: type) type {
    return struct {
        loop: *Loop,

        // Pending consumers and producers, parked as suspended coroutines.
        getters: std.atomic.QueueMpsc(GetNode),
        putters: std.atomic.QueueMpsc(PutNode),
        // Upper bounds on queue lengths, maintained with atomic add/sub;
        // dispatch() relies on these rather than peeking the queues.
        get_count: usize,
        put_count: usize,
        dispatch_lock: u8, // TODO make this a bool
        need_dispatch: u8, // TODO make this a bool

        // simple fixed size ring buffer
        buffer_nodes: []T,
        buffer_index: usize,
        buffer_len: usize,

        const SelfChannel = this;
        const GetNode = struct {
            // Destination for the received value; written by dispatch()
            // before the getter coroutine is resumed.
            ptr: *T,
            tick_node: *Loop.NextTickNode,
        };
        const PutNode = struct {
            data: T,
            tick_node: *Loop.NextTickNode,
        };

        /// call destroy when done
        pub fn create(loop: *Loop, capacity: usize) !*SelfChannel {
            const buffer_nodes = try loop.allocator.alloc(T, capacity);
            errdefer loop.allocator.free(buffer_nodes);
            const self = try loop.allocator.create(SelfChannel{
                .loop = loop,
                .buffer_len = 0,
                .buffer_nodes = buffer_nodes,
                .buffer_index = 0,
                .dispatch_lock = 0,
                .need_dispatch = 0,
                .getters = std.atomic.QueueMpsc(GetNode).init(),
                .putters = std.atomic.QueueMpsc(PutNode).init(),
                .get_count = 0,
                .put_count = 0,
            });
            errdefer loop.allocator.destroy(self);
            return self;
        }

        /// must be called when all calls to put and get have suspended and no more calls occur
        pub fn destroy(self: *SelfChannel) void {
            // Cancel any coroutines still parked on the channel so their
            // frames are cleaned up before freeing channel memory.
            while (self.getters.get()) |get_node| {
                cancel get_node.data.tick_node.data;
            }
            while (self.putters.get()) |put_node| {
                cancel put_node.data.tick_node.data;
            }
            self.loop.allocator.free(self.buffer_nodes);
            self.loop.allocator.destroy(self);
        }

        /// puts a data item in the channel. The promise completes when the value has been added to the
        /// buffer, or in the case of a zero size buffer, when the item has been retrieved by a getter.
        pub async fn put(self: *SelfChannel, data: T) void {
            // TODO should be able to group memory allocation failure before first suspend point
            // so that the async invocation catches it
            // Start a dispatch coroutine now; it parks itself and hands us its
            // tick node, which we schedule once our PutNode is enqueued.
            var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
            _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;

            suspend |handle| {
                // These nodes live on this coroutine frame; they stay valid
                // while we are suspended, until dispatch() resumes us.
                var my_tick_node = Loop.NextTickNode{
                    .next = undefined,
                    .data = handle,
                };
                var queue_node = std.atomic.QueueMpsc(PutNode).Node{
                    .data = PutNode{
                        .tick_node = &my_tick_node,
                        .data = data,
                    },
                    .next = undefined,
                };
                self.putters.put(&queue_node);
                _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);

                self.loop.onNextTick(dispatch_tick_node_ptr);
            }
        }

        /// await this function to get an item from the channel. If the buffer is empty, the promise will
        /// complete when the next item is put in the channel.
        pub async fn get(self: *SelfChannel) T {
            // TODO should be able to group memory allocation failure before first suspend point
            // so that the async invocation catches it
            var dispatch_tick_node_ptr: *Loop.NextTickNode = undefined;
            _ = async self.dispatch(&dispatch_tick_node_ptr) catch unreachable;

            // TODO integrate this function with named return values
            // so we can get rid of this extra result copy
            var result: T = undefined;
            var debug_handle: usize = undefined;
            suspend |handle| {
                debug_handle = @ptrToInt(handle);
                var my_tick_node = Loop.NextTickNode{
                    .next = undefined,
                    .data = handle,
                };
                var queue_node = std.atomic.QueueMpsc(GetNode).Node{
                    .data = GetNode{
                        // dispatch() writes the delivered value through this
                        // pointer before resuming us.
                        .ptr = &result,
                        .tick_node = &my_tick_node,
                    },
                    .next = undefined,
                };
                self.getters.put(&queue_node);
                _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);

                self.loop.onNextTick(dispatch_tick_node_ptr);
            }
            return result;
        }

        // Matches parked getters with parked putters and the ring buffer.
        // Exactly one instance runs the matching loop at a time, guarded by
        // dispatch_lock; losers set need_dispatch (before trying the lock) so
        // the winner re-runs and no wakeup is lost.
        async fn dispatch(self: *SelfChannel, tick_node_ptr: **Loop.NextTickNode) void {
            // resumed by onNextTick
            suspend |handle| {
                var tick_node = Loop.NextTickNode{
                    .data = handle,
                    .next = undefined,
                };
                tick_node_ptr.* = &tick_node;
            }

            // set the "need dispatch" flag
            _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);

            lock: while (true) {
                // set the lock flag
                const prev_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
                // Someone else holds the lock; they will see need_dispatch.
                if (prev_lock != 0) return;

                // clear the need_dispatch flag since we're about to do it
                _ = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);

                while (true) {
                    one_dispatch: {
                        // later we correct these extra subtractions
                        var get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        var put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);

                        // transfer self.buffer to self.getters
                        while (self.buffer_len != 0) {
                            if (get_count == 0) break :one_dispatch;
                            const get_node = &self.getters.get().?.data;
                            // Oldest element: index minus current length, with
                            // wrapping arithmetic since the ring wraps.
                            get_node.ptr.* = self.buffer_nodes[self.buffer_index -% self.buffer_len];
                            self.loop.onNextTick(get_node.tick_node);
                            self.buffer_len -= 1;

                            get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        }

                        // direct transfer self.putters to self.getters
                        while (get_count != 0 and put_count != 0) {
                            const get_node = &self.getters.get().?.data;
                            const put_node = &self.putters.get().?.data;

                            get_node.ptr.* = put_node.data;
                            self.loop.onNextTick(get_node.tick_node);
                            self.loop.onNextTick(put_node.tick_node);

                            get_count = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                            put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        }

                        // transfer self.putters to self.buffer
                        while (self.buffer_len != self.buffer_nodes.len and put_count != 0) {
                            const put_node = &self.putters.get().?.data;

                            self.buffer_nodes[self.buffer_index] = put_node.data;
                            self.loop.onNextTick(put_node.tick_node);
                            self.buffer_index +%= 1;
                            self.buffer_len += 1;

                            put_count = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst);
                        }
                    }

                    // undo the extra subtractions
                    _ = @atomicRmw(usize, &self.get_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
                    _ = @atomicRmw(usize, &self.put_count, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);

                    // clear need-dispatch flag
                    const need_dispatch = @atomicRmw(u8, &self.need_dispatch, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
                    if (need_dispatch != 0) continue;

                    const my_lock = @atomicRmw(u8, &self.dispatch_lock, AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst);
                    assert(my_lock != 0);

                    // we have to check again now that we unlocked
                    if (@atomicLoad(u8, &self.need_dispatch, AtomicOrder.SeqCst) != 0) continue :lock;

                    return;
                }
            }
        }
    };
}
pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File {
var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
@ -199,6 +470,7 @@ test "listen on a port, send bytes, receive bytes" {
defer cancel p;
loop.run();
}
async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
errdefer @panic("test failure");
@ -211,3 +483,43 @@ async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
assert(mem.eql(u8, msg, "hello from server\n"));
loop.stop();
}
// End-to-end channel test: one getter and one putter coroutine exchange two
// values through a zero-capacity channel on a single event loop.
test "std.event.Channel" {
    var da = std.heap.DirectAllocator.init();
    defer da.deinit();

    const allocator = &da.allocator;

    var loop = try Loop.init(allocator);
    defer loop.deinit();

    // capacity 0: every put rendezvouses directly with a get
    const channel = try Channel(i32).create(&loop, 0);
    defer channel.destroy();

    const handle = try async<allocator> testChannelGetter(&loop, channel);
    defer cancel handle;

    const putter = try async<allocator> testChannelPutter(channel);
    defer cancel putter;

    // runs until testChannelGetter calls loop.stop()
    loop.run();
}
// Receives the two values sent by testChannelPutter, asserts their order,
// then stops the loop so the test can finish.
async fn testChannelGetter(loop: *Loop, channel: *Channel(i32)) void {
    errdefer @panic("test failed");

    const value1_promise = try async channel.get();
    const value1 = await value1_promise;
    assert(value1 == 1234);

    const value2_promise = try async channel.get();
    const value2 = await value2_promise;
    assert(value2 == 4567);

    loop.stop();
}
// Sends two known values into the channel; each put is awaited so they are
// delivered in order.
async fn testChannelPutter(channel: *Channel(i32)) void {
    await (async channel.put(1234) catch @panic("out of memory"));
    await (async channel.put(4567) catch @panic("out of memory"));
}

View File

@ -130,6 +130,9 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
builtin.TypeId.Promise => {
return format(context, Errors, output, "promise@{x}", @ptrToInt(value));
},
builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
builtin.TypeId.Array => |info| {
@ -327,7 +330,7 @@ pub fn formatFloatScientific(
comptime Errors: type,
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
var x = f64(value);
var x = @floatCast(f64, value);
// Errol doesn't handle these special cases.
if (math.signbit(x)) {

View File

@ -38,6 +38,7 @@ fn cFree(self: *Allocator, old_mem: []u8) void {
}
/// This allocator makes a syscall directly for every allocation and free.
/// TODO make this thread-safe. The windows implementation will need some atomics.
pub const DirectAllocator = struct {
allocator: Allocator,
heap_handle: ?HeapHandle,
@ -221,7 +222,7 @@ pub const ArenaAllocator = struct {
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
const buf_node_slice = ([]BufNode)(buf[0..@sizeOf(BufNode)]);
const buf_node_slice = @bytesToSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
buf_node.* = BufNode{
.data = buf,
@ -407,8 +408,7 @@ fn testAllocator(allocator: *mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
for (slice) |*item, i| {
item.* = try allocator.create(i32);
item.*.* = @intCast(i32, i);
item.* = try allocator.create(@intCast(i32, i));
}
for (slice) |item, i| {

View File

@ -414,14 +414,12 @@ pub const BufferedAtomicFile = struct {
pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
var self = try allocator.create(BufferedAtomicFile);
errdefer allocator.destroy(self);
self.* = BufferedAtomicFile{
var self = try allocator.create(BufferedAtomicFile{
.atomic_file = undefined,
.file_stream = undefined,
.buffered_stream = undefined,
};
});
errdefer allocator.destroy(self);
self.atomic_file = try os.AtomicFile.init(allocator, dest_path, os.default_file_mode);
errdefer self.atomic_file.deinit();

View File

@ -180,7 +180,7 @@ pub const StreamingParser = struct {
pub fn fromInt(x: var) State {
debug.assert(x == 0 or x == 1);
const T = @TagType(State);
return State(@intCast(T, x));
return @intToEnum(State, @intCast(T, x));
}
};

View File

@ -193,7 +193,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// A pointer to the new node.
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
return allocator.create(Node);
return allocator.create(Node(undefined));
}
/// Deallocate a node.

View File

@ -161,7 +161,7 @@ pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable
}
fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
return in.stream.readNoEof(([]u8)(result));
return in.stream.readNoEof(@sliceToBytes(result));
}
fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
return readNoEof(in, T, (*[1]T)(result)[0..]);

View File

@ -4,12 +4,22 @@ const assert = std.debug.assert;
pub fn copysign(comptime T: type, x: T, y: T) T {
return switch (T) {
f16 => copysign16(x, y),
f32 => copysign32(x, y),
f64 => copysign64(x, y),
else => @compileError("copysign not implemented for " ++ @typeName(T)),
};
}
// Returns a value with the magnitude of `x` and the sign of `y`,
// operating directly on the f16 bit patterns.
fn copysign16(x: f16, y: f16) f16 {
    const sign_mask = u16(1) << 15;
    const magnitude = @bitCast(u16, x) & ~sign_mask;
    const sign = @bitCast(u16, y) & sign_mask;
    return @bitCast(f16, magnitude | sign);
}
fn copysign32(x: f32, y: f32) f32 {
const ux = @bitCast(u32, x);
const uy = @bitCast(u32, y);
@ -29,10 +39,18 @@ fn copysign64(x: f64, y: f64) f64 {
}
test "math.copysign" {
assert(copysign(f16, 1.0, 1.0) == copysign16(1.0, 1.0));
assert(copysign(f32, 1.0, 1.0) == copysign32(1.0, 1.0));
assert(copysign(f64, 1.0, 1.0) == copysign64(1.0, 1.0));
}
test "math.copysign16" {
assert(copysign16(5.0, 1.0) == 5.0);
assert(copysign16(5.0, -1.0) == -5.0);
assert(copysign16(-5.0, -1.0) == -5.0);
assert(copysign16(-5.0, 1.0) == 5.0);
}
test "math.copysign32" {
assert(copysign32(5.0, 1.0) == 5.0);
assert(copysign32(5.0, -1.0) == -5.0);

View File

@ -20,6 +20,10 @@ pub fn expm1(x: var) @typeOf(x) {
fn expm1_32(x_: f32) f32 {
@setFloatMode(this, builtin.FloatMode.Strict);
if (math.isNan(x_))
return math.nan(f32);
const o_threshold: f32 = 8.8721679688e+01;
const ln2_hi: f32 = 6.9313812256e-01;
const ln2_lo: f32 = 9.0580006145e-06;
@ -146,6 +150,10 @@ fn expm1_32(x_: f32) f32 {
fn expm1_64(x_: f64) f64 {
@setFloatMode(this, builtin.FloatMode.Strict);
if (math.isNan(x_))
return math.nan(f64);
const o_threshold: f64 = 7.09782712893383973096e+02;
const ln2_hi: f64 = 6.93147180369123816490e-01;
const ln2_lo: f64 = 1.90821492927058770002e-10;

View File

@ -10,12 +10,19 @@ const assert = std.debug.assert;
pub fn fabs(x: var) @typeOf(x) {
const T = @typeOf(x);
return switch (T) {
f16 => fabs16(x),
f32 => fabs32(x),
f64 => fabs64(x),
else => @compileError("fabs not implemented for " ++ @typeName(T)),
};
}
// Absolute value of an f16, computed by clearing the sign bit (bit 15)
// of the 16-bit representation.
fn fabs16(x: f16) f16 {
    const magnitude_bits = @bitCast(u16, x) & 0x7FFF;
    return @bitCast(f16, magnitude_bits);
}
fn fabs32(x: f32) f32 {
var u = @bitCast(u32, x);
u &= 0x7FFFFFFF;
@ -29,10 +36,16 @@ fn fabs64(x: f64) f64 {
}
test "math.fabs" {
assert(fabs(f16(1.0)) == fabs16(1.0));
assert(fabs(f32(1.0)) == fabs32(1.0));
assert(fabs(f64(1.0)) == fabs64(1.0));
}
test "math.fabs16" {
assert(fabs16(1.0) == 1.0);
assert(fabs16(-1.0) == 1.0);
}
test "math.fabs32" {
assert(fabs32(1.0) == 1.0);
assert(fabs32(-1.0) == 1.0);
@ -43,6 +56,12 @@ test "math.fabs64" {
assert(fabs64(-1.0) == 1.0);
}
test "math.fabs16.special" {
assert(math.isPositiveInf(fabs(math.inf(f16))));
assert(math.isPositiveInf(fabs(-math.inf(f16))));
assert(math.isNan(fabs(math.nan(f16))));
}
test "math.fabs32.special" {
assert(math.isPositiveInf(fabs(math.inf(f32))));
assert(math.isPositiveInf(fabs(-math.inf(f32))));

View File

@ -12,12 +12,47 @@ const math = std.math;
pub fn floor(x: var) @typeOf(x) {
const T = @typeOf(x);
return switch (T) {
f16 => floor16(x),
f32 => floor32(x),
f64 => floor64(x),
else => @compileError("floor not implemented for " ++ @typeName(T)),
};
}
// Rounds `x` toward negative infinity, operating directly on the f16 bit
// pattern (sign bit 15, 5 exponent bits with bias 15, 10 mantissa bits).
fn floor16(x: f16) f16 {
    var u = @bitCast(u16, x);
    // Unbiased exponent.
    const e = @intCast(i16, (u >> 10) & 31) - 15;
    var m: u16 = undefined;

    // TODO: Shouldn't need this explicit check.
    if (x == 0.0) {
        return x;
    }

    if (e >= 10) {
        // Exponent covers all mantissa bits: x is already integral
        // (this range also includes inf/NaN encodings).
        return x;
    }

    if (e >= 0) {
        // Mask of the fractional mantissa bits for this exponent.
        m = u16(1023) >> @intCast(u4, e);
        if (u & m == 0) {
            // No fractional bits set: already an integer.
            return x;
        }
        // Evaluate a large-magnitude add; forceEval's volatile store keeps
        // the FP operation (and its side effects) from being optimized out.
        math.forceEval(x + 0x1.0p120);
        if (u >> 15 != 0) {
            // Negative value: bump magnitude so the truncation below
            // rounds toward negative infinity instead of toward zero.
            u += m;
        }
        return @bitCast(f16, u & ~m);
    } else {
        // |x| < 1 (x != 0 was handled above): floor is 0.0 for positive x
        // and -1.0 for negative x.
        math.forceEval(x + 0x1.0p120);
        if (u >> 15 == 0) {
            return 0.0;
        } else {
            return -1.0;
        }
    }
}
fn floor32(x: f32) f32 {
var u = @bitCast(u32, x);
const e = @intCast(i32, (u >> 23) & 0xFF) - 0x7F;
@ -84,10 +119,17 @@ fn floor64(x: f64) f64 {
}
test "math.floor" {
assert(floor(f16(1.3)) == floor16(1.3));
assert(floor(f32(1.3)) == floor32(1.3));
assert(floor(f64(1.3)) == floor64(1.3));
}
test "math.floor16" {
assert(floor16(1.3) == 1.0);
assert(floor16(-1.3) == -2.0);
assert(floor16(0.2) == 0.0);
}
test "math.floor32" {
assert(floor32(1.3) == 1.0);
assert(floor32(-1.3) == -2.0);
@ -100,6 +142,14 @@ test "math.floor64" {
assert(floor64(0.2) == 0.0);
}
test "math.floor16.special" {
assert(floor16(0.0) == 0.0);
assert(floor16(-0.0) == -0.0);
assert(math.isPositiveInf(floor16(math.inf(f16))));
assert(math.isNegativeInf(floor16(-math.inf(f16))));
assert(math.isNan(floor16(math.nan(f16))));
}
test "math.floor32.special" {
assert(floor32(0.0) == 0.0);
assert(floor32(-0.0) == -0.0);

View File

@ -19,6 +19,18 @@ pub const f32_max = 3.40282346638528859812e+38;
pub const f32_epsilon = 1.1920928955078125e-07;
pub const f32_toint = 1.0 / f32_epsilon;
pub const f16_true_min = 0.000000059604644775390625; // 2**-24
pub const f16_min = 0.00006103515625; // 2**-14
pub const f16_max = 65504;
pub const f16_epsilon = 0.0009765625; // 2**-10
pub const f16_toint = 1.0 / f16_epsilon;
pub const nan_u16 = u16(0x7C01);
pub const nan_f16 = @bitCast(f16, nan_u16);
pub const inf_u16 = u16(0x7C00);
pub const inf_f16 = @bitCast(f16, inf_u16);
pub const nan_u32 = u32(0x7F800001);
pub const nan_f32 = @bitCast(f32, nan_u32);
@ -44,6 +56,11 @@ pub fn approxEq(comptime T: type, x: T, y: T, epsilon: T) bool {
pub fn forceEval(value: var) void {
const T = @typeOf(value);
switch (T) {
f16 => {
var x: f16 = undefined;
const p = @ptrCast(*volatile f16, &x);
p.* = x;
},
f32 => {
var x: f32 = undefined;
const p = @ptrCast(*volatile f32, &x);
@ -183,6 +200,32 @@ test "math" {
_ = @import("big/index.zig");
}
pub fn floatMantissaBits(comptime T: type) comptime_int {
assert(@typeId(T) == builtin.TypeId.Float);
return switch (T.bit_count) {
16 => 10,
32 => 23,
64 => 52,
80 => 64,
128 => 112,
else => @compileError("unknown floating point type " ++ @typeName(T)),
};
}
pub fn floatExponentBits(comptime T: type) comptime_int {
assert(@typeId(T) == builtin.TypeId.Float);
return switch (T.bit_count) {
16 => 5,
32 => 8,
64 => 11,
80 => 15,
128 => 15,
else => @compileError("unknown floating point type " ++ @typeName(T)),
};
}
pub fn min(x: var, y: var) @typeOf(x + y) {
return if (x < y) x else y;
}
@ -607,4 +650,3 @@ pub fn lossyCast(comptime T: type, value: var) T {
else => @compileError("bad type"),
}
}

View File

@ -1,9 +1,9 @@
const std = @import("../index.zig");
const math = std.math;
const assert = std.debug.assert;
pub fn inf(comptime T: type) T {
return switch (T) {
f16 => @bitCast(f16, math.inf_u16),
f32 => @bitCast(f32, math.inf_u32),
f64 => @bitCast(f64, math.inf_u64),
else => @compileError("inf not implemented for " ++ @typeName(T)),

View File

@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isFinite(x: var) bool {
const T = @typeOf(x);
switch (T) {
f16 => {
const bits = @bitCast(u16, x);
return bits & 0x7FFF < 0x7C00;
},
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF < 0x7F800000;
@ -20,10 +24,14 @@ pub fn isFinite(x: var) bool {
}
test "math.isFinite" {
assert(isFinite(f16(0.0)));
assert(isFinite(f16(-0.0)));
assert(isFinite(f32(0.0)));
assert(isFinite(f32(-0.0)));
assert(isFinite(f64(0.0)));
assert(isFinite(f64(-0.0)));
assert(!isFinite(math.inf(f16)));
assert(!isFinite(-math.inf(f16)));
assert(!isFinite(math.inf(f32)));
assert(!isFinite(-math.inf(f32)));
assert(!isFinite(math.inf(f64)));

View File

@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
f16 => {
const bits = @bitCast(u16, x);
return bits & 0x7FFF == 0x7C00;
},
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF == 0x7F800000;
@ -22,6 +26,9 @@ pub fn isInf(x: var) bool {
pub fn isPositiveInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
f16 => {
return @bitCast(u16, x) == 0x7C00;
},
f32 => {
return @bitCast(u32, x) == 0x7F800000;
},
@ -37,6 +44,9 @@ pub fn isPositiveInf(x: var) bool {
pub fn isNegativeInf(x: var) bool {
const T = @typeOf(x);
switch (T) {
f16 => {
return @bitCast(u16, x) == 0xFC00;
},
f32 => {
return @bitCast(u32, x) == 0xFF800000;
},
@ -50,10 +60,14 @@ pub fn isNegativeInf(x: var) bool {
}
test "math.isInf" {
assert(!isInf(f16(0.0)));
assert(!isInf(f16(-0.0)));
assert(!isInf(f32(0.0)));
assert(!isInf(f32(-0.0)));
assert(!isInf(f64(0.0)));
assert(!isInf(f64(-0.0)));
assert(isInf(math.inf(f16)));
assert(isInf(-math.inf(f16)));
assert(isInf(math.inf(f32)));
assert(isInf(-math.inf(f32)));
assert(isInf(math.inf(f64)));
@ -61,10 +75,14 @@ test "math.isInf" {
}
test "math.isPositiveInf" {
assert(!isPositiveInf(f16(0.0)));
assert(!isPositiveInf(f16(-0.0)));
assert(!isPositiveInf(f32(0.0)));
assert(!isPositiveInf(f32(-0.0)));
assert(!isPositiveInf(f64(0.0)));
assert(!isPositiveInf(f64(-0.0)));
assert(isPositiveInf(math.inf(f16)));
assert(!isPositiveInf(-math.inf(f16)));
assert(isPositiveInf(math.inf(f32)));
assert(!isPositiveInf(-math.inf(f32)));
assert(isPositiveInf(math.inf(f64)));
@ -72,10 +90,14 @@ test "math.isPositiveInf" {
}
test "math.isNegativeInf" {
assert(!isNegativeInf(f16(0.0)));
assert(!isNegativeInf(f16(-0.0)));
assert(!isNegativeInf(f32(0.0)));
assert(!isNegativeInf(f32(-0.0)));
assert(!isNegativeInf(f64(0.0)));
assert(!isNegativeInf(f64(-0.0)));
assert(!isNegativeInf(math.inf(f16)));
assert(isNegativeInf(-math.inf(f16)));
assert(!isNegativeInf(math.inf(f32)));
assert(isNegativeInf(-math.inf(f32)));
assert(!isNegativeInf(math.inf(f64)));

View File

@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isNan(x: var) bool {
const T = @typeOf(x);
switch (T) {
f16 => {
const bits = @bitCast(u16, x);
return (bits & 0x7fff) > 0x7c00;
},
f32 => {
const bits = @bitCast(u32, x);
return bits & 0x7FFFFFFF > 0x7F800000;
@ -26,8 +30,10 @@ pub fn isSignalNan(x: var) bool {
}
test "math.isNan" {
assert(isNan(math.nan(f16)));
assert(isNan(math.nan(f32)));
assert(isNan(math.nan(f64)));
assert(!isNan(f16(1.0)));
assert(!isNan(f32(1.0)));
assert(!isNan(f64(1.0)));
}

View File

@ -5,6 +5,10 @@ const assert = std.debug.assert;
pub fn isNormal(x: var) bool {
const T = @typeOf(x);
switch (T) {
f16 => {
const bits = @bitCast(u16, x);
return (bits + 1024) & 0x7FFF >= 2048;
},
f32 => {
const bits = @bitCast(u32, x);
return (bits + 0x00800000) & 0x7FFFFFFF >= 0x01000000;
@ -20,8 +24,13 @@ pub fn isNormal(x: var) bool {
}
test "math.isNormal" {
assert(!isNormal(math.nan(f16)));
assert(!isNormal(math.nan(f32)));
assert(!isNormal(math.nan(f64)));
assert(!isNormal(f16(0)));
assert(!isNormal(f32(0)));
assert(!isNormal(f64(0)));
assert(isNormal(f16(1.0)));
assert(isNormal(f32(1.0)));
assert(isNormal(f64(1.0)));
}

View File

@ -2,6 +2,7 @@ const math = @import("index.zig");
pub fn nan(comptime T: type) T {
return switch (T) {
f16 => @bitCast(f16, math.nan_u16),
f32 => @bitCast(f32, math.nan_u32),
f64 => @bitCast(f64, math.nan_u64),
else => @compileError("nan not implemented for " ++ @typeName(T)),
@ -12,6 +13,7 @@ pub fn nan(comptime T: type) T {
// representation in the future when required.
pub fn snan(comptime T: type) T {
return switch (T) {
f16 => @bitCast(f16, math.nan_u16),
f32 => @bitCast(f32, math.nan_u32),
f64 => @bitCast(f64, math.nan_u64),
else => @compileError("snan not implemented for " ++ @typeName(T)),

View File

@ -5,12 +5,18 @@ const assert = std.debug.assert;
pub fn signbit(x: var) bool {
const T = @typeOf(x);
return switch (T) {
f16 => signbit16(x),
f32 => signbit32(x),
f64 => signbit64(x),
else => @compileError("signbit not implemented for " ++ @typeName(T)),
};
}
// True when the sign bit (bit 15) of the f16 representation is set;
// this includes -0.0.
fn signbit16(x: f16) bool {
    return (@bitCast(u16, x) & 0x8000) != 0;
}
fn signbit32(x: f32) bool {
const bits = @bitCast(u32, x);
return bits >> 31 != 0;
@ -22,10 +28,16 @@ fn signbit64(x: f64) bool {
}
test "math.signbit" {
assert(signbit(f16(4.0)) == signbit16(4.0));
assert(signbit(f32(4.0)) == signbit32(4.0));
assert(signbit(f64(4.0)) == signbit64(4.0));
}
test "math.signbit16" {
assert(!signbit16(4.0));
assert(signbit16(-3.0));
}
test "math.signbit32" {
assert(!signbit32(4.0));
assert(signbit32(-3.0));

View File

@ -31,10 +31,25 @@ pub fn sqrt(x: var) (if (@typeId(@typeOf(x)) == TypeId.Int) @IntType(false, @typ
}
test "math.sqrt" {
assert(sqrt(f16(0.0)) == @sqrt(f16, 0.0));
assert(sqrt(f32(0.0)) == @sqrt(f32, 0.0));
assert(sqrt(f64(0.0)) == @sqrt(f64, 0.0));
}
test "math.sqrt16" {
const epsilon = 0.000001;
assert(@sqrt(f16, 0.0) == 0.0);
assert(math.approxEq(f16, @sqrt(f16, 2.0), 1.414214, epsilon));
assert(math.approxEq(f16, @sqrt(f16, 3.6), 1.897367, epsilon));
assert(@sqrt(f16, 4.0) == 2.0);
assert(math.approxEq(f16, @sqrt(f16, 7.539840), 2.745877, epsilon));
assert(math.approxEq(f16, @sqrt(f16, 19.230934), 4.385309, epsilon));
assert(@sqrt(f16, 64.0) == 8.0);
assert(math.approxEq(f16, @sqrt(f16, 64.1), 8.006248, epsilon));
assert(math.approxEq(f16, @sqrt(f16, 8942.230469), 94.563370, epsilon));
}
test "math.sqrt32" {
const epsilon = 0.000001;
@ -63,6 +78,14 @@ test "math.sqrt64" {
assert(math.approxEq(f64, @sqrt(f64, 8942.230469), 94.563367, epsilon));
}
test "math.sqrt16.special" {
assert(math.isPositiveInf(@sqrt(f16, math.inf(f16))));
assert(@sqrt(f16, 0.0) == 0.0);
assert(@sqrt(f16, -0.0) == -0.0);
assert(math.isNan(@sqrt(f16, -1.0)));
assert(math.isNan(@sqrt(f16, math.nan(f16))));
}
test "math.sqrt32.special" {
assert(math.isPositiveInf(@sqrt(f32, math.inf(f32))));
assert(@sqrt(f32, 0.0) == 0.0);

View File

@ -31,16 +31,8 @@ pub const Allocator = struct {
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
freeFn: fn (self: *Allocator, old_mem: []u8) void,
/// Call destroy with the result
pub fn create(self: *Allocator, comptime T: type) !*T {
if (@sizeOf(T) == 0) return *{};
const slice = try self.alloc(T, 1);
return &slice[0];
}
/// Call destroy with the result
/// TODO once #733 is solved, this will replace create
pub fn construct(self: *Allocator, init: var) Error!*@typeOf(init) {
/// Call `destroy` with the result
pub fn create(self: *Allocator, init: var) Error!*@typeOf(init) {
const T = @typeOf(init);
if (@sizeOf(T) == 0) return &{};
const slice = try self.alloc(T, 1);
@ -49,7 +41,7 @@ pub const Allocator = struct {
return ptr;
}
/// `ptr` should be the return value of `construct` or `create`
/// `ptr` should be the return value of `create`
pub fn destroy(self: *Allocator, ptr: var) void {
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
@ -70,7 +62,7 @@ pub const Allocator = struct {
for (byte_slice) |*byte| {
byte.* = undefined;
}
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
pub fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
@ -86,7 +78,7 @@ pub const Allocator = struct {
return ([*]align(alignment) T)(undefined)[0..0];
}
const old_byte_slice = ([]u8)(old_mem);
const old_byte_slice = @sliceToBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
assert(byte_slice.len == byte_count);
@ -96,7 +88,7 @@ pub const Allocator = struct {
byte.* = undefined;
}
}
return ([]T)(@alignCast(alignment, byte_slice));
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
/// Reallocate, but `n` must be less than or equal to `old_mem.len`.
@ -118,13 +110,13 @@ pub const Allocator = struct {
// n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * n;
const byte_slice = self.reallocFn(self, ([]u8)(old_mem), byte_count, alignment) catch unreachable;
const byte_slice = self.reallocFn(self, @sliceToBytes(old_mem), byte_count, alignment) catch unreachable;
assert(byte_slice.len == byte_count);
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
return @bytesToSlice(T, @alignCast(alignment, byte_slice));
}
pub fn free(self: *Allocator, memory: var) void {
const bytes = ([]const u8)(memory);
const bytes = @sliceToBytes(memory);
if (bytes.len == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);

View File

@ -68,7 +68,7 @@ pub const Address = struct {
pub fn parseIp4(buf: []const u8) !u32 {
var result: u32 = undefined;
const out_ptr = ([]u8)((*[1]u32)(&result)[0..]);
const out_ptr = @sliceToBytes((*[1]u32)(&result)[0..]);
var x: u8 = 0;
var index: u8 = 0;

View File

@ -85,10 +85,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
errdefer allocator.destroy(child);
child.* = ChildProcess{
const child = try allocator.create(ChildProcess{
.allocator = allocator,
.argv = argv,
.pid = undefined,
@ -109,8 +106,8 @@ pub const ChildProcess = struct {
.stdin_behavior = StdIo.Inherit,
.stdout_behavior = StdIo.Inherit,
.stderr_behavior = StdIo.Inherit,
};
});
errdefer allocator.destroy(child);
return child;
}
@ -318,7 +315,7 @@ pub const ChildProcess = struct {
// Here we potentially return the fork child's error
// from the parent pid.
if (err_int != @maxValue(ErrInt)) {
return SpawnError(err_int);
return @errSetCast(SpawnError, @intToError(err_int));
}
return statusToTerm(status);
@ -756,7 +753,7 @@ fn destroyPipe(pipe: *const [2]i32) void {
// Child of fork calls this to report an error to the fork parent.
// Then the child exits.
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
_ = writeIntFd(fd, ErrInt(err));
_ = writeIntFd(fd, ErrInt(@errorToInt(err)));
posix.exit(1);
}

View File

@ -1805,7 +1805,7 @@ pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
const buf = try allocator.alignedAlloc(u8, @alignOf([]u8), total_bytes);
errdefer allocator.free(buf);
const result_slice_list = ([][]u8)(buf[0..slice_list_bytes]);
const result_slice_list = @bytesToSlice([]u8, buf[0..slice_list_bytes]);
const result_contents = buf[slice_list_bytes..];
mem.copy(u8, result_contents, contents_slice);
@ -2468,7 +2468,7 @@ pub const Thread = struct {
data: Data,
pub const use_pthreads = is_posix and builtin.link_libc;
const Data = if (use_pthreads)
pub const Data = if (use_pthreads)
struct {
handle: c.pthread_t,
stack_addr: usize,
@ -2582,10 +2582,16 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
outer_context.inner = context;
outer_context.thread.data.heap_handle = heap_handle;
outer_context.thread.data.alloc_start = bytes_ptr;
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext{
.thread = Thread{
.data = Thread.Data{
.heap_handle = heap_handle,
.alloc_start = bytes_ptr,
.handle = undefined,
},
},
.inner = context,
}) catch unreachable;
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse {

View File

@ -79,7 +79,7 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size .. size + usize(name_info.FileNameLength)];
const name_wide = ([]u16)(name_bytes);
const name_wide = @bytesToSlice(u16, name_bytes);
return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or
mem.indexOf(u16, name_wide, []u16{ '-', 'p', 't', 'y' }) != null;
}

View File

@ -116,7 +116,7 @@ pub const Random = struct {
pub fn floatNorm(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.NormDist);
switch (T) {
f32 => return f32(value),
f32 => return @floatCast(f32, value),
f64 => return value,
else => @compileError("unknown floating point type"),
}
@ -128,7 +128,7 @@ pub const Random = struct {
pub fn floatExp(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.ExpDist);
switch (T) {
f32 => return f32(value),
f32 => return @floatCast(f32, value),
f64 => return value,
else => @compileError("unknown floating point type"),
}

View File

@ -84,12 +84,12 @@ fn ZigTableGen(
for (tables.x[2..256]) |*entry, i| {
const last = tables.x[2 + i - 1];
*entry = f_inv(v / last + f(last));
entry.* = f_inv(v / last + f(last));
}
tables.x[256] = 0;
for (tables.f[0..]) |*entry, i| {
*entry = f(tables.x[i]);
entry.* = f(tables.x[i]);
}
return tables;
@ -160,3 +160,7 @@ test "ziggurant exp dist sanity" {
_ = prng.random.floatExp(f64);
}
}
test "ziggurat table gen" {
const table = NormDist;
}

View File

@ -210,7 +210,9 @@ fn generic_fmod(comptime T: type, x: T, y: T) T {
}
fn isNan(comptime T: type, bits: T) bool {
if (T == u32) {
if (T == u16) {
return (bits & 0x7fff) > 0x7c00;
} else if (T == u32) {
return (bits & 0x7fffffff) > 0x7f800000;
} else if (T == u64) {
return (bits & (@maxValue(u64) >> 1)) > (u64(0x7ff) << 52);

View File

@ -0,0 +1,89 @@
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
// Widen f64 to f128 (compiler-rt-style symbol name).
pub extern fn __extenddftf2(a: f64) f128 {
    return extendXfYf2(f128, f64, a);
}

// Widen f32 to f128 (compiler-rt-style symbol name).
pub extern fn __extendsftf2(a: f32) f128 {
    return extendXfYf2(f128, f32, a);
}

// Widen f16 to f32. The half-precision argument arrives as its raw u16 bit
// pattern and is reinterpreted as f16 before the generic extension.
pub extern fn __extendhfsf2(a: u16) f32 {
    return extendXfYf2(f32, f16, @bitCast(f16, a));
}
const CHAR_BIT = 8;
// Generic IEEE-754 widening conversion: extends a value of the narrower
// float type `src_t` to the wider float type `dst_t`. All field widths and
// masks are derived at comptime from the two type parameters. Requires
// dst_t to have at least as many significand and exponent bits as src_t
// (the `dstSigBits - srcSigBits` shifts below would otherwise underflow).
inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    // Unsigned integer types with the same bit width as each float type,
    // used as raw bit-pattern representations.
    const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
    const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);
    // NOTE(review): SrcShift is currently unused below.
    const SrcShift = std.math.Log2Int(src_rep_t);
    const DstShift = std.math.Log2Int(dst_rep_t);

    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = @sizeOf(src_t) * CHAR_BIT;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;

    const srcMinNormal = 1 << srcSigBits;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;

    const dstBits = @sizeOf(dst_t) * CHAR_BIT;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;

    const dstMinNormal: dst_rep_t = dst_rep_t(1) << dstSigBits;

    // Break a into a sign and representation of the absolute value
    const aRep: src_rep_t = @bitCast(src_rep_t, a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;

    // Wrapping subtraction classifies "normal" with one unsigned compare:
    // denormals/zero wrap around above the range, inf/NaN stay above it.
    if (aAbs -% srcMinNormal < srcInfinity - srcMinNormal) {
        // a is a normal number.
        // Extend to the destination type by shifting the significand and
        // exponent into the proper position and rebiasing the exponent.
        absResult = dst_rep_t(aAbs) << (dstSigBits - srcSigBits);
        absResult += (dstExpBias - srcExpBias) << dstSigBits;
    } else if (aAbs >= srcInfinity) {
        // a is NaN or infinity.
        // Conjure the result by beginning with infinity, then setting the qNaN
        // bit (if needed) and right-aligning the rest of the trailing NaN
        // payload field.
        absResult = dstInfExp << dstSigBits;
        absResult |= dst_rep_t(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
        absResult |= dst_rep_t(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
    } else if (aAbs != 0) {
        // a is denormal.
        // renormalize the significand and clear the leading bit, then insert
        // the correct adjusted exponent in the destination type.
        const scale: u32 = @clz(aAbs) - @clz(src_rep_t(srcMinNormal));
        absResult = dst_rep_t(aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale);
        absResult ^= dstMinNormal;
        const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1;
        absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits;
    } else {
        // a is zero.
        absResult = 0;
    }

    // Apply the signbit to (dst_t)abs(a).
    const result: dst_rep_t align(@alignOf(dst_t)) = absResult | dst_rep_t(sign) << (dstBits - srcBits);
    return @bitCast(dst_t, result);
}
test "import extendXfYf2" {
_ = @import("extendXfYf2_test.zig");
}

View File

@ -0,0 +1,155 @@
const __extenddftf2 = @import("extendXfYf2.zig").__extenddftf2;
const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2;
const __extendsftf2 = @import("extendXfYf2.zig").__extendsftf2;
const assert = @import("std").debug.assert;
// Checks __extenddftf2(a) against an expected f128 bit pattern, given as
// the high and low 64-bit halves. When the expectation is the canonical
// qNaN pattern, any other NaN encoding is also accepted.
fn test__extenddftf2(a: f64, expectedHi: u64, expectedLo: u64) void {
    const x = __extenddftf2(a);

    const rep = @bitCast(u128, x);
    const hi = @intCast(u64, rep >> 64);
    const lo = @truncate(u64, rep);

    if (hi == expectedHi and lo == expectedLo)
        return;

    // Accept any other NaN encoding (e.g. a signaling NaN): exponent bits
    // all ones with any nonzero mantissa bits.
    if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
        if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
            ((hi & 0xffffffffffff) > 0 or lo > 0))
        {
            return;
        }
    }

    @panic("__extenddftf2 test failure");
}
// Checks __extendhfsf2 against an expected f32 bit pattern. `a` is the raw
// f16 bit pattern (matching the function's u16 ABI parameter).
fn test__extendhfsf2(a: u16, expected: u32) void {
    const x = __extendhfsf2(a);

    const rep = @bitCast(u32, x);
    if (rep == expected) {
        // When the result is a NaN, the float compare below would report
        // unequal even though the bits match, so accept on bits alone.
        if (rep & 0x7fffffff > 0x7f800000) {
            return; // NaN is always unequal.
        }
        if (x == @bitCast(f32, expected)) {
            return;
        }
    }

    @panic("__extendhfsf2 test failure");
}
// Checks __extendsftf2(a) against an expected f128 bit pattern, given as
// the high and low 64-bit halves. When the expectation is the canonical
// qNaN pattern, any other NaN encoding is also accepted.
fn test__extendsftf2(a: f32, expectedHi: u64, expectedLo: u64) void {
    const x = __extendsftf2(a);

    const rep = @bitCast(u128, x);
    const hi = @intCast(u64, rep >> 64);
    const lo = @truncate(u64, rep);

    if (hi == expectedHi and lo == expectedLo)
        return;

    // Accept any other NaN encoding (e.g. a signaling NaN): exponent bits
    // all ones with any nonzero mantissa bits.
    if (expectedHi == 0x7fff800000000000 and expectedLo == 0x0) {
        if ((hi & 0x7fff000000000000) == 0x7fff000000000000 and
            ((hi & 0xffffffffffff) > 0 or lo > 0))
        {
            return;
        }
    }

    @panic("__extendsftf2 test failure");
}
test "extenddftf2" {
// qNaN
test__extenddftf2(makeQNaN64(), 0x7fff800000000000, 0x0);
// NaN
test__extenddftf2(makeNaN64(0x7100000000000), 0x7fff710000000000, 0x0);
// inf
test__extenddftf2(makeInf64(), 0x7fff000000000000, 0x0);
// zero
test__extenddftf2(0.0, 0x0, 0x0);
test__extenddftf2(0x1.23456789abcdefp+5, 0x400423456789abcd, 0xf000000000000000);
test__extenddftf2(0x1.edcba987654321fp-9, 0x3ff6edcba9876543, 0x2000000000000000);
test__extenddftf2(0x1.23456789abcdefp+45, 0x402c23456789abcd, 0xf000000000000000);
test__extenddftf2(0x1.edcba987654321fp-45, 0x3fd2edcba9876543, 0x2000000000000000);
}
test "extendhfsf2" {
test__extendhfsf2(0x7e00, 0x7fc00000); // qNaN
test__extendhfsf2(0x7f00, 0x7fe00000); // sNaN
test__extendhfsf2(0x7c01, 0x7f802000); // sNaN
test__extendhfsf2(0, 0); // 0
test__extendhfsf2(0x8000, 0x80000000); // -0
test__extendhfsf2(0x7c00, 0x7f800000); // inf
test__extendhfsf2(0xfc00, 0xff800000); // -inf
test__extendhfsf2(0x0001, 0x33800000); // denormal (min), 2**-24
test__extendhfsf2(0x8001, 0xb3800000); // denormal (min), -2**-24
test__extendhfsf2(0x03ff, 0x387fc000); // denormal (max), 2**-14 - 2**-24
test__extendhfsf2(0x83ff, 0xb87fc000); // denormal (max), -2**-14 + 2**-24
test__extendhfsf2(0x0400, 0x38800000); // normal (min), 2**-14
test__extendhfsf2(0x8400, 0xb8800000); // normal (min), -2**-14
test__extendhfsf2(0x7bff, 0x477fe000); // normal (max), 65504
test__extendhfsf2(0xfbff, 0xc77fe000); // normal (max), -65504
test__extendhfsf2(0x3c01, 0x3f802000); // normal, 1 + 2**-10
test__extendhfsf2(0xbc01, 0xbf802000); // normal, -1 - 2**-10
test__extendhfsf2(0x3555, 0x3eaaa000); // normal, approx. 1/3
test__extendhfsf2(0xb555, 0xbeaaa000); // normal, approx. -1/3
}
test "extendsftf2" {
// qNaN
test__extendsftf2(makeQNaN32(), 0x7fff800000000000, 0x0);
// NaN
test__extendsftf2(makeNaN32(0x410000), 0x7fff820000000000, 0x0);
// inf
test__extendsftf2(makeInf32(), 0x7fff000000000000, 0x0);
// zero
test__extendsftf2(0.0, 0x0, 0x0);
test__extendsftf2(0x1.23456p+5, 0x4004234560000000, 0x0);
test__extendsftf2(0x1.edcbap-9, 0x3ff6edcba0000000, 0x0);
test__extendsftf2(0x1.23456p+45, 0x402c234560000000, 0x0);
test__extendsftf2(0x1.edcbap-45, 0x3fd2edcba0000000, 0x0);
}
// Canonical f64 quiet NaN: exponent all ones, top mantissa bit set.
fn makeQNaN64() f64 {
    const bits: u64 = 0x7ff8000000000000;
    return @bitCast(f64, bits);
}

// Positive f64 infinity: exponent all ones, mantissa zero.
fn makeInf64() f64 {
    const bits: u64 = 0x7ff0000000000000;
    return @bitCast(f64, bits);
}

// f64 NaN carrying `rand` (masked to 52 bits) as its mantissa payload.
fn makeNaN64(rand: u64) f64 {
    const payload = rand & 0xfffffffffffff;
    return @bitCast(f64, 0x7ff0000000000000 | payload);
}

// Canonical f32 quiet NaN: exponent all ones, top mantissa bit set.
fn makeQNaN32() f32 {
    const bits: u32 = 0x7fc00000;
    return @bitCast(f32, bits);
}

// f32 NaN carrying `rand` (masked to 23 bits) as its mantissa payload.
fn makeNaN32(rand: u32) f32 {
    const payload = rand & 0x7fffff;
    return @bitCast(f32, 0x7f800000 | payload);
}

// Positive f32 infinity: exponent all ones, mantissa zero.
fn makeInf32() f32 {
    const bits: u32 = 0x7f800000;
    return @bitCast(f32, bits);
}

View File

@ -0,0 +1,69 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const DBL_MANT_DIG = 53;
// Converts a signed 128-bit integer to f64 (compiler-rt-style symbol),
// rounding to nearest; ties are resolved via the P/Q/R scheme described
// inline, where R is the "or" of all bits shifted out (a sticky bit).
pub extern fn __floattidf(arg: i128) f64 {
    @setRuntimeSafety(is_test);

    if (arg == 0)
        return 0.0;

    var ai = arg;
    const N: u32 = 128;
    // Branchless absolute value: si is 0 for non-negative arg and all ones
    // for negative arg (arithmetic shift), so (ai ^ si) -% si negates
    // exactly when arg is negative.
    const si = ai >> @intCast(u7, (N - 1));
    ai = ((ai ^ si) -% si);
    var a = @bitCast(u128, ai);

    const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
    var e: i32 = sd - 1; // exponent
    if (sd > DBL_MANT_DIG) {
        // The magnitude has more bits than the f64 mantissa can hold,
        // so it must be rounded.
        // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
        // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
        // 12345678901234567890123456
        // 1 = msb 1 bit
        // P = bit DBL_MANT_DIG-1 bits to the right of 1
        // Q = bit DBL_MANT_DIG bits to the right of 1
        // R = "or" of all bits to the right of Q
        switch (sd) {
            DBL_MANT_DIG + 1 => {
                a <<= 1;
            },
            DBL_MANT_DIG + 2 => {},
            else => {
                const shift1_amt = @intCast(i32, sd - (DBL_MANT_DIG + 2));
                const shift1_amt_u7 = @intCast(u7, shift1_amt);
                const shift2_amt = @intCast(i32, N + (DBL_MANT_DIG + 2)) - sd;
                const shift2_amt_u7 = @intCast(u7, shift2_amt);
                // The low bit collects whether any discarded bit was set.
                a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
            },
        }
        // finish
        a |= @boolToInt((a & 4) != 0); // Or P into R
        a += 1; // round - this step may add a significant bit
        a >>= 2; // dump Q and R

        // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
        if ((a & (u128(1) << DBL_MANT_DIG)) != 0) {
            // Rounding carried into a new top bit; renormalize.
            a >>= 1;
            e += 1;
        }
        // a is now rounded to DBL_MANT_DIG bits
    } else {
        a <<= @intCast(u7, DBL_MANT_DIG - sd);
        // a is now rounded to DBL_MANT_DIG bits
    }

    // Top 32 bits of the original argument; only bit 31 (the sign) is
    // used below.
    const s = @bitCast(u128, arg) >> (128 - 32);
    // Assemble sign, biased exponent (bias 1023), and mantissa into the
    // two 32-bit halves of the f64 bit pattern.
    const high: u64 = (@intCast(u64, s) & 0x80000000) | // sign
        (@intCast(u32, (e + 1023)) << 20) | // exponent
        (@truncate(u32, a >> 32) & 0x000fffff); // mantissa-high
    const low: u64 = @truncate(u32, a); // mantissa-low

    return @bitCast(f64, low | (high << 32));
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floattidf" {
    _ = @import("floattidf_test.zig");
}

View File

@ -0,0 +1,84 @@
const __floattidf = @import("floattidf.zig").__floattidf;
const assert = @import("std").debug.assert;
fn test__floattidf(a: i128, expected: f64) void {
    // Convert and compare against the reference result in one step.
    assert(__floattidf(a) == expected);
}
// Reference vectors ported from compiler-rt's floattidf_test: zero, small
// values, rounding around the 53-bit mantissa boundary, and i128 extremes.
test "floattidf" {
    test__floattidf(0, 0.0);
    test__floattidf(1, 1.0);
    test__floattidf(2, 2.0);
    test__floattidf(20, 20.0);
    test__floattidf(-1, -1.0);
    test__floattidf(-2, -2.0);
    test__floattidf(-20, -20.0);
    test__floattidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
    test__floattidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
    test__floattidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
    test__floattidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
    test__floattidf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
    test__floattidf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
    test__floattidf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
    test__floattidf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
    test__floattidf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
    test__floattidf(make_ti(0x8000000000000001, 0), -0x1.000000p+127);
    test__floattidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
    test__floattidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
    test__floattidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
    test__floattidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
    test__floattidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
    test__floattidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
    test__floattidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
    test__floattidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
    test__floattidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
    test__floattidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
    test__floattidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
    test__floattidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
    test__floattidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
    test__floattidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
    test__floattidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
    test__floattidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
    test__floattidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
    test__floattidf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
    test__floattidf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
    test__floattidf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
    test__floattidf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
    test__floattidf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
    test__floattidf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
}
fn make_ti(high: u64, low: u64) i128 {
    // Pack two 64-bit halves into one 128-bit word, then reinterpret as signed.
    const wide = (u128(high) << 64) | u128(low);
    return @bitCast(i128, wide);
}

View File

@ -0,0 +1,69 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const FLT_MANT_DIG = 24;
/// Convert a signed 128-bit integer to f32, rounding to nearest
/// (compiler-rt's round/sticky-bit scheme below). Port of __floattisf.
pub extern fn __floattisf(arg: i128) f32 {
    @setRuntimeSafety(is_test);
    if (arg == 0)
        return 0.0;
    var ai = arg;
    const N: u32 = 128;
    // Branch-free absolute value: si is all-ones when arg is negative, else zero.
    const si = ai >> @intCast(u7, (N - 1));
    ai = ((ai ^ si) -% si);
    var a = @bitCast(u128, ai);
    const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
    var e: i32 = sd - 1; // exponent
    if (sd > FLT_MANT_DIG) {
        // Too many significant bits: shift down to FLT_MANT_DIG+2 bits, folding
        // every discarded bit into a sticky bit R, then round.
        // start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
        // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
        //                                                12345678901234567890123456
        // 1 = msb 1 bit
        // P = bit FLT_MANT_DIG-1 bits to the right of 1
        // Q = bit FLT_MANT_DIG bits to the right of 1
        // R = "or" of all bits to the right of Q
        switch (sd) {
            FLT_MANT_DIG + 1 => {
                a <<= 1;
            },
            FLT_MANT_DIG + 2 => {},
            else => {
                const shift1_amt = @intCast(i32, sd - (FLT_MANT_DIG + 2));
                const shift1_amt_u7 = @intCast(u7, shift1_amt);
                const shift2_amt = @intCast(i32, N + (FLT_MANT_DIG + 2)) - sd;
                const shift2_amt_u7 = @intCast(u7, shift2_amt);
                a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
            },
        }
        // finish
        a |= @boolToInt((a & 4) != 0); // Or P into R
        a += 1; // round - this step may add a significant bit
        a >>= 2; // dump Q and R
        // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
        if ((a & (u128(1) << FLT_MANT_DIG)) != 0) {
            // Rounding carried out of the top bit; renormalize.
            a >>= 1;
            e += 1;
        }
        // a is now rounded to FLT_MANT_DIG bits
    } else {
        a <<= @intCast(u7, FLT_MANT_DIG - sd);
        // a is now rounded to FLT_MANT_DIG bits
    }
    // Assemble sign, biased exponent and mantissa into the IEEE-754 binary32 layout.
    const s = @bitCast(u128, arg) >> (128 - 32);
    const r = (@intCast(u32, s) & 0x80000000) | // sign
        (@intCast(u32, (e + 127)) << 23) | // exponent
        (@truncate(u32, a) & 0x007fffff); // mantissa
    return @bitCast(f32, r);
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floattisf" {
    _ = @import("floattisf_test.zig");
}

View File

@ -0,0 +1,60 @@
const __floattisf = @import("floattisf.zig").__floattisf;
const assert = @import("std").debug.assert;
fn test__floattisf(a: i128, expected: f32) void {
    // Convert and compare against the reference result in one step.
    assert(__floattisf(a) == expected);
}
// Reference vectors ported from compiler-rt's floattisf_test: zero, small
// values, rounding around the 24-bit mantissa boundary, and i128-range inputs.
test "floattisf" {
    test__floattisf(0, 0.0);
    test__floattisf(1, 1.0);
    test__floattisf(2, 2.0);
    test__floattisf(-1, -1.0);
    test__floattisf(-2, -2.0);
    test__floattisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
    test__floattisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
    test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000008000000000), -0x1.FFFFFEp+62);
    test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000010000000000), -0x1.FFFFFCp+62);
    test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000000), -0x1.000000p+63);
    test__floattisf(make_ti(0xFFFFFFFFFFFFFFFF, 0x8000000000000001), -0x1.000000p+63);
    test__floattisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
    test__floattisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
    test__floattisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
    test__floattisf(make_ti(0x0007FB72E8000000, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72EA000000, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72EB000000, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72EBFFFFFF, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72EC000000, 0), 0x1.FEDCBCp+114);
    test__floattisf(make_ti(0x0007FB72E8000001, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72E6000000, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72E7000000, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72E7FFFFFF, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72E4000001, 0), 0x1.FEDCBAp+114);
    test__floattisf(make_ti(0x0007FB72E4000000, 0), 0x1.FEDCB8p+114);
}
fn make_ti(high: u64, low: u64) i128 {
    // Pack two 64-bit halves into one 128-bit word, then reinterpret as signed.
    const wide = (u128(high) << 64) | u128(low);
    return @bitCast(i128, wide);
}

View File

@ -0,0 +1,69 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const LDBL_MANT_DIG = 113;
/// Convert a signed 128-bit integer to f128, rounding to nearest
/// (compiler-rt's round/sticky-bit scheme below). Port of __floattitf.
pub extern fn __floattitf(arg: i128) f128 {
    @setRuntimeSafety(is_test);
    if (arg == 0)
        return 0.0;
    var ai = arg;
    const N: u32 = 128;
    // Branch-free absolute value: si is all-ones when arg is negative, else zero.
    const si = ai >> @intCast(u7, (N - 1));
    ai = ((ai ^ si) -% si);
    var a = @bitCast(u128, ai);
    const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
    var e: i32 = sd - 1; // exponent
    if (sd > LDBL_MANT_DIG) {
        // Too many significant bits: shift down to LDBL_MANT_DIG+2 bits, folding
        // every discarded bit into a sticky bit R, then round.
        // start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
        // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
        //                                                12345678901234567890123456
        // 1 = msb 1 bit
        // P = bit LDBL_MANT_DIG-1 bits to the right of 1
        // Q = bit LDBL_MANT_DIG bits to the right of 1
        // R = "or" of all bits to the right of Q
        switch (sd) {
            LDBL_MANT_DIG + 1 => {
                a <<= 1;
            },
            LDBL_MANT_DIG + 2 => {},
            else => {
                const shift1_amt = @intCast(i32, sd - (LDBL_MANT_DIG + 2));
                const shift1_amt_u7 = @intCast(u7, shift1_amt);
                const shift2_amt = @intCast(i32, N + (LDBL_MANT_DIG + 2)) - sd;
                const shift2_amt_u7 = @intCast(u7, shift2_amt);
                a = (a >> shift1_amt_u7) | @boolToInt((a & (@intCast(u128, @maxValue(u128)) >> shift2_amt_u7)) != 0);
            },
        }
        // finish
        a |= @boolToInt((a & 4) != 0); // Or P into R
        a += 1; // round - this step may add a significant bit
        a >>= 2; // dump Q and R
        // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
        if ((a & (u128(1) << LDBL_MANT_DIG)) != 0) {
            // Rounding carried out of the top bit; renormalize.
            a >>= 1;
            e += 1;
        }
        // a is now rounded to LDBL_MANT_DIG bits
    } else {
        a <<= @intCast(u7, LDBL_MANT_DIG - sd);
        // a is now rounded to LDBL_MANT_DIG bits
    }
    // Assemble sign, biased exponent and mantissa into the IEEE-754 binary128 layout.
    const s = @bitCast(u128, arg) >> (128 - 64);
    const high: u128 = (@intCast(u64, s) & 0x8000000000000000) | // sign
        (@intCast(u64, (e + 16383)) << 48) | // exponent
        (@truncate(u64, a >> 64) & 0x0000ffffffffffff); // mantissa-high
    const low = @truncate(u64, a); // mantissa-low
    return @bitCast(f128, low | (high << 64));
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floattitf" {
    _ = @import("floattitf_test.zig");
}

View File

@ -0,0 +1,96 @@
const __floattitf = @import("floattitf.zig").__floattitf;
const assert = @import("std").debug.assert;
fn test__floattitf(a: i128, expected: f128) void {
    // Convert and compare against the reference result in one step.
    assert(__floattitf(a) == expected);
}
// Reference vectors ported from compiler-rt's floattitf_test: zero, small
// values, rounding around the 113-bit mantissa boundary, and i128 extremes.
test "floattitf" {
    test__floattitf(0, 0.0);
    test__floattitf(1, 1.0);
    test__floattitf(2, 2.0);
    test__floattitf(20, 20.0);
    test__floattitf(-1, -1.0);
    test__floattitf(-2, -2.0);
    test__floattitf(-20, -20.0);
    test__floattitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
    test__floattitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
    test__floattitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
    test__floattitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
    test__floattitf(make_ti(0x8000008000000000, 0), -0x1.FFFFFEp+126);
    test__floattitf(make_ti(0x8000000000000800, 0), -0x1.FFFFFFFFFFFFEp+126);
    test__floattitf(make_ti(0x8000010000000000, 0), -0x1.FFFFFCp+126);
    test__floattitf(make_ti(0x8000000000001000, 0), -0x1.FFFFFFFFFFFFCp+126);
    test__floattitf(make_ti(0x8000000000000000, 0), -0x1.000000p+127);
    test__floattitf(make_ti(0x8000000000000001, 0), -0x1.FFFFFFFFFFFFFFFCp+126);
    test__floattitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
    test__floattitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
    test__floattitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
    test__floattitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
    test__floattitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
    test__floattitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
    test__floattitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
    test__floattitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
    test__floattitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
    test__floattitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
    test__floattitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
    test__floattitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
    test__floattitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
    test__floattitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
    test__floattitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
    test__floattitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
    test__floattitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
    test__floattitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
    test__floattitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
    test__floattitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
    test__floattitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
    test__floattitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
    test__floattitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
    test__floattitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
    test__floattitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
    test__floattitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
    test__floattitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
    test__floattitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
    test__floattitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
    test__floattitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
    test__floattitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
    test__floattitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
    test__floattitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
    test__floattitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
    test__floattitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
    test__floattitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
    test__floattitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
    test__floattitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
    test__floattitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
    test__floattitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
    test__floattitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
    test__floattitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
    test__floattitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
}
fn make_ti(high: u64, low: u64) i128 {
    // Pack two 64-bit halves into one 128-bit word, then reinterpret as signed.
    const wide = (u128(high) << 64) | u128(low);
    return @bitCast(i128, wide);
}

View File

@ -0,0 +1,28 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const std = @import("std");
/// Convert an unsigned integer to f128 by placing the top bits of `a` in the
/// mantissa and encoding its magnitude in the exponent.
/// NOTE(review): despite the "di" (64-bit) name the parameter is u128; a value
/// with more than mantissa_bits+1 significant bits would make `shift` negative
/// at the @intCast below -- confirm callers only pass 64-bit-range values.
pub extern fn __floatunditf(a: u128) f128 {
    @setRuntimeSafety(is_test);
    if (a == 0) {
        return 0;
    }
    const mantissa_bits = std.math.floatMantissaBits(f128);
    const exponent_bits = std.math.floatExponentBits(f128);
    const exponent_bias = (1 << (exponent_bits - 1)) - 1;
    const implicit_bit = 1 << mantissa_bits;
    // Position of the highest set bit == unbiased exponent.
    const exp = (u128.bit_count - 1) - @clz(a);
    const shift = mantissa_bits - @intCast(u7, exp);
    // XOR clears the implicit leading 1 that the shift placed at bit mantissa_bits.
    var result: u128 align(16) = (a << shift) ^ implicit_bit;
    result += (@intCast(u128, exp) + exponent_bias) << mantissa_bits;
    return @bitCast(f128, result);
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floatunditf" {
    _ = @import("floatunditf_test.zig");
}

View File

@ -0,0 +1,33 @@
const __floatunditf = @import("floatunditf.zig").__floatunditf;
const assert = @import("std").debug.assert;
// Check __floatunditf against an expected f128 bit pattern given as hi/lo
// 64-bit halves. When the expected pattern is the canonical qNaN, any NaN
// encoding (exponent all ones, nonzero mantissa) is accepted.
fn test__floatunditf(a: u128, expected_hi: u64, expected_lo: u64) void {
    const x = __floatunditf(a);
    const x_repr = @bitCast(u128, x);
    const x_hi = @intCast(u64, x_repr >> 64);
    const x_lo = @truncate(u64, x_repr);
    if (x_hi == expected_hi and x_lo == expected_lo) {
        return;
    }
    // nan repr
    else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
        if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
            return;
        }
    }
    @panic("__floatunditf test failure");
}
// Reference vectors for u64-range inputs: extremes, powers of two, and a
// mixed-digit pattern; expected values are the f128 hi/lo bit halves.
test "floatunditf" {
    test__floatunditf(0xffffffffffffffff, 0x403effffffffffff, 0xfffe000000000000);
    test__floatunditf(0xfffffffffffffffe, 0x403effffffffffff, 0xfffc000000000000);
    test__floatunditf(0x8000000000000000, 0x403e000000000000, 0x0);
    test__floatunditf(0x7fffffffffffffff, 0x403dffffffffffff, 0xfffc000000000000);
    test__floatunditf(0x123456789abcdef1, 0x403b23456789abcd, 0xef10000000000000);
    test__floatunditf(0x2, 0x4000000000000000, 0x0);
    test__floatunditf(0x1, 0x3fff000000000000, 0x0);
    test__floatunditf(0x0, 0x0, 0x0);
}

View File

@ -0,0 +1,29 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const std = @import("std");
/// Convert an unsigned integer to f128 by placing the bits of `a` in the
/// mantissa and encoding its magnitude in the exponent.
/// NOTE(review): despite the "si" (32-bit) name the parameter is u64 -- the
/// tests only exercise 32-bit-range values; confirm intended width.
pub extern fn __floatunsitf(a: u64) f128 {
    @setRuntimeSafety(is_test);
    if (a == 0) {
        return 0;
    }
    const mantissa_bits = std.math.floatMantissaBits(f128);
    const exponent_bits = std.math.floatExponentBits(f128);
    const exponent_bias = (1 << (exponent_bits - 1)) - 1;
    const implicit_bit = 1 << mantissa_bits;
    // Position of the highest set bit == unbiased exponent.
    const exp = (u64.bit_count - 1) - @clz(a);
    const shift = mantissa_bits - @intCast(u7, exp);
    // TODO: @bitCast alignment error
    // XOR clears the implicit leading 1 that the shift placed at bit mantissa_bits.
    var result align(16) = (@intCast(u128, a) << shift) ^ implicit_bit;
    result += (@intCast(u128, exp) + exponent_bias) << mantissa_bits;
    return @bitCast(f128, result);
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floatunsitf" {
    _ = @import("floatunsitf_test.zig");
}

View File

@ -0,0 +1,29 @@
const __floatunsitf = @import("floatunsitf.zig").__floatunsitf;
const assert = @import("std").debug.assert;
// Check __floatunsitf against an expected f128 bit pattern given as hi/lo
// 64-bit halves. When the expected pattern is the canonical qNaN, any NaN
// encoding (exponent all ones, nonzero mantissa) is accepted.
fn test__floatunsitf(a: u64, expected_hi: u64, expected_lo: u64) void {
    const x = __floatunsitf(a);
    const x_repr = @bitCast(u128, x);
    const x_hi = @intCast(u64, x_repr >> 64);
    const x_lo = @truncate(u64, x_repr);
    if (x_hi == expected_hi and x_lo == expected_lo) {
        return;
    }
    // nan repr
    else if (expected_hi == 0x7fff800000000000 and expected_lo == 0x0) {
        if ((x_hi & 0x7fff000000000000) == 0x7fff000000000000 and ((x_hi & 0xffffffffffff) > 0 or x_lo > 0)) {
            return;
        }
    }
    @panic("__floatunsitf test failure");
}
// Reference vectors for 32-bit-range inputs; expected values are the f128
// hi/lo bit halves.
test "floatunsitf" {
    test__floatunsitf(0x7fffffff, 0x401dfffffffc0000, 0x0);
    test__floatunsitf(0, 0x0, 0x0);
    test__floatunsitf(0xffffffff, 0x401efffffffe0000, 0x0);
    test__floatunsitf(0x12345678, 0x401b234567800000, 0x0);
}

View File

@ -0,0 +1,60 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const DBL_MANT_DIG = 53;
/// Convert an unsigned 128-bit integer to f64, rounding to nearest
/// (compiler-rt's round/sticky-bit scheme below). Port of __floatuntidf.
pub extern fn __floatuntidf(arg: u128) f64 {
    @setRuntimeSafety(is_test);
    if (arg == 0)
        return 0.0;
    var a = arg;
    const N: u32 = @sizeOf(u128) * 8;
    const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
    var e: i32 = sd - 1; // exponent
    if (sd > DBL_MANT_DIG) {
        // Too many significant bits: shift down to DBL_MANT_DIG+2 bits, folding
        // every discarded bit into a sticky bit R, then round.
        // start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
        // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
        //                                                12345678901234567890123456
        // 1 = msb 1 bit
        // P = bit DBL_MANT_DIG-1 bits to the right of 1
        // Q = bit DBL_MANT_DIG bits to the right of 1
        // R = "or" of all bits to the right of Q
        switch (sd) {
            DBL_MANT_DIG + 1 => {
                a <<= 1;
            },
            DBL_MANT_DIG + 2 => {},
            else => {
                const shift_amt = @bitCast(i32, N + (DBL_MANT_DIG + 2)) - sd;
                const shift_amt_u7 = @intCast(u7, shift_amt);
                a = (a >> @intCast(u7, sd - (DBL_MANT_DIG + 2))) |
                    @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
            },
        }
        // finish
        a |= @boolToInt((a & 4) != 0); // Or P into R
        a += 1; // round - this step may add a significant bit
        a >>= 2; // dump Q and R
        // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
        if ((a & (u128(1) << DBL_MANT_DIG)) != 0) {
            // Rounding carried out of the top bit; renormalize.
            a >>= 1;
            e += 1;
        }
        // a is now rounded to DBL_MANT_DIG bits
    } else {
        a <<= @intCast(u7, DBL_MANT_DIG - sd);
        // a is now rounded to DBL_MANT_DIG bits
    }
    // Assemble biased exponent and mantissa (no sign bit: input is unsigned).
    const high: u64 = @bitCast(u32, (e + 1023) << 20) | // exponent
        (@truncate(u32, a >> 32) & 0x000FFFFF); // mantissa-high
    const low = @truncate(u32, a); // mantissa-low
    return @bitCast(f64, low | (high << 32));
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floatuntidf" {
    _ = @import("floatuntidf_test.zig");
}

View File

@ -0,0 +1,81 @@
const __floatuntidf = @import("floatuntidf.zig").__floatuntidf;
const assert = @import("std").debug.assert;
fn test__floatuntidf(a: u128, expected: f64) void {
    // Convert and compare against the reference result in one step.
    assert(__floatuntidf(a) == expected);
}
// Reference vectors ported from compiler-rt's floatuntidf_test: zero, small
// values, rounding around the 53-bit mantissa boundary, and u128 extremes.
test "floatuntidf" {
    test__floatuntidf(0, 0.0);
    test__floatuntidf(1, 1.0);
    test__floatuntidf(2, 2.0);
    test__floatuntidf(20, 20.0);
    test__floatuntidf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
    test__floatuntidf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
    test__floatuntidf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
    test__floatuntidf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
    test__floatuntidf(make_ti(0x8000008000000000, 0), 0x1.000001p+127);
    test__floatuntidf(make_ti(0x8000000000000800, 0), 0x1.0000000000001p+127);
    test__floatuntidf(make_ti(0x8000010000000000, 0), 0x1.000002p+127);
    test__floatuntidf(make_ti(0x8000000000001000, 0), 0x1.0000000000002p+127);
    test__floatuntidf(make_ti(0x8000000000000000, 0), 0x1.000000p+127);
    test__floatuntidf(make_ti(0x8000000000000001, 0), 0x1.0000000000000002p+127);
    test__floatuntidf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
    test__floatuntidf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
    test__floatuntidf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
    test__floatuntidf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
    test__floatuntidf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
    test__floatuntidf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
    test__floatuntidf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
    test__floatuntidf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
    test__floatuntidf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
    test__floatuntidf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
    test__floatuntidf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
    test__floatuntidf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DA1, 0x1.1A3CFE870496Dp+57);
    test__floatuntidf(0x023479FD0E092DB0, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DB8, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DB6, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DBF, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DC1, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DC7, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DC8, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DCF, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DD0, 0x1.1A3CFE870496Ep+57);
    test__floatuntidf(0x023479FD0E092DD1, 0x1.1A3CFE870496Fp+57);
    test__floatuntidf(0x023479FD0E092DD8, 0x1.1A3CFE870496Fp+57);
    test__floatuntidf(0x023479FD0E092DDF, 0x1.1A3CFE870496Fp+57);
    test__floatuntidf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
    test__floatuntidf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496Dp+121);
    test__floatuntidf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496Ep+121);
    test__floatuntidf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496Fp+121);
    test__floatuntidf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496Fp+121);
    test__floatuntidf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496Fp+121);
    test__floatuntidf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
}
fn make_ti(high: u64, low: u64) u128 {
    // Pack two 64-bit halves into one unsigned 128-bit word.
    return (u128(high) << 64) | u128(low);
}

View File

@ -0,0 +1,59 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const FLT_MANT_DIG = 24;
/// Convert an unsigned 128-bit integer to f32, rounding to nearest
/// (compiler-rt's round/sticky-bit scheme below). Port of __floatuntisf.
pub extern fn __floatuntisf(arg: u128) f32 {
    @setRuntimeSafety(is_test);
    if (arg == 0)
        return 0.0;
    var a = arg;
    const N: u32 = @sizeOf(u128) * 8;
    const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
    var e: i32 = sd - 1; // exponent
    if (sd > FLT_MANT_DIG) {
        // Too many significant bits: shift down to FLT_MANT_DIG+2 bits, folding
        // every discarded bit into a sticky bit R, then round.
        // start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
        // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
        //                                                12345678901234567890123456
        // 1 = msb 1 bit
        // P = bit FLT_MANT_DIG-1 bits to the right of 1
        // Q = bit FLT_MANT_DIG bits to the right of 1
        // R = "or" of all bits to the right of Q
        switch (sd) {
            FLT_MANT_DIG + 1 => {
                a <<= 1;
            },
            FLT_MANT_DIG + 2 => {},
            else => {
                const shift_amt = @bitCast(i32, N + (FLT_MANT_DIG + 2)) - sd;
                const shift_amt_u7 = @intCast(u7, shift_amt);
                a = (a >> @intCast(u7, sd - (FLT_MANT_DIG + 2))) |
                    @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
            },
        }
        // finish
        a |= @boolToInt((a & 4) != 0); // Or P into R
        a += 1; // round - this step may add a significant bit
        a >>= 2; // dump Q and R
        // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
        if ((a & (u128(1) << FLT_MANT_DIG)) != 0) {
            // Rounding carried out of the top bit; renormalize.
            a >>= 1;
            e += 1;
        }
        // a is now rounded to FLT_MANT_DIG bits
    } else {
        a <<= @intCast(u7, FLT_MANT_DIG - sd);
        // a is now rounded to FLT_MANT_DIG bits
    }
    // Assemble biased exponent and mantissa (no sign bit: input is unsigned).
    const high = @bitCast(u32, (e + 127) << 23); // exponent
    const low = @truncate(u32, a) & 0x007fffff; // mantissa
    return @bitCast(f32, high | low);
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floatuntisf" {
    _ = @import("floatuntisf_test.zig");
}

View File

@ -0,0 +1,72 @@
const __floatuntisf = @import("floatuntisf.zig").__floatuntisf;
const assert = @import("std").debug.assert;
fn test__floatuntisf(a: u128, expected: f32) void {
    // Convert and compare against the reference result in one step.
    assert(__floatuntisf(a) == expected);
}
// Reference vectors ported from compiler-rt's floatuntisf_test: zero, small
// values, rounding around the 24-bit mantissa boundary, and u128-range inputs.
test "floatuntisf" {
    test__floatuntisf(0, 0.0);
    test__floatuntisf(1, 1.0);
    test__floatuntisf(2, 2.0);
    test__floatuntisf(20, 20.0);
    test__floatuntisf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
    test__floatuntisf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
    test__floatuntisf(make_ti(0x8000008000000000, 0), 0x1.000001p+127);
    test__floatuntisf(make_ti(0x8000000000000800, 0), 0x1.0p+127);
    test__floatuntisf(make_ti(0x8000010000000000, 0), 0x1.000002p+127);
    test__floatuntisf(make_ti(0x8000000000000000, 0), 0x1.000000p+127);
    test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
    test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
    test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
    test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
    test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
    test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
    test__floatuntisf(0xFFFFFFFFFFFFFFFE, 0x1p+64);
    test__floatuntisf(0xFFFFFFFFFFFFFFFF, 0x1p+64);
    test__floatuntisf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72EA000000, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72EB000000, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72EBFFFFFF, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72EC000000, 0x1.FEDCBCp+50);
    test__floatuntisf(0x0007FB72E8000001, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72E6000000, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72E7000000, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72E7FFFFFF, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72E4000001, 0x1.FEDCBAp+50);
    test__floatuntisf(0x0007FB72E4000000, 0x1.FEDCB8p+50);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCB90000000000001), 0x1.FEDCBAp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBA0000000000000), 0x1.FEDCBAp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBAFFFFFFFFFFFFF), 0x1.FEDCBAp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000000), 0x1.FEDCBCp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBB0000000000001), 0x1.FEDCBCp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBBFFFFFFFFFFFFF), 0x1.FEDCBCp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000000), 0x1.FEDCBCp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBC0000000000001), 0x1.FEDCBCp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000000), 0x1.FEDCBCp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBD0000000000001), 0x1.FEDCBEp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBDFFFFFFFFFFFFF), 0x1.FEDCBEp+76);
    test__floatuntisf(make_ti(0x0000000000001FED, 0xCBE0000000000000), 0x1.FEDCBEp+76);
}
fn make_ti(high: u64, low: u64) u128 {
    // Pack two 64-bit halves into one unsigned 128-bit word.
    return (u128(high) << 64) | u128(low);
}

View File

@ -0,0 +1,60 @@
const builtin = @import("builtin");
const is_test = builtin.is_test;
const LDBL_MANT_DIG = 113;
/// Convert an unsigned 128-bit integer to f128, rounding to nearest
/// (compiler-rt's round/sticky-bit scheme below). Port of __floatuntitf.
pub extern fn __floatuntitf(arg: u128) f128 {
    @setRuntimeSafety(is_test);
    if (arg == 0)
        return 0.0;
    var a = arg;
    const N: u32 = @sizeOf(u128) * 8;
    const sd = @bitCast(i32, N - @clz(a)); // number of significant digits
    var e: i32 = sd - 1; // exponent
    if (sd > LDBL_MANT_DIG) {
        // Too many significant bits: shift down to LDBL_MANT_DIG+2 bits, folding
        // every discarded bit into a sticky bit R, then round.
        // start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
        // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
        //                                                12345678901234567890123456
        // 1 = msb 1 bit
        // P = bit LDBL_MANT_DIG-1 bits to the right of 1
        // Q = bit LDBL_MANT_DIG bits to the right of 1
        // R = "or" of all bits to the right of Q
        switch (sd) {
            LDBL_MANT_DIG + 1 => {
                a <<= 1;
            },
            LDBL_MANT_DIG + 2 => {},
            else => {
                const shift_amt = @bitCast(i32, N + (LDBL_MANT_DIG + 2)) - sd;
                const shift_amt_u7 = @intCast(u7, shift_amt);
                a = (a >> @intCast(u7, sd - (LDBL_MANT_DIG + 2))) |
                    @boolToInt((a & (u128(@maxValue(u128)) >> shift_amt_u7)) != 0);
            },
        }
        // finish
        a |= @boolToInt((a & 4) != 0); // Or P into R
        a += 1; // round - this step may add a significant bit
        a >>= 2; // dump Q and R
        // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
        if ((a & (u128(1) << LDBL_MANT_DIG)) != 0) {
            // Rounding carried out of the top bit; renormalize.
            a >>= 1;
            e += 1;
        }
        // a is now rounded to LDBL_MANT_DIG bits
    } else {
        a <<= @intCast(u7, LDBL_MANT_DIG - sd);
        // a is now rounded to LDBL_MANT_DIG bits
    }
    // Assemble biased exponent and mantissa (no sign bit: input is unsigned).
    const high: u128 = (@intCast(u64, (e + 16383)) << 48) | // exponent
        (@truncate(u64, a >> 64) & 0x0000ffffffffffff); // mantissa-high
    const low = @truncate(u64, a); // mantissa-low
    return @bitCast(f128, low | (high << 64));
}
// Reference the test file so `zig test` on this file also runs its cases.
test "import floatuntitf" {
    _ = @import("floatuntitf_test.zig");
}

View File

@ -0,0 +1,99 @@
const __floatuntitf = @import("floatuntitf.zig").__floatuntitf;
const assert = @import("std").debug.assert;
fn test__floatuntitf(a: u128, expected: f128) void {
    // Convert and compare against the reference result in one step.
    assert(__floatuntitf(a) == expected);
}
// Conversion cases ported from LLVM compiler-rt's floatuntitf_test.c:
// exact small values, 64-bit values at rounding boundaries, and full
// 128-bit values (via make_ti) exercising round-to-nearest-even.
test "floatuntitf" {
    // Exact conversions.
    test__floatuntitf(0, 0.0);
    test__floatuntitf(1, 1.0);
    test__floatuntitf(2, 2.0);
    test__floatuntitf(20, 20.0);
    // 64-bit inputs; all fit in the 113-bit mantissa, so results are exact.
    test__floatuntitf(0x7FFFFF8000000000, 0x1.FFFFFEp+62);
    test__floatuntitf(0x7FFFFFFFFFFFF800, 0x1.FFFFFFFFFFFFEp+62);
    test__floatuntitf(0x7FFFFF0000000000, 0x1.FFFFFCp+62);
    test__floatuntitf(0x7FFFFFFFFFFFF000, 0x1.FFFFFFFFFFFFCp+62);
    test__floatuntitf(0x7FFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFEp+59);
    test__floatuntitf(0xFFFFFFFFFFFFFFFE, 0xF.FFFFFFFFFFFFFFEp+60);
    test__floatuntitf(0xFFFFFFFFFFFFFFFF, 0xF.FFFFFFFFFFFFFFFp+60);
    test__floatuntitf(0x8000008000000000, 0x8.000008p+60);
    test__floatuntitf(0x8000000000000800, 0x8.0000000000008p+60);
    test__floatuntitf(0x8000010000000000, 0x8.00001p+60);
    test__floatuntitf(0x8000000000001000, 0x8.000000000001p+60);
    test__floatuntitf(0x8000000000000000, 0x8p+60);
    test__floatuntitf(0x8000000000000001, 0x8.000000000000001p+60);
    test__floatuntitf(0x0007FB72E8000000, 0x1.FEDCBAp+50);
    test__floatuntitf(0x0007FB72EA000000, 0x1.FEDCBA8p+50);
    test__floatuntitf(0x0007FB72EB000000, 0x1.FEDCBACp+50);
    test__floatuntitf(0x0007FB72EBFFFFFF, 0x1.FEDCBAFFFFFFCp+50);
    test__floatuntitf(0x0007FB72EC000000, 0x1.FEDCBBp+50);
    test__floatuntitf(0x0007FB72E8000001, 0x1.FEDCBA0000004p+50);
    test__floatuntitf(0x0007FB72E6000000, 0x1.FEDCB98p+50);
    test__floatuntitf(0x0007FB72E7000000, 0x1.FEDCB9Cp+50);
    test__floatuntitf(0x0007FB72E7FFFFFF, 0x1.FEDCB9FFFFFFCp+50);
    test__floatuntitf(0x0007FB72E4000001, 0x1.FEDCB90000004p+50);
    test__floatuntitf(0x0007FB72E4000000, 0x1.FEDCB9p+50);
    test__floatuntitf(0x023479FD0E092DC0, 0x1.1A3CFE870496Ep+57);
    test__floatuntitf(0x023479FD0E092DA1, 0x1.1A3CFE870496D08p+57);
    test__floatuntitf(0x023479FD0E092DB0, 0x1.1A3CFE870496D8p+57);
    test__floatuntitf(0x023479FD0E092DB8, 0x1.1A3CFE870496DCp+57);
    test__floatuntitf(0x023479FD0E092DB6, 0x1.1A3CFE870496DBp+57);
    test__floatuntitf(0x023479FD0E092DBF, 0x1.1A3CFE870496DF8p+57);
    test__floatuntitf(0x023479FD0E092DC1, 0x1.1A3CFE870496E08p+57);
    test__floatuntitf(0x023479FD0E092DC7, 0x1.1A3CFE870496E38p+57);
    test__floatuntitf(0x023479FD0E092DC8, 0x1.1A3CFE870496E4p+57);
    test__floatuntitf(0x023479FD0E092DCF, 0x1.1A3CFE870496E78p+57);
    test__floatuntitf(0x023479FD0E092DD0, 0x1.1A3CFE870496E8p+57);
    test__floatuntitf(0x023479FD0E092DD1, 0x1.1A3CFE870496E88p+57);
    test__floatuntitf(0x023479FD0E092DD8, 0x1.1A3CFE870496ECp+57);
    test__floatuntitf(0x023479FD0E092DDF, 0x1.1A3CFE870496EF8p+57);
    test__floatuntitf(0x023479FD0E092DE0, 0x1.1A3CFE870496Fp+57);
    // Full 128-bit inputs: the same mantissa pattern shifted up 64 bits,
    // with nonzero low words that land in the discarded/sticky bits.
    test__floatuntitf(make_ti(0x023479FD0E092DC0, 0), 0x1.1A3CFE870496Ep+121);
    test__floatuntitf(make_ti(0x023479FD0E092DA1, 1), 0x1.1A3CFE870496D08p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DB0, 2), 0x1.1A3CFE870496D8p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DB8, 3), 0x1.1A3CFE870496DCp+121);
    test__floatuntitf(make_ti(0x023479FD0E092DB6, 4), 0x1.1A3CFE870496DBp+121);
    test__floatuntitf(make_ti(0x023479FD0E092DBF, 5), 0x1.1A3CFE870496DF8p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DC1, 6), 0x1.1A3CFE870496E08p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DC7, 7), 0x1.1A3CFE870496E38p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DC8, 8), 0x1.1A3CFE870496E4p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DCF, 9), 0x1.1A3CFE870496E78p+121);
    // NOTE(review): upstream LLVM's test passes 10 as the low word here —
    // confirm the 0 is intentional (the expected value is unaffected).
    test__floatuntitf(make_ti(0x023479FD0E092DD0, 0), 0x1.1A3CFE870496E8p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DD1, 11), 0x1.1A3CFE870496E88p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DD8, 12), 0x1.1A3CFE870496ECp+121);
    test__floatuntitf(make_ti(0x023479FD0E092DDF, 13), 0x1.1A3CFE870496EF8p+121);
    test__floatuntitf(make_ti(0x023479FD0E092DE0, 14), 0x1.1A3CFE870496Fp+121);
    // Boundary and rounding cases that exercise the sticky/guard logic.
    test__floatuntitf(make_ti(0, 0xFFFFFFFFFFFFFFFF), 0x1.FFFFFFFFFFFFFFFEp+63);
    test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0x0000000000000000), 0x1.FFFFFFFFFFFFFFFEp+127);
    test__floatuntitf(make_ti(0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF), 0x1.0000000000000000p+128);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC2801), 0x1.23456789ABCDEF0123456789ABC3p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3000), 0x1.23456789ABCDEF0123456789ABC3p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC37FF), 0x1.23456789ABCDEF0123456789ABC3p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC3800), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4000), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC47FF), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4800), 0x1.23456789ABCDEF0123456789ABC4p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC4801), 0x1.23456789ABCDEF0123456789ABC5p+124);
    test__floatuntitf(make_ti(0x123456789ABCDEF0, 0x123456789ABC57FF), 0x1.23456789ABCDEF0123456789ABC5p+124);
}
/// Assemble a u128 from its high and low 64-bit halves.
fn make_ti(high: u64, low: u64) u128 {
    const hi: u128 = high;
    return (hi << 64) | low;
}

View File

@ -15,10 +15,31 @@ comptime {
@export("__lttf2", @import("comparetf2.zig").__letf2, linkage);
@export("__netf2", @import("comparetf2.zig").__letf2, linkage);
@export("__gttf2", @import("comparetf2.zig").__getf2, linkage);
@export("__gnu_h2f_ieee", @import("extendXfYf2.zig").__extendhfsf2, linkage);
@export("__gnu_f2h_ieee", @import("truncXfYf2.zig").__truncsfhf2, linkage);
}
@export("__unordtf2", @import("comparetf2.zig").__unordtf2, linkage);
@export("__floattitf", @import("floattitf.zig").__floattitf, linkage);
@export("__floattidf", @import("floattidf.zig").__floattidf, linkage);
@export("__floattisf", @import("floattisf.zig").__floattisf, linkage);
@export("__floatunditf", @import("floatunditf.zig").__floatunditf, linkage);
@export("__floatunsitf", @import("floatunsitf.zig").__floatunsitf, linkage);
@export("__floatuntitf", @import("floatuntitf.zig").__floatuntitf, linkage);
@export("__floatuntidf", @import("floatuntidf.zig").__floatuntidf, linkage);
@export("__floatuntisf", @import("floatuntisf.zig").__floatuntisf, linkage);
@export("__extenddftf2", @import("extendXfYf2.zig").__extenddftf2, linkage);
@export("__extendsftf2", @import("extendXfYf2.zig").__extendsftf2, linkage);
@export("__extendhfsf2", @import("extendXfYf2.zig").__extendhfsf2, linkage);
@export("__truncsfhf2", @import("truncXfYf2.zig").__truncsfhf2, linkage);
@export("__trunctfdf2", @import("truncXfYf2.zig").__trunctfdf2, linkage);
@export("__trunctfsf2", @import("truncXfYf2.zig").__trunctfsf2, linkage);
@export("__fixunssfsi", @import("fixunssfsi.zig").__fixunssfsi, linkage);
@export("__fixunssfdi", @import("fixunssfdi.zig").__fixunssfdi, linkage);
@export("__fixunssfti", @import("fixunssfti.zig").__fixunssfti, linkage);

View File

@ -0,0 +1,117 @@
const std = @import("std");
/// Truncate a single-precision float to half precision, returning the
/// raw 16-bit representation of the resulting f16.
pub extern fn __truncsfhf2(a: f32) u16 {
    const narrowed = truncXfYf2(f16, f32, a);
    return @bitCast(u16, narrowed);
}
/// Truncate a quad-precision (f128) value to single precision.
pub extern fn __trunctfsf2(a: f128) f32 {
    const narrowed = truncXfYf2(f32, f128, a);
    return narrowed;
}
/// Truncate a quad-precision (f128) value to double precision.
pub extern fn __trunctfdf2(a: f128) f64 {
    const narrowed = truncXfYf2(f64, f128, a);
    return narrowed;
}
/// Generic IEEE-754 narrowing conversion (round-to-nearest, ties to even),
/// following compiler-rt's fp_trunc algorithm. `src_t` must be a wider
/// float type than `dst_t`; the value `a` is narrowed from src to dst.
inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
    // Unsigned integer types of the same bit width as the float types,
    // used to manipulate the raw bit representations.
    const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
    const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
    const srcSigBits = std.math.floatMantissaBits(src_t);
    const dstSigBits = std.math.floatMantissaBits(dst_t);
    const SrcShift = std.math.Log2Int(src_rep_t);
    // NOTE(review): DstShift appears unused in this function.
    const DstShift = std.math.Log2Int(dst_rep_t);
    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const srcBits = src_t.bit_count;
    const srcExpBits = srcBits - srcSigBits - 1;
    const srcInfExp = (1 << srcExpBits) - 1;
    const srcExpBias = srcInfExp >> 1;
    const srcMinNormal = 1 << srcSigBits; // implicit leading-1 bit position
    const srcSignificandMask = srcMinNormal - 1;
    const srcInfinity = srcInfExp << srcSigBits;
    const srcSignMask = 1 << (srcSigBits + srcExpBits);
    const srcAbsMask = srcSignMask - 1;
    const roundMask = (1 << (srcSigBits - dstSigBits)) - 1; // bits dropped by narrowing
    const halfway = 1 << (srcSigBits - dstSigBits - 1); // exact tie point
    const srcQNaN = 1 << (srcSigBits - 1);
    const srcNaNCode = srcQNaN - 1;
    const dstBits = dst_t.bit_count;
    const dstExpBits = dstBits - dstSigBits - 1;
    const dstInfExp = (1 << dstExpBits) - 1;
    const dstExpBias = dstInfExp >> 1;
    const underflowExponent = srcExpBias + 1 - dstExpBias;
    const overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const underflow = underflowExponent << srcSigBits;
    const overflow = overflowExponent << srcSigBits;
    const dstQNaN = 1 << (dstSigBits - 1);
    const dstNaNCode = dstQNaN - 1;
    // Break a into a sign and representation of the absolute value
    const aRep: src_rep_t = @bitCast(src_rep_t, a);
    const aAbs: src_rep_t = aRep & srcAbsMask;
    const sign: src_rep_t = aRep & srcSignMask;
    var absResult: dst_rep_t = undefined;
    // Wrapping subtraction makes this a single unsigned range check for
    // underflow <= aAbs < overflow (the destination's normal range).
    if (aAbs -% underflow < aAbs -% overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format. We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits));
        // Rebias the exponent; wrapping subtraction keeps the arithmetic
        // defined even though the intermediate may conceptually go negative.
        absResult -%= dst_rep_t(srcExpBias - dstExpBias) << dstSigBits;
        const roundBits: src_rep_t = aAbs & roundMask;
        if (roundBits > halfway) {
            // Round to nearest
            absResult += 1;
        } else if (roundBits == halfway) {
            // Ties to even
            absResult += absResult & 1;
        }
    } else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
        absResult |= dstQNaN;
        absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode);
    } else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits;
    } else {
        // a underflows on conversion to the destination type or is an exact
        // zero. The result may be a denormal or zero. Extract the exponent
        // to get the shift amount for the denormalization.
        const aExp = @intCast(u32, aAbs >> srcSigBits);
        const shift = @intCast(u32, srcExpBias - dstExpBias - aExp + 1);
        const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal;
        // Right shift by the denormalization amount with sticky.
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            // `sticky` is nonzero iff any bit about to be shifted out is set;
            // OR-ing it in preserves the "not exactly halfway" information.
            const sticky: src_rep_t = significand << @intCast(SrcShift, srcBits - shift);
            const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky;
            absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits));
            const roundBits: src_rep_t = denormalizedSignificand & roundMask;
            if (roundBits > halfway) {
                // Round to nearest
                absResult += 1;
            } else if (roundBits == halfway) {
                // Ties to even
                absResult += absResult & 1;
            }
        }
    }
    // Apply the sign bit (shifted down to the destination width) and
    // reinterpret the bits as the destination float type.
    const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits));
    return @bitCast(dst_t, result);
}
// Referencing the test file here ensures its tests get compiled and run
// along with this module.
test "import truncXfYf2" {
    _ = @import("truncXfYf2_test.zig");
}

View File

@ -0,0 +1,134 @@
const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
// Reinterpret `a` as an f32, truncate it to f16 bits, and panic if the
// result does not match `expected` exactly.
fn test__truncsfhf2(a: u32, expected: u16) void {
    const result = __truncsfhf2(@bitCast(f32, a));
    if (result != expected) {
        @panic("__truncsfhf2 test failure");
    }
}
// Edge-case coverage for f32 -> f16 truncation: NaNs, signed zeros,
// infinities, overflow to infinity, normal-range bounds, rounding, and
// denormals. Inputs and expected values are raw bit patterns (u32 -> u16).
test "truncsfhf2" {
    test__truncsfhf2(0x7fc00000, 0x7e00); // qNaN
    test__truncsfhf2(0x7fe00000, 0x7f00); // sNaN
    test__truncsfhf2(0, 0); // 0
    test__truncsfhf2(0x80000000, 0x8000); // -0
    test__truncsfhf2(0x7f800000, 0x7c00); // inf
    test__truncsfhf2(0xff800000, 0xfc00); // -inf
    test__truncsfhf2(0x477ff000, 0x7c00); // 65520 -> inf
    test__truncsfhf2(0xc77ff000, 0xfc00); // -65520 -> -inf
    test__truncsfhf2(0x71cc3892, 0x7c00); // 0x1.987124876876324p+100 -> inf
    test__truncsfhf2(0xf1cc3892, 0xfc00); // -0x1.987124876876324p+100 -> -inf
    test__truncsfhf2(0x38800000, 0x0400); // normal (min), 2**-14
    test__truncsfhf2(0xb8800000, 0x8400); // normal (min), -2**-14
    test__truncsfhf2(0x477fe000, 0x7bff); // normal (max), 65504
    test__truncsfhf2(0xc77fe000, 0xfbff); // normal (max), -65504
    test__truncsfhf2(0x477fe100, 0x7bff); // normal, 65505 -> 65504
    test__truncsfhf2(0xc77fe100, 0xfbff); // normal, -65505 -> -65504
    test__truncsfhf2(0x477fef00, 0x7bff); // normal, 65519 -> 65504
    test__truncsfhf2(0xc77fef00, 0xfbff); // normal, -65519 -> -65504
    test__truncsfhf2(0x3f802000, 0x3c01); // normal, 1 + 2**-10
    test__truncsfhf2(0xbf802000, 0xbc01); // normal, -1 - 2**-10
    test__truncsfhf2(0x3eaaa000, 0x3555); // normal, approx. 1/3
    test__truncsfhf2(0xbeaaa000, 0xb555); // normal, approx. -1/3
    test__truncsfhf2(0x40490fdb, 0x4248); // normal, 3.1415926535
    test__truncsfhf2(0xc0490fdb, 0xc248); // normal, -3.1415926535
    test__truncsfhf2(0x45cc3892, 0x6e62); // normal, 0x1.987124876876324p+12
    test__truncsfhf2(0x3f800000, 0x3c00); // normal, 1
    test__truncsfhf2(0x38800000, 0x0400); // normal, 0x1.0p-14
    test__truncsfhf2(0x33800000, 0x0001); // denormal (min), 2**-24
    test__truncsfhf2(0xb3800000, 0x8001); // denormal (min), -2**-24
    test__truncsfhf2(0x387fc000, 0x03ff); // denormal (max), 2**-14 - 2**-24
    test__truncsfhf2(0xb87fc000, 0x83ff); // denormal (max), -2**-14 + 2**-24
    test__truncsfhf2(0x35800000, 0x0010); // denormal, 0x1.0p-20
    test__truncsfhf2(0x33280000, 0x0001); // denormal, 0x1.5p-25 -> 0x1.0p-24
    test__truncsfhf2(0x33000000, 0x0000); // 0x1.0p-25 -> zero
}
const __trunctfsf2 = @import("truncXfYf2.zig").__trunctfsf2;
// Truncate `a` to f32 and compare bit patterns. When the expected value
// is the canonical quiet NaN, any f32 NaN encoding (e.g. a signaling NaN
// produced by the truncation) is also accepted.
fn test__trunctfsf2(a: f128, expected: u32) void {
    const rep = @bitCast(u32, __trunctfsf2(a));
    if (rep == expected) return;
    // test other possible NaN representation(signal NaN)
    if (expected == 0x7fc00000 and
        (rep & 0x7f800000) == 0x7f800000 and (rep & 0x7fffff) > 0)
    {
        return;
    }
    @panic("__trunctfsf2 test failure");
}
// f128 -> f32 truncation cases: NaN payloads, infinity, zero, ordinary
// roundings, plus overflow-to-infinity and underflow-to-zero.
test "trunctfsf2" {
    // qnan
    test__trunctfsf2(@bitCast(f128, u128(0x7fff800000000000 << 64)), 0x7fc00000);
    // nan
    test__trunctfsf2(@bitCast(f128, u128((0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7fc08000);
    // inf
    test__trunctfsf2(@bitCast(f128, u128(0x7fff000000000000 << 64)), 0x7f800000);
    // zero
    test__trunctfsf2(0.0, 0x0);
    test__trunctfsf2(0x1.23a2abb4a2ddee355f36789abcdep+5, 0x4211d156);
    test__trunctfsf2(0x1.e3d3c45bd3abfd98b76a54cc321fp-9, 0x3b71e9e2);
    // exponent far above f32 range -> infinity (0x7f800000)
    test__trunctfsf2(0x1.234eebb5faa678f4488693abcdefp+4534, 0x7f800000);
    // exponent far below f32 range -> zero
    test__trunctfsf2(0x1.edcba9bb8c76a5a43dd21f334634p-435, 0x0);
}
const __trunctfdf2 = @import("truncXfYf2.zig").__trunctfdf2;
// Truncate `a` to f64 and compare bit patterns. When the expected value
// is the canonical quiet NaN, any f64 NaN encoding (e.g. a signaling NaN
// produced by the truncation) is also accepted.
fn test__trunctfdf2(a: f128, expected: u64) void {
    const x = __trunctfdf2(a);
    const rep = @bitCast(u64, x);
    if (rep == expected) {
        return;
    }
    // test other possible NaN representation(signal NaN)
    else if (expected == 0x7ff8000000000000) {
        if ((rep & 0x7ff0000000000000) == 0x7ff0000000000000 and (rep & 0xfffffffffffff) > 0) {
            return;
        }
    }
    // Fixed: the message previously said "__trunctfsf2 test failure"
    // (copy-pasted from the f32 helper), which misattributed failures.
    @panic("__trunctfdf2 test failure");
}
// f128 -> f64 truncation cases: NaN payloads, infinity, zero, ordinary
// roundings, plus overflow-to-infinity and a subnormal-range result.
test "trunctfdf2" {
    // qnan
    test__trunctfdf2(@bitCast(f128, u128(0x7fff800000000000 << 64)), 0x7ff8000000000000);
    // nan
    test__trunctfdf2(@bitCast(f128, u128((0x7fff000000000000 | (0x810000000000 & 0xffffffffffff)) << 64)), 0x7ff8100000000000);
    // inf
    test__trunctfdf2(@bitCast(f128, u128(0x7fff000000000000 << 64)), 0x7ff0000000000000);
    // zero
    test__trunctfdf2(0.0, 0x0);
    test__trunctfdf2(0x1.af23456789bbaaab347645365cdep+5, 0x404af23456789bbb);
    test__trunctfdf2(0x1.dedafcff354b6ae9758763545432p-9, 0x3f6dedafcff354b7);
    // exponent far above f64 range -> infinity (0x7ff0000000000000)
    test__trunctfdf2(0x1.2f34dd5f437e849b4baab754cdefp+4534, 0x7ff0000000000000);
    test__trunctfdf2(0x1.edcbff8ad76ab5bf46463233214fp-435, 0x24cedcbff8ad76ab);
}

View File

@ -858,6 +858,7 @@ pub const Node = struct {
pub fn firstToken(self: *FnProto) TokenIndex {
if (self.visib_token) |visib_token| return visib_token;
if (self.async_attr) |async_attr| return async_attr.firstToken();
if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token;
assert(self.lib_name == null);
if (self.cc_token) |cc_token| return cc_token;

View File

@ -17,7 +17,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
defer stack.deinit();
const arena = &tree_arena.allocator;
const root_node = try arena.construct(ast.Node.Root{
const root_node = try arena.create(ast.Node.Root{
.base = ast.Node{ .id = ast.Node.Id.Root },
.decls = ast.Node.Root.DeclList.init(arena),
.doc_comments = null,
@ -65,14 +65,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
Token.Id.Keyword_test => {
stack.append(State.TopLevel) catch unreachable;
const block = try arena.construct(ast.Node.Block{
const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = undefined,
.statements = ast.Node.Block.StatementList.init(arena),
.rbrace = undefined,
});
const test_node = try arena.construct(ast.Node.TestDecl{
const test_node = try arena.create(ast.Node.TestDecl{
.base = ast.Node{ .id = ast.Node.Id.TestDecl },
.doc_comments = comments,
.test_token = token_index,
@ -109,14 +109,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_comptime => {
const block = try arena.construct(ast.Node.Block{
const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = undefined,
.statements = ast.Node.Block.StatementList.init(arena),
.rbrace = undefined,
});
const node = try arena.construct(ast.Node.Comptime{
const node = try arena.create(ast.Node.Comptime{
.base = ast.Node{ .id = ast.Node.Id.Comptime },
.comptime_token = token_index,
.expr = &block.base,
@ -225,7 +225,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
return tree;
}
const node = try arena.construct(ast.Node.Use{
const node = try arena.create(ast.Node.Use{
.base = ast.Node{ .id = ast.Node.Id.Use },
.use_token = token_index,
.visib_token = ctx.visib_token,
@ -266,7 +266,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_fn, Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc, Token.Id.Keyword_async => {
const fn_proto = try arena.construct(ast.Node.FnProto{
const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@ -298,7 +298,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_async => {
const async_node = try arena.construct(ast.Node.AsyncAttribute{
const async_node = try arena.create(ast.Node.AsyncAttribute{
.base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
.async_token = token_index,
.allocator_type = null,
@ -330,7 +330,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.TopLevelExternOrField => |ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Identifier)) |identifier| {
const node = try arena.construct(ast.Node.StructField{
const node = try arena.create(ast.Node.StructField{
.base = ast.Node{ .id = ast.Node.Id.StructField },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@ -375,7 +375,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
const token_ptr = token.ptr;
const node = try arena.construct(ast.Node.ContainerDecl{
const node = try arena.create(ast.Node.ContainerDecl{
.base = ast.Node{ .id = ast.Node.Id.ContainerDecl },
.layout_token = ctx.layout_token,
.kind_token = switch (token_ptr.id) {
@ -448,7 +448,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
Token.Id.Identifier => {
switch (tree.tokens.at(container_decl.kind_token).id) {
Token.Id.Keyword_struct => {
const node = try arena.construct(ast.Node.StructField{
const node = try arena.create(ast.Node.StructField{
.base = ast.Node{ .id = ast.Node.Id.StructField },
.doc_comments = comments,
.visib_token = null,
@ -464,7 +464,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_union => {
const node = try arena.construct(ast.Node.UnionTag{
const node = try arena.create(ast.Node.UnionTag{
.base = ast.Node{ .id = ast.Node.Id.UnionTag },
.name_token = token_index,
.type_expr = null,
@ -480,7 +480,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_enum => {
const node = try arena.construct(ast.Node.EnumTag{
const node = try arena.create(ast.Node.EnumTag{
.base = ast.Node{ .id = ast.Node.Id.EnumTag },
.name_token = token_index,
.value = null,
@ -562,7 +562,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.VarDecl => |ctx| {
const var_decl = try arena.construct(ast.Node.VarDecl{
const var_decl = try arena.create(ast.Node.VarDecl{
.base = ast.Node{ .id = ast.Node.Id.VarDecl },
.doc_comments = ctx.comments,
.visib_token = ctx.visib_token,
@ -660,7 +660,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LBrace => {
const block = try arena.construct(ast.Node.Block{
const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@ -712,7 +712,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// TODO: this is a special case. Remove this when #760 is fixed
if (token_ptr.id == Token.Id.Keyword_error) {
if (tok_it.peek().?.id == Token.Id.LBrace) {
const error_type_node = try arena.construct(ast.Node.ErrorType{
const error_type_node = try arena.create(ast.Node.ErrorType{
.base = ast.Node{ .id = ast.Node.Id.ErrorType },
.token = token_index,
});
@ -733,7 +733,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
if (eatToken(&tok_it, &tree, Token.Id.RParen)) |_| {
continue;
}
const param_decl = try arena.construct(ast.Node.ParamDecl{
const param_decl = try arena.create(ast.Node.ParamDecl{
.base = ast.Node{ .id = ast.Node.Id.ParamDecl },
.comptime_token = null,
.noalias_token = null,
@ -819,7 +819,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LBrace => {
const block = try arena.construct(ast.Node.Block{
const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = ctx.label,
.lbrace = token_index,
@ -853,7 +853,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_suspend => {
const node = try arena.construct(ast.Node.Suspend{
const node = try arena.create(ast.Node.Suspend{
.base = ast.Node{ .id = ast.Node.Id.Suspend },
.label = ctx.label,
.suspend_token = token_index,
@ -925,7 +925,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
}
},
State.While => |ctx| {
const node = try arena.construct(ast.Node.While{
const node = try arena.create(ast.Node.While{
.base = ast.Node{ .id = ast.Node.Id.While },
.label = ctx.label,
.inline_token = ctx.inline_token,
@ -954,7 +954,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
State.For => |ctx| {
const node = try arena.construct(ast.Node.For{
const node = try arena.create(ast.Node.For{
.base = ast.Node{ .id = ast.Node.Id.For },
.label = ctx.label,
.inline_token = ctx.inline_token,
@ -975,7 +975,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.Else => |dest| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_else)) |else_token| {
const node = try arena.construct(ast.Node.Else{
const node = try arena.create(ast.Node.Else{
.base = ast.Node{ .id = ast.Node.Id.Else },
.else_token = else_token,
.payload = null,
@ -1038,7 +1038,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_defer, Token.Id.Keyword_errdefer => {
const node = try arena.construct(ast.Node.Defer{
const node = try arena.create(ast.Node.Defer{
.base = ast.Node{ .id = ast.Node.Id.Defer },
.defer_token = token_index,
.kind = switch (token_ptr.id) {
@ -1056,7 +1056,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBrace => {
const inner_block = try arena.construct(ast.Node.Block{
const inner_block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@ -1124,7 +1124,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.AsmOutput{
const node = try arena.create(ast.Node.AsmOutput{
.base = ast.Node{ .id = ast.Node.Id.AsmOutput },
.lbracket = lbracket_index,
.symbolic_name = undefined,
@ -1178,7 +1178,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.AsmInput{
const node = try arena.create(ast.Node.AsmInput{
.base = ast.Node{ .id = ast.Node.Id.AsmInput },
.lbracket = lbracket_index,
.symbolic_name = undefined,
@ -1243,7 +1243,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.FieldInitializer{
const node = try arena.create(ast.Node.FieldInitializer{
.base = ast.Node{ .id = ast.Node.Id.FieldInitializer },
.period_token = undefined,
.name_token = undefined,
@ -1332,7 +1332,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
}
const comments = try eatDocComments(arena, &tok_it, &tree);
const node = try arena.construct(ast.Node.SwitchCase{
const node = try arena.create(ast.Node.SwitchCase{
.base = ast.Node{ .id = ast.Node.Id.SwitchCase },
.items = ast.Node.SwitchCase.ItemList.init(arena),
.payload = null,
@ -1369,7 +1369,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (token_ptr.id == Token.Id.Keyword_else) {
const else_node = try arena.construct(ast.Node.SwitchElse{
const else_node = try arena.create(ast.Node.SwitchElse{
.base = ast.Node{ .id = ast.Node.Id.SwitchElse },
.token = token_index,
});
@ -1468,7 +1468,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
State.ExternType => |ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_fn)) |fn_token| {
const fn_proto = try arena.construct(ast.Node.FnProto{
const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = ctx.comments,
.visib_token = null,
@ -1641,7 +1641,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.Payload{
const node = try arena.create(ast.Node.Payload{
.base = ast.Node{ .id = ast.Node.Id.Payload },
.lpipe = token_index,
.error_symbol = undefined,
@ -1677,7 +1677,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.PointerPayload{
const node = try arena.create(ast.Node.PointerPayload{
.base = ast.Node{ .id = ast.Node.Id.PointerPayload },
.lpipe = token_index,
.ptr_token = null,
@ -1720,7 +1720,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.PointerIndexPayload{
const node = try arena.create(ast.Node.PointerIndexPayload{
.base = ast.Node{ .id = ast.Node.Id.PointerIndexPayload },
.lpipe = token_index,
.ptr_token = null,
@ -1754,7 +1754,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.Keyword_return, Token.Id.Keyword_break, Token.Id.Keyword_continue => {
const node = try arena.construct(ast.Node.ControlFlowExpression{
const node = try arena.create(ast.Node.ControlFlowExpression{
.base = ast.Node{ .id = ast.Node.Id.ControlFlowExpression },
.ltoken = token_index,
.kind = undefined,
@ -1783,7 +1783,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_try, Token.Id.Keyword_cancel, Token.Id.Keyword_resume => {
const node = try arena.construct(ast.Node.PrefixOp{
const node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = switch (token_ptr.id) {
@ -1817,7 +1817,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = ellipsis3,
@ -1842,7 +1842,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToAssignment(token_ptr.id)) |ass_id| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@ -1872,7 +1872,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToUnwrapExpr(token_ptr.id)) |unwrap_id| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@ -1904,7 +1904,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = or_token,
@ -1928,7 +1928,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = and_token,
@ -1955,7 +1955,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToComparison(token_ptr.id)) |comp_id| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@ -1982,7 +1982,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = pipe,
@ -2006,7 +2006,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = caret,
@ -2030,7 +2030,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = ampersand,
@ -2057,7 +2057,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToBitShift(token_ptr.id)) |bitshift_id| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@ -2087,7 +2087,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToAddition(token_ptr.id)) |add_id| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@ -2117,7 +2117,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToMultiply(token_ptr.id)) |mult_id| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@ -2145,7 +2145,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (tok_it.peek().?.id == Token.Id.Period) {
const node = try arena.construct(ast.Node.SuffixOp{
const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .StructInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
@ -2164,7 +2164,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.SuffixOp{
const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .ArrayInitializer = ast.Node.SuffixOp.Op.InitList.init(arena) },
@ -2193,7 +2193,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| {
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = bang,
@ -2212,7 +2212,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_index = token.index;
const token_ptr = token.ptr;
if (tokenIdToPrefixOp(token_ptr.id)) |prefix_id| {
var node = try arena.construct(ast.Node.PrefixOp{
var node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = prefix_id,
@ -2222,7 +2222,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// Treat '**' token as two pointer types
if (token_ptr.id == Token.Id.AsteriskAsterisk) {
const child = try arena.construct(ast.Node.PrefixOp{
const child = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token_index,
.op = prefix_id,
@ -2246,7 +2246,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
State.SuffixOpExpressionBegin => |opt_ctx| {
if (eatToken(&tok_it, &tree, Token.Id.Keyword_async)) |async_token| {
const async_node = try arena.construct(ast.Node.AsyncAttribute{
const async_node = try arena.create(ast.Node.AsyncAttribute{
.base = ast.Node{ .id = ast.Node.Id.AsyncAttribute },
.async_token = async_token,
.allocator_type = null,
@ -2277,7 +2277,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token_ptr = token.ptr;
switch (token_ptr.id) {
Token.Id.LParen => {
const node = try arena.construct(ast.Node.SuffixOp{
const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{
@ -2301,7 +2301,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBracket => {
const node = try arena.construct(ast.Node.SuffixOp{
const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op{ .ArrayAccess = undefined },
@ -2316,7 +2316,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
Token.Id.Period => {
if (eatToken(&tok_it, &tree, Token.Id.Asterisk)) |asterisk_token| {
const node = try arena.construct(ast.Node.SuffixOp{
const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op.Deref,
@ -2327,7 +2327,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
if (eatToken(&tok_it, &tree, Token.Id.QuestionMark)) |question_token| {
const node = try arena.construct(ast.Node.SuffixOp{
const node = try arena.create(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
.op = ast.Node.SuffixOp.Op.UnwrapOptional,
@ -2337,7 +2337,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
continue;
}
const node = try arena.construct(ast.Node.InfixOp{
const node = try arena.create(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
.op_token = token_index,
@ -2397,7 +2397,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_promise => {
const node = try arena.construct(ast.Node.PromiseType{
const node = try arena.create(ast.Node.PromiseType{
.base = ast.Node{ .id = ast.Node.Id.PromiseType },
.promise_token = token.index,
.result = null,
@ -2423,7 +2423,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LParen => {
const node = try arena.construct(ast.Node.GroupedExpression{
const node = try arena.create(ast.Node.GroupedExpression{
.base = ast.Node{ .id = ast.Node.Id.GroupedExpression },
.lparen = token.index,
.expr = undefined,
@ -2441,7 +2441,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Builtin => {
const node = try arena.construct(ast.Node.BuiltinCall{
const node = try arena.create(ast.Node.BuiltinCall{
.base = ast.Node{ .id = ast.Node.Id.BuiltinCall },
.builtin_token = token.index,
.params = ast.Node.BuiltinCall.ParamList.init(arena),
@ -2460,7 +2460,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.LBracket => {
const node = try arena.construct(ast.Node.PrefixOp{
const node = try arena.create(ast.Node.PrefixOp{
.base = ast.Node{ .id = ast.Node.Id.PrefixOp },
.op_token = token.index,
.op = undefined,
@ -2519,7 +2519,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_fn => {
const fn_proto = try arena.construct(ast.Node.FnProto{
const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = null,
.visib_token = null,
@ -2540,7 +2540,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_nakedcc, Token.Id.Keyword_stdcallcc => {
const fn_proto = try arena.construct(ast.Node.FnProto{
const fn_proto = try arena.create(ast.Node.FnProto{
.base = ast.Node{ .id = ast.Node.Id.FnProto },
.doc_comments = null,
.visib_token = null,
@ -2567,7 +2567,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.Keyword_asm => {
const node = try arena.construct(ast.Node.Asm{
const node = try arena.create(ast.Node.Asm{
.base = ast.Node{ .id = ast.Node.Id.Asm },
.asm_token = token.index,
.volatile_token = null,
@ -2629,7 +2629,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
}
const node = try arena.construct(ast.Node.ErrorSetDecl{
const node = try arena.create(ast.Node.ErrorSetDecl{
.base = ast.Node{ .id = ast.Node.Id.ErrorSetDecl },
.error_token = ctx.error_token,
.decls = ast.Node.ErrorSetDecl.DeclList.init(arena),
@ -2695,7 +2695,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
return tree;
}
const node = try arena.construct(ast.Node.ErrorTag{
const node = try arena.create(ast.Node.ErrorTag{
.base = ast.Node{ .id = ast.Node.Id.ErrorTag },
.doc_comments = comments,
.name_token = ident_token_index,
@ -3032,7 +3032,7 @@ fn pushDocComment(arena: *mem.Allocator, line_comment: TokenIndex, result: *?*as
if (result.*) |comment_node| {
break :blk comment_node;
} else {
const comment_node = try arena.construct(ast.Node.DocComment{
const comment_node = try arena.create(ast.Node.DocComment{
.base = ast.Node{ .id = ast.Node.Id.DocComment },
.lines = ast.Node.DocComment.LineList.init(arena),
});
@ -3061,7 +3061,7 @@ fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterato
return &(try createLiteral(arena, ast.Node.StringLiteral, token_index)).base;
},
Token.Id.MultilineStringLiteralLine => {
const node = try arena.construct(ast.Node.MultilineStringLiteral{
const node = try arena.create(ast.Node.MultilineStringLiteral{
.base = ast.Node{ .id = ast.Node.Id.MultilineStringLiteral },
.lines = ast.Node.MultilineStringLiteral.LineList.init(arena),
});
@ -3089,7 +3089,7 @@ fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterato
fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *const OptionalCtx, token_ptr: *const Token, token_index: TokenIndex) !bool {
switch (token_ptr.id) {
Token.Id.Keyword_suspend => {
const node = try arena.construct(ast.Node.Suspend{
const node = try arena.create(ast.Node.Suspend{
.base = ast.Node{ .id = ast.Node.Id.Suspend },
.label = null,
.suspend_token = token_index,
@ -3103,7 +3103,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_if => {
const node = try arena.construct(ast.Node.If{
const node = try arena.create(ast.Node.If{
.base = ast.Node{ .id = ast.Node.Id.If },
.if_token = token_index,
.condition = undefined,
@ -3144,7 +3144,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_switch => {
const node = try arena.construct(ast.Node.Switch{
const node = try arena.create(ast.Node.Switch{
.base = ast.Node{ .id = ast.Node.Id.Switch },
.switch_token = token_index,
.expr = undefined,
@ -3166,7 +3166,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.Keyword_comptime => {
const node = try arena.construct(ast.Node.Comptime{
const node = try arena.create(ast.Node.Comptime{
.base = ast.Node{ .id = ast.Node.Id.Comptime },
.comptime_token = token_index,
.expr = undefined,
@ -3178,7 +3178,7 @@ fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *con
return true;
},
Token.Id.LBrace => {
const block = try arena.construct(ast.Node.Block{
const block = try arena.create(ast.Node.Block{
.base = ast.Node{ .id = ast.Node.Id.Block },
.label = null,
.lbrace = token_index,
@ -3318,7 +3318,7 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
}
fn createLiteral(arena: *mem.Allocator, comptime T: type, token_index: TokenIndex) !*T {
return arena.construct(T{
return arena.create(T{
.base = ast.Node{ .id = ast.Node.typeToId(T) },
.token = token_index,
});

View File

@ -1,3 +1,12 @@
test "zig fmt: preserve space between async fn definitions" {
try testCanonical(
\\async fn a() void {}
\\
\\async fn b() void {}
\\
);
}
test "zig fmt: comment to disable/enable zig fmt first" {
try testCanonical(
\\// Test trailing comma syntax

View File

@ -13,6 +13,7 @@ comptime {
_ = @import("cases/bugs/656.zig");
_ = @import("cases/bugs/828.zig");
_ = @import("cases/bugs/920.zig");
_ = @import("cases/bugs/1111.zig");
_ = @import("cases/byval_arg_var.zig");
_ = @import("cases/cast.zig");
_ = @import("cases/const_slice_child.zig");
@ -34,6 +35,7 @@ comptime {
_ = @import("cases/math.zig");
_ = @import("cases/merge_error_sets.zig");
_ = @import("cases/misc.zig");
_ = @import("cases/optional.zig");
_ = @import("cases/namespace_depends_on_compile_var/index.zig");
_ = @import("cases/new_stack_call.zig");
_ = @import("cases/null.zig");
@ -58,4 +60,5 @@ comptime {
_ = @import("cases/var_args.zig");
_ = @import("cases/void.zig");
_ = @import("cases/while.zig");
_ = @import("cases/widening.zig");
}

View File

@ -90,7 +90,7 @@ fn testBytesAlignSlice(b: u8) void {
b,
b,
};
const slice = ([]u32)(bytes[0..]);
const slice: []u32 = @bytesToSlice(u32, bytes[0..]);
assert(slice[0] == 0x33333333);
}

View File

@ -152,3 +152,11 @@ fn testImplicitCastSingleItemPtr() void {
slice[0] += 1;
assert(byte == 101);
}
fn testArrayByValAtComptime(b: [2]u8) u8 { return b[0]; }
test "comptime evalutating function that takes array by value" {
const arr = []u8{0,1};
_ = comptime testArrayByValAtComptime(arr);
_ = comptime testArrayByValAtComptime(arr);
}

12
test/cases/bugs/1111.zig Normal file
View File

@ -0,0 +1,12 @@
const Foo = extern enum {
Bar = -1,
};
test "issue 1111 fixed" {
const v = Foo.Bar;
switch (v) {
Foo.Bar => return,
else => return,
}
}

View File

@ -140,8 +140,8 @@ test "explicit cast from integer to error type" {
comptime testCastIntToErr(error.ItBroke);
}
fn testCastIntToErr(err: error) void {
const x = usize(err);
const y = error(x);
const x = @errorToInt(err);
const y = @intToError(x);
assert(error.ItBroke == y);
}
@ -340,11 +340,26 @@ fn testPeerErrorAndArray2(x: u8) error![]const u8 {
};
}
test "explicit cast float number literal to integer if no fraction component" {
test "@floatToInt" {
testFloatToInts();
comptime testFloatToInts();
}
fn testFloatToInts() void {
const x = i32(1e4);
assert(x == 10000);
const y = @floatToInt(i32, f32(1e4));
assert(y == 10000);
expectFloatToInt(f16, 255.1, u8, 255);
expectFloatToInt(f16, 127.2, i8, 127);
expectFloatToInt(f16, -128.2, i8, -128);
expectFloatToInt(f32, 255.1, u8, 255);
expectFloatToInt(f32, 127.2, i8, 127);
expectFloatToInt(f32, -128.2, i8, -128);
}
fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) void {
assert(@floatToInt(I, f) == i);
}
test "cast u128 to f128 and back" {
@ -372,7 +387,7 @@ test "const slice widen cast" {
0x12,
};
const u32_value = ([]const u32)(bytes[0..])[0];
const u32_value = @bytesToSlice(u32, bytes[0..])[0];
assert(u32_value == 0x12121212);
assert(@bitCast(u32, bytes) == 0x12121212);
@ -406,17 +421,50 @@ test "@intCast comptime_int" {
}
test "@floatCast comptime_int and comptime_float" {
const result = @floatCast(f32, 1234);
assert(@typeOf(result) == f32);
assert(result == 1234.0);
const result2 = @floatCast(f32, 1234.0);
assert(@typeOf(result) == f32);
assert(result == 1234.0);
{
const result = @floatCast(f16, 1234);
assert(@typeOf(result) == f16);
assert(result == 1234.0);
}
{
const result = @floatCast(f16, 1234.0);
assert(@typeOf(result) == f16);
assert(result == 1234.0);
}
{
const result = @floatCast(f32, 1234);
assert(@typeOf(result) == f32);
assert(result == 1234.0);
}
{
const result = @floatCast(f32, 1234.0);
assert(@typeOf(result) == f32);
assert(result == 1234.0);
}
}
test "comptime_int @intToFloat" {
const result = @intToFloat(f32, 1234);
assert(@typeOf(result) == f32);
assert(result == 1234.0);
{
const result = @intToFloat(f16, 1234);
assert(@typeOf(result) == f16);
assert(result == 1234.0);
}
{
const result = @intToFloat(f32, 1234);
assert(@typeOf(result) == f32);
assert(result == 1234.0);
}
}
test "@bytesToSlice keeps pointer alignment" {
var bytes = []u8{ 0x01, 0x02, 0x03, 0x04 };
const numbers = @bytesToSlice(u32, bytes[0..]);
comptime assert(@typeOf(numbers) == []align(@alignOf(@typeOf(bytes))) u32);
}
test "@intCast i32 to u7" {
var x: u128 = @maxValue(u128);
var y: i32 = 120;
var z = x >> @intCast(u7, y);
assert(z == 0xff);
}

View File

@ -5,7 +5,10 @@ const assert = std.debug.assert;
var x: i32 = 1;
test "create a coroutine and cancel it" {
const p = try async<std.debug.global_allocator> simpleAsyncFn();
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const p = try async<&da.allocator> simpleAsyncFn();
comptime assert(@typeOf(p) == promise->void);
cancel p;
assert(x == 2);
@ -17,8 +20,11 @@ async fn simpleAsyncFn() void {
}
test "coroutine suspend, resume, cancel" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
seq('a');
const p = try async<std.debug.global_allocator> testAsyncSeq();
const p = try async<&da.allocator> testAsyncSeq();
seq('c');
resume p;
seq('f');
@ -43,7 +49,10 @@ fn seq(c: u8) void {
}
test "coroutine suspend with block" {
const p = try async<std.debug.global_allocator> testSuspendBlock();
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const p = try async<&da.allocator> testSuspendBlock();
std.debug.assert(!result);
resume a_promise;
std.debug.assert(result);
@ -64,8 +73,11 @@ var await_a_promise: promise = undefined;
var await_final_result: i32 = 0;
test "coroutine await" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
await_seq('a');
const p = async<std.debug.global_allocator> await_amain() catch unreachable;
const p = async<&da.allocator> await_amain() catch unreachable;
await_seq('f');
resume await_a_promise;
await_seq('i');
@ -100,8 +112,11 @@ fn await_seq(c: u8) void {
var early_final_result: i32 = 0;
test "coroutine await early return" {
var da = std.heap.DirectAllocator.init();
defer da.deinit();
early_seq('a');
const p = async<std.debug.global_allocator> early_amain() catch unreachable;
const p = async<&da.allocator> early_amain() catch unreachable;
early_seq('f');
assert(early_final_result == 1234);
assert(std.mem.eql(u8, early_points, "abcdef"));
@ -146,7 +161,9 @@ test "async function with dot syntax" {
suspend;
}
};
const p = try async<std.debug.global_allocator> S.foo();
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const p = try async<&da.allocator> S.foo();
cancel p;
assert(S.y == 2);
}
@ -157,7 +174,9 @@ test "async fn pointer in a struct field" {
bar: async<*std.mem.Allocator> fn (*i32) void,
};
var foo = Foo{ .bar = simpleAsyncFn2 };
const p = (async<std.debug.global_allocator> foo.bar(&data)) catch unreachable;
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const p = (async<&da.allocator> foo.bar(&data)) catch unreachable;
assert(data == 2);
cancel p;
assert(data == 4);
@ -169,7 +188,9 @@ async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void {
}
test "async fn with inferred error set" {
const p = (async<std.debug.global_allocator> failing()) catch unreachable;
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const p = (async<&da.allocator> failing()) catch unreachable;
resume p;
cancel p;
}
@ -181,7 +202,9 @@ async fn failing() !void {
test "error return trace across suspend points - early return" {
const p = nonFailing();
resume p;
const p2 = try async<std.debug.global_allocator> printTrace(p);
var da = std.heap.DirectAllocator.init();
defer da.deinit();
const p2 = try async<&da.allocator> printTrace(p);
cancel p2;
}

View File

@ -92,14 +92,14 @@ test "enum to int" {
}
fn shouldEqual(n: Number, expected: u3) void {
assert(u3(n) == expected);
assert(@enumToInt(n) == expected);
}
test "int to enum" {
testIntToEnumEval(3);
}
fn testIntToEnumEval(x: i32) void {
assert(IntToEnumNumber(@intCast(u3, x)) == IntToEnumNumber.Three);
assert(@intToEnum(IntToEnumNumber, @intCast(u3, x)) == IntToEnumNumber.Three);
}
const IntToEnumNumber = enum {
Zero,
@ -768,7 +768,7 @@ test "casting enum to its tag type" {
}
fn testCastEnumToTagType(value: Small2) void {
assert(u2(value) == 1);
assert(@enumToInt(value) == 1);
}
const MultipleChoice = enum(u32) {
@ -784,7 +784,7 @@ test "enum with specified tag values" {
}
fn testEnumWithSpecifiedTagValues(x: MultipleChoice) void {
assert(u32(x) == 60);
assert(@enumToInt(x) == 60);
assert(1234 == switch (x) {
MultipleChoice.A => 1,
MultipleChoice.B => 2,
@ -811,7 +811,7 @@ test "enum with specified and unspecified tag values" {
}
fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) void {
assert(u32(x) == 1000);
assert(@enumToInt(x) == 1000);
assert(1234 == switch (x) {
MultipleChoice2.A => 1,
MultipleChoice2.B => 2,
@ -826,8 +826,8 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) void {
}
test "cast integer literal to enum" {
assert(MultipleChoice2(0) == MultipleChoice2.Unspecified1);
assert(MultipleChoice2(40) == MultipleChoice2.B);
assert(@intToEnum(MultipleChoice2, 0) == MultipleChoice2.Unspecified1);
assert(@intToEnum(MultipleChoice2, 40) == MultipleChoice2.B);
}
const EnumWithOneMember = enum {
@ -865,7 +865,7 @@ const EnumWithTagValues = enum(u4) {
D = 1 << 3,
};
test "enum with tag values don't require parens" {
assert(u4(EnumWithTagValues.C) == 0b0100);
assert(@enumToInt(EnumWithTagValues.C) == 0b0100);
}
test "enum with 1 field but explicit tag type should still have the tag type" {

View File

@ -31,8 +31,8 @@ test "@errorName" {
}
test "error values" {
const a = i32(error.err1);
const b = i32(error.err2);
const a = @errorToInt(error.err1);
const b = @errorToInt(error.err2);
assert(a != b);
}
@ -124,8 +124,8 @@ const Set2 = error{
};
fn testExplicitErrorSetCast(set1: Set1) void {
var x = Set2(set1);
var y = Set1(x);
var x = @errSetCast(Set2, set1);
var y = @errSetCast(Set1, x);
assert(y == error.A);
}
@ -147,14 +147,14 @@ test "syntax: optional operator in front of error union operator" {
}
test "comptime err to int of error set with only 1 possible value" {
testErrToIntWithOnePossibleValue(error.A, u32(error.A));
comptime testErrToIntWithOnePossibleValue(error.A, u32(error.A));
testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
}
fn testErrToIntWithOnePossibleValue(
x: error{A},
comptime value: u32,
) void {
if (u32(x) != value) {
if (@errorToInt(x) != value) {
@compileError("bad");
}
}

View File

@ -623,3 +623,22 @@ test "function which returns struct with type field causes implicit comptime" {
const ty = wrap(i32).T;
assert(ty == i32);
}
test "call method with comptime pass-by-non-copying-value self parameter" {
const S = struct {
a: u8,
fn b(comptime s: this) u8 {
return s.a;
}
};
const s = S{ .a = 2 };
var b = s.b();
assert(b == 2);
}
test "@tagName of @typeId" {
const str = @tagName(@typeId(u8));
assert(std.mem.eql(u8, str, "Int"));
}

View File

@ -6,15 +6,20 @@ test "division" {
}
fn testDivision() void {
assert(div(u32, 13, 3) == 4);
assert(div(f16, 1.0, 2.0) == 0.5);
assert(div(f32, 1.0, 2.0) == 0.5);
assert(divExact(u32, 55, 11) == 5);
assert(divExact(i32, -55, 11) == -5);
assert(divExact(f16, 55.0, 11.0) == 5.0);
assert(divExact(f16, -55.0, 11.0) == -5.0);
assert(divExact(f32, 55.0, 11.0) == 5.0);
assert(divExact(f32, -55.0, 11.0) == -5.0);
assert(divFloor(i32, 5, 3) == 1);
assert(divFloor(i32, -5, 3) == -2);
assert(divFloor(f16, 5.0, 3.0) == 1.0);
assert(divFloor(f16, -5.0, 3.0) == -2.0);
assert(divFloor(f32, 5.0, 3.0) == 1.0);
assert(divFloor(f32, -5.0, 3.0) == -2.0);
assert(divFloor(i32, -0x80000000, -2) == 0x40000000);
@ -24,8 +29,12 @@ fn testDivision() void {
assert(divTrunc(i32, 5, 3) == 1);
assert(divTrunc(i32, -5, 3) == -1);
assert(divTrunc(f16, 5.0, 3.0) == 1.0);
assert(divTrunc(f16, -5.0, 3.0) == -1.0);
assert(divTrunc(f32, 5.0, 3.0) == 1.0);
assert(divTrunc(f32, -5.0, 3.0) == -1.0);
assert(divTrunc(f64, 5.0, 3.0) == 1.0);
assert(divTrunc(f64, -5.0, 3.0) == -1.0);
comptime {
assert(
@ -287,6 +296,14 @@ test "quad hex float literal parsing in range" {
const d = 0x1.edcbff8ad76ab5bf46463233214fp-435;
}
test "quad hex float literal parsing accurate" {
const a: f128 = 0x1.1111222233334444555566667777p+0;
// implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing.
const expected: u128 = 0x3fff1111222233334444555566667777;
assert(@bitCast(u128, a) == expected);
}
test "hex float literal within range" {
const a = 0x1.0p16383;
const b = 0x0.1p16387;
@ -434,11 +451,28 @@ test "comptime float rem int" {
}
}
test "remainder division" {
comptime remdiv(f16);
comptime remdiv(f32);
comptime remdiv(f64);
comptime remdiv(f128);
remdiv(f16);
remdiv(f64);
remdiv(f128);
}
fn remdiv(comptime T: type) void {
assert(T(1) == T(1) % T(2));
assert(T(1) == T(7) % T(3));
}
test "@sqrt" {
testSqrt(f64, 12.0);
comptime testSqrt(f64, 12.0);
testSqrt(f32, 13.0);
comptime testSqrt(f32, 13.0);
testSqrt(f16, 13.0);
comptime testSqrt(f16, 13.0);
const x = 14.0;
const y = x * x;

View File

@ -53,6 +53,7 @@ test "@IntType builtin" {
}
test "floating point primitive bit counts" {
assert(f16.bit_count == 16);
assert(f32.bit_count == 32);
assert(f64.bit_count == 64);
}
@ -422,14 +423,14 @@ test "cast slice to u8 slice" {
4,
};
const big_thing_slice: []i32 = big_thing_array[0..];
const bytes = ([]u8)(big_thing_slice);
const bytes = @sliceToBytes(big_thing_slice);
assert(bytes.len == 4 * 4);
bytes[4] = 0;
bytes[5] = 0;
bytes[6] = 0;
bytes[7] = 0;
assert(big_thing_slice[1] == 0);
const big_thing_again = ([]align(1) i32)(bytes);
const big_thing_again = @bytesToSlice(i32, bytes);
assert(big_thing_again[2] == 3);
big_thing_again[2] = -1;
assert(bytes[8] == @maxValue(u8));
@ -701,3 +702,8 @@ test "comptime cast fn to ptr" {
const addr2 = @ptrCast(*const u8, emptyFn);
comptime assert(addr1 == addr2);
}
test "equality compare fn ptrs" {
var a = emptyFn;
assert(a == a);
}

View File

@ -146,7 +146,7 @@ test "null with default unwrap" {
test "optional types" {
comptime {
const opt_type_struct = StructWithOptionalType { .t=u8, };
const opt_type_struct = StructWithOptionalType{ .t = u8 };
assert(opt_type_struct.t != null and opt_type_struct.t.? == u8);
}
}

9
test/cases/optional.zig Normal file
View File

@ -0,0 +1,9 @@
const assert = @import("std").debug.assert;
pub const EmptyStruct = struct {};
test "optional pointer to size zero struct" {
var e = EmptyStruct{};
var o: ?*EmptyStruct = &e;
assert(o != null);
}

View File

@ -302,7 +302,7 @@ test "packed array 24bits" {
var bytes = []u8{0} ** (@sizeOf(FooArray24Bits) + 1);
bytes[bytes.len - 1] = 0xaa;
const ptr = &([]FooArray24Bits)(bytes[0 .. bytes.len - 1])[0];
const ptr = &@bytesToSlice(FooArray24Bits, bytes[0 .. bytes.len - 1])[0];
assert(ptr.a == 0);
assert(ptr.b[0].field == 0);
assert(ptr.b[1].field == 0);
@ -351,7 +351,7 @@ test "aligned array of packed struct" {
}
var bytes = []u8{0xbb} ** @sizeOf(FooArrayOfAligned);
const ptr = &([]FooArrayOfAligned)(bytes[0..bytes.len])[0];
const ptr = &@bytesToSlice(FooArrayOfAligned, bytes[0..bytes.len])[0];
assert(ptr.a[0].a == 0xbb);
assert(ptr.a[0].b == 0xbb);

View File

@ -107,11 +107,11 @@ test "type info: promise info" {
fn testPromise() void {
const null_promise_info = @typeInfo(promise);
assert(TypeId(null_promise_info) == TypeId.Promise);
assert(null_promise_info.Promise.child == @typeOf(undefined));
assert(null_promise_info.Promise.child == null);
const promise_info = @typeInfo(promise->usize);
assert(TypeId(promise_info) == TypeId.Promise);
assert(promise_info.Promise.child == usize);
assert(promise_info.Promise.child.? == usize);
}
test "type info: error set, error union info" {
@ -130,7 +130,7 @@ fn testErrorSet() void {
assert(TypeId(error_set_info) == TypeId.ErrorSet);
assert(error_set_info.ErrorSet.errors.len == 3);
assert(mem.eql(u8, error_set_info.ErrorSet.errors[0].name, "First"));
assert(error_set_info.ErrorSet.errors[2].value == usize(TestErrorSet.Third));
assert(error_set_info.ErrorSet.errors[2].value == @errorToInt(TestErrorSet.Third));
const error_union_info = @typeInfo(TestErrorSet!usize);
assert(TypeId(error_union_info) == TypeId.ErrorUnion);
@ -165,7 +165,7 @@ fn testUnion() void {
const typeinfo_info = @typeInfo(TypeInfo);
assert(TypeId(typeinfo_info) == TypeId.Union);
assert(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
assert(typeinfo_info.Union.tag_type == TypeId);
assert(typeinfo_info.Union.tag_type.? == TypeId);
assert(typeinfo_info.Union.fields.len == 25);
assert(typeinfo_info.Union.fields[4].enum_field != null);
assert(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
@ -179,7 +179,7 @@ fn testUnion() void {
const notag_union_info = @typeInfo(TestNoTagUnion);
assert(TypeId(notag_union_info) == TypeId.Union);
assert(notag_union_info.Union.tag_type == @typeOf(undefined));
assert(notag_union_info.Union.tag_type == null);
assert(notag_union_info.Union.layout == TypeInfo.ContainerLayout.Auto);
assert(notag_union_info.Union.fields.len == 2);
assert(notag_union_info.Union.fields[0].enum_field == null);
@ -191,7 +191,7 @@ fn testUnion() void {
const extern_union_info = @typeInfo(TestExternUnion);
assert(extern_union_info.Union.layout == TypeInfo.ContainerLayout.Extern);
assert(extern_union_info.Union.tag_type == @typeOf(undefined));
assert(extern_union_info.Union.tag_type == null);
assert(extern_union_info.Union.fields[0].enum_field == null);
assert(extern_union_info.Union.fields[0].field_type == *c_void);
}
@ -238,13 +238,13 @@ fn testFunction() void {
assert(fn_info.Fn.is_generic);
assert(fn_info.Fn.args.len == 2);
assert(fn_info.Fn.is_var_args);
assert(fn_info.Fn.return_type == @typeOf(undefined));
assert(fn_info.Fn.async_allocator_type == @typeOf(undefined));
assert(fn_info.Fn.return_type == null);
assert(fn_info.Fn.async_allocator_type == null);
const test_instance: TestStruct = undefined;
const bound_fn_info = @typeInfo(@typeOf(test_instance.foo));
assert(TypeId(bound_fn_info) == TypeId.BoundFn);
assert(bound_fn_info.BoundFn.args[0].arg_type == *const TestStruct);
assert(bound_fn_info.BoundFn.args[0].arg_type.? == *const TestStruct);
}
fn foo(comptime a: usize, b: bool, args: ...) usize {

View File

@ -126,7 +126,7 @@ const MultipleChoice = union(enum(u32)) {
test "simple union(enum(u32))" {
var x = MultipleChoice.C;
assert(x == MultipleChoice.C);
assert(u32(@TagType(MultipleChoice)(x)) == 60);
assert(@enumToInt(@TagType(MultipleChoice)(x)) == 60);
}
const MultipleChoice2 = union(enum(u32)) {
@ -148,7 +148,7 @@ test "union(enum(u32)) with specified and unspecified tag values" {
}
fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: *const MultipleChoice2) void {
assert(u32(@TagType(MultipleChoice2)(x.*)) == 60);
assert(@enumToInt(@TagType(MultipleChoice2)(x.*)) == 60);
assert(1123 == switch (x.*) {
MultipleChoice2.A => 1,
MultipleChoice2.B => 2,

27
test/cases/widening.zig Normal file
View File

@ -0,0 +1,27 @@
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
test "integer widening" {
var a: u8 = 250;
var b: u16 = a;
var c: u32 = b;
var d: u64 = c;
var e: u64 = d;
var f: u128 = e;
assert(f == a);
}
test "implicit unsigned integer to signed integer" {
var a: u8 = 250;
var b: i16 = a;
assert(b == 250);
}
test "float widening" {
var a: f16 = 12.34;
var b: f32 = a;
var c: f64 = b;
var d: f128 = c;
assert(d == a);
}

View File

@ -1,6 +1,36 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"enum field value references enum",
\\pub const Foo = extern enum {
\\ A = Foo.B,
\\ C = D,
\\};
\\export fn entry() void {
\\ var s: Foo = Foo.E;
\\}
,
".tmp_source.zig:1:17: error: 'Foo' depends on itself",
);
cases.add(
"@floatToInt comptime safety",
\\comptime {
\\ _ = @floatToInt(i8, f32(-129.1));
\\}
\\comptime {
\\ _ = @floatToInt(u8, f32(-1.1));
\\}
\\comptime {
\\ _ = @floatToInt(u8, f32(256.1));
\\}
,
".tmp_source.zig:2:9: error: integer value '-129' cannot be stored in type 'i8'",
".tmp_source.zig:5:9: error: integer value '-1' cannot be stored in type 'u8'",
".tmp_source.zig:8:9: error: integer value '256' cannot be stored in type 'u8'",
);
cases.add(
"use c_void as return type of fn ptr",
\\export fn entry() void {
@ -83,7 +113,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ var rule_set = try Foo.init();
\\}
,
".tmp_source.zig:2:13: error: invalid cast from type 'type' to 'i32'",
".tmp_source.zig:2:13: error: expected type 'i32', found 'type'",
);
cases.add(
@ -105,7 +135,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
"invalid deref on switch target",
"nested error set mismatch",
\\const NextError = error{NextError};
\\const OtherError = error{OutOfMemory};
\\
@ -117,7 +147,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ return null;
\\}
,
".tmp_source.zig:5:34: error: expected 'NextError!i32', found 'OtherError!i32'",
".tmp_source.zig:5:34: error: expected type '?NextError!i32', found '?OtherError!i32'",
".tmp_source.zig:5:34: note: optional type child 'OtherError!i32' cannot cast into optional type child 'NextError!i32'",
".tmp_source.zig:5:34: note: error set 'OtherError' cannot cast into error set 'NextError'",
".tmp_source.zig:2:26: note: 'error.OutOfMemory' not a member of destination error set",
);
@ -404,10 +436,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\const Set2 = error {A, C};
\\comptime {
\\ var x = Set1.B;
\\ var y = Set2(x);
\\ var y = @errSetCast(Set2, x);
\\}
,
".tmp_source.zig:5:17: error: error.B not a member of error set 'Set2'",
".tmp_source.zig:5:13: error: error.B not a member of error set 'Set2'",
);
cases.add(
@ -420,8 +452,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ return error.B;
\\}
,
".tmp_source.zig:3:35: error: expected 'SmallErrorSet!i32', found 'error!i32'",
".tmp_source.zig:3:35: note: unable to cast global error set into smaller set",
".tmp_source.zig:3:35: error: expected type 'SmallErrorSet!i32', found 'error!i32'",
".tmp_source.zig:3:35: note: error set 'error' cannot cast into error set 'SmallErrorSet'",
".tmp_source.zig:3:35: note: cannot cast global error set into smaller set",
);
cases.add(
@ -434,8 +467,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ return error.B;
\\}
,
".tmp_source.zig:3:31: error: expected 'SmallErrorSet', found 'error'",
".tmp_source.zig:3:31: note: unable to cast global error set into smaller set",
".tmp_source.zig:3:31: error: expected type 'SmallErrorSet', found 'error'",
".tmp_source.zig:3:31: note: cannot cast global error set into smaller set",
);
cases.add(
@ -461,31 +494,40 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ var x: Set2 = set1;
\\}
,
".tmp_source.zig:7:19: error: expected 'Set2', found 'Set1'",
".tmp_source.zig:7:19: error: expected type 'Set2', found 'Set1'",
".tmp_source.zig:1:23: note: 'error.B' not a member of destination error set",
);
cases.add(
"int to err global invalid number",
\\const Set1 = error{A, B};
\\const Set1 = error{
\\ A,
\\ B,
\\};
\\comptime {
\\ var x: usize = 3;
\\ var y = error(x);
\\ var x: u16 = 3;
\\ var y = @intToError(x);
\\}
,
".tmp_source.zig:4:18: error: integer value 3 represents no error",
".tmp_source.zig:7:13: error: integer value 3 represents no error",
);
cases.add(
"int to err non global invalid number",
\\const Set1 = error{A, B};
\\const Set2 = error{A, C};
\\const Set1 = error{
\\ A,
\\ B,
\\};
\\const Set2 = error{
\\ A,
\\ C,
\\};
\\comptime {
\\ var x = usize(Set1.B);
\\ var y = Set2(x);
\\ var x = @errorToInt(Set1.B);
\\ var y = @errSetCast(Set2, @intToError(x));
\\}
,
".tmp_source.zig:5:17: error: integer value 2 represents no error in 'Set2'",
".tmp_source.zig:11:13: error: error.B not a member of error set 'Set2'",
);
cases.add(
@ -1635,6 +1677,18 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:1:16: error: integer value 300 cannot be implicitly casted to type 'u8'",
);
cases.add(
"invalid shift amount error",
\\const x : u8 = 2;
\\fn f() u16 {
\\ return x << 8;
\\}
\\export fn entry() u16 { return f(); }
,
".tmp_source.zig:3:14: error: RHS of shift is too large for LHS type",
".tmp_source.zig:3:17: note: value 8 cannot fit into type u3",
);
cases.add(
"incompatible number literals",
\\const x = 2 == 2.0;
@ -1851,6 +1905,416 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:1:15: error: use of undefined value",
);
cases.add(
"div on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a / a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"div assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a /= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"mod on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a % a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"mod assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a %= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"add on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a + a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"add assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a += a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"add wrap on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a +% a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"add wrap assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a +%= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"sub on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a - a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"sub assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a -= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"sub wrap on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a -% a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"sub wrap assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a -%= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"mult on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a * a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"mult assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a *= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"mult wrap on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a *% a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"mult wrap assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a *%= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"shift left on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a << 2;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"shift left assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a <<= 2;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"shift right on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a >> 2;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"shift left assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a >>= 2;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"bin and on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a & a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"bin and assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a &= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"bin or on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a | a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"bin or assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a |= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"bin xor on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a ^ a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"bin xor assign on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ a ^= a;
\\}
,
".tmp_source.zig:3:5: error: use of undefined value",
);
cases.add(
"equal on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a == a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"not equal on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a != a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"greater than on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a > a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"greater than equal on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a >= a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"less than on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a < a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"less than equal on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = a <= a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"and on undefined value",
\\comptime {
\\ var a: bool = undefined;
\\ _ = a and a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"or on undefined value",
\\comptime {
\\ var a: bool = undefined;
\\ _ = a or a;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"negate on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = -a;
\\}
,
".tmp_source.zig:3:10: error: use of undefined value",
);
cases.add(
"negate wrap on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = -%a;
\\}
,
".tmp_source.zig:3:11: error: use of undefined value",
);
cases.add(
"bin not on undefined value",
\\comptime {
\\ var a: i64 = undefined;
\\ _ = ~a;
\\}
,
".tmp_source.zig:3:10: error: use of undefined value",
);
cases.add(
"bool not on undefined value",
\\comptime {
\\ var a: bool = undefined;
\\ _ = !a;
\\}
,
".tmp_source.zig:3:10: error: use of undefined value",
);
cases.add(
"orelse on undefined value",
\\comptime {
\\ var a: ?bool = undefined;
\\ _ = a orelse false;
\\}
,
".tmp_source.zig:3:11: error: use of undefined value",
);
cases.add(
"catch on undefined value",
\\comptime {
\\ var a: error!bool = undefined;
\\ _ = a catch |err| false;
\\}
,
".tmp_source.zig:3:11: error: use of undefined value",
);
cases.add(
"deref on undefined value",
\\comptime {
\\ var a: *u8 = undefined;
\\ _ = a.*;
\\}
,
".tmp_source.zig:3:9: error: use of undefined value",
);
cases.add(
"endless loop in function evaluation",
\\const seventh_fib_number = fibbonaci(7);
@ -2086,10 +2550,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
"convert fixed size array to slice with invalid size",
\\export fn f() void {
\\ var array: [5]u8 = undefined;
\\ var foo = ([]const u32)(array)[0];
\\ var foo = @bytesToSlice(u32, array)[0];
\\}
,
".tmp_source.zig:3:28: error: unable to convert [5]u8 to []const u32: size mismatch",
".tmp_source.zig:3:15: error: unable to convert [5]u8 to []align(1) const u32: size mismatch",
".tmp_source.zig:3:29: note: u32 has size 4; remaining bytes: 1",
);
cases.add(
@ -2611,17 +3076,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:2:21: error: expected pointer, found 'usize'",
);
cases.add(
"too many error values to cast to small integer",
\\const Error = error { A, B, C, D, E, F, G, H };
\\fn foo(e: Error) u2 {
\\ return u2(e);
\\}
\\export fn entry() usize { return @sizeOf(@typeOf(foo)); }
,
".tmp_source.zig:3:14: error: too many error values to fit in 'u2'",
);
cases.add(
"asm at compile time",
\\comptime {
@ -3239,18 +3693,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:3:26: note: '*u32' has alignment 4",
);
cases.add(
"increase pointer alignment in slice resize",
\\export fn entry() u32 {
\\ var bytes = []u8{0x01, 0x02, 0x03, 0x04};
\\ return ([]u32)(bytes[0..])[0];
\\}
,
".tmp_source.zig:3:19: error: cast increases pointer alignment",
".tmp_source.zig:3:19: note: '[]u8' has alignment 1",
".tmp_source.zig:3:19: note: '[]u32' has alignment 4",
);
cases.add(
"@alignCast expects pointer or slice",
\\export fn entry() void {
@ -3722,22 +4164,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:9:22: error: expected type 'u2', found 'Small'",
);
cases.add(
"explicitly casting enum to non tag type",
\\const Small = enum(u2) {
\\ One,
\\ Two,
\\ Three,
\\ Four,
\\};
\\
\\export fn entry() void {
\\ var x = u3(Small.Two);
\\}
,
".tmp_source.zig:9:15: error: enum to integer cast to 'u3' instead of its tag type, 'u2'",
);
cases.add(
"explicitly casting non tag type to enum",
\\const Small = enum(u2) {
@ -3749,10 +4175,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() void {
\\ var y = u3(3);
\\ var x = Small(y);
\\ var x = @intToEnum(Small, y);
\\}
,
".tmp_source.zig:10:18: error: integer to enum cast from 'u3' instead of its tag type, 'u2'",
".tmp_source.zig:10:31: error: expected type 'u2', found 'u3'",
);
cases.add(
@ -4033,10 +4459,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ B = 11,
\\};
\\export fn entry() void {
\\ var x = Foo(0);
\\ var x = @intToEnum(Foo, 0);
\\}
,
".tmp_source.zig:6:16: error: enum 'Foo' has no tag matching integer value 0",
".tmp_source.zig:6:13: error: enum 'Foo' has no tag matching integer value 0",
".tmp_source.zig:1:13: note: 'Foo' declared here",
);

View File

@ -1,6 +1,63 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addRuntimeSafety("@intToEnum - no matching tag value",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\const Foo = enum {
\\ A,
\\ B,
\\ C,
\\};
\\pub fn main() void {
\\ baz(bar(3));
\\}
\\fn bar(a: u2) Foo {
\\ return @intToEnum(Foo, a);
\\}
\\fn baz(a: Foo) void {}
);
cases.addRuntimeSafety("@floatToInt cannot fit - negative to unsigned",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
\\ baz(bar(-1.1));
\\}
\\fn bar(a: f32) u8 {
\\ return @floatToInt(u8, a);
\\}
\\fn baz(a: u8) void { }
);
cases.addRuntimeSafety("@floatToInt cannot fit - negative out of range",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
\\ baz(bar(-129.1));
\\}
\\fn bar(a: f32) i8 {
\\ return @floatToInt(i8, a);
\\}
\\fn baz(a: i8) void { }
);
cases.addRuntimeSafety("@floatToInt cannot fit - positive out of range",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
\\pub fn main() void {
\\ baz(bar(256.2));
\\}
\\fn bar(a: f32) u8 {
\\ return @floatToInt(u8, a);
\\}
\\fn baz(a: u8) void { }
);
cases.addRuntimeSafety("calling panic",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
@ -175,7 +232,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ if (x.len == 0) return error.Whatever;
\\}
\\fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
\\ return ([]align(1) const i32)(slice);
\\ return @bytesToSlice(i32, slice);
\\}
);
@ -227,12 +284,12 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub fn main() void {
\\ _ = bar(9999);
\\}
\\fn bar(x: u32) error {
\\ return error(x);
\\fn bar(x: u16) error {
\\ return @intToError(x);
\\}
);
cases.addRuntimeSafety("cast integer to non-global error set and no match",
cases.addRuntimeSafety("@errSetCast error not present in destination",
\\pub fn panic(message: []const u8, stack_trace: ?*@import("builtin").StackTrace) noreturn {
\\ @import("std").os.exit(126);
\\}
@ -242,7 +299,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ _ = foo(Set1.B);
\\}
\\fn foo(set1: Set1) Set2 {
\\ return Set2(set1);
\\ return @errSetCast(Set2, set1);
\\}
);
@ -252,12 +309,12 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\}
\\pub fn main() !void {
\\ var array align(4) = []u32{0x11111111, 0x11111111};
\\ const bytes = ([]u8)(array[0..]);
\\ const bytes = @sliceToBytes(array[0..]);
\\ if (foo(bytes) != 0x11111111) return error.Wrong;
\\}
\\fn foo(bytes: []u8) u32 {
\\ const slice4 = bytes[1..5];
\\ const int_slice = ([]u32)(@alignCast(4, slice4));
\\ const int_slice = @bytesToSlice(u32, @alignCast(4, slice4));
\\ return int_slice[0];
\\}
);

View File

@ -48,13 +48,12 @@ const test_targets = []TestTarget{
const max_stdout_size = 1 * 1024 * 1024; // 1 MB
pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-compare-output", "Run the compare output tests"),
.test_index = 0,
.test_filter = test_filter,
};
}) catch unreachable;
compare_output.addCases(cases);
@ -62,13 +61,12 @@ pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build
}
pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-runtime-safety", "Run the runtime safety tests"),
.test_index = 0,
.test_filter = test_filter,
};
}) catch unreachable;
runtime_safety.addCases(cases);
@ -76,13 +74,12 @@ pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build
}
pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompileErrorContext) catch unreachable;
cases.* = CompileErrorContext{
const cases = b.allocator.create(CompileErrorContext{
.b = b,
.step = b.step("test-compile-errors", "Run the compile error tests"),
.test_index = 0,
.test_filter = test_filter,
};
}) catch unreachable;
compile_errors.addCases(cases);
@ -90,13 +87,12 @@ pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.
}
pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(BuildExamplesContext) catch unreachable;
cases.* = BuildExamplesContext{
const cases = b.allocator.create(BuildExamplesContext{
.b = b,
.step = b.step("test-build-examples", "Build the examples"),
.test_index = 0,
.test_filter = test_filter,
};
}) catch unreachable;
build_examples.addCases(cases);
@ -104,13 +100,12 @@ pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.
}
pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(CompareOutputContext) catch unreachable;
cases.* = CompareOutputContext{
const cases = b.allocator.create(CompareOutputContext{
.b = b,
.step = b.step("test-asm-link", "Run the assemble and link tests"),
.test_index = 0,
.test_filter = test_filter,
};
}) catch unreachable;
assemble_and_link.addCases(cases);
@ -118,13 +113,12 @@ pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *bui
}
pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(TranslateCContext) catch unreachable;
cases.* = TranslateCContext{
const cases = b.allocator.create(TranslateCContext{
.b = b,
.step = b.step("test-translate-c", "Run the C transation tests"),
.test_index = 0,
.test_filter = test_filter,
};
}) catch unreachable;
translate_c.addCases(cases);
@ -132,13 +126,12 @@ pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.St
}
pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step {
const cases = b.allocator.create(GenHContext) catch unreachable;
cases.* = GenHContext{
const cases = b.allocator.create(GenHContext{
.b = b,
.step = b.step("test-gen-h", "Run the C header file generation tests"),
.test_index = 0,
.test_filter = test_filter,
};
}) catch unreachable;
gen_h.addCases(cases);
@ -240,8 +233,7 @@ pub const CompareOutputContext = struct {
pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) *RunCompareOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(RunCompareOutputStep) catch unreachable;
ptr.* = RunCompareOutputStep{
const ptr = allocator.create(RunCompareOutputStep{
.context = context,
.exe_path = exe_path,
.name = name,
@ -249,7 +241,7 @@ pub const CompareOutputContext = struct {
.test_index = context.test_index,
.step = build.Step.init("RunCompareOutput", allocator, make),
.cli_args = cli_args,
};
}) catch unreachable;
context.test_index += 1;
return ptr;
}
@ -328,14 +320,14 @@ pub const CompareOutputContext = struct {
pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8) *RuntimeSafetyRunStep {
const allocator = context.b.allocator;
const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable;
ptr.* = RuntimeSafetyRunStep{
const ptr = allocator.create(RuntimeSafetyRunStep{
.context = context,
.exe_path = exe_path,
.name = name,
.test_index = context.test_index,
.step = build.Step.init("RuntimeSafetyRun", allocator, make),
};
}) catch unreachable;
context.test_index += 1;
return ptr;
}
@ -543,15 +535,15 @@ pub const CompileErrorContext = struct {
pub fn create(context: *CompileErrorContext, name: []const u8, case: *const TestCase, build_mode: Mode) *CompileCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(CompileCmpOutputStep) catch unreachable;
ptr.* = CompileCmpOutputStep{
const ptr = allocator.create(CompileCmpOutputStep{
.step = build.Step.init("CompileCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
.build_mode = build_mode,
};
}) catch unreachable;
context.test_index += 1;
return ptr;
}
@ -662,14 +654,14 @@ pub const CompileErrorContext = struct {
}
pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_errors = ArrayList([]const u8).init(self.b.allocator),
.link_libc = false,
.is_exe = false,
};
}) catch unreachable;
tc.addSourceFile(".tmp_source.zig", source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@ -829,14 +821,14 @@ pub const TranslateCContext = struct {
pub fn create(context: *TranslateCContext, name: []const u8, case: *const TestCase) *TranslateCCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(TranslateCCmpOutputStep) catch unreachable;
ptr.* = TranslateCCmpOutputStep{
const ptr = allocator.create(TranslateCCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.name = name,
.test_index = context.test_index,
.case = case,
};
}) catch unreachable;
context.test_index += 1;
return ptr;
}
@ -936,13 +928,13 @@ pub const TranslateCContext = struct {
}
pub fn create(self: *TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
.allow_warnings = allow_warnings,
};
}) catch unreachable;
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {
@ -1023,15 +1015,15 @@ pub const GenHContext = struct {
pub fn create(context: *GenHContext, h_path: []const u8, name: []const u8, case: *const TestCase) *GenHCmpOutputStep {
const allocator = context.b.allocator;
const ptr = allocator.create(GenHCmpOutputStep) catch unreachable;
ptr.* = GenHCmpOutputStep{
const ptr = allocator.create(GenHCmpOutputStep{
.step = build.Step.init("ParseCCmpOutput", allocator, make),
.context = context,
.h_path = h_path,
.name = name,
.test_index = context.test_index,
.case = case,
};
}) catch unreachable;
context.test_index += 1;
return ptr;
}
@ -1070,12 +1062,12 @@ pub const GenHContext = struct {
}
pub fn create(self: *GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
const tc = self.b.allocator.create(TestCase) catch unreachable;
tc.* = TestCase{
const tc = self.b.allocator.create(TestCase{
.name = name,
.sources = ArrayList(TestCase.SourceFile).init(self.b.allocator),
.expected_lines = ArrayList([]const u8).init(self.b.allocator),
};
}) catch unreachable;
tc.addSourceFile(filename, source);
comptime var arg_i = 0;
inline while (arg_i < expected_lines.len) : (arg_i += 1) {