Merge branch 'master' into modernize-stage2

This commit is contained in:
Vexu 2019-11-23 19:13:48 +02:00
commit 03cc81665b
No known key found for this signature in database
GPG Key ID: 5AEABFCAFF5CD8D6
309 changed files with 8520 additions and 3250 deletions

View File

@ -1,7 +1,7 @@
![ZIG](https://ziglang.org/zig-logo.svg)
A general-purpose programming language designed for **robustness**,
**optimality**, and **maintainability**.
A general-purpose programming language for maintaining **robust**, **optimal**,
and **reusable** code.
## Resources

View File

@ -155,7 +155,7 @@ fn dependOnLib(b: *Builder, lib_exe_obj: var, dep: LibraryDep) void {
) catch unreachable;
for (dep.system_libs.toSliceConst()) |lib| {
const static_bare_name = if (mem.eql(u8, lib, "curses"))
([]const u8)("libncurses.a")
@as([]const u8, "libncurses.a")
else
b.fmt("lib{}.a", lib);
const static_lib_name = fs.path.join(

View File

@ -14,7 +14,7 @@ jobs:
displayName: 'Build and test'
- job: BuildLinux
pool:
vmImage: 'ubuntu-16.04'
vmImage: 'ubuntu-18.04'
timeoutInMinutes: 360
@ -53,7 +53,7 @@ jobs:
strategy:
maxParallel: 1
pool:
vmImage: 'ubuntu-16.04'
vmImage: 'ubuntu-18.04'
variables:
version: $[ dependencies.BuildLinux.outputs['main.version'] ]
steps:

View File

@ -9,6 +9,8 @@ steps:
- name: build-and-test
image: ziglang/static-base:llvm9-1
environment:
SRHT_OAUTH_TOKEN:
from_secret: SRHT_OAUTH_TOKEN
AWS_ACCESS_KEY_ID:
from_secret: AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY:

View File

@ -10,8 +10,8 @@ const testing = std.testing;
const max_doc_file_size = 10 * 1024 * 1024;
const exe_ext = std.build.Target(std.build.Target.Native).exeFileExt();
const obj_ext = std.build.Target(std.build.Target.Native).oFileExt();
const exe_ext = @as(std.build.Target, std.build.Target.Native).exeFileExt();
const obj_ext = @as(std.build.Target, std.build.Target.Native).oFileExt();
const tmp_dir_name = "docgen_tmp";
const test_out_path = tmp_dir_name ++ fs.path.sep_str ++ "test" ++ exe_ext;
@ -856,6 +856,7 @@ fn tokenizeAndPrintRaw(docgen_tokenizer: *Tokenizer, out: var, source_token: Tok
.LineComment,
.DocComment,
.ContainerDocComment,
.ShebangLine,
=> {
try out.write("<span class=\"tok-comment\">");

View File

@ -164,15 +164,17 @@
<div id="contents">
{#header_open|Introduction#}
<p>
Zig is a general-purpose programming language designed for <strong>robustness</strong>,
<strong>optimality</strong>, and <strong>maintainability</strong>.
Zig is a general-purpose programming language for maintaining <strong>robust</strong>,
<strong>optimal</strong>, and <strong>reusable</strong> code.
</p>
<ul>
<li><strong>Robust</strong> - behavior is correct even for edge cases such as out of memory.</li>
<li><strong>Optimal</strong> - write programs the best way they can behave and perform.</li>
<li><strong>Maintainable</strong> - precisely communicate intent to the compiler and other programmers.
The language imposes a low overhead to reading code and is resilient to changing requirements
and environments.</li>
<li><strong>Reusable</strong> - the same code works in many environments which have different
constraints.</li>
<li><strong>Maintainable</strong> - precisely communicate intent to the compiler and
other programmers. The language imposes a low overhead to reading code and is
resilient to changing requirements and environments.</li>
</ul>
<p>
Often the most efficient way to learn something new is to see examples, so
@ -202,11 +204,8 @@
const std = @import("std");
pub fn main() !void {
// If this program is run without stdout attached, exit with an error.
const stdout_file = try std.io.getStdOut();
// If this program encounters pipe failure when printing to stdout, exit
// with an error.
try stdout_file.write("Hello, world!\n");
const stdout = &std.io.getStdOut().outStream().stream;
try stdout.print("Hello, {}!\n", "world");
}
{#code_end#}
<p>
@ -712,7 +711,7 @@ test "init with undefined" {
}
{#code_end#}
<p>
{#syntax#}undefined{#endsyntax#} can be {#link|implicitly cast|Implicit Casts#} to any type.
{#syntax#}undefined{#endsyntax#} can be {#link|coerced|Type Coercion#} to any type.
Once this happens, it is no longer possible to detect that the value is {#syntax#}undefined{#endsyntax#}.
{#syntax#}undefined{#endsyntax#} means the value could be anything, even something that is nonsense
according to the type. Translated into English, {#syntax#}undefined{#endsyntax#} means "Not a meaningful
@ -920,7 +919,7 @@ fn divide(a: i32, b: i32) i32 {
{#syntax#}f128{#endsyntax#}.
</p>
<p>
Float literals {#link|implicitly cast|Implicit Casts#} to any floating point type,
Float literals {#link|coerce|Type Coercion#} to any floating point type,
and to any {#link|integer|Integers#} type when there is no fractional component.
</p>
{#code_begin|syntax#}
@ -950,7 +949,7 @@ const nan = std.math.nan(f128);
{#code_begin|obj|foo#}
{#code_release_fast#}
const builtin = @import("builtin");
const big = f64(1 << 40);
const big = @as(f64, 1 << 40);
export fn foo_strict(x: f64) f64 {
return x + big - big;
@ -1652,7 +1651,7 @@ test "iterate over an array" {
for (message) |byte| {
sum += byte;
}
assert(sum == usize('h') + usize('e') + usize('l') * 2 + usize('o'));
assert(sum == 'h' + 'e' + 'l' * 2 + 'o');
}
// modifiable array
@ -1734,6 +1733,43 @@ test "array initialization with function calls" {
{#code_end#}
{#see_also|for|Slices#}
{#header_open|Anonymous List Literals#}
<p>Similar to {#link|Enum Literals#} and {#link|Anonymous Struct Literals#},
the type can be omitted from array literals:</p>
{#code_begin|test|anon_list#}
const std = @import("std");
const assert = std.debug.assert;
test "anonymous list literal syntax" {
var array: [4]u8 = .{11, 22, 33, 44};
assert(array[0] == 11);
assert(array[1] == 22);
assert(array[2] == 33);
assert(array[3] == 44);
}
{#code_end#}
<p>
If there is no type in the result location then an anonymous list literal actually
turns into a {#link|struct#} with numbered field names:
</p>
{#code_begin|test|infer_list_literal#}
const std = @import("std");
const assert = std.debug.assert;
test "fully anonymous list literal" {
dump(.{ @as(u32, 1234), @as(f64, 12.34), true, "hi"});
}
fn dump(args: var) void {
assert(args.@"0" == 1234);
assert(args.@"1" == 12.34);
assert(args.@"2");
assert(args.@"3"[0] == 'h');
assert(args.@"3"[1] == 'i');
}
{#code_end#}
{#header_close#}
{#header_open|Multidimensional Arrays#}
<p>
Multidimensional arrays can be created by nesting arrays:
@ -2003,7 +2039,7 @@ test "variable alignment" {
}
}
{#code_end#}
<p>In the same way that a {#syntax#}*i32{#endsyntax#} can be {#link|implicitly cast|Implicit Casts#} to a
<p>In the same way that a {#syntax#}*i32{#endsyntax#} can be {#link|coerced|Type Coercion#} to a
{#syntax#}*const i32{#endsyntax#}, a pointer with a larger alignment can be implicitly
cast to a pointer with a smaller alignment, but not vice versa.
</p>
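<p>A minimal sketch of the same rule applied to a local variable (the names here are illustrative):</p>
{#code_begin|test#}
test "alignment coercion" {
    var x: u8 align(4) = 100;
    // *align(4) u8 coerces to the less strict *align(1) u8;
    // the reverse direction is a compile error.
    var ptr: *align(1) u8 = &x;
    ptr.* += 1;
}
{#code_end#}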
@ -2019,7 +2055,7 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
assert(@typeOf(&foo) == *align(4) u8);
const slice = (*[1]u8)(&foo)[0..];
const slice = @as(*[1]u8, &foo)[0..];
assert(@typeOf(slice) == []align(4) u8);
}
@ -2114,7 +2150,7 @@ const fmt = @import("std").fmt;
test "using slices for strings" {
// Zig has no concept of strings. String literals are arrays of u8, and
// in general the string type is []u8 (slice of u8).
// Here we implicitly cast [5]u8 to []const u8
// Here we coerce [5]u8 to []const u8
const hello: []const u8 = "hello";
const world: []const u8 = "世界";
@ -2526,7 +2562,8 @@ test "overaligned pointer to packed struct" {
Don't worry, there will be a good solution for this use case in zig.
</p>
{#header_close#}
{#header_open|struct Naming#}
{#header_open|Struct Naming#}
<p>Since all structs are anonymous, Zig infers the type name based on a few rules.</p>
<ul>
<li>If the struct is in the initialization expression of a variable, it gets named after
@ -2552,6 +2589,53 @@ fn List(comptime T: type) type {
}
{#code_end#}
{#header_close#}
{#header_open|Anonymous Struct Literals#}
<p>
Zig allows omitting the struct type of a literal. When the result is {#link|coerced|Type Coercion#},
the struct literal will directly instantiate the result location, with no copy:
</p>
{#code_begin|test|struct_result#}
const std = @import("std");
const assert = std.debug.assert;
const Point = struct {x: i32, y: i32};
test "anonymous struct literal" {
var pt: Point = .{
.x = 13,
.y = 67,
};
assert(pt.x == 13);
assert(pt.y == 67);
}
{#code_end#}
<p>
The struct type can be inferred. Here the result location does not include a type, and
so Zig infers the type:
</p>
{#code_begin|test|struct_anon#}
const std = @import("std");
const assert = std.debug.assert;
test "fully anonymous struct" {
dump(.{
.int = @as(u32, 1234),
.float = @as(f64, 12.34),
.b = true,
.s = "hi",
});
}
fn dump(args: var) void {
assert(args.int == 1234);
assert(args.float == 12.34);
assert(args.b);
assert(args.s[0] == 'h');
assert(args.s[1] == 'i');
}
{#code_end#}
{#header_close#}
{#see_also|comptime|@fieldParentPtr#}
{#header_close#}
{#header_open|enum#}
@ -2778,7 +2862,7 @@ test "simple union" {
This turns the union into a <em>tagged</em> union, which makes it eligible
to use with {#link|switch#} expressions. One can use {#link|@TagType#} to
obtain the enum type from the union type.
Tagged unions implicitly cast to their enum {#link|Implicit Cast: unions and enums#}
Tagged unions coerce to their enum; see {#link|Type Coercion: unions and enums#}.
</p>
{#code_begin|test#}
const std = @import("std");
@ -2795,7 +2879,7 @@ const ComplexType = union(ComplexTypeTag) {
test "switch on tagged union" {
const c = ComplexType{ .Ok = 42 };
assert(ComplexTypeTag(c) == ComplexTypeTag.Ok);
assert(@as(ComplexTypeTag, c) == ComplexTypeTag.Ok);
switch (c) {
ComplexTypeTag.Ok => |value| assert(value == 42),
@ -2807,7 +2891,7 @@ test "@TagType" {
assert(@TagType(ComplexType) == ComplexTypeTag);
}
test "implicit cast to enum" {
test "coerce to enum" {
const c1 = ComplexType{ .Ok = 42 };
const c2 = ComplexType.NotOk;
@ -2833,7 +2917,7 @@ const ComplexType = union(ComplexTypeTag) {
test "modify tagged union in switch" {
var c = ComplexType{ .Ok = 42 };
assert(ComplexTypeTag(c) == ComplexTypeTag.Ok);
assert(@as(ComplexTypeTag, c) == ComplexTypeTag.Ok);
switch (c) {
ComplexTypeTag.Ok => |*value| value.* += 1,
@ -2906,6 +2990,32 @@ test "@tagName" {
<p>A {#syntax#}packed union{#endsyntax#} has well-defined in-memory layout and is eligible
to be in a {#link|packed struct#}.</p>
{#header_close#}
{#header_open|Anonymous Union Literals#}
<p>{#link|Anonymous Struct Literals#} syntax can be used to initialize unions without specifying
the type:</p>
{#code_begin|test|anon_union#}
const std = @import("std");
const assert = std.debug.assert;
const Number = union {
int: i32,
float: f64,
};
test "anonymous union literal syntax" {
var i: Number = .{.int = 42};
var f = makeNumber();
assert(i.int == 42);
assert(f.float == 12.34);
}
fn makeNumber() Number {
return .{.float = 12.34};
}
{#code_end#}
{#header_close#}
{#header_close#}
{#header_open|blocks#}
@ -3943,7 +4053,7 @@ test "fn reflection" {
However right now it is hard coded to be a {#syntax#}u16{#endsyntax#}. See <a href="https://github.com/ziglang/zig/issues/786">#786</a>.
</p>
<p>
You can {#link|implicitly cast|Implicit Casts#} an error from a subset to a superset:
You can {#link|coerce|Type Coercion#} an error from a subset to a superset:
</p>
{#code_begin|test#}
const std = @import("std");
@ -3958,7 +4068,7 @@ const AllocationError = error {
OutOfMemory,
};
test "implicit cast subset to superset" {
test "coerce subset to superset" {
const err = foo(AllocationError.OutOfMemory);
std.debug.assert(err == FileOpenError.OutOfMemory);
}
@ -3968,7 +4078,7 @@ fn foo(err: AllocationError) FileOpenError {
}
{#code_end#}
<p>
But you cannot implicitly cast an error from a superset to a subset:
But you cannot {#link|coerce|Type Coercion#} an error from a superset to a subset:
</p>
{#code_begin|test_err|not a member of destination error set#}
const FileOpenError = error {
@ -3981,7 +4091,7 @@ const AllocationError = error {
OutOfMemory,
};
test "implicit cast superset to subset" {
test "coerce superset to subset" {
foo(FileOpenError.OutOfMemory) catch {};
}
@ -4008,7 +4118,7 @@ const err = (error {FileNotFound}).FileNotFound;
It is a superset of all other error sets and a subset of none of them.
</p>
<p>
You can implicitly cast any error set to the global one, and you can explicitly
You can {#link|coerce|Type Coercion#} any error set to the global one, and you can explicitly
cast an error of the global error set to a non-global one. This inserts a language-level
assert to make sure the error value is in fact in the destination error set.
</p>
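<p>A minimal sketch of both directions; {#syntax#}@errSetCast{#endsyntax#} is assumed here for the explicit, safety-checked direction:</p>
{#code_begin|test#}
const assert = @import("std").debug.assert;

const MyError = error{FileNotFound};

test "global error set round trip" {
    // Coercion to anyerror is always allowed.
    const global: anyerror = MyError.FileNotFound;
    // Casting back is explicit and asserts the value is in the destination set.
    const specific = @errSetCast(MyError, global);
    assert(specific == MyError.FileNotFound);
}
{#code_end#}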
@ -4079,7 +4189,7 @@ test "parse u64" {
<p>
Within the function definition, you can see some return statements that return
an error, and at the bottom a return statement that returns a {#syntax#}u64{#endsyntax#}.
Both types {#link|implicitly cast|Implicit Casts#} to {#syntax#}anyerror!u64{#endsyntax#}.
Both types {#link|coerce|Type Coercion#} to {#syntax#}anyerror!u64{#endsyntax#}.
</p>
<p>
What it looks like to use this function varies depending on what you're
@ -4218,10 +4328,10 @@ const assert = @import("std").debug.assert;
test "error union" {
var foo: anyerror!i32 = undefined;
// Implicitly cast from child type of an error union:
// Coerce from child type of an error union:
foo = 1234;
// Implicitly cast from an error set:
// Coerce from an error set:
foo = error.SomeError;
// Use compile-time reflection to access the payload type of an error union:
@ -4598,10 +4708,10 @@ fn doAThing(optional_foo: ?*Foo) void {
const assert = @import("std").debug.assert;
test "optional type" {
// Declare an optional and implicitly cast from null:
// Declare an optional and coerce from null:
var foo: ?i32 = null;
// Implicitly cast from child type of an optional
// Coerce from child type of an optional
foo = 1234;
// Use compile-time reflection to access the child type of the optional:
@ -4644,38 +4754,38 @@ test "optional pointers" {
{#header_open|Casting#}
<p>
A <strong>type cast</strong> converts a value of one type to another.
Zig has {#link|Implicit Casts#} for conversions that are known to be completely safe and unambiguous,
Zig has {#link|Type Coercion#} for conversions that are known to be completely safe and unambiguous,
and {#link|Explicit Casts#} for conversions that one would not want to happen on accident.
There is also a third kind of type conversion called {#link|Peer Type Resolution#} for
the case when a result type must be decided given multiple operand types.
</p>
{#header_open|Implicit Casts#}
{#header_open|Type Coercion#}
<p>
An implicit cast occurs when one type is expected, but different type is provided:
Type coercion occurs when one type is expected, but a different type is provided:
</p>
{#code_begin|test#}
test "implicit cast - variable declaration" {
test "type coercion - variable declaration" {
var a: u8 = 1;
var b: u16 = a;
}
test "implicit cast - function call" {
test "type coercion - function call" {
var a: u8 = 1;
foo(a);
}
fn foo(b: u16) void {}
test "implicit cast - invoke a type as a function" {
test "type coercion - @as builtin" {
var a: u8 = 1;
var b = u16(a);
var b = @as(u16, a);
}
{#code_end#}
<p>
Implicit casts are only allowed when it is completely unambiguous how to get from one type to another,
Type coercions are only allowed when it is completely unambiguous how to get from one type to another,
and the transformation is guaranteed to be safe. There is one exception, which is {#link|C Pointers#}.
</p>
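<p>For example, a plain {#syntax#}u16{#endsyntax#} value does not coerce to {#syntax#}u8{#endsyntax#}, because values could be lost (a sketch only; the exact compile error text is not shown here):</p>
{#code_begin|syntax#}
test "u16 does not coerce to u8" {
    var a: u16 = 300;
    var b: u8 = a; // compile error: not every u16 value fits in a u8
}
{#code_end#}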
{#header_open|Implicit Cast: Stricter Qualification#}
{#header_open|Type Coercion: Stricter Qualification#}
<p>
Values which have the same representation at runtime can be cast to increase the strictness
of the qualifiers, no matter how nested the qualifiers are:
@ -4690,7 +4800,7 @@ test "implicit cast - invoke a type as a function" {
These casts are no-ops at runtime since the value representation does not change.
</p>
{#code_begin|test#}
test "implicit cast - const qualification" {
test "type coercion - const qualification" {
var a: i32 = 1;
var b: *i32 = &a;
foo(b);
@ -4699,7 +4809,7 @@ test "implicit cast - const qualification" {
fn foo(a: *const i32) void {}
{#code_end#}
<p>
In addition, pointers implicitly cast to const optional pointers:
In addition, pointers coerce to const optional pointers:
</p>
{#code_begin|test#}
const std = @import("std");
@ -4713,10 +4823,10 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: Integer and Float Widening#}
{#header_open|Type Coercion: Integer and Float Widening#}
<p>
{#link|Integers#} implicitly cast to integer types which can represent every value of the old type, and likewise
{#link|Floats#} implicitly cast to float types which can represent every value of the old type.
{#link|Integers#} coerce to integer types which can represent every value of the old type, and likewise
{#link|Floats#} coerce to float types which can represent every value of the old type.
</p>
{#code_begin|test#}
const std = @import("std");
@ -4748,7 +4858,7 @@ test "float widening" {
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: Arrays and Pointers#}
{#header_open|Type Coercion: Arrays and Pointers#}
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
@ -4797,7 +4907,7 @@ test "*[N]T to []T" {
assert(std.mem.eql(f32, x2, [2]f32{ 1.2, 3.4 }));
}
// Single-item pointers to arrays can be implicitly casted to
// Single-item pointers to arrays can be coerced to
// unknown length pointers.
test "*[N]T to [*]T" {
var buf: [5]u8 = "hello";
@ -4823,15 +4933,15 @@ test "*T to *[1]T" {
{#code_end#}
{#see_also|C Pointers#}
{#header_close#}
{#header_open|Implicit Cast: Optionals#}
{#header_open|Type Coercion: Optionals#}
<p>
The payload type of {#link|Optionals#}, as well as {#link|null#}, implicitly cast to the optional type.
The payload type of {#link|Optionals#}, as well as {#link|null#}, coerce to the optional type.
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
test "implicit casting to optionals" {
test "coerce to optionals" {
const x: ?i32 = 1234;
const y: ?i32 = null;
@ -4844,7 +4954,7 @@ test "implicit casting to optionals" {
const std = @import("std");
const assert = std.debug.assert;
test "implicit casting to optionals wrapped in error union" {
test "coerce to optionals wrapped in error union" {
const x: anyerror!?i32 = 1234;
const y: anyerror!?i32 = null;
@ -4853,15 +4963,15 @@ test "implicit casting to optionals wrapped in error union" {
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: Error Unions#}
{#header_open|Type Coercion: Error Unions#}
<p>The payload type of an {#link|Error Union Type#} as well as the {#link|Error Set Type#}
implicitly cast to the error union type:
coerce to the error union type:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
test "implicit casting to error unions" {
test "coercion to error unions" {
const x: anyerror!i32 = 1234;
const y: anyerror!i32 = error.Failure;
@ -4870,23 +4980,23 @@ test "implicit casting to error unions" {
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: Compile-Time Known Numbers#}
{#header_open|Type Coercion: Compile-Time Known Numbers#}
<p>When a number is {#link|comptime#}-known to be representable in the destination type,
it may be implicitly casted:
it may be coerced:
</p>
{#code_begin|test#}
const std = @import("std");
const assert = std.debug.assert;
test "implicit casting large integer type to smaller one when value is comptime known to fit" {
test "coercing large integer type to smaller one when value is comptime known to fit" {
const x: u64 = 255;
const y: u8 = x;
assert(y == 255);
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: unions and enums#}
<p>Tagged unions can be implicitly cast to enums, and enums can be implicitly casted to tagged unions
{#header_open|Type Coercion: unions and enums#}
<p>Tagged unions can be coerced to enums, and enums can be coerced to tagged unions
when they are {#link|comptime#}-known to be a field of the union that has only one possible value, such as
{#link|void#}:
</p>
@ -4906,7 +5016,7 @@ const U = union(E) {
Three,
};
test "implicit casting between unions and enums" {
test "coercion between unions and enums" {
var u = U{ .Two = 12.34 };
var e: E = u;
assert(e == E.Two);
@ -4918,20 +5028,20 @@ test "implicit casting between unions and enums" {
{#code_end#}
{#see_also|union|enum#}
{#header_close#}
{#header_open|Implicit Cast: Zero Bit Types#}
<p>{#link|Zero Bit Types#} may be implicitly casted to single-item {#link|Pointers#},
{#header_open|Type Coercion: Zero Bit Types#}
<p>{#link|Zero Bit Types#} may be coerced to single-item {#link|Pointers#},
regardless of const.</p>
<p>TODO document the reasoning for this</p>
<p>TODO document whether vice versa should work and why</p>
{#code_begin|test#}
test "implicit casting of zero bit types" {
test "coercion of zero bit types" {
var x: void = {};
var y: *void = x;
//var z: void = y; // TODO
}
{#code_end#}
{#header_close#}
{#header_open|Implicit Cast: undefined#}
{#header_open|Type Coercion: undefined#}
<p>{#link|undefined#} can be cast to any type.</p>
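<p>A minimal sketch; the value simply adopts the destination type:</p>
{#code_begin|test#}
test "coerce undefined" {
    // undefined takes on whatever type the context requires.
    var x: i32 = undefined;
    x = 1;
}
{#code_end#}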
{#header_close#}
{#header_close#}
@ -4976,7 +5086,7 @@ test "implicit casting of zero bit types" {
<li>Some {#link|binary operations|Table of Operators#}</li>
</ul>
<p>
This kind of type resolution chooses a type that all peer types can implicitly cast into. Here are
This kind of type resolution chooses a type that all peer types can coerce into. Here are
some examples:
</p>
{#code_begin|test#}
@ -5007,8 +5117,8 @@ test "peer resolve array and const slice" {
comptime testPeerResolveArrayConstSlice(true);
}
fn testPeerResolveArrayConstSlice(b: bool) void {
const value1 = if (b) "aoeu" else ([]const u8)("zz");
const value2 = if (b) ([]const u8)("zz") else "aoeu";
const value1 = if (b) "aoeu" else @as([]const u8, "zz");
const value2 = if (b) @as([]const u8, "zz") else "aoeu";
assert(mem.eql(u8, value1, "aoeu"));
assert(mem.eql(u8, value2, "zz"));
}
@ -5023,10 +5133,10 @@ test "peer type resolution: ?T and T" {
}
fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
if (c) {
return if (b) null else usize(0);
return if (b) null else @as(usize, 0);
}
return usize(3);
return @as(usize, 3);
}
test "peer type resolution: [0]u8 and []const u8" {
@ -5293,7 +5403,7 @@ fn gimmeTheBiggerInteger(a: u64, b: u64) u64 {
<p>
For example, if we were to introduce another function to the above snippet:
</p>
{#code_begin|test_err|cannot store runtime value in type 'type'#}
{#code_begin|test_err|values of type 'type' must be comptime known#}
fn max(comptime T: type, a: T, b: T) T {
return if (a > b) a else b;
}
@ -5815,7 +5925,7 @@ test "printf too many arguments" {
</p>
<p>
Zig doesn't care whether the format argument is a string literal,
only that it is a compile-time known value that is implicitly castable to a {#syntax#}[]const u8{#endsyntax#}:
only that it is a compile-time known value that can be coerced to a {#syntax#}[]const u8{#endsyntax#}:
</p>
{#code_begin|exe|printf#}
const warn = @import("std").debug.warn;
@ -6185,7 +6295,7 @@ fn func() void {
</p>
<p>
{#syntax#}await{#endsyntax#} is a suspend point, and takes as an operand anything that
implicitly casts to {#syntax#}anyframe->T{#endsyntax#}.
coerces to {#syntax#}anyframe->T{#endsyntax#}.
</p>
<p>
There is a common misconception that {#syntax#}await{#endsyntax#} resumes the target function.
@ -6445,6 +6555,14 @@ comptime {
</p>
{#header_close#}
{#header_open|@as#}
<pre>{#syntax#}@as(comptime T: type, expression) T{#endsyntax#}</pre>
<p>
Performs {#link|Type Coercion#}. This cast is allowed when the conversion is unambiguous and safe,
and is the preferred way to convert between types, whenever possible.
</p>
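<p>A small usage sketch:</p>
{#code_begin|test#}
const assert = @import("std").debug.assert;
test "@as" {
    // A comptime_int literal is coerced to u64.
    const x = @as(u64, 1234);
    assert(@typeOf(x) == u64);
    assert(x == 1234);
}
{#code_end#}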
{#header_close#}
{#header_open|@asyncCall#}
<pre>{#syntax#}@asyncCall(frame_buffer: []align(@alignOf(@Frame(anyAsyncFunction))) u8, result_ptr, function_ptr, args: ...) anyframe->T{#endsyntax#}</pre>
<p>
@ -6493,14 +6611,14 @@ async fn func(y: *i32) void {
This builtin function atomically dereferences a pointer and returns the value.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
or an integer whose bit count meets these requirements:
{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
or an integer whose bit count meets these requirements:
</p>
<ul>
<li>At least 8</li>
<li>At most the same as usize</li>
<li>Power of 2</li>
</ul>
</ul>
<p>{#syntax#}T{#endsyntax#} may also be an enum with a valid integer tag type.</p>
<p>
TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
we can remove this restriction
@ -6541,6 +6659,25 @@ async fn func(y: *i32) void {
<li>{#syntax#}.Min{#endsyntax#} - stores the operand if it is smaller. Supports integers and floats.</li>
</ul>
{#header_close#}
{#header_open|@atomicStore#}
<pre>{#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}</pre>
<p>
This builtin function atomically stores a value.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
or an integer whose bit count meets these requirements:
</p>
<ul>
<li>At least 8</li>
<li>At most the same as usize</li>
<li>Power of 2</li>
</ul>
<p>{#syntax#}T{#endsyntax#} may also be an enum with a valid integer tag type.</p>
<p>
TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
we can remove this restriction
</p>
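<p>A minimal usage sketch with an illustrative {#syntax#}u8{#endsyntax#} flag:</p>
{#code_begin|test#}
const std = @import("std");
const builtin = @import("builtin");
test "@atomicStore" {
    var flag: u8 = 0;
    @atomicStore(u8, &flag, 1, builtin.AtomicOrder.SeqCst);
    std.debug.assert(flag == 1);
}
{#code_end#}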
{#header_close#}
{#header_open|@bitCast#}
<pre>{#syntax#}@bitCast(comptime DestType: type, value: var) DestType{#endsyntax#}</pre>
<p>
@ -7108,7 +7245,7 @@ test "field access by string" {
<pre>{#syntax#}@frame() *@Frame(func){#endsyntax#}</pre>
<p>
This function returns a pointer to the frame for a given function. This type
can be {#link|implicitly cast|Implicit Casts#} to {#syntax#}anyframe->T{#endsyntax#} and
can be {#link|coerced|Type Coercion#} to {#syntax#}anyframe->T{#endsyntax#} and
to {#syntax#}anyframe{#endsyntax#}, where {#syntax#}T{#endsyntax#} is the return type
of the function in scope.
</p>
@ -7827,7 +7964,7 @@ test "vector @splat" {
const scalar: u32 = 5;
const result = @splat(4, scalar);
comptime assert(@typeOf(result) == @Vector(4, u32));
assert(std.mem.eql(u32, ([4]u32)(result), [_]u32{ 5, 5, 5, 5 }));
assert(std.mem.eql(u32, @as([4]u32, result), [_]u32{ 5, 5, 5, 5 }));
}
{#code_end#}
<p>
@ -8025,7 +8162,7 @@ test "integer truncation" {
</p>
<p>
If {#syntax#}T{#endsyntax#} is {#syntax#}comptime_int{#endsyntax#},
then this is semantically equivalent to an {#link|implicit cast|Implicit Casts#}.
then this is semantically equivalent to {#link|Type Coercion#}.
</p>
{#header_close#}
@ -8529,7 +8666,7 @@ pub fn main() void {
{#header_close#}
{#header_open|Cast Truncates Data#}
<p>At compile-time:</p>
{#code_begin|test_err|integer value 300 cannot be implicitly casted to type 'u8'#}
{#code_begin|test_err|integer value 300 cannot be coerced to type 'u8'#}
comptime {
const spartan_count: u16 = 300;
const byte = @intCast(u8, spartan_count);
@ -8665,7 +8802,7 @@ test "wraparound addition and subtraction" {
<p>At compile-time:</p>
{#code_begin|test_err|operation caused overflow#}
comptime {
const x = @shlExact(u8(0b01010101), 2);
const x = @shlExact(@as(u8, 0b01010101), 2);
}
{#code_end#}
<p>At runtime:</p>
@ -8683,7 +8820,7 @@ pub fn main() void {
<p>At compile-time:</p>
{#code_begin|test_err|exact shift shifted out 1 bits#}
comptime {
const x = @shrExact(u8(0b10101010), 2);
const x = @shrExact(@as(u8, 0b10101010), 2);
}
{#code_end#}
<p>At runtime:</p>
@ -9535,8 +9672,8 @@ const c = @cImport({
<p>{#syntax#}[*c]T{#endsyntax#} - C pointer.</p>
<ul>
<li>Supports all the syntax of the other two pointer types.</li>
<li>Implicitly casts to other pointer types, as well as {#link|Optional Pointers#}.
When a C pointer is implicitly casted to a non-optional pointer, safety-checked
<li>Coerces to other pointer types, as well as {#link|Optional Pointers#}.
When a C pointer is coerced to a non-optional pointer, safety-checked
{#link|Undefined Behavior#} occurs if the address is 0.
</li>
<li>Allows address 0. On non-freestanding targets, dereferencing address 0 is safety-checked
@ -9544,7 +9681,7 @@ const c = @cImport({
null, just like {#syntax#}?usize{#endsyntax#}. Note that creating an optional C pointer
is unnecessary as one can use normal {#link|Optional Pointers#}.
</li>
<li>Supports {#link|implicit casting|Implicit Casts#} to and from integers.</li>
<li>Supports {#link|Type Coercion#} to and from integers.</li>
<li>Supports comparison with integers.</li>
<li>Does not support Zig-only pointer attributes such as alignment. Use normal {#link|Pointers#}
please!</li>

View File

@ -344,18 +344,18 @@ test "std.ArrayList.orderedRemove" {
try list.append(7);
//remove from middle
testing.expectEqual(i32(4), list.orderedRemove(3));
testing.expectEqual(i32(5), list.at(3));
testing.expectEqual(usize(6), list.len);
testing.expectEqual(@as(i32, 4), list.orderedRemove(3));
testing.expectEqual(@as(i32, 5), list.at(3));
testing.expectEqual(@as(usize, 6), list.len);
//remove from end
testing.expectEqual(i32(7), list.orderedRemove(5));
testing.expectEqual(usize(5), list.len);
testing.expectEqual(@as(i32, 7), list.orderedRemove(5));
testing.expectEqual(@as(usize, 5), list.len);
//remove from front
testing.expectEqual(i32(1), list.orderedRemove(0));
testing.expectEqual(i32(2), list.at(0));
testing.expectEqual(usize(4), list.len);
testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
testing.expectEqual(@as(i32, 2), list.at(0));
testing.expectEqual(@as(usize, 4), list.len);
}
test "std.ArrayList.swapRemove" {

View File

@ -129,26 +129,26 @@ const combinedTable = init: {
comptime var i = 0;
inline while (i < 128) : (i += 1) {
table[i] =
u8(alpha[i]) << @enumToInt(tIndex.Alpha) |
u8(hex[i]) << @enumToInt(tIndex.Hex) |
u8(space[i]) << @enumToInt(tIndex.Space) |
u8(digit[i]) << @enumToInt(tIndex.Digit) |
u8(lower[i]) << @enumToInt(tIndex.Lower) |
u8(upper[i]) << @enumToInt(tIndex.Upper) |
u8(punct[i]) << @enumToInt(tIndex.Punct) |
u8(graph[i]) << @enumToInt(tIndex.Graph);
@as(u8, alpha[i]) << @enumToInt(tIndex.Alpha) |
@as(u8, hex[i]) << @enumToInt(tIndex.Hex) |
@as(u8, space[i]) << @enumToInt(tIndex.Space) |
@as(u8, digit[i]) << @enumToInt(tIndex.Digit) |
@as(u8, lower[i]) << @enumToInt(tIndex.Lower) |
@as(u8, upper[i]) << @enumToInt(tIndex.Upper) |
@as(u8, punct[i]) << @enumToInt(tIndex.Punct) |
@as(u8, graph[i]) << @enumToInt(tIndex.Graph);
}
mem.set(u8, table[128..256], 0);
break :init table;
};
fn inTable(c: u8, t: tIndex) bool {
return (combinedTable[c] & (u8(1) << @enumToInt(t))) != 0;
return (combinedTable[c] & (@as(u8, 1) << @enumToInt(t))) != 0;
}
pub fn isAlNum(c: u8) bool {
return (combinedTable[c] & ((u8(1) << @enumToInt(tIndex.Alpha)) |
u8(1) << @enumToInt(tIndex.Digit))) != 0;
return (combinedTable[c] & ((@as(u8, 1) << @enumToInt(tIndex.Alpha)) |
@as(u8, 1) << @enumToInt(tIndex.Digit))) != 0;
}
pub fn isAlpha(c: u8) bool {

View File

@ -199,7 +199,7 @@ test "std.atomic.Queue" {
for (putters) |t|
t.wait();
_ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
for (getters) |t|
t.wait();
@ -214,8 +214,8 @@ test "std.atomic.Queue" {
std.debug.panic(
"failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
context.get_count,
u32(puts_per_thread),
u32(put_thread_count),
@as(u32, puts_per_thread),
@as(u32, put_thread_count),
);
}
}

View File

@ -11,7 +11,7 @@ pub fn Stack(comptime T: type) type {
root: ?*Node,
lock: @typeOf(lock_init),
const lock_init = if (builtin.single_threaded) {} else u8(0);
const lock_init = if (builtin.single_threaded) {} else @as(u8, 0);
pub const Self = @This();
@ -128,7 +128,7 @@ test "std.atomic.stack" {
for (putters) |t|
t.wait();
_ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
for (getters) |t|
t.wait();
}
@ -141,8 +141,8 @@ test "std.atomic.stack" {
std.debug.panic(
"failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
context.get_count,
u32(puts_per_thread),
u32(put_thread_count),
@as(u32, puts_per_thread),
@as(u32, put_thread_count),
);
}
}

View File

@ -28,7 +28,7 @@ pub fn BloomFilter(
assert(n_items > 0);
assert(math.isPowerOfTwo(n_items));
assert(K > 0);
const cellEmpty = if (Cell == bool) false else Cell(0);
const cellEmpty = if (Cell == bool) false else @as(Cell, 0);
const cellMax = if (Cell == bool) true else math.maxInt(Cell);
const n_bytes = (n_items * comptime std.meta.bitCount(Cell)) / 8;
assert(n_bytes > 0);
@ -137,7 +137,7 @@ pub fn BloomFilter(
var i: usize = 0;
while (i < n_items) : (i += 1) {
const cell = self.getCell(@intCast(Index, i));
n += if (if (Cell == bool) cell else cell > 0) Index(1) else Index(0);
n += if (if (Cell == bool) cell else cell > 0) @as(Index, 1) else @as(Index, 0);
}
}
return n;
@ -161,7 +161,7 @@ fn hashFunc(out: []u8, Ki: usize, in: []const u8) void {
test "std.BloomFilter" {
inline for ([_]type{ bool, u1, u2, u3, u4 }) |Cell| {
const emptyCell = if (Cell == bool) false else Cell(0);
const emptyCell = if (Cell == bool) false else @as(Cell, 0);
const BF = BloomFilter(128 * 8, 8, Cell, builtin.endian, hashFunc);
var bf = BF{};
var i: usize = undefined;
@ -170,8 +170,8 @@ test "std.BloomFilter" {
while (i < BF.items) : (i += 1) {
testing.expectEqual(emptyCell, bf.getCell(@intCast(BF.Index, i)));
}
testing.expectEqual(BF.Index(0), bf.popCount());
testing.expectEqual(f64(0), bf.estimateItems());
testing.expectEqual(@as(BF.Index, 0), bf.popCount());
testing.expectEqual(@as(f64, 0), bf.estimateItems());
// fill in a few items
bf.incrementCell(42);
bf.incrementCell(255);
@ -196,8 +196,8 @@ test "std.BloomFilter" {
while (i < BF.items) : (i += 1) {
testing.expectEqual(emptyCell, bf.getCell(@intCast(BF.Index, i)));
}
testing.expectEqual(BF.Index(0), bf.popCount());
testing.expectEqual(f64(0), bf.estimateItems());
testing.expectEqual(@as(BF.Index, 0), bf.popCount());
testing.expectEqual(@as(f64, 0), bf.estimateItems());
// Lets add a string
bf.add("foo");
@ -218,8 +218,8 @@ test "std.BloomFilter" {
while (i < BF.items) : (i += 1) {
testing.expectEqual(emptyCell, bf.getCell(@intCast(BF.Index, i)));
}
testing.expectEqual(BF.Index(0), bf.popCount());
testing.expectEqual(f64(0), bf.estimateItems());
testing.expectEqual(@as(BF.Index, 0), bf.popCount());
testing.expectEqual(@as(f64, 0), bf.estimateItems());
comptime var teststrings = [_][]const u8{
"foo",
@ -246,12 +246,12 @@ test "std.BloomFilter" {
inline for (teststrings) |str| {
testing.expectEqual(true, larger_bf.contains(str));
}
testing.expectEqual(u12(bf.popCount()) * (4096 / 1024), larger_bf.popCount());
testing.expectEqual(@as(u12, bf.popCount()) * (4096 / 1024), larger_bf.popCount());
const smaller_bf = bf.resize(64);
inline for (teststrings) |str| {
testing.expectEqual(true, smaller_bf.contains(str));
}
testing.expect(bf.popCount() <= u10(smaller_bf.popCount()) * (1024 / 64));
testing.expect(bf.popCount() <= @as(u10, smaller_bf.popCount()) * (1024 / 64));
}
}

View File

@ -53,7 +53,7 @@ pub const Builder = struct {
release_mode: ?builtin.Mode,
is_release: bool,
override_lib_dir: ?[]const u8,
vcpkg_root: VcpkgRoot,
pkg_config_pkg_list: ?(PkgConfigError![]const PkgConfigPkg) = null,
const PkgConfigError = error{
@ -159,6 +159,7 @@ pub const Builder = struct {
.is_release = false,
.override_lib_dir = null,
.install_path = undefined,
.vcpkg_root = VcpkgRoot{ .Unattempted = {} },
};
try self.top_level_steps.append(&self.install_tls);
try self.top_level_steps.append(&self.uninstall_tls);
@ -957,6 +958,7 @@ pub const Builder = struct {
error.ProcessTerminated => error.PkgConfigCrashed,
error.ExitCodeFailure => error.PkgConfigFailed,
error.FileNotFound => error.PkgConfigNotInstalled,
error.InvalidName => error.PkgConfigNotInstalled,
error.PkgConfigInvalidOutput => error.PkgConfigInvalidOutput,
else => return err,
};
@ -1046,6 +1048,7 @@ pub const LibExeObjStep = struct {
output_dir: ?[]const u8,
need_system_paths: bool,
is_linking_libc: bool = false,
vcpkg_bin_path: ?[]const u8 = null,
installed_path: ?[]const u8,
install_step: ?*InstallArtifactStep,
@ -1264,6 +1267,11 @@ pub const LibExeObjStep = struct {
// option is supplied.
const run_step = RunStep.create(exe.builder, exe.builder.fmt("run {}", exe.step.name));
run_step.addArtifactArg(exe);
if (exe.vcpkg_bin_path) |path| {
run_step.addPathDir(path);
}
return run_step;
}
@ -1569,6 +1577,43 @@ pub const LibExeObjStep = struct {
}) catch unreachable;
}
/// If Vcpkg was found on the system, it will be added to include and lib
/// paths for the specified target.
pub fn addVcpkgPaths(self: *LibExeObjStep, linkage: VcpkgLinkage) !void {
// Ideally in the Unattempted case we would call the function recursively
// after findVcpkgRoot and have only one switch statement, but the compiler
// cannot resolve the error set.
switch (self.builder.vcpkg_root) {
.Unattempted => {
self.builder.vcpkg_root = if (try findVcpkgRoot(self.builder.allocator)) |root|
VcpkgRoot{ .Found = root }
else
.NotFound;
},
.NotFound => return error.VcpkgNotFound,
.Found => {},
}
switch (self.builder.vcpkg_root) {
.Unattempted => unreachable,
.NotFound => return error.VcpkgNotFound,
.Found => |root| {
const allocator = self.builder.allocator;
const triplet = try Target.vcpkgTriplet(allocator, self.target, linkage);
defer self.builder.allocator.free(triplet);
const include_path = try fs.path.join(allocator, [_][]const u8{ root, "installed", triplet, "include" });
errdefer allocator.free(include_path);
try self.include_dirs.append(IncludeDir{ .RawPath = include_path });
const lib_path = try fs.path.join(allocator, [_][]const u8{ root, "installed", triplet, "lib" });
try self.lib_paths.append(lib_path);
self.vcpkg_bin_path = try fs.path.join(allocator, [_][]const u8{ root, "installed", triplet, "bin" });
},
}
}
pub fn setExecCmd(self: *LibExeObjStep, args: []const ?[]const u8) void {
assert(self.kind == Kind.Test);
self.exec_cmd_args = args;
@ -2341,6 +2386,42 @@ fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_maj
};
}
/// Returned slice must be freed by the caller.
fn findVcpkgRoot(allocator: *Allocator) !?[]const u8 {
const appdata_path = try fs.getAppDataDir(allocator, "vcpkg");
defer allocator.free(appdata_path);
const path_file = try fs.path.join(allocator, [_][]const u8{ appdata_path, "vcpkg.path.txt" });
defer allocator.free(path_file);
const file = fs.File.openRead(path_file) catch return null;
defer file.close();
const size = @intCast(usize, try file.getEndPos());
const vcpkg_path = try allocator.alloc(u8, size);
const size_read = try file.read(vcpkg_path);
std.debug.assert(size == size_read);
return vcpkg_path;
}
const VcpkgRoot = union(VcpkgRootStatus) {
Unattempted: void,
NotFound: void,
Found: []const u8,
};
const VcpkgRootStatus = enum {
Unattempted,
NotFound,
Found,
};
pub const VcpkgLinkage = enum {
Static,
Dynamic,
};
pub const InstallDir = enum {
Prefix,
Lib,

View File

@ -90,40 +90,11 @@ pub const Mode = enum {
ReleaseSmall,
};
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const TypeId = enum {
Type,
Void,
Bool,
NoReturn,
Int,
Float,
Pointer,
Array,
Struct,
ComptimeFloat,
ComptimeInt,
Undefined,
Null,
Optional,
ErrorUnion,
ErrorSet,
Enum,
Union,
Fn,
BoundFn,
ArgTuple,
Opaque,
Frame,
AnyFrame,
Vector,
EnumLiteral,
};
pub const TypeId = @TagType(TypeInfo);
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const TypeInfo = union(TypeId) {
pub const TypeInfo = union(enum) {
Type: void,
Void: void,
Bool: void,

View File

@ -8,9 +8,16 @@ pub usingnamespace switch (builtin.os) {
.linux => @import("c/linux.zig"),
.windows => @import("c/windows.zig"),
.macosx, .ios, .tvos, .watchos => @import("c/darwin.zig"),
.freebsd => @import("c/freebsd.zig"),
.freebsd, .kfreebsd => @import("c/freebsd.zig"),
.netbsd => @import("c/netbsd.zig"),
.dragonfly => @import("c/dragonfly.zig"),
.openbsd => @import("c/openbsd.zig"),
.haiku => @import("c/haiku.zig"),
.hermit => @import("c/hermit.zig"),
.solaris => @import("c/solaris.zig"),
.fuchsia => @import("c/fuchsia.zig"),
.minix => @import("c/minix.zig"),
.emscripten => @import("c/emscripten.zig"),
else => struct {},
};
@ -203,3 +210,18 @@ pub extern "c" fn dn_expand(
exp_dn: [*]u8,
length: c_int,
) c_int;
pub extern "c" fn sched_yield() c_int;
pub const PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{};
pub extern "c" fn pthread_mutex_lock(mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_mutex_unlock(mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_mutex_destroy(mutex: *pthread_mutex_t) c_int;
pub const PTHREAD_COND_INITIALIZER = pthread_cond_t{};
pub extern "c" fn pthread_cond_wait(noalias cond: *pthread_cond_t, noalias mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_cond_signal(cond: *pthread_cond_t) c_int;
pub extern "c" fn pthread_cond_destroy(cond: *pthread_cond_t) c_int;
pub const pthread_t = *@OpaqueType();
pub const FILE = @OpaqueType();

View File

@ -53,7 +53,7 @@ pub extern "c" fn host_get_clock_service(host: host_t, clock_id: clock_id_t, clo
pub extern "c" fn mach_port_deallocate(task: ipc_space_t, name: mach_port_name_t) kern_return_t;
pub fn sigaddset(set: *sigset_t, signo: u5) void {
set.* |= u32(1) << (signo - 1);
set.* |= @as(u32, 1) << (signo - 1);
}
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
@ -112,3 +112,19 @@ pub const EAI_PROTOCOL = 13;
/// argument buffer overflow
pub const EAI_OVERFLOW = 14;
pub const EAI_MAX = 15;
pub const pthread_mutex_t = extern struct {
__sig: c_long = 0x32AAABA7,
__opaque: [__PTHREAD_MUTEX_SIZE__]u8 = [_]u8{0} ** __PTHREAD_MUTEX_SIZE__,
};
pub const pthread_cond_t = extern struct {
__sig: c_long = 0x3CB0B1BB,
__opaque: [__PTHREAD_COND_SIZE__]u8 = [_]u8{0} ** __PTHREAD_COND_SIZE__,
};
const __PTHREAD_MUTEX_SIZE__ = if (@sizeOf(usize) == 8) 56 else 40;
const __PTHREAD_COND_SIZE__ = if (@sizeOf(usize) == 8) 40 else 24;
pub const pthread_attr_t = extern struct {
__sig: c_long,
__opaque: [56]u8,
};

View File

@ -1,6 +1,5 @@
const std = @import("../std.zig");
usingnamespace std.c;
extern "c" threadlocal var errno: c_int;
pub fn _errno() *c_int {
return &errno;
@ -12,3 +11,15 @@ pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize
pub const dl_iterate_phdr_callback = extern fn (info: *dl_phdr_info, size: usize, data: ?*c_void) c_int;
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
pub const pthread_mutex_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_attr_t = extern struct { // copied from freebsd
__size: [56]u8,
__align: c_long,
};

8
lib/std/c/emscripten.zig Normal file
View File

@ -0,0 +1,8 @@
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(4) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = 28;

View File

@ -10,3 +10,15 @@ pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize
pub const dl_iterate_phdr_callback = extern fn (info: *dl_phdr_info, size: usize, data: ?*c_void) c_int;
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
pub const pthread_mutex_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
};

8
lib/std/c/fuchsia.zig Normal file
View File

@ -0,0 +1,8 @@
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = 40;

14
lib/std/c/haiku.zig Normal file
View File

@ -0,0 +1,14 @@
pub const pthread_mutex_t = extern struct {
flags: u32 = 0,
lock: i32 = 0,
unused: i32 = -42,
owner: i32 = -1,
owner_count: i32 = 0,
};
pub const pthread_cond_t = extern struct {
flags: u32 = 0,
unused: i32 = -42,
mutex: ?*c_void = null,
waiter_count: i32 = 0,
lock: i32 = 0,
};

6
lib/std/c/hermit.zig Normal file
View File

@ -0,0 +1,6 @@
pub const pthread_mutex_t = extern struct {
inner: usize = ~usize(0),
};
pub const pthread_cond_t = extern struct {
inner: usize = ~usize(0),
};

View File

@ -75,3 +75,26 @@ pub const dl_iterate_phdr_callback = extern fn (info: *dl_phdr_info, size: usize
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
};
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = if (builtin.os == .fuchsia) 40 else switch (builtin.abi) {
.musl, .musleabi, .musleabihf => if (@sizeOf(usize) == 8) 40 else 24,
.gnu, .gnuabin32, .gnuabi64, .gnueabi, .gnueabihf, .gnux32 => switch (builtin.arch) {
.aarch64 => 48,
.x86_64 => if (builtin.abi == .gnux32) 40 else 32,
.mips64, .powerpc64, .powerpc64le, .sparcv9 => 40,
else => if (@sizeOf(usize) == 8) 40 else 24,
},
else => unreachable,
};

18
lib/std/c/minix.zig Normal file
View File

@ -0,0 +1,18 @@
const builtin = @import("builtin");
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = switch (builtin.abi) {
.musl, .musleabi, .musleabihf => if (@sizeOf(usize) == 8) 40 else 24,
.gnu, .gnuabin32, .gnuabi64, .gnueabi, .gnueabihf, .gnux32 => switch (builtin.arch) {
.aarch64 => 48,
.x86_64 => if (builtin.abi == .gnux32) 40 else 32,
.mips64, .powerpc64, .powerpc64le, .sparcv9 => 40,
else => if (@sizeOf(usize) == 8) 40 else 24,
},
else => unreachable,
};

View File

@ -6,3 +6,32 @@ pub const _errno = __errno;
pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
pub const pthread_mutex_t = extern struct {
ptm_magic: c_uint = 0x33330003,
ptm_errorcheck: padded_spin_t = 0,
ptm_unused: padded_spin_t = 0,
ptm_owner: usize = 0,
ptm_waiters: ?*u8 = null,
ptm_recursed: c_uint = 0,
ptm_spare2: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
ptc_magic: c_uint = 0x55550005,
ptc_lock: pthread_spin_t = 0,
ptc_waiters_first: ?*u8 = null,
ptc_waiters_last: ?*u8 = null,
ptc_mutex: ?*pthread_mutex_t = null,
ptc_private: ?*c_void = null,
};
const pthread_spin_t = if (builtin.arch == .arm or builtin.arch == .powerpc) c_int else u8;
const padded_spin_t = switch (builtin.arch) {
.sparc, .sparcel, .sparcv9, .i386, .x86_64, .le64 => u32,
else => pthread_spin_t,
};
pub const pthread_attr_t = extern struct {
pta_magic: u32,
pta_flags: c_int,
pta_private: *c_void,
};

6
lib/std/c/openbsd.zig Normal file
View File

@ -0,0 +1,6 @@
pub const pthread_mutex_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
inner: ?*c_void = null,
};

15
lib/std/c/solaris.zig Normal file
View File

@ -0,0 +1,15 @@
pub const pthread_mutex_t = extern struct {
__pthread_mutex_flag1: u16 = 0,
__pthread_mutex_flag2: u8 = 0,
__pthread_mutex_ceiling: u8 = 0,
__pthread_mutex_type: u16 = 0,
__pthread_mutex_magic: u16 = 0x4d58,
__pthread_mutex_lock: u64 = 0,
__pthread_mutex_data: u64 = 0,
};
pub const pthread_cond_t = extern struct {
__pthread_cond_flag: u32 = 0,
__pthread_cond_type: u16 = 0,
__pthread_cond_magic: u16 = 0x4356,
__pthread_cond_data: u64 = 0,
};

View File

@ -50,8 +50,20 @@ pub const ChildProcess = struct {
err_pipe: if (builtin.os == .windows) void else [2]os.fd_t,
llnode: if (builtin.os == .windows) void else TailQueue(*ChildProcess).Node,
pub const SpawnError = error{OutOfMemory} || os.ExecveError || os.SetIdError ||
os.ChangeCurDirError || windows.CreateProcessError;
pub const SpawnError = error{
OutOfMemory,
/// POSIX-only. `StdIo.Ignore` was selected and opening `/dev/null` returned ENODEV.
NoDevice,
/// Windows-only. One of:
/// * `cwd` was provided and it could not be re-encoded into UTF16LE, or
/// * The `PATH` or `PATHEXT` environment variable contained invalid UTF-8.
InvalidUtf8,
/// Windows-only. `cwd` was provided, but the path did not exist when spawning the child process.
CurrentWorkingDirectoryUnlinked,
} || os.ExecveError || os.SetIdError || os.ChangeCurDirError || windows.CreateProcessError || windows.WaitForSingleObjectError;
pub const Term = union(enum) {
Exited: u32,
@ -102,7 +114,7 @@ pub const ChildProcess = struct {
}
/// On success must call `kill` or `wait`.
pub fn spawn(self: *ChildProcess) !void {
pub fn spawn(self: *ChildProcess) SpawnError!void {
if (builtin.os == .windows) {
return self.spawnWindows();
} else {
@ -110,7 +122,7 @@ pub const ChildProcess = struct {
}
}
pub fn spawnAndWait(self: *ChildProcess) !Term {
pub fn spawnAndWait(self: *ChildProcess) SpawnError!Term {
try self.spawn();
return self.wait();
}
@ -162,7 +174,13 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
pub fn exec(allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult {
pub fn exec(
allocator: *mem.Allocator,
argv: []const []const u8,
cwd: ?[]const u8,
env_map: ?*const BufMap,
max_output_size: usize,
) !ExecResult {
const child = try ChildProcess.init(argv, allocator);
defer child.deinit();
@ -219,7 +237,7 @@ pub const ChildProcess = struct {
fn waitUnwrappedWindows(self: *ChildProcess) !void {
const result = windows.WaitForSingleObject(self.handle, windows.INFINITE);
self.term = (SpawnError!Term)(x: {
self.term = @as(SpawnError!Term, x: {
var exit_code: windows.DWORD = undefined;
if (windows.kernel32.GetExitCodeProcess(self.handle, &exit_code) == 0) {
break :x Term{ .Unknown = 0 };
@ -292,7 +310,7 @@ pub const ChildProcess = struct {
Term{ .Unknown = status };
}
fn spawnPosix(self: *ChildProcess) !void {
fn spawnPosix(self: *ChildProcess) SpawnError!void {
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try os.pipe() else undefined;
errdefer if (self.stdin_behavior == StdIo.Pipe) {
destroyPipe(stdin_pipe);
@ -309,7 +327,16 @@ pub const ChildProcess = struct {
};
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const dev_null_fd = if (any_ignore) try os.openC(c"/dev/null", os.O_RDWR, 0) else undefined;
const dev_null_fd = if (any_ignore)
os.openC(c"/dev/null", os.O_RDWR, 0) catch |err| switch (err) {
error.PathAlreadyExists => unreachable,
error.NoSpaceLeft => unreachable,
error.FileTooBig => unreachable,
error.DeviceBusy => unreachable,
else => |e| return e,
}
else
undefined;
defer {
if (any_ignore) os.close(dev_null_fd);
}
@ -403,7 +430,7 @@ pub const ChildProcess = struct {
}
}
fn spawnWindows(self: *ChildProcess) !void {
fn spawnWindows(self: *ChildProcess) SpawnError!void {
const saAttr = windows.SECURITY_ATTRIBUTES{
.nLength = @sizeOf(windows.SECURITY_ATTRIBUTES),
.bInheritHandle = windows.TRUE,
@ -412,11 +439,28 @@ pub const ChildProcess = struct {
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const nul_handle = if (any_ignore) blk: {
break :blk try windows.CreateFile("NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ, null, windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL, null);
} else blk: {
break :blk undefined;
};
const nul_handle = if (any_ignore)
windows.CreateFile(
"NUL",
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
null,
windows.OPEN_EXISTING,
windows.FILE_ATTRIBUTE_NORMAL,
null,
) catch |err| switch (err) {
error.SharingViolation => unreachable, // not possible for "NUL"
error.PathAlreadyExists => unreachable, // not possible for "NUL"
error.PipeBusy => unreachable, // not possible for "NUL"
error.InvalidUtf8 => unreachable, // not possible for "NUL"
error.BadPathName => unreachable, // not possible for "NUL"
error.FileNotFound => unreachable, // not possible for "NUL"
error.AccessDenied => unreachable, // not possible for "NUL"
error.NameTooLong => unreachable, // not possible for "NUL"
else => |e| return e,
}
else
undefined;
defer {
if (any_ignore) os.close(nul_handle);
}
@ -542,10 +586,25 @@ pub const ChildProcess = struct {
windowsCreateProcess(app_name_w.ptr, cmd_line_w.ptr, envp_ptr, cwd_w_ptr, &siStartInfo, &piProcInfo) catch |no_path_err| {
if (no_path_err != error.FileNotFound) return no_path_err;
const PATH = try process.getEnvVarOwned(self.allocator, "PATH");
defer self.allocator.free(PATH);
const PATHEXT = try process.getEnvVarOwned(self.allocator, "PATHEXT");
defer self.allocator.free(PATHEXT);
var free_path = true;
const PATH = process.getEnvVarOwned(self.allocator, "PATH") catch |err| switch (err) {
error.EnvironmentVariableNotFound => blk: {
free_path = false;
break :blk "";
},
else => |e| return e,
};
defer if (free_path) self.allocator.free(PATH);
var free_path_ext = true;
const PATHEXT = process.getEnvVarOwned(self.allocator, "PATHEXT") catch |err| switch (err) {
error.EnvironmentVariableNotFound => blk: {
free_path_ext = false;
break :blk "";
},
else => |e| return e,
};
defer if (free_path_ext) self.allocator.free(PATHEXT);
var it = mem.tokenize(PATH, ";");
retry: while (it.next()) |search_path| {
@ -717,7 +776,7 @@ fn destroyPipe(pipe: [2]os.fd_t) void {
// Child of fork calls this to report an error to the fork parent.
// Then the child exits.
fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
writeIntFd(fd, ErrInt(@errorToInt(err))) catch {};
writeIntFd(fd, @as(ErrInt, @errorToInt(err))) catch {};
os.exit(1);
}

View File

@ -179,7 +179,7 @@ pub const Coff = struct {
if (byte != 0 and i == buffer.len)
return error.NameTooLong;
return i;
return @as(usize, i);
}
pub fn loadSections(self: *Coff) !void {

View File

@ -6,7 +6,7 @@ const testing = std.testing;
// Apply sbox0 to each byte in w.
fn subw(w: u32) u32 {
return u32(sbox0[w >> 24]) << 24 | u32(sbox0[w >> 16 & 0xff]) << 16 | u32(sbox0[w >> 8 & 0xff]) << 8 | u32(sbox0[w & 0xff]);
return @as(u32, sbox0[w >> 24]) << 24 | @as(u32, sbox0[w >> 16 & 0xff]) << 16 | @as(u32, sbox0[w >> 8 & 0xff]) << 8 | @as(u32, sbox0[w & 0xff]);
}
fn rotw(w: u32) u32 {
@ -48,10 +48,10 @@ fn encryptBlock(xk: []const u32, dst: []u8, src: []const u8) void {
}
// Last round uses s-box directly and XORs to produce output.
s0 = u32(sbox0[t0 >> 24]) << 24 | u32(sbox0[t1 >> 16 & 0xff]) << 16 | u32(sbox0[t2 >> 8 & 0xff]) << 8 | u32(sbox0[t3 & 0xff]);
s1 = u32(sbox0[t1 >> 24]) << 24 | u32(sbox0[t2 >> 16 & 0xff]) << 16 | u32(sbox0[t3 >> 8 & 0xff]) << 8 | u32(sbox0[t0 & 0xff]);
s2 = u32(sbox0[t2 >> 24]) << 24 | u32(sbox0[t3 >> 16 & 0xff]) << 16 | u32(sbox0[t0 >> 8 & 0xff]) << 8 | u32(sbox0[t1 & 0xff]);
s3 = u32(sbox0[t3 >> 24]) << 24 | u32(sbox0[t0 >> 16 & 0xff]) << 16 | u32(sbox0[t1 >> 8 & 0xff]) << 8 | u32(sbox0[t2 & 0xff]);
s0 = @as(u32, sbox0[t0 >> 24]) << 24 | @as(u32, sbox0[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t3 & 0xff]);
s1 = @as(u32, sbox0[t1 >> 24]) << 24 | @as(u32, sbox0[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t0 & 0xff]);
s2 = @as(u32, sbox0[t2 >> 24]) << 24 | @as(u32, sbox0[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t1 & 0xff]);
s3 = @as(u32, sbox0[t3 >> 24]) << 24 | @as(u32, sbox0[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox0[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox0[t2 & 0xff]);
s0 ^= xk[k + 0];
s1 ^= xk[k + 1];
@ -99,10 +99,10 @@ pub fn decryptBlock(xk: []const u32, dst: []u8, src: []const u8) void {
}
// Last round uses s-box directly and XORs to produce output.
s0 = u32(sbox1[t0 >> 24]) << 24 | u32(sbox1[t3 >> 16 & 0xff]) << 16 | u32(sbox1[t2 >> 8 & 0xff]) << 8 | u32(sbox1[t1 & 0xff]);
s1 = u32(sbox1[t1 >> 24]) << 24 | u32(sbox1[t0 >> 16 & 0xff]) << 16 | u32(sbox1[t3 >> 8 & 0xff]) << 8 | u32(sbox1[t2 & 0xff]);
s2 = u32(sbox1[t2 >> 24]) << 24 | u32(sbox1[t1 >> 16 & 0xff]) << 16 | u32(sbox1[t0 >> 8 & 0xff]) << 8 | u32(sbox1[t3 & 0xff]);
s3 = u32(sbox1[t3 >> 24]) << 24 | u32(sbox1[t2 >> 16 & 0xff]) << 16 | u32(sbox1[t1 >> 8 & 0xff]) << 8 | u32(sbox1[t0 & 0xff]);
s0 = @as(u32, sbox1[t0 >> 24]) << 24 | @as(u32, sbox1[t3 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t2 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t1 & 0xff]);
s1 = @as(u32, sbox1[t1 >> 24]) << 24 | @as(u32, sbox1[t0 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t3 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t2 & 0xff]);
s2 = @as(u32, sbox1[t2 >> 24]) << 24 | @as(u32, sbox1[t1 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t0 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t3 & 0xff]);
s3 = @as(u32, sbox1[t3 >> 24]) << 24 | @as(u32, sbox1[t2 >> 16 & 0xff]) << 16 | @as(u32, sbox1[t1 >> 8 & 0xff]) << 8 | @as(u32, sbox1[t0 & 0xff]);
s0 ^= xk[k + 0];
s1 ^= xk[k + 1];
@ -256,7 +256,7 @@ fn expandKey(key: []const u8, enc: []u32, dec: []u32) void {
while (i < enc.len) : (i += 1) {
var t = enc[i - 1];
if (i % nk == 0) {
t = subw(rotw(t)) ^ (u32(powx[i / nk - 1]) << 24);
t = subw(rotw(t)) ^ (@as(u32, powx[i / nk - 1]) << 24);
} else if (nk > 6 and i % nk == 4) {
t = subw(t);
}
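An illustrative aside, not part of this commit's diff: the bulk of these changes replace the old `T(x)` cast-call syntax with the `@as(T, x)` builtin, which only performs safe, unambiguous coercions such as the integer widening in the AES rounds above. A minimal before/after sketch:

const std = @import("std");

test "widening a byte with @as" {
    const byte: u8 = 0xab;
    // old syntax removed by this commit: const wide = u32(byte) << 8;
    const wide = @as(u32, byte) << 8;
    std.testing.expectEqual(@as(u32, 0xab00), wide);
}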

View File

@ -131,9 +131,7 @@ fn printPad(stdout: var, s: []const u8) !void {
}
pub fn main() !void {
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = stdout_file.outStream();
const stdout = &stdout_out_stream.stream;
const stdout = &std.io.getStdOut().outStream().stream;
var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);

View File

@ -164,13 +164,13 @@ fn Blake2s(comptime out_len: usize) type {
inline while (j < 10) : (j += 1) {
inline for (rounds) |r| {
v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]];
v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(16));
v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], @as(usize, 16));
v[r.c] = v[r.c] +% v[r.d];
v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(12));
v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], @as(usize, 12));
v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]];
v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(8));
v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], @as(usize, 8));
v[r.c] = v[r.c] +% v[r.d];
v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(7));
v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], @as(usize, 7));
}
}
@ -398,13 +398,13 @@ fn Blake2b(comptime out_len: usize) type {
inline while (j < 12) : (j += 1) {
inline for (rounds) |r| {
v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]];
v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(32));
v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], @as(usize, 32));
v[r.c] = v[r.c] +% v[r.d];
v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(24));
v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], @as(usize, 24));
v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]];
v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(16));
v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], @as(usize, 16));
v[r.c] = v[r.c] +% v[r.d];
v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(63));
v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], @as(usize, 63));
}
}

View File

@ -49,13 +49,13 @@ fn salsa20_wordtobyte(out: []u8, input: [16]u32) void {
// two-round cycles
inline for (rounds) |r| {
x[r.a] +%= x[r.b];
x[r.d] = std.math.rotl(u32, x[r.d] ^ x[r.a], u32(16));
x[r.d] = std.math.rotl(u32, x[r.d] ^ x[r.a], @as(u32, 16));
x[r.c] +%= x[r.d];
x[r.b] = std.math.rotl(u32, x[r.b] ^ x[r.c], u32(12));
x[r.b] = std.math.rotl(u32, x[r.b] ^ x[r.c], @as(u32, 12));
x[r.a] +%= x[r.b];
x[r.d] = std.math.rotl(u32, x[r.d] ^ x[r.a], u32(8));
x[r.d] = std.math.rotl(u32, x[r.d] ^ x[r.a], @as(u32, 8));
x[r.c] +%= x[r.d];
x[r.b] = std.math.rotl(u32, x[r.b] ^ x[r.c], u32(7));
x[r.b] = std.math.rotl(u32, x[r.b] ^ x[r.c], @as(u32, 7));
}
}

View File

@ -34,9 +34,9 @@ pub const State = struct {
pub fn permute(self: *Self) void {
const state = &self.data;
var round = u32(24);
var round = @as(u32, 24);
while (round > 0) : (round -= 1) {
var column = usize(0);
var column = @as(usize, 0);
while (column < 4) : (column += 1) {
const x = math.rotl(u32, state[column], 24);
const y = math.rotl(u32, state[4 + column], 9);
@ -61,7 +61,7 @@ pub const State = struct {
}
pub fn squeeze(self: *Self, out: []u8) void {
var i = usize(0);
var i = @as(usize, 0);
while (i + RATE <= out.len) : (i += RATE) {
self.permute();
mem.copy(u8, out[i..], self.toSliceConst()[0..RATE]);
@ -79,7 +79,7 @@ test "permute" {
var state = State{
.data = blk: {
var input: [12]u32 = undefined;
var i = u32(0);
var i = @as(u32, 0);
while (i < 12) : (i += 1) {
input[i] = i * i * i + i *% 0x9e3779b9;
}

View File

@ -126,10 +126,10 @@ pub const Md5 = struct {
while (i < 16) : (i += 1) {
// NOTE: Performing or's separately improves perf by ~10%
s[i] = 0;
s[i] |= u32(b[i * 4 + 0]);
s[i] |= u32(b[i * 4 + 1]) << 8;
s[i] |= u32(b[i * 4 + 2]) << 16;
s[i] |= u32(b[i * 4 + 3]) << 24;
s[i] |= @as(u32, b[i * 4 + 0]);
s[i] |= @as(u32, b[i * 4 + 1]) << 8;
s[i] |= @as(u32, b[i * 4 + 2]) << 16;
s[i] |= @as(u32, b[i * 4 + 3]) << 24;
}
var v: [4]u32 = [_]u32{

View File

@ -87,11 +87,11 @@ pub const Poly1305 = struct {
// ctx->h <= 4_ffffffff_ffffffff_ffffffff_ffffffff
fn polyBlock(ctx: *Self) void {
// s = h + c, without carry propagation
const s0 = u64(ctx.h[0]) + ctx.c[0]; // s0 <= 1_fffffffe
const s1 = u64(ctx.h[1]) + ctx.c[1]; // s1 <= 1_fffffffe
const s2 = u64(ctx.h[2]) + ctx.c[2]; // s2 <= 1_fffffffe
const s3 = u64(ctx.h[3]) + ctx.c[3]; // s3 <= 1_fffffffe
const s4 = u64(ctx.h[4]) + ctx.c[4]; // s4 <= 5
const s0 = @as(u64, ctx.h[0]) + ctx.c[0]; // s0 <= 1_fffffffe
const s1 = @as(u64, ctx.h[1]) + ctx.c[1]; // s1 <= 1_fffffffe
const s2 = @as(u64, ctx.h[2]) + ctx.c[2]; // s2 <= 1_fffffffe
const s3 = @as(u64, ctx.h[3]) + ctx.c[3]; // s3 <= 1_fffffffe
const s4 = @as(u64, ctx.h[4]) + ctx.c[4]; // s4 <= 5
// Local all the things!
const r0 = ctx.r[0]; // r0 <= 0fffffff
@ -197,7 +197,7 @@ pub const Poly1305 = struct {
// check if we should subtract 2^130-5 by performing the
// corresponding carry propagation.
const _u0 = u64(5) + ctx.h[0]; // <= 1_00000004
const _u0 = @as(u64, 5) + ctx.h[0]; // <= 1_00000004
const _u1 = (_u0 >> 32) + ctx.h[1]; // <= 1_00000000
const _u2 = (_u1 >> 32) + ctx.h[2]; // <= 1_00000000
const _u3 = (_u2 >> 32) + ctx.h[3]; // <= 1_00000000

View File

@ -146,10 +146,10 @@ pub const Sha1 = struct {
Rp(0, 1, 2, 3, 4, 15),
};
inline for (round0a) |r| {
s[r.i] = (u32(b[r.i * 4 + 0]) << 24) | (u32(b[r.i * 4 + 1]) << 16) | (u32(b[r.i * 4 + 2]) << 8) | (u32(b[r.i * 4 + 3]) << 0);
s[r.i] = (@as(u32, b[r.i * 4 + 0]) << 24) | (@as(u32, b[r.i * 4 + 1]) << 16) | (@as(u32, b[r.i * 4 + 2]) << 8) | (@as(u32, b[r.i * 4 + 3]) << 0);
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], u32(30));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], @as(u32, 5)) +% 0x5A827999 +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], @as(u32, 30));
}
const round0b = comptime [_]RoundParam{
@ -160,10 +160,10 @@ pub const Sha1 = struct {
};
inline for (round0b) |r| {
const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
s[r.i & 0xf] = math.rotl(u32, t, @as(u32, 1));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], u32(30));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], @as(u32, 5)) +% 0x5A827999 +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], @as(u32, 30));
}
const round1 = comptime [_]RoundParam{
@ -190,10 +190,10 @@ pub const Sha1 = struct {
};
inline for (round1) |r| {
const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
s[r.i & 0xf] = math.rotl(u32, t, @as(u32, 1));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x6ED9EBA1 +% s[r.i & 0xf] +% (v[r.b] ^ v[r.c] ^ v[r.d]);
v[r.b] = math.rotl(u32, v[r.b], u32(30));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], @as(u32, 5)) +% 0x6ED9EBA1 +% s[r.i & 0xf] +% (v[r.b] ^ v[r.c] ^ v[r.d]);
v[r.b] = math.rotl(u32, v[r.b], @as(u32, 30));
}
const round2 = comptime [_]RoundParam{
@ -220,10 +220,10 @@ pub const Sha1 = struct {
};
inline for (round2) |r| {
const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
s[r.i & 0xf] = math.rotl(u32, t, @as(u32, 1));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x8F1BBCDC +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) ^ (v[r.b] & v[r.d]) ^ (v[r.c] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], u32(30));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], @as(u32, 5)) +% 0x8F1BBCDC +% s[r.i & 0xf] +% ((v[r.b] & v[r.c]) ^ (v[r.b] & v[r.d]) ^ (v[r.c] & v[r.d]));
v[r.b] = math.rotl(u32, v[r.b], @as(u32, 30));
}
const round3 = comptime [_]RoundParam{
@ -250,10 +250,10 @@ pub const Sha1 = struct {
};
inline for (round3) |r| {
const t = s[(r.i - 3) & 0xf] ^ s[(r.i - 8) & 0xf] ^ s[(r.i - 14) & 0xf] ^ s[(r.i - 16) & 0xf];
s[r.i & 0xf] = math.rotl(u32, t, u32(1));
s[r.i & 0xf] = math.rotl(u32, t, @as(u32, 1));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0xCA62C1D6 +% s[r.i & 0xf] +% (v[r.b] ^ v[r.c] ^ v[r.d]);
v[r.b] = math.rotl(u32, v[r.b], u32(30));
v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], @as(u32, 5)) +% 0xCA62C1D6 +% s[r.i & 0xf] +% (v[r.b] ^ v[r.c] ^ v[r.d]);
v[r.b] = math.rotl(u32, v[r.b], @as(u32, 30));
}
d.s[0] +%= v[0];

View File

@ -180,13 +180,13 @@ fn Sha2_32(comptime params: Sha2Params32) type {
var i: usize = 0;
while (i < 16) : (i += 1) {
s[i] = 0;
s[i] |= u32(b[i * 4 + 0]) << 24;
s[i] |= u32(b[i * 4 + 1]) << 16;
s[i] |= u32(b[i * 4 + 2]) << 8;
s[i] |= u32(b[i * 4 + 3]) << 0;
s[i] |= @as(u32, b[i * 4 + 0]) << 24;
s[i] |= @as(u32, b[i * 4 + 1]) << 16;
s[i] |= @as(u32, b[i * 4 + 2]) << 8;
s[i] |= @as(u32, b[i * 4 + 3]) << 0;
}
while (i < 64) : (i += 1) {
s[i] = s[i - 16] +% s[i - 7] +% (math.rotr(u32, s[i - 15], u32(7)) ^ math.rotr(u32, s[i - 15], u32(18)) ^ (s[i - 15] >> 3)) +% (math.rotr(u32, s[i - 2], u32(17)) ^ math.rotr(u32, s[i - 2], u32(19)) ^ (s[i - 2] >> 10));
s[i] = s[i - 16] +% s[i - 7] +% (math.rotr(u32, s[i - 15], @as(u32, 7)) ^ math.rotr(u32, s[i - 15], @as(u32, 18)) ^ (s[i - 15] >> 3)) +% (math.rotr(u32, s[i - 2], @as(u32, 17)) ^ math.rotr(u32, s[i - 2], @as(u32, 19)) ^ (s[i - 2] >> 10));
}
var v: [8]u32 = [_]u32{
@ -267,11 +267,11 @@ fn Sha2_32(comptime params: Sha2Params32) type {
Rp256(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2),
};
inline for (round0) |r| {
v[r.h] = v[r.h] +% (math.rotr(u32, v[r.e], u32(6)) ^ math.rotr(u32, v[r.e], u32(11)) ^ math.rotr(u32, v[r.e], u32(25))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% r.k +% s[r.i];
v[r.h] = v[r.h] +% (math.rotr(u32, v[r.e], @as(u32, 6)) ^ math.rotr(u32, v[r.e], @as(u32, 11)) ^ math.rotr(u32, v[r.e], @as(u32, 25))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% r.k +% s[r.i];
v[r.d] = v[r.d] +% v[r.h];
v[r.h] = v[r.h] +% (math.rotr(u32, v[r.a], u32(2)) ^ math.rotr(u32, v[r.a], u32(13)) ^ math.rotr(u32, v[r.a], u32(22))) +% ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
v[r.h] = v[r.h] +% (math.rotr(u32, v[r.a], @as(u32, 2)) ^ math.rotr(u32, v[r.a], @as(u32, 13)) ^ math.rotr(u32, v[r.a], @as(u32, 22))) +% ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
}
d.s[0] +%= v[0];
@ -522,17 +522,19 @@ fn Sha2_64(comptime params: Sha2Params64) type {
var i: usize = 0;
while (i < 16) : (i += 1) {
s[i] = 0;
s[i] |= u64(b[i * 8 + 0]) << 56;
s[i] |= u64(b[i * 8 + 1]) << 48;
s[i] |= u64(b[i * 8 + 2]) << 40;
s[i] |= u64(b[i * 8 + 3]) << 32;
s[i] |= u64(b[i * 8 + 4]) << 24;
s[i] |= u64(b[i * 8 + 5]) << 16;
s[i] |= u64(b[i * 8 + 6]) << 8;
s[i] |= u64(b[i * 8 + 7]) << 0;
s[i] |= @as(u64, b[i * 8 + 0]) << 56;
s[i] |= @as(u64, b[i * 8 + 1]) << 48;
s[i] |= @as(u64, b[i * 8 + 2]) << 40;
s[i] |= @as(u64, b[i * 8 + 3]) << 32;
s[i] |= @as(u64, b[i * 8 + 4]) << 24;
s[i] |= @as(u64, b[i * 8 + 5]) << 16;
s[i] |= @as(u64, b[i * 8 + 6]) << 8;
s[i] |= @as(u64, b[i * 8 + 7]) << 0;
}
while (i < 80) : (i += 1) {
s[i] = s[i - 16] +% s[i - 7] +% (math.rotr(u64, s[i - 15], u64(1)) ^ math.rotr(u64, s[i - 15], u64(8)) ^ (s[i - 15] >> 7)) +% (math.rotr(u64, s[i - 2], u64(19)) ^ math.rotr(u64, s[i - 2], u64(61)) ^ (s[i - 2] >> 6));
s[i] = s[i - 16] +% s[i - 7] +%
(math.rotr(u64, s[i - 15], @as(u64, 1)) ^ math.rotr(u64, s[i - 15], @as(u64, 8)) ^ (s[i - 15] >> 7)) +%
(math.rotr(u64, s[i - 2], @as(u64, 19)) ^ math.rotr(u64, s[i - 2], @as(u64, 61)) ^ (s[i - 2] >> 6));
}
var v: [8]u64 = [_]u64{
@ -629,11 +631,11 @@ fn Sha2_64(comptime params: Sha2Params64) type {
Rp512(1, 2, 3, 4, 5, 6, 7, 0, 79, 0x6C44198C4A475817),
};
inline for (round0) |r| {
v[r.h] = v[r.h] +% (math.rotr(u64, v[r.e], u64(14)) ^ math.rotr(u64, v[r.e], u64(18)) ^ math.rotr(u64, v[r.e], u64(41))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% r.k +% s[r.i];
v[r.h] = v[r.h] +% (math.rotr(u64, v[r.e], @as(u64, 14)) ^ math.rotr(u64, v[r.e], @as(u64, 18)) ^ math.rotr(u64, v[r.e], @as(u64, 41))) +% (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% r.k +% s[r.i];
v[r.d] = v[r.d] +% v[r.h];
v[r.h] = v[r.h] +% (math.rotr(u64, v[r.a], u64(28)) ^ math.rotr(u64, v[r.a], u64(34)) ^ math.rotr(u64, v[r.a], u64(39))) +% ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
v[r.h] = v[r.h] +% (math.rotr(u64, v[r.a], @as(u64, 28)) ^ math.rotr(u64, v[r.a], @as(u64, 34)) ^ math.rotr(u64, v[r.a], @as(u64, 39))) +% ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c]));
}
d.s[0] +%= v[0];

View File

@ -133,7 +133,7 @@ fn keccak_f(comptime F: usize, d: []u8) void {
}
x = 0;
inline while (x < 5) : (x += 1) {
t[0] = c[M5[x + 4]] ^ math.rotl(u64, c[M5[x + 1]], usize(1));
t[0] = c[M5[x + 4]] ^ math.rotl(u64, c[M5[x + 1]], @as(usize, 1));
y = 0;
inline while (y < 5) : (y += 1) {
s[x + y * 5] ^= t[0];

View File

@ -199,9 +199,9 @@ const Fe = struct {
inline fn carryRound(c: []i64, t: []i64, comptime i: comptime_int, comptime shift: comptime_int, comptime mult: comptime_int) void {
const j = (i + 1) % 10;
c[i] = (t[i] + (i64(1) << shift)) >> (shift + 1);
c[i] = (t[i] + (@as(i64, 1) << shift)) >> (shift + 1);
t[j] += c[i] * mult;
t[i] -= c[i] * (i64(1) << (shift + 1));
t[i] -= c[i] * (@as(i64, 1) << (shift + 1));
}
fn carry1(h: *Fe, t: []i64) void {
@ -256,15 +256,15 @@ const Fe = struct {
var t: [10]i64 = undefined;
t[0] = readIntSliceLittle(u32, s[0..4]);
t[1] = u32(readIntSliceLittle(u24, s[4..7])) << 6;
t[2] = u32(readIntSliceLittle(u24, s[7..10])) << 5;
t[3] = u32(readIntSliceLittle(u24, s[10..13])) << 3;
t[4] = u32(readIntSliceLittle(u24, s[13..16])) << 2;
t[1] = @as(u32, readIntSliceLittle(u24, s[4..7])) << 6;
t[2] = @as(u32, readIntSliceLittle(u24, s[7..10])) << 5;
t[3] = @as(u32, readIntSliceLittle(u24, s[10..13])) << 3;
t[4] = @as(u32, readIntSliceLittle(u24, s[13..16])) << 2;
t[5] = readIntSliceLittle(u32, s[16..20]);
t[6] = u32(readIntSliceLittle(u24, s[20..23])) << 7;
t[7] = u32(readIntSliceLittle(u24, s[23..26])) << 5;
t[8] = u32(readIntSliceLittle(u24, s[26..29])) << 4;
t[9] = (u32(readIntSliceLittle(u24, s[29..32])) & 0x7fffff) << 2;
t[6] = @as(u32, readIntSliceLittle(u24, s[20..23])) << 7;
t[7] = @as(u32, readIntSliceLittle(u24, s[23..26])) << 5;
t[8] = @as(u32, readIntSliceLittle(u24, s[26..29])) << 4;
t[9] = (@as(u32, readIntSliceLittle(u24, s[29..32])) & 0x7fffff) << 2;
carry1(h, t[0..]);
}
@ -273,7 +273,7 @@ const Fe = struct {
var t: [10]i64 = undefined;
for (t[0..]) |_, i| {
t[i] = i64(f.b[i]) * g;
t[i] = @as(i64, f.b[i]) * g;
}
carry1(h, t[0..]);
@ -305,16 +305,16 @@ const Fe = struct {
// t's become h
var t: [10]i64 = undefined;
t[0] = f[0] * i64(g[0]) + F[1] * i64(G[9]) + f[2] * i64(G[8]) + F[3] * i64(G[7]) + f[4] * i64(G[6]) + F[5] * i64(G[5]) + f[6] * i64(G[4]) + F[7] * i64(G[3]) + f[8] * i64(G[2]) + F[9] * i64(G[1]);
t[1] = f[0] * i64(g[1]) + f[1] * i64(g[0]) + f[2] * i64(G[9]) + f[3] * i64(G[8]) + f[4] * i64(G[7]) + f[5] * i64(G[6]) + f[6] * i64(G[5]) + f[7] * i64(G[4]) + f[8] * i64(G[3]) + f[9] * i64(G[2]);
t[2] = f[0] * i64(g[2]) + F[1] * i64(g[1]) + f[2] * i64(g[0]) + F[3] * i64(G[9]) + f[4] * i64(G[8]) + F[5] * i64(G[7]) + f[6] * i64(G[6]) + F[7] * i64(G[5]) + f[8] * i64(G[4]) + F[9] * i64(G[3]);
t[3] = f[0] * i64(g[3]) + f[1] * i64(g[2]) + f[2] * i64(g[1]) + f[3] * i64(g[0]) + f[4] * i64(G[9]) + f[5] * i64(G[8]) + f[6] * i64(G[7]) + f[7] * i64(G[6]) + f[8] * i64(G[5]) + f[9] * i64(G[4]);
t[4] = f[0] * i64(g[4]) + F[1] * i64(g[3]) + f[2] * i64(g[2]) + F[3] * i64(g[1]) + f[4] * i64(g[0]) + F[5] * i64(G[9]) + f[6] * i64(G[8]) + F[7] * i64(G[7]) + f[8] * i64(G[6]) + F[9] * i64(G[5]);
t[5] = f[0] * i64(g[5]) + f[1] * i64(g[4]) + f[2] * i64(g[3]) + f[3] * i64(g[2]) + f[4] * i64(g[1]) + f[5] * i64(g[0]) + f[6] * i64(G[9]) + f[7] * i64(G[8]) + f[8] * i64(G[7]) + f[9] * i64(G[6]);
t[6] = f[0] * i64(g[6]) + F[1] * i64(g[5]) + f[2] * i64(g[4]) + F[3] * i64(g[3]) + f[4] * i64(g[2]) + F[5] * i64(g[1]) + f[6] * i64(g[0]) + F[7] * i64(G[9]) + f[8] * i64(G[8]) + F[9] * i64(G[7]);
t[7] = f[0] * i64(g[7]) + f[1] * i64(g[6]) + f[2] * i64(g[5]) + f[3] * i64(g[4]) + f[4] * i64(g[3]) + f[5] * i64(g[2]) + f[6] * i64(g[1]) + f[7] * i64(g[0]) + f[8] * i64(G[9]) + f[9] * i64(G[8]);
t[8] = f[0] * i64(g[8]) + F[1] * i64(g[7]) + f[2] * i64(g[6]) + F[3] * i64(g[5]) + f[4] * i64(g[4]) + F[5] * i64(g[3]) + f[6] * i64(g[2]) + F[7] * i64(g[1]) + f[8] * i64(g[0]) + F[9] * i64(G[9]);
t[9] = f[0] * i64(g[9]) + f[1] * i64(g[8]) + f[2] * i64(g[7]) + f[3] * i64(g[6]) + f[4] * i64(g[5]) + f[5] * i64(g[4]) + f[6] * i64(g[3]) + f[7] * i64(g[2]) + f[8] * i64(g[1]) + f[9] * i64(g[0]);
t[0] = f[0] * @as(i64, g[0]) + F[1] * @as(i64, G[9]) + f[2] * @as(i64, G[8]) + F[3] * @as(i64, G[7]) + f[4] * @as(i64, G[6]) + F[5] * @as(i64, G[5]) + f[6] * @as(i64, G[4]) + F[7] * @as(i64, G[3]) + f[8] * @as(i64, G[2]) + F[9] * @as(i64, G[1]);
t[1] = f[0] * @as(i64, g[1]) + f[1] * @as(i64, g[0]) + f[2] * @as(i64, G[9]) + f[3] * @as(i64, G[8]) + f[4] * @as(i64, G[7]) + f[5] * @as(i64, G[6]) + f[6] * @as(i64, G[5]) + f[7] * @as(i64, G[4]) + f[8] * @as(i64, G[3]) + f[9] * @as(i64, G[2]);
t[2] = f[0] * @as(i64, g[2]) + F[1] * @as(i64, g[1]) + f[2] * @as(i64, g[0]) + F[3] * @as(i64, G[9]) + f[4] * @as(i64, G[8]) + F[5] * @as(i64, G[7]) + f[6] * @as(i64, G[6]) + F[7] * @as(i64, G[5]) + f[8] * @as(i64, G[4]) + F[9] * @as(i64, G[3]);
t[3] = f[0] * @as(i64, g[3]) + f[1] * @as(i64, g[2]) + f[2] * @as(i64, g[1]) + f[3] * @as(i64, g[0]) + f[4] * @as(i64, G[9]) + f[5] * @as(i64, G[8]) + f[6] * @as(i64, G[7]) + f[7] * @as(i64, G[6]) + f[8] * @as(i64, G[5]) + f[9] * @as(i64, G[4]);
t[4] = f[0] * @as(i64, g[4]) + F[1] * @as(i64, g[3]) + f[2] * @as(i64, g[2]) + F[3] * @as(i64, g[1]) + f[4] * @as(i64, g[0]) + F[5] * @as(i64, G[9]) + f[6] * @as(i64, G[8]) + F[7] * @as(i64, G[7]) + f[8] * @as(i64, G[6]) + F[9] * @as(i64, G[5]);
t[5] = f[0] * @as(i64, g[5]) + f[1] * @as(i64, g[4]) + f[2] * @as(i64, g[3]) + f[3] * @as(i64, g[2]) + f[4] * @as(i64, g[1]) + f[5] * @as(i64, g[0]) + f[6] * @as(i64, G[9]) + f[7] * @as(i64, G[8]) + f[8] * @as(i64, G[7]) + f[9] * @as(i64, G[6]);
t[6] = f[0] * @as(i64, g[6]) + F[1] * @as(i64, g[5]) + f[2] * @as(i64, g[4]) + F[3] * @as(i64, g[3]) + f[4] * @as(i64, g[2]) + F[5] * @as(i64, g[1]) + f[6] * @as(i64, g[0]) + F[7] * @as(i64, G[9]) + f[8] * @as(i64, G[8]) + F[9] * @as(i64, G[7]);
t[7] = f[0] * @as(i64, g[7]) + f[1] * @as(i64, g[6]) + f[2] * @as(i64, g[5]) + f[3] * @as(i64, g[4]) + f[4] * @as(i64, g[3]) + f[5] * @as(i64, g[2]) + f[6] * @as(i64, g[1]) + f[7] * @as(i64, g[0]) + f[8] * @as(i64, G[9]) + f[9] * @as(i64, G[8]);
t[8] = f[0] * @as(i64, g[8]) + F[1] * @as(i64, g[7]) + f[2] * @as(i64, g[6]) + F[3] * @as(i64, g[5]) + f[4] * @as(i64, g[4]) + F[5] * @as(i64, g[3]) + f[6] * @as(i64, g[2]) + F[7] * @as(i64, g[1]) + f[8] * @as(i64, g[0]) + F[9] * @as(i64, G[9]);
t[9] = f[0] * @as(i64, g[9]) + f[1] * @as(i64, g[8]) + f[2] * @as(i64, g[7]) + f[3] * @as(i64, g[6]) + f[4] * @as(i64, g[5]) + f[5] * @as(i64, g[4]) + f[6] * @as(i64, g[3]) + f[7] * @as(i64, g[2]) + f[8] * @as(i64, g[1]) + f[9] * @as(i64, g[0]);
carry2(h, t[0..]);
}
@ -348,16 +348,16 @@ const Fe = struct {
var t: [10]i64 = undefined;
t[0] = f0 * i64(f0) + f1_2 * i64(f9_38) + f2_2 * i64(f8_19) + f3_2 * i64(f7_38) + f4_2 * i64(f6_19) + f5 * i64(f5_38);
t[1] = f0_2 * i64(f1) + f2 * i64(f9_38) + f3_2 * i64(f8_19) + f4 * i64(f7_38) + f5_2 * i64(f6_19);
t[2] = f0_2 * i64(f2) + f1_2 * i64(f1) + f3_2 * i64(f9_38) + f4_2 * i64(f8_19) + f5_2 * i64(f7_38) + f6 * i64(f6_19);
t[3] = f0_2 * i64(f3) + f1_2 * i64(f2) + f4 * i64(f9_38) + f5_2 * i64(f8_19) + f6 * i64(f7_38);
t[4] = f0_2 * i64(f4) + f1_2 * i64(f3_2) + f2 * i64(f2) + f5_2 * i64(f9_38) + f6_2 * i64(f8_19) + f7 * i64(f7_38);
t[5] = f0_2 * i64(f5) + f1_2 * i64(f4) + f2_2 * i64(f3) + f6 * i64(f9_38) + f7_2 * i64(f8_19);
t[6] = f0_2 * i64(f6) + f1_2 * i64(f5_2) + f2_2 * i64(f4) + f3_2 * i64(f3) + f7_2 * i64(f9_38) + f8 * i64(f8_19);
t[7] = f0_2 * i64(f7) + f1_2 * i64(f6) + f2_2 * i64(f5) + f3_2 * i64(f4) + f8 * i64(f9_38);
t[8] = f0_2 * i64(f8) + f1_2 * i64(f7_2) + f2_2 * i64(f6) + f3_2 * i64(f5_2) + f4 * i64(f4) + f9 * i64(f9_38);
t[9] = f0_2 * i64(f9) + f1_2 * i64(f8) + f2_2 * i64(f7) + f3_2 * i64(f6) + f4 * i64(f5_2);
t[0] = f0 * @as(i64, f0) + f1_2 * @as(i64, f9_38) + f2_2 * @as(i64, f8_19) + f3_2 * @as(i64, f7_38) + f4_2 * @as(i64, f6_19) + f5 * @as(i64, f5_38);
t[1] = f0_2 * @as(i64, f1) + f2 * @as(i64, f9_38) + f3_2 * @as(i64, f8_19) + f4 * @as(i64, f7_38) + f5_2 * @as(i64, f6_19);
t[2] = f0_2 * @as(i64, f2) + f1_2 * @as(i64, f1) + f3_2 * @as(i64, f9_38) + f4_2 * @as(i64, f8_19) + f5_2 * @as(i64, f7_38) + f6 * @as(i64, f6_19);
t[3] = f0_2 * @as(i64, f3) + f1_2 * @as(i64, f2) + f4 * @as(i64, f9_38) + f5_2 * @as(i64, f8_19) + f6 * @as(i64, f7_38);
t[4] = f0_2 * @as(i64, f4) + f1_2 * @as(i64, f3_2) + f2 * @as(i64, f2) + f5_2 * @as(i64, f9_38) + f6_2 * @as(i64, f8_19) + f7 * @as(i64, f7_38);
t[5] = f0_2 * @as(i64, f5) + f1_2 * @as(i64, f4) + f2_2 * @as(i64, f3) + f6 * @as(i64, f9_38) + f7_2 * @as(i64, f8_19);
t[6] = f0_2 * @as(i64, f6) + f1_2 * @as(i64, f5_2) + f2_2 * @as(i64, f4) + f3_2 * @as(i64, f3) + f7_2 * @as(i64, f9_38) + f8 * @as(i64, f8_19);
t[7] = f0_2 * @as(i64, f7) + f1_2 * @as(i64, f6) + f2_2 * @as(i64, f5) + f3_2 * @as(i64, f4) + f8 * @as(i64, f9_38);
t[8] = f0_2 * @as(i64, f8) + f1_2 * @as(i64, f7_2) + f2_2 * @as(i64, f6) + f3_2 * @as(i64, f5_2) + f4 * @as(i64, f4) + f9 * @as(i64, f9_38);
t[9] = f0_2 * @as(i64, f9) + f1_2 * @as(i64, f8) + f2_2 * @as(i64, f7) + f3_2 * @as(i64, f6) + f4 * @as(i64, f5_2);
carry2(h, t[0..]);
}
@ -500,7 +500,7 @@ const Fe = struct {
if (i + 1 < 10) {
t[i + 1] += c[i];
}
t[i] -= c[i] * (i32(1) << shift);
t[i] -= c[i] * (@as(i32, 1) << shift);
}
fn toBytes(s: []u8, h: *const Fe) void {
@ -511,7 +511,7 @@ const Fe = struct {
t[i] = h.b[i];
}
var q = (19 * t[9] + ((i32(1) << 24))) >> 25;
var q = (19 * t[9] + ((@as(i32, 1) << 24))) >> 25;
{
var i: usize = 0;
while (i < 5) : (i += 1) {

View File

@ -45,18 +45,19 @@ var stderr_file_out_stream: File.OutStream = undefined;
var stderr_stream: ?*io.OutStream(File.WriteError) = null;
var stderr_mutex = std.Mutex.init();
pub fn warn(comptime fmt: []const u8, args: ...) void {
const held = stderr_mutex.acquire();
defer held.release();
const stderr = getStderrStream() catch return;
const stderr = getStderrStream();
stderr.print(fmt, args) catch return;
}
pub fn getStderrStream() !*io.OutStream(File.WriteError) {
pub fn getStderrStream() *io.OutStream(File.WriteError) {
if (stderr_stream) |st| {
return st;
} else {
stderr_file = try io.getStdErr();
stderr_file = io.getStdErr();
stderr_file_out_stream = stderr_file.outStream();
const st = &stderr_file_out_stream.stream;
stderr_stream = st;
@ -64,6 +65,10 @@ pub fn getStderrStream() !*io.OutStream(File.WriteError) {
}
}
pub fn getStderrMutex() *std.Mutex {
return &stderr_mutex;
}
/// TODO multithreaded awareness
var self_debug_info: ?DebugInfo = null;
@ -85,7 +90,7 @@ fn wantTtyColor() bool {
/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
const stderr = getStderrStream() catch return;
const stderr = getStderrStream();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n") catch return;
return;
@ -104,7 +109,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
/// unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
const stderr = getStderrStream() catch return;
const stderr = getStderrStream();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n") catch return;
return;
@ -177,7 +182,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *builtin.StackTrace
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
/// TODO multithreaded awareness
pub fn dumpStackTrace(stack_trace: builtin.StackTrace) void {
const stderr = getStderrStream() catch return;
const stderr = getStderrStream();
if (builtin.strip_debug_info) {
stderr.print("Unable to dump stack trace: debug info stripped\n") catch return;
return;
@ -232,7 +237,7 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
// which first called panic can finish printing a stack trace.
os.abort();
}
const stderr = getStderrStream() catch os.abort();
const stderr = getStderrStream();
stderr.print(format ++ "\n", args) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
@ -1003,7 +1008,7 @@ fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize {
const word = try stream.readIntLittle(u32);
var bit_i: u5 = 0;
while (true) : (bit_i += 1) {
if (word & (u32(1) << bit_i) != 0) {
if (word & (@as(u32, 1) << bit_i) != 0) {
try list.append(word_i * 32 + bit_i);
}
if (bit_i == maxInt(u5)) break;
@ -1551,13 +1556,14 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: boo
// TODO the noasyncs here are workarounds
fn parseFormValueDwarfOffsetSize(in_stream: var, is_64: bool) !u64 {
return if (is_64) try noasync in_stream.readIntLittle(u64) else u64(try noasync in_stream.readIntLittle(u32));
return if (is_64) try noasync in_stream.readIntLittle(u64) else @as(u64, try noasync in_stream.readIntLittle(u32));
}
// TODO the noasyncs here are workarounds
fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
if (@sizeOf(usize) == 4) {
return u64(try noasync in_stream.readIntLittle(u32));
// TODO this cast should not be needed
return @as(u64, try noasync in_stream.readIntLittle(u32));
} else if (@sizeOf(usize) == 8) {
return noasync in_stream.readIntLittle(u64);
} else {
@ -1705,7 +1711,12 @@ fn getLineNumberInfoMacOs(di: *DebugInfo, symbol: MachoSymbol, target_address: u
const ofile_path = mem.toSliceConst(u8, di.strings.ptr + ofile.n_strx);
gop.kv.value = MachOFile{
.bytes = try std.io.readFileAllocAligned(di.ofiles.allocator, ofile_path, @alignOf(macho.mach_header_64)),
.bytes = try std.fs.Dir.cwd().readFileAllocAligned(
di.ofiles.allocator,
ofile_path,
maxInt(usize),
@alignOf(macho.mach_header_64),
),
.sect_debug_info = null,
.sect_debug_line = null,
};
@ -1841,7 +1852,7 @@ fn getLineNumberInfoMacOs(di: *DebugInfo, symbol: MachoSymbol, target_address: u
// special opcodes
const adjusted_opcode = opcode - opcode_base;
const inc_addr = minimum_instruction_length * (adjusted_opcode / line_range);
const inc_line = i32(line_base) + i32(adjusted_opcode % line_range);
const inc_line = @as(i32, line_base) + @as(i32, adjusted_opcode % line_range);
prog.line += inc_line;
prog.address += inc_addr;
if (try prog.checkLineMatch()) |info| return info;
@ -1908,7 +1919,7 @@ fn getLineNumberInfoDwarf(di: *DwarfInfo, compile_unit: CompileUnit, target_addr
if (unit_length == 0) {
return error.MissingDebugInfo;
}
const next_offset = unit_length + (if (is_64) usize(12) else usize(4));
const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
const version = try di.dwarf_in_stream.readInt(u16, di.endian);
// TODO support 3 and 5
@ -2007,7 +2018,7 @@ fn getLineNumberInfoDwarf(di: *DwarfInfo, compile_unit: CompileUnit, target_addr
// special opcodes
const adjusted_opcode = opcode - opcode_base;
const inc_addr = minimum_instruction_length * (adjusted_opcode / line_range);
const inc_line = i32(line_base) + i32(adjusted_opcode % line_range);
const inc_line = @as(i32, line_base) + @as(i32, adjusted_opcode % line_range);
prog.line += inc_line;
prog.address += inc_addr;
if (try prog.checkLineMatch()) |info| return info;
@ -2088,7 +2099,7 @@ fn scanAllFunctions(di: *DwarfInfo) !void {
var is_64: bool = undefined;
const unit_length = try readInitialLength(@typeOf(di.dwarf_in_stream.readFn).ReturnType.ErrorSet, di.dwarf_in_stream, &is_64);
if (unit_length == 0) return;
const next_offset = unit_length + (if (is_64) usize(12) else usize(4));
const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
const version = try di.dwarf_in_stream.readInt(u16, di.endian);
if (version < 2 or version > 5) return error.InvalidDebugInfo;
@ -2190,7 +2201,7 @@ fn scanAllCompileUnits(di: *DwarfInfo) !void {
var is_64: bool = undefined;
const unit_length = try readInitialLength(@typeOf(di.dwarf_in_stream.readFn).ReturnType.ErrorSet, di.dwarf_in_stream, &is_64);
if (unit_length == 0) return;
const next_offset = unit_length + (if (is_64) usize(12) else usize(4));
const next_offset = unit_length + (if (is_64) @as(usize, 12) else @as(usize, 4));
const version = try di.dwarf_in_stream.readInt(u16, di.endian);
if (version < 2 or version > 5) return error.InvalidDebugInfo;
@ -2307,7 +2318,8 @@ fn readInitialLengthMem(ptr: *[*]const u8, is_64: *bool) !u64 {
} else {
if (first_32_bits >= 0xfffffff0) return error.InvalidDebugInfo;
ptr.* += 4;
return u64(first_32_bits);
// TODO this cast should not be needed
return @as(u64, first_32_bits);
}
}
@ -2324,7 +2336,8 @@ fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool)
return in_stream.readIntLittle(u64);
} else {
if (first_32_bits >= 0xfffffff0) return error.InvalidDebugInfo;
return u64(first_32_bits);
// TODO this cast should not be needed
return @as(u64, first_32_bits);
}
}

View File

@ -62,13 +62,13 @@ pub fn readILEB128(comptime T: type, in_stream: var) !T {
var shift: usize = 0;
while (true) {
const byte = u8(try in_stream.readByte());
const byte: u8 = try in_stream.readByte();
if (shift > T.bit_count)
return error.Overflow;
var operand: UT = undefined;
if (@shlWithOverflow(UT, UT(byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
if (@shlWithOverflow(UT, @as(UT, byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
if (byte != 0x7f)
return error.Overflow;
}
@ -101,7 +101,7 @@ pub fn readILEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
return error.Overflow;
var operand: UT = undefined;
if (@shlWithOverflow(UT, UT(byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
if (@shlWithOverflow(UT, @as(UT, byte & 0x7f), @intCast(ShiftT, shift), &operand)) {
if (byte != 0x7f)
return error.Overflow;
}

View File

@ -215,8 +215,8 @@ pub const ElfLib = struct {
var i: usize = 0;
while (i < self.hashtab[1]) : (i += 1) {
if (0 == (u32(1) << @intCast(u5, self.syms[i].st_info & 0xf) & OK_TYPES)) continue;
if (0 == (u32(1) << @intCast(u5, self.syms[i].st_info >> 4) & OK_BINDS)) continue;
if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info & 0xf) & OK_TYPES)) continue;
if (0 == (@as(u32, 1) << @intCast(u5, self.syms[i].st_info >> 4) & OK_BINDS)) continue;
if (0 == self.syms[i].st_shndx) continue;
if (!mem.eql(u8, name, mem.toSliceConst(u8, self.strings + self.syms[i].st_name))) continue;
if (maybe_versym) |versym| {

View File

@ -441,9 +441,9 @@ pub const Elf = struct {
elf.program_header_offset = try in.readInt(u64, elf.endian);
elf.section_header_offset = try in.readInt(u64, elf.endian);
} else {
elf.entry_addr = u64(try in.readInt(u32, elf.endian));
elf.program_header_offset = u64(try in.readInt(u32, elf.endian));
elf.section_header_offset = u64(try in.readInt(u32, elf.endian));
elf.entry_addr = @as(u64, try in.readInt(u32, elf.endian));
elf.program_header_offset = @as(u64, try in.readInt(u32, elf.endian));
elf.section_header_offset = @as(u64, try in.readInt(u32, elf.endian));
}
// skip over flags
@ -458,13 +458,13 @@ pub const Elf = struct {
const ph_entry_count = try in.readInt(u16, elf.endian);
const sh_entry_size = try in.readInt(u16, elf.endian);
const sh_entry_count = try in.readInt(u16, elf.endian);
elf.string_section_index = usize(try in.readInt(u16, elf.endian));
elf.string_section_index = @as(usize, try in.readInt(u16, elf.endian));
if (elf.string_section_index >= sh_entry_count) return error.InvalidFormat;
const sh_byte_count = u64(sh_entry_size) * u64(sh_entry_count);
const sh_byte_count = @as(u64, sh_entry_size) * @as(u64, sh_entry_count);
const end_sh = try math.add(u64, elf.section_header_offset, sh_byte_count);
const ph_byte_count = u64(ph_entry_size) * u64(ph_entry_count);
const ph_byte_count = @as(u64, ph_entry_size) * @as(u64, ph_entry_count);
const end_ph = try math.add(u64, elf.program_header_offset, ph_byte_count);
const stream_end = try seekable_stream.getEndPos();
@ -499,14 +499,14 @@ pub const Elf = struct {
// TODO (multiple occurrences) allow implicit cast from %u32 -> %u64 ?
elf_section.name = try in.readInt(u32, elf.endian);
elf_section.sh_type = try in.readInt(u32, elf.endian);
elf_section.flags = u64(try in.readInt(u32, elf.endian));
elf_section.addr = u64(try in.readInt(u32, elf.endian));
elf_section.offset = u64(try in.readInt(u32, elf.endian));
elf_section.size = u64(try in.readInt(u32, elf.endian));
elf_section.flags = @as(u64, try in.readInt(u32, elf.endian));
elf_section.addr = @as(u64, try in.readInt(u32, elf.endian));
elf_section.offset = @as(u64, try in.readInt(u32, elf.endian));
elf_section.size = @as(u64, try in.readInt(u32, elf.endian));
elf_section.link = try in.readInt(u32, elf.endian);
elf_section.info = try in.readInt(u32, elf.endian);
elf_section.addr_align = u64(try in.readInt(u32, elf.endian));
elf_section.ent_size = u64(try in.readInt(u32, elf.endian));
elf_section.addr_align = @as(u64, try in.readInt(u32, elf.endian));
elf_section.ent_size = @as(u64, try in.readInt(u32, elf.endian));
}
}

View File

@ -161,7 +161,7 @@ pub fn Channel(comptime T: type) type {
fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
_ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst);
@atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
lock: while (true) {
// set the lock flag
@ -169,7 +169,7 @@ pub fn Channel(comptime T: type) type {
if (prev_lock != 0) return;
// clear the need_dispatch flag since we're about to do it
_ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
@atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
while (true) {
one_dispatch: {

View File

@ -328,11 +328,11 @@ pub fn preadWindows(loop: *Loop, fd: fd_t, data: []u8, offset: u64) !usize {
windows.ERROR.IO_PENDING => unreachable,
windows.ERROR.OPERATION_ABORTED => return error.OperationAborted,
windows.ERROR.BROKEN_PIPE => return error.BrokenPipe,
windows.ERROR.HANDLE_EOF => return usize(bytes_transferred),
windows.ERROR.HANDLE_EOF => return @as(usize, bytes_transferred),
else => |err| return windows.unexpectedError(err),
}
}
return usize(bytes_transferred);
return @as(usize, bytes_transferred);
}
/// iovecs must live until preadv frame completes
@ -415,7 +415,8 @@ pub fn openPosix(
pub fn openRead(loop: *Loop, path: []const u8) File.OpenError!fd_t {
switch (builtin.os) {
.macosx, .linux, .freebsd, .netbsd, .dragonfly => {
const flags = os.O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
return openPosix(loop, path, flags, File.default_mode);
},
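An illustrative aside, not part of this commit's diff: `@hasDecl` lets O_LARGEFILE be looked up only on targets whose os module actually declares it, instead of assuming every platform defines the flag. A standalone sketch of the same feature-detection pattern:

const std = @import("std");
const os = std.os;

// Falls back to 0 (a no-op flag) when the current target has no O_LARGEFILE.
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;

test "O_LARGEFILE resolves on every target" {
    std.testing.expect(O_LARGEFILE >= 0);
}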
@ -448,7 +449,8 @@ pub fn openWriteMode(loop: *Loop, path: []const u8, mode: File.Mode) File.OpenEr
.netbsd,
.dragonfly,
=> {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
return openPosix(loop, path, flags, File.default_mode);
},
.windows => return windows.CreateFile(
@ -472,7 +474,8 @@ pub fn openReadWrite(
) File.OpenError!fd_t {
switch (builtin.os) {
.macosx, .linux, .freebsd, .netbsd, .dragonfly => {
const flags = os.O_LARGEFILE | os.O_RDWR | os.O_CREAT | os.O_CLOEXEC;
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_RDWR | os.O_CREAT | os.O_CLOEXEC;
return openPosix(loop, path, flags, mode);
},

View File

@ -12,12 +12,13 @@ pub fn Future(comptime T: type) type {
return struct {
lock: Lock,
data: T,
available: Available,
/// TODO make this an enum
/// 0 - not started
/// 1 - started
/// 2 - finished
available: u8,
const Available = enum(u8) {
NotStarted,
Started,
Finished,
};
const Self = @This();
const Queue = std.atomic.Queue(anyframe);
@ -34,7 +35,7 @@ pub fn Future(comptime T: type) type {
/// available.
/// Thread-safe.
pub async fn get(self: *Self) *T {
if (@atomicLoad(u8, &self.available, .SeqCst) == 2) {
if (@atomicLoad(Available, &self.available, .SeqCst) == .Finished) {
return &self.data;
}
const held = self.lock.acquire();
@ -46,7 +47,7 @@ pub fn Future(comptime T: type) type {
/// Gets the data without waiting for it. If it's available, a pointer is
/// returned. Otherwise, null is returned.
pub fn getOrNull(self: *Self) ?*T {
if (@atomicLoad(u8, &self.available, .SeqCst) == 2) {
if (@atomicLoad(Available, &self.available, .SeqCst) == .Finished) {
return &self.data;
} else {
return null;
@ -59,14 +60,14 @@ pub fn Future(comptime T: type) type {
/// It's not required to call start() before resolve() but it can be useful since
/// this method is thread-safe.
pub async fn start(self: *Self) ?*T {
const state = @cmpxchgStrong(u8, &self.available, 0, 1, .SeqCst, .SeqCst) orelse return null;
const state = @cmpxchgStrong(Available, &self.available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return null;
switch (state) {
1 => {
.Started => {
const held = self.lock.acquire();
held.release();
return &self.data;
},
2 => return &self.data,
.Finished => return &self.data,
else => unreachable,
}
}
@ -74,8 +75,8 @@ pub fn Future(comptime T: type) type {
/// Make the data become available. May be called only once.
/// Before calling this, modify the `data` property.
pub fn resolve(self: *Self) void {
const prev = @atomicRmw(u8, &self.available, .Xchg, 2, .SeqCst);
assert(prev == 0 or prev == 1); // resolve() called twice
const prev = @atomicRmw(Available, &self.available, .Xchg, .Finished, .SeqCst);
assert(prev != .Finished); // resolve() called twice
Lock.Held.release(Lock.Held{ .lock = &self.lock });
}
};
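An illustrative aside, not part of this commit's diff: the atomic builtins now accept enum operands, so the magic 0/1/2 values of Future's `available` field (and the RwLock states further down) can become a real enum without losing atomicity. A minimal sketch using only the builtins that appear in the change above; the names are hypothetical:

const std = @import("std");

const Available = enum(u8) {
    NotStarted,
    Started,
    Finished,
};

var state = Available.NotStarted;

fn tryStart() bool {
    // A null result means this caller won the NotStarted -> Started transition.
    return @cmpxchgStrong(Available, &state, .NotStarted, .Started, .SeqCst, .SeqCst) == null;
}

fn finish() void {
    @atomicStore(Available, &state, .Finished, .SeqCst);
}

test "enum-typed atomic state" {
    std.testing.expect(tryStart());
    finish();
    std.testing.expect(@atomicLoad(Available, &state, .SeqCst) == .Finished);
}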

View File

@ -8,7 +8,7 @@ const Allocator = std.mem.Allocator;
pub fn Group(comptime ReturnType: type) type {
return struct {
frame_stack: Stack,
alloc_stack: Stack,
alloc_stack: AllocStack,
lock: Lock,
allocator: *Allocator,
@ -19,11 +19,17 @@ pub fn Group(comptime ReturnType: type) type {
else => void,
};
const Stack = std.atomic.Stack(anyframe->ReturnType);
const AllocStack = std.atomic.Stack(Node);
pub const Node = struct {
bytes: []const u8 = [0]u8{},
handle: anyframe->ReturnType,
};
pub fn init(allocator: *Allocator) Self {
return Self{
.frame_stack = Stack.init(),
.alloc_stack = Stack.init(),
.alloc_stack = AllocStack.init(),
.lock = Lock.init(),
.allocator = allocator,
};
@ -31,10 +37,12 @@ pub fn Group(comptime ReturnType: type) type {
/// Add a frame to the group. Thread-safe.
pub fn add(self: *Self, handle: anyframe->ReturnType) (error{OutOfMemory}!void) {
const node = try self.allocator.create(Stack.Node);
node.* = Stack.Node{
const node = try self.allocator.create(AllocStack.Node);
node.* = AllocStack.Node{
.next = undefined,
.data = handle,
.data = Node{
.handle = handle,
},
};
self.alloc_stack.push(node);
}
@ -48,6 +56,24 @@ pub fn Group(comptime ReturnType: type) type {
self.frame_stack.push(node);
}
/// This is equivalent to adding a frame to the group but the memory of its frame is
/// allocated by the group and freed by `wait`.
/// `func` must be async and have return type `ReturnType`.
/// Thread-safe.
pub fn call(self: *Self, comptime func: var, args: ...) error{OutOfMemory}!void {
var frame = try self.allocator.create(@Frame(func));
const node = try self.allocator.create(AllocStack.Node);
node.* = AllocStack.Node{
.next = undefined,
.data = Node{
.handle = frame,
.bytes = @sliceToBytes((*[1]@Frame(func))(frame)[0..]),
},
};
frame.* = async func(args);
self.alloc_stack.push(node);
}
/// Wait for all the calls and promises of the group to complete.
/// Thread-safe.
/// Safe to call any number of times.
@ -67,8 +93,7 @@ pub fn Group(comptime ReturnType: type) type {
}
}
while (self.alloc_stack.pop()) |node| {
const handle = node.data;
self.allocator.destroy(node);
const handle = node.data.handle;
if (Error == void) {
await handle;
} else {
@ -76,6 +101,8 @@ pub fn Group(comptime ReturnType: type) type {
result = err;
};
}
self.allocator.free(node.data.bytes);
self.allocator.destroy(node);
}
return result;
}

View File

@ -31,8 +31,8 @@ pub const Lock = struct {
}
// We need to release the lock.
_ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
_ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
@atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
@atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
// There might be a queue item. If we know the queue is empty, we can be done,
// because the other actor will try to obtain the lock.
@ -56,8 +56,8 @@ pub const Lock = struct {
}
// Release the lock again.
_ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
_ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
@atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
@atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
// Find out if we can be done.
if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
@ -101,7 +101,7 @@ pub const Lock = struct {
// We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
// will attempt to grab the lock.
_ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
@atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit == 0) {

View File

@ -266,7 +266,7 @@ pub const Loop = struct {
},
};
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
const empty_kevs = &[0]os.Kevent{};
for (self.eventfd_resume_nodes) |*eventfd_node, i| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
@ -289,7 +289,7 @@ pub const Loop = struct {
.next = undefined,
};
self.available_eventfd_resume_nodes.push(eventfd_node);
const kevent_array = (*const [1]os.Kevent)(&eventfd_node.data.kevent);
const kevent_array = @as(*const [1]os.Kevent, &eventfd_node.data.kevent);
_ = try os.kevent(self.os_data.kqfd, kevent_array, empty_kevs, null);
eventfd_node.data.kevent.flags = os.EV_CLEAR | os.EV_ENABLE;
eventfd_node.data.kevent.fflags = os.NOTE_TRIGGER;
@ -305,7 +305,7 @@ pub const Loop = struct {
.data = 0,
.udata = @ptrToInt(&self.final_resume_node),
};
const final_kev_arr = (*const [1]os.Kevent)(&self.os_data.final_kevent);
const final_kev_arr = @as(*const [1]os.Kevent, &self.os_data.final_kevent);
_ = try os.kevent(self.os_data.kqfd, final_kev_arr, empty_kevs, null);
self.os_data.final_kevent.flags = os.EV_ENABLE;
self.os_data.final_kevent.fflags = os.NOTE_TRIGGER;
@ -572,8 +572,8 @@ pub const Loop = struct {
eventfd_node.base.handle = next_tick_node.data;
switch (builtin.os) {
.macosx, .freebsd, .netbsd, .dragonfly => {
const kevent_array = (*const [1]os.Kevent)(&eventfd_node.kevent);
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
const kevent_array = @as(*const [1]os.Kevent, &eventfd_node.kevent);
const empty_kevs = &[0]os.Kevent{};
_ = os.kevent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch {
self.next_tick_queue.unget(next_tick_node);
self.available_eventfd_resume_nodes.push(resume_stack_node);
@ -645,12 +645,6 @@ pub const Loop = struct {
}
}
/// This is equivalent to function call, except it calls `startCpuBoundOperation` first.
pub fn call(comptime func: var, args: ...) @typeOf(func).ReturnType {
startCpuBoundOperation();
return func(args);
}
/// Yielding lets the event loop run, starting any unstarted async operations.
/// Note that async operations automatically start when a function yields for any other reason,
/// for example, when async I/O is performed. This function is intended to be used only when
@ -695,8 +689,8 @@ pub const Loop = struct {
},
.macosx, .freebsd, .netbsd, .dragonfly => {
self.posixFsRequest(&self.os_data.fs_end_request);
const final_kevent = (*const [1]os.Kevent)(&self.os_data.final_kevent);
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
const final_kevent = @as(*const [1]os.Kevent, &self.os_data.final_kevent);
const empty_kevs = &[0]os.Kevent{};
// cannot fail because we already added it and this just enables it
_ = os.kevent(self.os_data.kqfd, final_kevent, empty_kevs, null) catch unreachable;
return;
@ -753,7 +747,7 @@ pub const Loop = struct {
},
.macosx, .freebsd, .netbsd, .dragonfly => {
var eventlist: [1]os.Kevent = undefined;
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
const empty_kevs = &[0]os.Kevent{};
const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
for (eventlist[0..count]) |ev| {
const resume_node = @intToPtr(*ResumeNode, ev.udata);
@ -815,12 +809,12 @@ pub const Loop = struct {
self.os_data.fs_queue.put(request_node);
switch (builtin.os) {
.macosx, .freebsd, .netbsd, .dragonfly => {
const fs_kevs = (*const [1]os.Kevent)(&self.os_data.fs_kevent_wake);
const empty_kevs = ([*]os.Kevent)(undefined)[0..0];
const fs_kevs = @as(*const [1]os.Kevent, &self.os_data.fs_kevent_wake);
const empty_kevs = &[0]os.Kevent{};
_ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
},
.linux => {
_ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
@atomicStore(i32, &self.os_data.fs_queue_item, 1, AtomicOrder.SeqCst);
const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
switch (os.linux.getErrno(rc)) {
0 => {},
@ -843,7 +837,7 @@ pub const Loop = struct {
fn posixFsRun(self: *Loop) void {
while (true) {
if (builtin.os == .linux) {
_ = @atomicRmw(i32, &self.os_data.fs_queue_item, .Xchg, 0, .SeqCst);
@atomicStore(i32, &self.os_data.fs_queue_item, 0, .SeqCst);
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
@ -862,7 +856,8 @@ pub const Loop = struct {
},
.Close => |*msg| noasync os.close(msg.fd),
.WriteFile => |*msg| blk: {
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT |
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_WRONLY | os.O_CREAT |
os.O_CLOEXEC | os.O_TRUNC;
const fd = noasync os.openC(msg.path.ptr, flags, msg.mode) catch |err| {
msg.result = err;
@ -890,7 +885,7 @@ pub const Loop = struct {
}
},
.macosx, .freebsd, .netbsd, .dragonfly => {
const fs_kevs = (*const [1]os.Kevent)(&self.os_data.fs_kevent_wait);
const fs_kevs = @as(*const [1]os.Kevent, &self.os_data.fs_kevent_wait);
var out_kevs: [1]os.Kevent = undefined;
_ = os.kevent(self.os_data.fs_kqfd, fs_kevs, out_kevs[0..], null) catch unreachable;
},
@ -942,23 +937,6 @@ test "std.event.Loop - basic" {
loop.run();
}
test "std.event.Loop - call" {
// https://github.com/ziglang/zig/issues/1908
if (builtin.single_threaded) return error.SkipZigTest;
var loop: Loop = undefined;
try loop.initMultiThreaded();
defer loop.deinit();
var did_it = false;
var handle = async Loop.call(testEventLoop);
var handle2 = async Loop.call(testEventLoop2, &handle, &did_it);
loop.run();
testing.expect(did_it);
}
async fn testEventLoop() i32 {
return 1234;
}
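An illustrative aside, not part of this commit's diff: `&[0]os.Kevent{}` above builds a genuinely empty slice, replacing the old trick of slicing an undefined many-item pointer to length zero. The pointer to an empty array coerces to a slice wherever one is expected:

const std = @import("std");

test "empty slice from an empty array literal" {
    const empty = &[0]u32{};
    const as_slice: []const u32 = empty;
    std.testing.expectEqual(@as(usize, 0), as_slice.len);
}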

View File

@ -13,17 +13,17 @@ const Loop = std.event.Loop;
/// When a write lock is held, it will not be released until the writer queue is empty.
/// TODO: make this API also work in blocking I/O mode
pub const RwLock = struct {
shared_state: u8, // TODO make this an enum
shared_state: State,
writer_queue: Queue,
reader_queue: Queue,
writer_queue_empty_bit: u8, // TODO make this a bool
reader_queue_empty_bit: u8, // TODO make this a bool
reader_lock_count: usize,
const State = struct {
const Unlocked = 0;
const WriteLock = 1;
const ReadLock = 2;
const State = enum(u8) {
Unlocked,
WriteLock,
ReadLock,
};
const Queue = std.atomic.Queue(anyframe);
@ -40,8 +40,8 @@ pub const RwLock = struct {
return;
}
_ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
if (@cmpxchgStrong(u8, &self.lock.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) {
@atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}
@ -64,15 +64,15 @@ pub const RwLock = struct {
// We need to release the write lock. Check if any readers are waiting to grab the lock.
if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
// Switch to a read lock.
_ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.ReadLock, .SeqCst);
@atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
while (self.lock.reader_queue.get()) |node| {
global_event_loop.onNextTick(node);
}
return;
}
_ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
_ = @atomicRmw(u8, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
@atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
@atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
self.lock.commonPostUnlock();
}
@ -80,7 +80,7 @@ pub const RwLock = struct {
pub fn init() RwLock {
return RwLock{
.shared_state = State.Unlocked,
.shared_state = .Unlocked,
.writer_queue = Queue.init(),
.writer_queue_empty_bit = 1,
.reader_queue = Queue.init(),
@ -92,7 +92,7 @@ pub const RwLock = struct {
/// Must be called when not locked. Not thread safe.
/// All calls to acquire() and release() must complete before calling deinit().
pub fn deinit(self: *RwLock) void {
assert(self.shared_state == State.Unlocked);
assert(self.shared_state == .Unlocked);
while (self.writer_queue.get()) |node| resume node.data;
while (self.reader_queue.get()) |node| resume node.data;
}
@ -113,10 +113,10 @@ pub const RwLock = struct {
// We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
_ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
@atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
const have_read_lock = if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == State.ReadLock else true;
const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
if (have_read_lock) {
// Give out all the read locks.
if (self.reader_queue.get()) |first_node| {
@ -144,10 +144,10 @@ pub const RwLock = struct {
// We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
_ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
@atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) == null) {
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
// We now have a write lock.
if (self.writer_queue.get()) |node| {
// Whether this node is us or someone else, we tail resume it.
@ -166,7 +166,7 @@ pub const RwLock = struct {
// But if there's a writer_queue item or a reader_queue item,
// we are the actor which must loop and attempt to grab the lock again.
if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) {
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.WriteLock, .SeqCst, .SeqCst) != null) {
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
@ -176,13 +176,13 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
_ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
_ = @atomicRmw(u8, &self.shared_state, .Xchg, State.Unlocked, .SeqCst);
@atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
@atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
continue;
}
if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) {
if (@cmpxchgStrong(u8, &self.shared_state, State.Unlocked, State.ReadLock, .SeqCst, .SeqCst) != null) {
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst) != null) {
// We did not obtain the lock. Great, the queues are someone else's problem.
return;
}
@ -195,8 +195,8 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
_ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
if (@cmpxchgStrong(u8, &self.shared_state, State.ReadLock, State.Unlocked, .SeqCst, .SeqCst) != null) {
@atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
}

View File

@ -257,7 +257,7 @@ test "ByteFifo" {
defer fifo.deinit();
try fifo.write("HELLO");
testing.expectEqual(usize(5), fifo.readableLength());
testing.expectEqual(@as(usize, 5), fifo.readableLength());
testing.expectEqualSlices(u8, "HELLO", fifo.readableSlice(0));
{
@ -265,34 +265,34 @@ test "ByteFifo" {
while (i < 5) : (i += 1) {
try fifo.write([_]u8{try fifo.peekItem(i)});
}
testing.expectEqual(usize(10), fifo.readableLength());
testing.expectEqual(@as(usize, 10), fifo.readableLength());
testing.expectEqualSlices(u8, "HELLOHELLO", fifo.readableSlice(0));
}
{
testing.expectEqual(u8('H'), try fifo.readItem());
testing.expectEqual(u8('E'), try fifo.readItem());
testing.expectEqual(u8('L'), try fifo.readItem());
testing.expectEqual(u8('L'), try fifo.readItem());
testing.expectEqual(u8('O'), try fifo.readItem());
testing.expectEqual(@as(u8, 'H'), try fifo.readItem());
testing.expectEqual(@as(u8, 'E'), try fifo.readItem());
testing.expectEqual(@as(u8, 'L'), try fifo.readItem());
testing.expectEqual(@as(u8, 'L'), try fifo.readItem());
testing.expectEqual(@as(u8, 'O'), try fifo.readItem());
}
testing.expectEqual(usize(5), fifo.readableLength());
testing.expectEqual(@as(usize, 5), fifo.readableLength());
{ // Writes that wrap around
testing.expectEqual(usize(11), fifo.writableLength());
testing.expectEqual(usize(6), fifo.writableSlice(0).len);
testing.expectEqual(@as(usize, 11), fifo.writableLength());
testing.expectEqual(@as(usize, 6), fifo.writableSlice(0).len);
fifo.writeAssumeCapacity("6<chars<11");
testing.expectEqualSlices(u8, "HELLO6<char", fifo.readableSlice(0));
testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(11));
fifo.discard(11);
testing.expectEqualSlices(u8, "s<11", fifo.readableSlice(0));
fifo.discard(4);
testing.expectEqual(usize(0), fifo.readableLength());
testing.expectEqual(@as(usize, 0), fifo.readableLength());
}
{
const buf = try fifo.writeableWithSize(12);
testing.expectEqual(usize(12), buf.len);
testing.expectEqual(@as(usize, 12), buf.len);
var i: u8 = 0;
while (i < 10) : (i += 1) {
buf[i] = i + 'a';
@ -313,6 +313,6 @@ test "ByteFifo" {
try fifo.print("{}, {}!", "Hello", "World");
var result: [30]u8 = undefined;
testing.expectEqualSlices(u8, "Hello, World!", fifo.read(&result));
testing.expectEqual(usize(0), fifo.readableLength());
testing.expectEqual(@as(usize, 0), fifo.readableLength());
}
}
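An illustrative aside, not part of this commit's diff: testing.expectEqual infers the comparison type from its first argument, so a bare integer literal would force the runtime value to coerce to comptime_int and fail to compile; `@as(usize, 5)` pins the expected type instead, which is why every expected value in the test above gains an @as:

const std = @import("std");
const testing = std.testing;

test "expectEqual needs a typed expected value" {
    var len: usize = 0;
    len += 5;
    // testing.expectEqual(5, len) would not compile: len cannot coerce to comptime_int.
    testing.expectEqual(@as(usize, 5), len);
}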

View File

@ -167,6 +167,10 @@ pub fn format(
'}' => {
const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg);
if (arg_to_print >= args.len) {
@compileError("Too few arguments");
}
try formatType(
args[arg_to_print],
fmt[0..0],
@ -378,10 +382,10 @@ pub fn formatType(
const info = @typeInfo(T).Union;
if (info.tag_type) |UnionTagType| {
try output(context, "{ .");
try output(context, @tagName(UnionTagType(value)));
try output(context, @tagName(@as(UnionTagType, value)));
try output(context, " = ");
inline for (info.fields) |u_field| {
if (@enumToInt(UnionTagType(value)) == u_field.enum_field.?.value) {
if (@enumToInt(@as(UnionTagType, value)) == u_field.enum_field.?.value) {
try formatType(@field(value, u_field.name), "", options, context, Errors, output, max_depth - 1);
}
}
@ -499,7 +503,7 @@ pub fn formatIntValue(
const int_value = if (@typeOf(value) == comptime_int) blk: {
const Int = math.IntFittingRange(value, value);
break :blk Int(value);
break :blk @as(Int, value);
} else
value;
@ -508,7 +512,7 @@ pub fn formatIntValue(
uppercase = false;
} else if (comptime std.mem.eql(u8, fmt, "c")) {
if (@typeOf(int_value).bit_count <= 8) {
return formatAsciiChar(u8(int_value), options, context, Errors, output);
return formatAsciiChar(@as(u8, int_value), options, context, Errors, output);
} else {
@compileError("Cannot print integer that is larger than 8 bits as a ascii");
}
@ -574,7 +578,7 @@ pub fn formatAsciiChar(
comptime Errors: type,
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
return output(context, (*const [1]u8)(&c)[0..]);
return output(context, @as(*const [1]u8, &c)[0..]);
}
pub fn formatBuf(
@ -590,7 +594,7 @@ pub fn formatBuf(
var leftover_padding = if (width > buf.len) (width - buf.len) else return;
const pad_byte: u8 = options.fill;
while (leftover_padding > 0) : (leftover_padding -= 1) {
try output(context, (*const [1]u8)(&pad_byte)[0..1]);
try output(context, @as(*const [1]u8, &pad_byte)[0..1]);
}
}
@ -664,7 +668,7 @@ pub fn formatFloatScientific(
try output(context, float_decimal.digits[0..1]);
try output(context, ".");
if (float_decimal.digits.len > 1) {
const num_digits = if (@typeOf(value) == f32) math.min(usize(9), float_decimal.digits.len) else float_decimal.digits.len;
const num_digits = if (@typeOf(value) == f32) math.min(@as(usize, 9), float_decimal.digits.len) else float_decimal.digits.len;
try output(context, float_decimal.digits[1..num_digits]);
} else {
@ -699,7 +703,7 @@ pub fn formatFloatDecimal(
comptime Errors: type,
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
var x = f64(value);
var x = @as(f64, value);
// Errol doesn't handle these special cases.
if (math.signbit(x)) {
@ -888,7 +892,7 @@ pub fn formatInt(
) Errors!void {
const int_value = if (@typeOf(value) == comptime_int) blk: {
const Int = math.IntFittingRange(value, value);
break :blk Int(value);
break :blk @as(Int, value);
} else
value;
@ -917,14 +921,14 @@ fn formatIntSigned(
const uint = @IntType(false, @typeOf(value).bit_count);
if (value < 0) {
const minus_sign: u8 = '-';
try output(context, (*const [1]u8)(&minus_sign)[0..]);
try output(context, @as(*const [1]u8, &minus_sign)[0..]);
const new_value = @intCast(uint, -(value + 1)) + 1;
return formatIntUnsigned(new_value, base, uppercase, new_options, context, Errors, output);
} else if (options.width == null or options.width.? == 0) {
return formatIntUnsigned(@intCast(uint, value), base, uppercase, options, context, Errors, output);
} else {
const plus_sign: u8 = '+';
try output(context, (*const [1]u8)(&plus_sign)[0..]);
try output(context, @as(*const [1]u8, &plus_sign)[0..]);
const new_value = @intCast(uint, value);
return formatIntUnsigned(new_value, base, uppercase, new_options, context, Errors, output);
}
@ -962,7 +966,7 @@ fn formatIntUnsigned(
const zero_byte: u8 = options.fill;
var leftover_padding = padding - index;
while (true) {
try output(context, (*const [1]u8)(&zero_byte)[0..]);
try output(context, @as(*const [1]u8, &zero_byte)[0..]);
leftover_padding -= 1;
if (leftover_padding == 0) break;
}
@ -994,7 +998,7 @@ fn formatIntCallback(context: *FormatIntBuf, bytes: []const u8) (error{}!void) {
pub fn parseInt(comptime T: type, buf: []const u8, radix: u8) !T {
if (!T.is_signed) return parseUnsigned(T, buf, radix);
if (buf.len == 0) return T(0);
if (buf.len == 0) return @as(T, 0);
if (buf[0] == '-') {
return math.negate(try parseUnsigned(T, buf[1..], radix));
} else if (buf[0] == '+') {
@ -1084,7 +1088,7 @@ pub fn charToDigit(c: u8, radix: u8) (error{InvalidCharacter}!u8) {
fn digitToChar(digit: u8, uppercase: bool) u8 {
return switch (digit) {
0...9 => digit + '0',
10...35 => digit + ((if (uppercase) u8('A') else u8('a')) - 10),
10...35 => digit + ((if (uppercase) @as(u8, 'A') else @as(u8, 'a')) - 10),
else => unreachable,
};
}
@ -1130,19 +1134,19 @@ fn countSize(size: *usize, bytes: []const u8) (error{}!void) {
test "bufPrintInt" {
var buffer: [100]u8 = undefined;
const buf = buffer[0..];
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, i32(-12345678), 2, false, FormatOptions{}), "-101111000110000101001110"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, i32(-12345678), 10, false, FormatOptions{}), "-12345678"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, i32(-12345678), 16, false, FormatOptions{}), "-bc614e"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, i32(-12345678), 16, true, FormatOptions{}), "-BC614E"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(i32, -12345678), 2, false, FormatOptions{}), "-101111000110000101001110"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(i32, -12345678), 10, false, FormatOptions{}), "-12345678"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(i32, -12345678), 16, false, FormatOptions{}), "-bc614e"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(i32, -12345678), 16, true, FormatOptions{}), "-BC614E"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, u32(12345678), 10, true, FormatOptions{}), "12345678"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(u32, 12345678), 10, true, FormatOptions{}), "12345678"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, u32(666), 10, false, FormatOptions{ .width = 6 }), " 666"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, u32(0x1234), 16, false, FormatOptions{ .width = 6 }), " 1234"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, u32(0x1234), 16, false, FormatOptions{ .width = 1 }), "1234"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(u32, 666), 10, false, FormatOptions{ .width = 6 }), " 666"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(u32, 0x1234), 16, false, FormatOptions{ .width = 6 }), " 1234"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(u32, 0x1234), 16, false, FormatOptions{ .width = 1 }), "1234"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, i32(42), 10, false, FormatOptions{ .width = 3 }), "+42"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, i32(-42), 10, false, FormatOptions{ .width = 3 }), "-42"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(i32, 42), 10, false, FormatOptions{ .width = 3 }), "+42"));
testing.expect(mem.eql(u8, bufPrintIntToSlice(buf, @as(i32, -42), 10, false, FormatOptions{ .width = 3 }), "-42"));
}
fn bufPrintIntToSlice(buf: []u8, value: var, base: u8, uppercase: bool, options: FormatOptions) []u8 {
@ -1204,8 +1208,8 @@ test "int.specifier" {
}
test "int.padded" {
try testFmt("u8: ' 1'", "u8: '{:4}'", u8(1));
try testFmt("u8: 'xxx1'", "u8: '{:x<4}'", u8(1));
try testFmt("u8: ' 1'", "u8: '{:4}'", @as(u8, 1));
try testFmt("u8: 'xxx1'", "u8: '{:x<4}'", @as(u8, 1));
}
test "buffer" {
@ -1283,8 +1287,8 @@ test "filesize" {
// TODO https://github.com/ziglang/zig/issues/3289
return error.SkipZigTest;
}
try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024));
try testFmt("file size: 66.06MB\n", "file size: {B:.2}\n", usize(63 * 1024 * 1024));
try testFmt("file size: 63MiB\n", "file size: {Bi}\n", @as(usize, 63 * 1024 * 1024));
try testFmt("file size: 66.06MB\n", "file size: {B:.2}\n", @as(usize, 63 * 1024 * 1024));
}
test "struct" {
@ -1321,10 +1325,10 @@ test "float.scientific" {
// TODO https://github.com/ziglang/zig/issues/3289
return error.SkipZigTest;
}
try testFmt("f32: 1.34000003e+00", "f32: {e}", f32(1.34));
try testFmt("f32: 1.23400001e+01", "f32: {e}", f32(12.34));
try testFmt("f64: -1.234e+11", "f64: {e}", f64(-12.34e10));
try testFmt("f64: 9.99996e-40", "f64: {e}", f64(9.999960e-40));
try testFmt("f32: 1.34000003e+00", "f32: {e}", @as(f32, 1.34));
try testFmt("f32: 1.23400001e+01", "f32: {e}", @as(f32, 12.34));
try testFmt("f64: -1.234e+11", "f64: {e}", @as(f64, -12.34e10));
try testFmt("f64: 9.99996e-40", "f64: {e}", @as(f64, 9.999960e-40));
}
test "float.scientific.precision" {
@ -1332,12 +1336,12 @@ test "float.scientific.precision" {
// TODO https://github.com/ziglang/zig/issues/3289
return error.SkipZigTest;
}
try testFmt("f64: 1.40971e-42", "f64: {e:.5}", f64(1.409706e-42));
try testFmt("f64: 1.00000e-09", "f64: {e:.5}", f64(@bitCast(f32, u32(814313563))));
try testFmt("f64: 7.81250e-03", "f64: {e:.5}", f64(@bitCast(f32, u32(1006632960))));
try testFmt("f64: 1.40971e-42", "f64: {e:.5}", @as(f64, 1.409706e-42));
try testFmt("f64: 1.00000e-09", "f64: {e:.5}", @as(f64, @bitCast(f32, @as(u32, 814313563))));
try testFmt("f64: 7.81250e-03", "f64: {e:.5}", @as(f64, @bitCast(f32, @as(u32, 1006632960))));
// libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05.
// In fact, libc doesn't round up many of these cases where the 5 is one digit past the precision point.
try testFmt("f64: 1.00001e+05", "f64: {e:.5}", f64(@bitCast(f32, u32(1203982400))));
try testFmt("f64: 1.00001e+05", "f64: {e:.5}", @as(f64, @bitCast(f32, @as(u32, 1203982400))));
}
test "float.special" {
@ -1360,21 +1364,21 @@ test "float.decimal" {
// TODO https://github.com/ziglang/zig/issues/3289
return error.SkipZigTest;
}
try testFmt("f64: 152314000000000000000000000000", "f64: {d}", f64(1.52314e+29));
try testFmt("f32: 1.1", "f32: {d:.1}", f32(1.1234));
try testFmt("f32: 1234.57", "f32: {d:.2}", f32(1234.567));
try testFmt("f64: 152314000000000000000000000000", "f64: {d}", @as(f64, 1.52314e+29));
try testFmt("f32: 1.1", "f32: {d:.1}", @as(f32, 1.1234));
try testFmt("f32: 1234.57", "f32: {d:.2}", @as(f32, 1234.567));
// -11.1234 is converted to f64 -11.12339... internally (errol3() function takes f64).
// -11.12339... is rounded back up to -11.1234
try testFmt("f32: -11.1234", "f32: {d:.4}", f32(-11.1234));
try testFmt("f32: 91.12345", "f32: {d:.5}", f32(91.12345));
try testFmt("f64: 91.1234567890", "f64: {d:.10}", f64(91.12345678901235));
try testFmt("f64: 0.00000", "f64: {d:.5}", f64(0.0));
try testFmt("f64: 6", "f64: {d:.0}", f64(5.700));
try testFmt("f64: 10.0", "f64: {d:.1}", f64(9.999));
try testFmt("f64: 1.000", "f64: {d:.3}", f64(1.0));
try testFmt("f64: 0.00030000", "f64: {d:.8}", f64(0.0003));
try testFmt("f64: 0.00000", "f64: {d:.5}", f64(1.40130e-45));
try testFmt("f64: 0.00000", "f64: {d:.5}", f64(9.999960e-40));
try testFmt("f32: -11.1234", "f32: {d:.4}", @as(f32, -11.1234));
try testFmt("f32: 91.12345", "f32: {d:.5}", @as(f32, 91.12345));
try testFmt("f64: 91.1234567890", "f64: {d:.10}", @as(f64, 91.12345678901235));
try testFmt("f64: 0.00000", "f64: {d:.5}", @as(f64, 0.0));
try testFmt("f64: 6", "f64: {d:.0}", @as(f64, 5.700));
try testFmt("f64: 10.0", "f64: {d:.1}", @as(f64, 9.999));
try testFmt("f64: 1.000", "f64: {d:.3}", @as(f64, 1.0));
try testFmt("f64: 0.00030000", "f64: {d:.8}", @as(f64, 0.0003));
try testFmt("f64: 0.00000", "f64: {d:.5}", @as(f64, 1.40130e-45));
try testFmt("f64: 0.00000", "f64: {d:.5}", @as(f64, 9.999960e-40));
}
test "float.libc.sanity" {
@ -1382,22 +1386,22 @@ test "float.libc.sanity" {
// TODO https://github.com/ziglang/zig/issues/3289
return error.SkipZigTest;
}
try testFmt("f64: 0.00001", "f64: {d:.5}", f64(@bitCast(f32, u32(916964781))));
try testFmt("f64: 0.00001", "f64: {d:.5}", f64(@bitCast(f32, u32(925353389))));
try testFmt("f64: 0.10000", "f64: {d:.5}", f64(@bitCast(f32, u32(1036831278))));
try testFmt("f64: 1.00000", "f64: {d:.5}", f64(@bitCast(f32, u32(1065353133))));
try testFmt("f64: 10.00000", "f64: {d:.5}", f64(@bitCast(f32, u32(1092616192))));
try testFmt("f64: 0.00001", "f64: {d:.5}", @as(f64, @bitCast(f32, @as(u32, 916964781))));
try testFmt("f64: 0.00001", "f64: {d:.5}", @as(f64, @bitCast(f32, @as(u32, 925353389))));
try testFmt("f64: 0.10000", "f64: {d:.5}", @as(f64, @bitCast(f32, @as(u32, 1036831278))));
try testFmt("f64: 1.00000", "f64: {d:.5}", @as(f64, @bitCast(f32, @as(u32, 1065353133))));
try testFmt("f64: 10.00000", "f64: {d:.5}", @as(f64, @bitCast(f32, @as(u32, 1092616192))));
// libc differences
//
// This is 0.015625 exactly according to gdb. We thus round down;
// glibc, however, rounds up for some reason. This occurs for all
// floats of the form x.yyyy25 at a precision point.
try testFmt("f64: 0.01563", "f64: {d:.5}", f64(@bitCast(f32, u32(1015021568))));
try testFmt("f64: 0.01563", "f64: {d:.5}", @as(f64, @bitCast(f32, @as(u32, 1015021568))));
// errol3 rounds to ... 630 but libc rounds to ...632. Grisu3
// also rounds to 630 so I'm inclined to believe libc is not
// optimal here.
try testFmt("f64: 18014400656965630.00000", "f64: {d:.5}", f64(@bitCast(f32, u32(1518338049))));
try testFmt("f64: 18014400656965630.00000", "f64: {d:.5}", @as(f64, @bitCast(f32, @as(u32, 1518338049))));
}
test "custom" {
@ -1673,17 +1677,17 @@ test "formatType max_depth" {
}
test "positional" {
try testFmt("2 1 0", "{2} {1} {0}", usize(0), usize(1), usize(2));
try testFmt("2 1 0", "{2} {1} {}", usize(0), usize(1), usize(2));
try testFmt("0 0", "{0} {0}", usize(0));
try testFmt("0 1", "{} {1}", usize(0), usize(1));
try testFmt("1 0 0 1", "{1} {} {0} {}", usize(0), usize(1));
try testFmt("2 1 0", "{2} {1} {0}", @as(usize, 0), @as(usize, 1), @as(usize, 2));
try testFmt("2 1 0", "{2} {1} {}", @as(usize, 0), @as(usize, 1), @as(usize, 2));
try testFmt("0 0", "{0} {0}", @as(usize, 0));
try testFmt("0 1", "{} {1}", @as(usize, 0), @as(usize, 1));
try testFmt("1 0 0 1", "{1} {} {0} {}", @as(usize, 0), @as(usize, 1));
}
test "positional with specifier" {
try testFmt("10.0", "{0d:.1}", f64(9.999));
try testFmt("10.0", "{0d:.1}", @as(f64, 9.999));
}
test "positional/alignment/width/precision" {
try testFmt("10.0", "{0d: >3.1}", f64(9.999));
try testFmt("10.0", "{0d: >3.1}", @as(f64, 9.999));
}
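The same `@as` rewrite applies when passing comptime integer literals to the format functions, since a bare literal has no runtime type. A small sketch, assuming the varargs `std.fmt.bufPrint` signature used elsewhere in this std version:

```zig
const std = @import("std");

test "formatting an @as-typed literal" {
    var buf: [32]u8 = undefined;
    // u32(42) would have been the old spelling of this cast.
    const printed = try std.fmt.bufPrint(buf[0..], "{}", @as(u32, 42));
    std.testing.expect(std.mem.eql(u8, printed, "42"));
}
```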

View File

@ -296,7 +296,7 @@ fn hpMul10(hp: *HP) void {
/// @buf: The output buffer.
/// &return: The exponent.
fn errolInt(val: f64, buffer: []u8) FloatDecimal {
const pow19 = u128(1e19);
const pow19 = @as(u128, 1e19);
assert((val > 9.007199254740992e15) and val < (3.40282366920938e38));
@ -670,7 +670,7 @@ fn fpeint(from: f64) u128 {
const bits = @bitCast(u64, from);
assert((bits & ((1 << 52) - 1)) == 0);
return u128(1) << @truncate(u7, (bits >> 52) -% 1023);
return @as(u128, 1) << @truncate(u7, (bits >> 52) -% 1023);
}
/// Given two different integers with the same length in terms of the number

View File

@ -59,29 +59,29 @@ const Z96 = struct {
// d += s
inline fn add(d: *Z96, s: Z96) void {
var w = u64(d.d0) + u64(s.d0);
var w = @as(u64, d.d0) + @as(u64, s.d0);
d.d0 = @truncate(u32, w);
w >>= 32;
w += u64(d.d1) + u64(s.d1);
w += @as(u64, d.d1) + @as(u64, s.d1);
d.d1 = @truncate(u32, w);
w >>= 32;
w += u64(d.d2) + u64(s.d2);
w += @as(u64, d.d2) + @as(u64, s.d2);
d.d2 = @truncate(u32, w);
}
// d -= s
inline fn sub(d: *Z96, s: Z96) void {
var w = u64(d.d0) -% u64(s.d0);
var w = @as(u64, d.d0) -% @as(u64, s.d0);
d.d0 = @truncate(u32, w);
w >>= 32;
w += u64(d.d1) -% u64(s.d1);
w += @as(u64, d.d1) -% @as(u64, s.d1);
d.d1 = @truncate(u32, w);
w >>= 32;
w += u64(d.d2) -% u64(s.d2);
w += @as(u64, d.d2) -% @as(u64, s.d2);
d.d2 = @truncate(u32, w);
}
};
@ -160,7 +160,7 @@ fn convertRepr(comptime T: type, n: FloatRepr) T {
break :blk if (n.negative) f64_minus_zero else f64_plus_zero;
} else if (s.d2 != 0) {
const binexs2 = @intCast(u64, binary_exponent) << 52;
const rr = (u64(s.d2 & ~mask28) << 24) | ((u64(s.d1) + 128) >> 8) | binexs2;
const rr = (@as(u64, s.d2 & ~mask28) << 24) | ((@as(u64, s.d1) + 128) >> 8) | binexs2;
break :blk if (n.negative) rr | (1 << 63) else rr;
} else {
break :blk 0;
@ -375,7 +375,7 @@ pub fn parseFloat(comptime T: type, s: []const u8) !T {
return switch (try parseRepr(s, &r)) {
ParseResult.Ok => convertRepr(T, r),
ParseResult.PlusZero => 0.0,
ParseResult.MinusZero => -T(0.0),
ParseResult.MinusZero => -@as(T, 0.0),
ParseResult.PlusInf => std.math.inf(T),
ParseResult.MinusInf => -std.math.inf(T),
};
@ -426,8 +426,8 @@ test "fmt.parseFloat" {
expect(approxEq(T, try parseFloat(T, "1234e-2"), 12.34, epsilon));
expect(approxEq(T, try parseFloat(T, "123142.1"), 123142.1, epsilon));
expect(approxEq(T, try parseFloat(T, "-123142.1124"), T(-123142.1124), epsilon));
expect(approxEq(T, try parseFloat(T, "0.7062146892655368"), T(0.7062146892655368), epsilon));
expect(approxEq(T, try parseFloat(T, "-123142.1124"), @as(T, -123142.1124), epsilon));
expect(approxEq(T, try parseFloat(T, "0.7062146892655368"), @as(T, 0.7062146892655368), epsilon));
}
}
}
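One case worth calling out from the hunk above is `-T(0.0)` becoming `-@as(T, 0.0)`: negation now applies to an explicitly typed zero rather than to a cast call. A brief sketch using the public `std.fmt.parseFloat` and `std.math.signbit` seen above:

```zig
const std = @import("std");

test "parseFloat and negative zero via @as" {
    const x = try std.fmt.parseFloat(f64, "1.5");
    std.testing.expect(x == 1.5);
    // -T(0.0) is now spelled -@as(T, 0.0); the sign bit is still preserved.
    std.testing.expect(std.math.signbit(-@as(f64, 0.0)));
}
```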

View File

@ -6,6 +6,7 @@ const base64 = std.base64;
const crypto = std.crypto;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const math = std.math;
pub const path = @import("fs/path.zig");
pub const File = @import("fs/file.zig").File;
@ -584,7 +585,7 @@ pub const Dir = struct {
.FileBothDirectoryInformation,
w.FALSE,
null,
if (self.first) w.BOOLEAN(w.TRUE) else w.BOOLEAN(w.FALSE),
if (self.first) @as(w.BOOLEAN, w.TRUE) else @as(w.BOOLEAN, w.FALSE),
);
self.first = false;
if (io.Information == 0) return null;
@ -698,17 +699,81 @@ pub const Dir = struct {
/// Call `File.close` on the result when done.
pub fn openRead(self: Dir, sub_path: []const u8) File.OpenError!File {
if (builtin.os == .windows) {
const path_w = try os.windows.sliceToPrefixedFileW(sub_path);
return self.openReadW(&path_w);
}
const path_c = try os.toPosixPath(sub_path);
return self.openReadC(&path_c);
}
/// Call `File.close` on the result when done.
pub fn openReadC(self: Dir, sub_path: [*]const u8) File.OpenError!File {
const flags = os.O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
if (builtin.os == .windows) {
const path_w = try os.windows.cStrToPrefixedFileW(sub_path);
return self.openReadW(&path_w);
}
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
const fd = try os.openatC(self.fd, sub_path, flags, 0);
return File.openHandle(fd);
}
pub fn openReadW(self: Dir, sub_path_w: [*]const u16) File.OpenError!File {
const w = os.windows;
var result = File{ .handle = undefined };
const path_len_bytes = math.cast(u16, mem.toSliceConst(u16, sub_path_w).len * 2) catch |err| switch (err) {
error.Overflow => return error.NameTooLong,
};
var nt_name = w.UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(sub_path_w)),
};
var attr = w.OBJECT_ATTRIBUTES{
.Length = @sizeOf(w.OBJECT_ATTRIBUTES),
.RootDirectory = if (path.isAbsoluteW(sub_path_w)) null else self.fd,
.Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here.
.ObjectName = &nt_name,
.SecurityDescriptor = null,
.SecurityQualityOfService = null,
};
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
return error.IsDir;
}
if (sub_path_w[0] == '.' and sub_path_w[1] == '.' and sub_path_w[2] == 0) {
return error.IsDir;
}
var io: w.IO_STATUS_BLOCK = undefined;
const rc = w.ntdll.NtCreateFile(
&result.handle,
w.GENERIC_READ | w.SYNCHRONIZE,
&attr,
&io,
null,
w.FILE_ATTRIBUTE_NORMAL,
w.FILE_SHARE_READ,
w.FILE_OPEN,
w.FILE_NON_DIRECTORY_FILE | w.FILE_SYNCHRONOUS_IO_NONALERT,
null,
0,
);
switch (rc) {
w.STATUS.SUCCESS => return result,
w.STATUS.OBJECT_NAME_INVALID => unreachable,
w.STATUS.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
w.STATUS.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
w.STATUS.INVALID_PARAMETER => unreachable,
w.STATUS.SHARING_VIOLATION => return error.SharingViolation,
w.STATUS.ACCESS_DENIED => return error.AccessDenied,
w.STATUS.PIPE_BUSY => return error.PipeBusy,
w.STATUS.OBJECT_PATH_SYNTAX_BAD => unreachable,
else => return w.unexpectedStatus(rc),
}
}
/// Call `close` on the result when done.
pub fn openDir(self: Dir, sub_path: []const u8) OpenError!Dir {
if (builtin.os == .windows) {
@ -866,6 +931,34 @@ pub const Dir = struct {
return os.readlinkatC(self.fd, sub_path_c, buffer);
}
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
return self.readFileAllocAligned(allocator, file_path, max_bytes, @alignOf(u8));
}
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readFileAllocAligned(
self: Dir,
allocator: *mem.Allocator,
file_path: []const u8,
max_bytes: usize,
comptime A: u29,
) ![]align(A) u8 {
var file = try self.openRead(file_path);
defer file.close();
const size = math.cast(usize, try file.getEndPos()) catch math.maxInt(usize);
if (size > max_bytes) return error.FileTooBig;
const buf = try allocator.alignedAlloc(u8, A, size);
errdefer allocator.free(buf);
try file.inStream().stream.readNoEof(buf);
return buf;
}
pub const DeleteTreeError = error{
AccessDenied,
FileTooBig,
@ -1100,7 +1193,7 @@ pub const Walker = struct {
}
pub fn deinit(self: *Walker) void {
while (self.stack.popOrNull()) |*item| item.dir_it.close();
while (self.stack.popOrNull()) |*item| item.dir_it.dir.close();
self.stack.deinit();
self.name_buffer.deinit();
}
@ -1150,9 +1243,9 @@ pub fn openSelfExe() OpenSelfExeError!File {
return File.openReadC(c"/proc/self/exe");
}
if (builtin.os == .windows) {
var buf: [os.windows.PATH_MAX_WIDE]u16 = undefined;
const wide_slice = try selfExePathW(&buf);
return File.openReadW(wide_slice.ptr);
const wide_slice = selfExePathW();
const prefixed_path_w = try os.windows.wToPrefixedFileW(wide_slice);
return Dir.cwd().openReadW(&prefixed_path_w);
}
var buf: [MAX_PATH_BYTES]u8 = undefined;
const self_exe_path = try selfExePath(&buf);
@ -1203,8 +1296,7 @@ pub fn selfExePath(out_buffer: *[MAX_PATH_BYTES]u8) SelfExePathError![]u8 {
return mem.toSlice(u8, out_buffer);
},
.windows => {
var utf16le_buf: [os.windows.PATH_MAX_WIDE]u16 = undefined;
const utf16le_slice = try selfExePathW(&utf16le_buf);
const utf16le_slice = selfExePathW();
// Trust that Windows gives us valid UTF-16LE.
const end_index = std.unicode.utf16leToUtf8(out_buffer, utf16le_slice) catch unreachable;
return out_buffer[0..end_index];
@ -1213,9 +1305,10 @@ pub fn selfExePath(out_buffer: *[MAX_PATH_BYTES]u8) SelfExePathError![]u8 {
}
}
/// Same as `selfExePath` except the result is UTF16LE-encoded.
pub fn selfExePathW(out_buffer: *[os.windows.PATH_MAX_WIDE]u16) SelfExePathError![]u16 {
return os.windows.GetModuleFileNameW(null, out_buffer, out_buffer.len);
/// The result is UTF16LE-encoded.
pub fn selfExePathW() []const u16 {
const image_path_name = &os.windows.peb().ProcessParameters.ImagePathName;
return mem.toSliceConst(u16, image_path_name.Buffer);
}
/// `selfExeDirPath` except allocates the result on the heap.
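To illustrate the new `Dir.readFileAlloc` added above, here is a rough usage sketch; the `"build.zig"` path and the choice of `std.heap.direct_allocator` are placeholders for illustration only:

```zig
const std = @import("std");

test "Dir.readFileAlloc usage sketch" {
    const allocator = std.heap.direct_allocator;
    // Placeholder path; returns error.FileNotFound if it does not exist,
    // and error.FileTooBig if the file exceeds the byte limit.
    const contents = try std.fs.Dir.cwd().readFileAlloc(allocator, "build.zig", 10 * 1024 * 1024);
    defer allocator.free(contents);
}
```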

View File

@ -25,42 +25,23 @@ pub const File = struct {
pub const OpenError = windows.CreateFileError || os.OpenError;
/// Call close to clean up.
/// Deprecated; call `std.fs.Dir.openRead` directly.
pub fn openRead(path: []const u8) OpenError!File {
if (builtin.os == .windows) {
const path_w = try windows.sliceToPrefixedFileW(path);
return openReadW(&path_w);
}
const path_c = try os.toPosixPath(path);
return openReadC(&path_c);
return std.fs.Dir.cwd().openRead(path);
}
/// `openRead` except with a null terminated path
pub fn openReadC(path: [*]const u8) OpenError!File {
if (builtin.os == .windows) {
const path_w = try windows.cStrToPrefixedFileW(path);
return openReadW(&path_w);
}
const flags = os.O_LARGEFILE | os.O_RDONLY | os.O_CLOEXEC;
const fd = try os.openC(path, flags, 0);
return openHandle(fd);
/// Deprecated; call `std.fs.Dir.openReadC` directly.
pub fn openReadC(path_c: [*]const u8) OpenError!File {
return std.fs.Dir.cwd().openReadC(path_c);
}
/// `openRead` except with a null terminated UTF16LE encoded path
/// Deprecated; call `std.fs.Dir.openReadW` directly.
pub fn openReadW(path_w: [*]const u16) OpenError!File {
const handle = try windows.CreateFileW(
path_w,
windows.GENERIC_READ,
windows.FILE_SHARE_READ,
null,
windows.OPEN_EXISTING,
windows.FILE_ATTRIBUTE_NORMAL,
null,
);
return openHandle(handle);
return std.fs.Dir.cwd().openReadW(path_w);
}
/// Calls `openWriteMode` with `default_mode` for the mode.
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn openWrite(path: []const u8) OpenError!File {
return openWriteMode(path, default_mode);
}
@ -68,6 +49,7 @@ pub const File = struct {
/// If the path does not exist it will be created.
/// If a file already exists in the destination it will be truncated.
/// Call close to clean up.
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn openWriteMode(path: []const u8, file_mode: Mode) OpenError!File {
if (builtin.os == .windows) {
const path_w = try windows.sliceToPrefixedFileW(path);
@ -78,17 +60,20 @@ pub const File = struct {
}
/// Same as `openWriteMode` except `path` is null-terminated.
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn openWriteModeC(path: [*]const u8, file_mode: Mode) OpenError!File {
if (builtin.os == .windows) {
const path_w = try windows.cStrToPrefixedFileW(path);
return openWriteModeW(&path_w, file_mode);
}
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_TRUNC;
const fd = try os.openC(path, flags, file_mode);
return openHandle(fd);
}
/// Same as `openWriteMode` except `path` is null-terminated and UTF16LE encoded
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn openWriteModeW(path_w: [*]const u16, file_mode: Mode) OpenError!File {
const handle = try windows.CreateFileW(
path_w,
@ -105,6 +90,7 @@ pub const File = struct {
/// If the path does not exist it will be created.
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// Call close to clean up.
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn openWriteNoClobber(path: []const u8, file_mode: Mode) OpenError!File {
if (builtin.os == .windows) {
const path_w = try windows.sliceToPrefixedFileW(path);
@ -114,16 +100,19 @@ pub const File = struct {
return openWriteNoClobberC(&path_c, file_mode);
}
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn openWriteNoClobberC(path: [*]const u8, file_mode: Mode) OpenError!File {
if (builtin.os == .windows) {
const path_w = try windows.cStrToPrefixedFileW(path);
return openWriteNoClobberW(&path_w, file_mode);
}
const flags = os.O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_EXCL;
const O_LARGEFILE = if (@hasDecl(os, "O_LARGEFILE")) os.O_LARGEFILE else 0;
const flags = O_LARGEFILE | os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC | os.O_EXCL;
const fd = try os.openC(path, flags, file_mode);
return openHandle(fd);
}
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn openWriteNoClobberW(path_w: [*]const u16, file_mode: Mode) OpenError!File {
const handle = try windows.CreateFileW(
path_w,
@ -146,16 +135,19 @@ pub const File = struct {
/// In general it is recommended to avoid this function. For example,
/// instead of testing if a file exists and then opening it, just
/// open it and handle the error for file not found.
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn access(path: []const u8) !void {
return os.access(path, os.F_OK);
}
/// Same as `access` except the parameter is null-terminated.
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn accessC(path: [*]const u8) !void {
return os.accessC(path, os.F_OK);
}
/// Same as `access` except the parameter is null-terminated UTF16LE-encoded.
/// TODO: deprecate this and move it to `std.fs.Dir`.
pub fn accessW(path: [*]const u16) !void {
return os.accessW(path, os.F_OK);
}
@ -272,9 +264,9 @@ pub const File = struct {
return Stat{
.size = @bitCast(u64, st.size),
.mode = st.mode,
.atime = i64(atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = i64(mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = i64(ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
.atime = @as(i64, atime.tv_sec) * std.time.ns_per_s + atime.tv_nsec,
.mtime = @as(i64, mtime.tv_sec) * std.time.ns_per_s + mtime.tv_nsec,
.ctime = @as(i64, ctime.tv_sec) * std.time.ns_per_s + ctime.tv_nsec,
};
}

View File

@ -32,7 +32,7 @@ pub fn isSep(byte: u8) bool {
/// This is different from mem.join in that the separator will not be repeated if
/// it is found at the end or beginning of a pair of consecutive paths.
fn joinSep(allocator: *Allocator, separator: u8, paths: []const []const u8) ![]u8 {
if (paths.len == 0) return (([*]u8)(undefined))[0..0];
if (paths.len == 0) return &[0]u8{};
const total_len = blk: {
var sum: usize = paths[0].len;

View File

@ -306,7 +306,7 @@ test "hash struct deep" {
test "testHash optional" {
const a: ?u32 = 123;
const b: ?u32 = null;
testing.expectEqual(testHash(a), testHash(u32(123)));
testing.expectEqual(testHash(a), testHash(@as(u32, 123)));
testing.expect(testHash(a) != testHash(b));
testing.expectEqual(testHash(b), 0);
}
@ -315,9 +315,9 @@ test "testHash array" {
const a = [_]u32{ 1, 2, 3 };
const h = testHash(a);
var hasher = Wyhash.init(0);
autoHash(&hasher, u32(1));
autoHash(&hasher, u32(2));
autoHash(&hasher, u32(3));
autoHash(&hasher, @as(u32, 1));
autoHash(&hasher, @as(u32, 2));
autoHash(&hasher, @as(u32, 3));
testing.expectEqual(h, hasher.final());
}
@ -330,9 +330,9 @@ test "testHash struct" {
const f = Foo{};
const h = testHash(f);
var hasher = Wyhash.init(0);
autoHash(&hasher, u32(1));
autoHash(&hasher, u32(2));
autoHash(&hasher, u32(3));
autoHash(&hasher, @as(u32, 1));
autoHash(&hasher, @as(u32, 2));
autoHash(&hasher, @as(u32, 3));
testing.expectEqual(h, hasher.final());
}
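As in the tests above, values handed to `autoHash` need a concrete integer type, which is what the `@as(u32, ...)` rewrites provide. A minimal determinism check, assuming the `std.hash.Wyhash` and `std.hash.autoHash` exports that the test file imports:

```zig
const std = @import("std");

test "autoHash is deterministic for @as-typed values" {
    var a = std.hash.Wyhash.init(0);
    var b = std.hash.Wyhash.init(0);
    // @as(u32, 123) replaces the old u32(123) literal cast.
    std.hash.autoHash(&a, @as(u32, 123));
    std.hash.autoHash(&b, @as(u32, 123));
    std.testing.expectEqual(a.final(), b.final());
}
```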

View File

@ -214,7 +214,7 @@ pub const CityHash64 = struct {
}
fn hashLen0To16(str: []const u8) u64 {
const len: u64 = u64(str.len);
const len: u64 = @as(u64, str.len);
if (len >= 8) {
const mul: u64 = k2 +% len *% 2;
const a: u64 = fetch64(str.ptr) +% k2;
@ -240,7 +240,7 @@ pub const CityHash64 = struct {
}
fn hashLen17To32(str: []const u8) u64 {
const len: u64 = u64(str.len);
const len: u64 = @as(u64, str.len);
const mul: u64 = k2 +% len *% 2;
const a: u64 = fetch64(str.ptr) *% k1;
const b: u64 = fetch64(str.ptr + 8);
@ -251,7 +251,7 @@ pub const CityHash64 = struct {
}
fn hashLen33To64(str: []const u8) u64 {
const len: u64 = u64(str.len);
const len: u64 = @as(u64, str.len);
const mul: u64 = k2 +% len *% 2;
const a: u64 = fetch64(str.ptr) *% k2;
const b: u64 = fetch64(str.ptr + 8);
@ -305,7 +305,7 @@ pub const CityHash64 = struct {
return hashLen33To64(str);
}
var len: u64 = u64(str.len);
var len: u64 = @as(u64, str.len);
var x: u64 = fetch64(str.ptr + str.len - 40);
var y: u64 = fetch64(str.ptr + str.len - 16) +% fetch64(str.ptr + str.len - 56);

View File

@ -65,10 +65,10 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
const p = input[i .. i + 8];
// Unrolling this way gives ~50Mb/s increase
self.crc ^= (u32(p[0]) << 0);
self.crc ^= (u32(p[1]) << 8);
self.crc ^= (u32(p[2]) << 16);
self.crc ^= (u32(p[3]) << 24);
self.crc ^= (@as(u32, p[0]) << 0);
self.crc ^= (@as(u32, p[1]) << 8);
self.crc ^= (@as(u32, p[2]) << 16);
self.crc ^= (@as(u32, p[3]) << 24);
self.crc =
lookup_tables[0][p[7]] ^

View File

@ -98,7 +98,7 @@ pub const Murmur2_64 = struct {
pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
const m: u64 = 0xc6a4a7935bd1e995;
const len = u64(str.len);
const len = @as(u64, str.len);
var h1: u64 = seed ^ (len *% m);
for (@ptrCast([*]allowzero align(1) const u64, str.ptr)[0..@intCast(usize, len >> 3)]) |v| {
var k1: u64 = v;

View File

@ -102,7 +102,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
}
const b2 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
return (u128(b2) << 64) | b1;
return (@as(u128, b2) << 64) | b1;
}
fn round(self: *Self, b: []const u8) void {
@ -121,19 +121,19 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
fn sipRound(d: *Self) void {
d.v0 +%= d.v1;
d.v1 = math.rotl(u64, d.v1, u64(13));
d.v1 = math.rotl(u64, d.v1, @as(u64, 13));
d.v1 ^= d.v0;
d.v0 = math.rotl(u64, d.v0, u64(32));
d.v0 = math.rotl(u64, d.v0, @as(u64, 32));
d.v2 +%= d.v3;
d.v3 = math.rotl(u64, d.v3, u64(16));
d.v3 = math.rotl(u64, d.v3, @as(u64, 16));
d.v3 ^= d.v2;
d.v0 +%= d.v3;
d.v3 = math.rotl(u64, d.v3, u64(21));
d.v3 = math.rotl(u64, d.v3, @as(u64, 21));
d.v3 ^= d.v0;
d.v2 +%= d.v1;
d.v1 = math.rotl(u64, d.v1, u64(17));
d.v1 = math.rotl(u64, d.v1, @as(u64, 17));
d.v1 ^= d.v2;
d.v2 = math.rotl(u64, d.v2, u64(32));
d.v2 = math.rotl(u64, d.v2, @as(u64, 32));
}
pub fn hash(key: []const u8, input: []const u8) T {

View File

@ -402,7 +402,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
fn keyToIndex(hm: Self, key: K) usize {
return hm.constrainIndex(usize(hash(key)));
return hm.constrainIndex(@as(usize, hash(key)));
}
fn constrainIndex(hm: Self, i: usize) usize {

View File

@ -41,8 +41,7 @@ var direct_allocator_state = Allocator{
const DirectAllocator = struct {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
if (n == 0)
return (([*]u8)(undefined))[0..0];
if (n == 0) return &[0]u8{};
if (builtin.os == .windows) {
const w = os.windows;
@ -261,8 +260,7 @@ pub const HeapAllocator = switch (builtin.os) {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
if (n == 0)
return (([*]u8)(undefined))[0..0];
if (n == 0) return &[0]u8{};
const amt = n + alignment + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
@ -677,7 +675,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
) catch {
const result = try self.fallback_allocator.reallocFn(
self.fallback_allocator,
([*]u8)(undefined)[0..0],
&[0]u8{},
undefined,
new_size,
new_align,
@ -895,10 +893,10 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
if (mem.page_size << 2 > maxInt(usize)) return;
const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
const large_align = u29(mem.page_size << 2);
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;
_ = @shlWithOverflow(usize, ~usize(0), USizeShift(@ctz(u29, large_align)), &align_mask);
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);
var slice = try allocator.alignedAlloc(u8, large_align, 500);
testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

View File

@ -399,7 +399,7 @@ test "Headers.iterator" {
}
count += 1;
}
testing.expectEqual(i32(2), count);
testing.expectEqual(@as(i32, 2), count);
}
test "Headers.contains" {
@ -420,10 +420,10 @@ test "Headers.delete" {
try h.append("cookie", "somevalue", null);
testing.expectEqual(false, h.delete("not-present"));
testing.expectEqual(usize(3), h.count());
testing.expectEqual(@as(usize, 3), h.count());
testing.expectEqual(true, h.delete("foo"));
testing.expectEqual(usize(2), h.count());
testing.expectEqual(@as(usize, 2), h.count());
{
const e = h.at(0);
testing.expectEqualSlices(u8, "baz", e.name);
@ -448,7 +448,7 @@ test "Headers.orderedRemove" {
try h.append("cookie", "somevalue", null);
h.orderedRemove(0);
testing.expectEqual(usize(2), h.count());
testing.expectEqual(@as(usize, 2), h.count());
{
const e = h.at(0);
testing.expectEqualSlices(u8, "baz", e.name);
@ -471,7 +471,7 @@ test "Headers.swapRemove" {
try h.append("cookie", "somevalue", null);
h.swapRemove(0);
testing.expectEqual(usize(2), h.count());
testing.expectEqual(@as(usize, 2), h.count());
{
const e = h.at(0);
testing.expectEqualSlices(u8, "cookie", e.name);

View File

@ -34,28 +34,23 @@ else
Mode.blocking;
pub const is_async = mode != .blocking;
pub const GetStdIoError = os.windows.GetStdHandleError;
pub fn getStdOut() GetStdIoError!File {
pub fn getStdOut() File {
if (builtin.os == .windows) {
const handle = try os.windows.GetStdHandle(os.windows.STD_OUTPUT_HANDLE);
return File.openHandle(handle);
return File.openHandle(os.windows.peb().ProcessParameters.hStdOutput);
}
return File.openHandle(os.STDOUT_FILENO);
}
pub fn getStdErr() GetStdIoError!File {
pub fn getStdErr() File {
if (builtin.os == .windows) {
const handle = try os.windows.GetStdHandle(os.windows.STD_ERROR_HANDLE);
return File.openHandle(handle);
return File.openHandle(os.windows.peb().ProcessParameters.hStdError);
}
return File.openHandle(os.STDERR_FILENO);
}
pub fn getStdIn() GetStdIoError!File {
pub fn getStdIn() File {
if (builtin.os == .windows) {
const handle = try os.windows.GetStdHandle(os.windows.STD_INPUT_HANDLE);
return File.openHandle(handle);
return File.openHandle(os.windows.peb().ProcessParameters.hStdInput);
}
return File.openHandle(os.STDIN_FILENO);
}
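After this change the `getStd*` functions return a `File` directly instead of an error union, so callers drop the `try`. A rough sketch of the updated call site, assuming `File.outStream()` as the counterpart of the `inStream()` adapters used later in this file:

```zig
const std = @import("std");

pub fn main() !void {
    // No `try` needed anymore: getStdOut() cannot fail.
    var stdout_file = std.io.getStdOut();
    var stdout_stream = stdout_file.outStream();
    try stdout_stream.stream.print("hi: {}\n", @as(i32, 123));
}
```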
@ -74,24 +69,9 @@ pub fn writeFile(path: []const u8, data: []const u8) !void {
}
/// On success, caller owns returned buffer.
/// TODO move this to `std.fs` and add a version to `std.fs.Dir`.
/// This function is deprecated; use `std.fs.Dir.readFileAlloc`.
pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
return readFileAllocAligned(allocator, path, @alignOf(u8));
}
/// On success, caller owns returned buffer.
/// TODO move this to `std.fs` and add a version to `std.fs.Dir`.
pub fn readFileAllocAligned(allocator: *mem.Allocator, path: []const u8, comptime A: u29) ![]align(A) u8 {
var file = try File.openRead(path);
defer file.close();
const size = try math.cast(usize, try file.getEndPos());
const buf = try allocator.alignedAlloc(u8, A, size);
errdefer allocator.free(buf);
var adapter = file.inStream();
try adapter.stream.readNoEof(buf[0..size]);
return buf;
return fs.Dir.cwd().readFileAlloc(allocator, path, math.maxInt(usize));
}
pub fn BufferedInStream(comptime Error: type) type {
@ -353,21 +333,21 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
const Buf = @IntType(false, buf_bit_count);
const BufShift = math.Log2Int(Buf);
out_bits.* = usize(0);
out_bits.* = @as(usize, 0);
if (U == u0 or bits == 0) return 0;
var out_buffer = Buf(0);
var out_buffer = @as(Buf, 0);
if (self.bit_count > 0) {
const n = if (self.bit_count >= bits) @intCast(u3, bits) else self.bit_count;
const shift = u7_bit_count - n;
switch (endian) {
builtin.Endian.Big => {
out_buffer = Buf(self.bit_buffer >> shift);
out_buffer = @as(Buf, self.bit_buffer >> shift);
self.bit_buffer <<= n;
},
builtin.Endian.Little => {
const value = (self.bit_buffer << shift) >> shift;
out_buffer = Buf(value);
out_buffer = @as(Buf, value);
self.bit_buffer >>= n;
},
}
@ -393,28 +373,28 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
if (n >= u8_bit_count) {
out_buffer <<= @intCast(u3, u8_bit_count - 1);
out_buffer <<= 1;
out_buffer |= Buf(next_byte);
out_buffer |= @as(Buf, next_byte);
out_bits.* += u8_bit_count;
continue;
}
const shift = @intCast(u3, u8_bit_count - n);
out_buffer <<= @intCast(BufShift, n);
out_buffer |= Buf(next_byte >> shift);
out_buffer |= @as(Buf, next_byte >> shift);
out_bits.* += n;
self.bit_buffer = @truncate(u7, next_byte << @intCast(u3, n - 1));
self.bit_count = shift;
},
builtin.Endian.Little => {
if (n >= u8_bit_count) {
out_buffer |= Buf(next_byte) << @intCast(BufShift, out_bits.*);
out_buffer |= @as(Buf, next_byte) << @intCast(BufShift, out_bits.*);
out_bits.* += u8_bit_count;
continue;
}
const shift = @intCast(u3, u8_bit_count - n);
const value = (next_byte << shift) >> shift;
out_buffer |= Buf(value) << @intCast(BufShift, out_bits.*);
out_buffer |= @as(Buf, value) << @intCast(BufShift, out_bits.*);
out_bits.* += n;
self.bit_buffer = @truncate(u7, next_byte >> @intCast(u3, n));
self.bit_count = shift;
@ -434,7 +414,7 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
var self = @fieldParentPtr(Self, "stream", self_stream);
var out_bits: usize = undefined;
var out_bits_total = usize(0);
var out_bits_total = @as(usize, 0);
//@NOTE: I'm not sure this is a good idea, maybe alignToByte should be forced
if (self.bit_count > 0) {
for (buffer) |*b, i| {
@ -598,7 +578,7 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
self.index = 0;
}
fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
fn writeFn(out_stream: *Stream, bytes: []const u8) Error!void {
const self = @fieldParentPtr(Self, "stream", out_stream);
if (bytes.len >= self.buffer.len) {
@ -814,8 +794,7 @@ pub const BufferedAtomicFile = struct {
};
pub fn readLine(buf: *std.Buffer) ![]u8 {
var stdin = try getStdIn();
var stdin_stream = stdin.inStream();
var stdin_stream = getStdIn().inStream();
return readLineFrom(&stdin_stream.stream, buf);
}
@ -856,8 +835,7 @@ test "io.readLineFrom" {
}
pub fn readLineSlice(slice: []u8) ![]u8 {
var stdin = try getStdIn();
var stdin_stream = stdin.inStream();
var stdin_stream = getStdIn().inStream();
return readLineSliceFrom(&stdin_stream.stream, slice);
}
@ -949,14 +927,14 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
return @truncate(T, @bitCast(PossiblySignedByte, buffer[0]));
}
var result = U(0);
var result = @as(U, 0);
for (buffer) |byte, i| {
switch (endian) {
builtin.Endian.Big => {
result = (result << u8_bit_count) | byte;
},
builtin.Endian.Little => {
result |= U(byte) << @intCast(Log2U, u8_bit_count * i);
result |= @as(U, byte) << @intCast(Log2U, u8_bit_count * i);
},
}
}
@ -1050,7 +1028,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
return;
}
ptr.* = OC(undefined); //make it non-null so the following .? is guaranteed safe
ptr.* = @as(OC, undefined); //make it non-null so the following .? is guaranteed safe
const val_ptr = &ptr.*.?;
try self.deserializeInto(val_ptr);
},
@ -1154,7 +1132,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
switch (@typeId(T)) {
builtin.TypeId.Void => return,
builtin.TypeId.Bool => try self.serializeInt(u1(@boolToInt(value))),
builtin.TypeId.Bool => try self.serializeInt(@as(u1, @boolToInt(value))),
builtin.TypeId.Float, builtin.TypeId.Int => try self.serializeInt(value),
builtin.TypeId.Struct => {
const info = @typeInfo(T);
@ -1197,10 +1175,10 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
},
builtin.TypeId.Optional => {
if (value == null) {
try self.serializeInt(u1(@boolToInt(false)));
try self.serializeInt(@as(u1, @boolToInt(false)));
return;
}
try self.serializeInt(u1(@boolToInt(true)));
try self.serializeInt(@as(u1, @boolToInt(true)));
const OC = comptime meta.Child(T);
const val_ptr = &value.?;

View File

@ -40,12 +40,12 @@ pub fn OutStream(comptime WriteError: type) type {
}
pub fn writeByte(self: *Self, byte: u8) Error!void {
const slice = (*const [1]u8)(&byte)[0..];
const slice = @as(*const [1]u8, &byte)[0..];
return self.writeFn(self, slice);
}
pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) Error!void {
const slice = (*const [1]u8)(&byte)[0..];
const slice = @as(*const [1]u8, &byte)[0..];
var i: usize = 0;
while (i < n) : (i += 1) {
try self.writeFn(self, slice);

View File

@ -226,49 +226,49 @@ test "BitOutStream" {
const OutError = io.SliceOutStream.Error;
var bit_stream_be = io.BitOutStream(builtin.Endian.Big, OutError).init(&mem_out_be.stream);
try bit_stream_be.writeBits(u2(1), 1);
try bit_stream_be.writeBits(u5(2), 2);
try bit_stream_be.writeBits(u128(3), 3);
try bit_stream_be.writeBits(u8(4), 4);
try bit_stream_be.writeBits(u9(5), 5);
try bit_stream_be.writeBits(u1(1), 1);
try bit_stream_be.writeBits(@as(u2, 1), 1);
try bit_stream_be.writeBits(@as(u5, 2), 2);
try bit_stream_be.writeBits(@as(u128, 3), 3);
try bit_stream_be.writeBits(@as(u8, 4), 4);
try bit_stream_be.writeBits(@as(u9, 5), 5);
try bit_stream_be.writeBits(@as(u1, 1), 1);
expect(mem_be[0] == 0b11001101 and mem_be[1] == 0b00001011);
mem_out_be.pos = 0;
try bit_stream_be.writeBits(u15(0b110011010000101), 15);
try bit_stream_be.writeBits(@as(u15, 0b110011010000101), 15);
try bit_stream_be.flushBits();
expect(mem_be[0] == 0b11001101 and mem_be[1] == 0b00001010);
mem_out_be.pos = 0;
try bit_stream_be.writeBits(u32(0b110011010000101), 16);
try bit_stream_be.writeBits(@as(u32, 0b110011010000101), 16);
expect(mem_be[0] == 0b01100110 and mem_be[1] == 0b10000101);
try bit_stream_be.writeBits(u0(0), 0);
try bit_stream_be.writeBits(@as(u0, 0), 0);
var mem_out_le = io.SliceOutStream.init(mem_le[0..]);
var bit_stream_le = io.BitOutStream(builtin.Endian.Little, OutError).init(&mem_out_le.stream);
try bit_stream_le.writeBits(u2(1), 1);
try bit_stream_le.writeBits(u5(2), 2);
try bit_stream_le.writeBits(u128(3), 3);
try bit_stream_le.writeBits(u8(4), 4);
try bit_stream_le.writeBits(u9(5), 5);
try bit_stream_le.writeBits(u1(1), 1);
try bit_stream_le.writeBits(@as(u2, 1), 1);
try bit_stream_le.writeBits(@as(u5, 2), 2);
try bit_stream_le.writeBits(@as(u128, 3), 3);
try bit_stream_le.writeBits(@as(u8, 4), 4);
try bit_stream_le.writeBits(@as(u9, 5), 5);
try bit_stream_le.writeBits(@as(u1, 1), 1);
expect(mem_le[0] == 0b00011101 and mem_le[1] == 0b10010101);
mem_out_le.pos = 0;
try bit_stream_le.writeBits(u15(0b110011010000101), 15);
try bit_stream_le.writeBits(@as(u15, 0b110011010000101), 15);
try bit_stream_le.flushBits();
expect(mem_le[0] == 0b10000101 and mem_le[1] == 0b01100110);
mem_out_le.pos = 0;
try bit_stream_le.writeBits(u32(0b1100110100001011), 16);
try bit_stream_le.writeBits(@as(u32, 0b1100110100001011), 16);
expect(mem_le[0] == 0b00001011 and mem_le[1] == 0b11001101);
try bit_stream_le.writeBits(u0(0), 0);
try bit_stream_le.writeBits(@as(u0, 0), 0);
}
test "BitStreams with File Stream" {
@ -282,12 +282,12 @@ test "BitStreams with File Stream" {
const OutError = File.WriteError;
var bit_stream = io.BitOutStream(builtin.endian, OutError).init(file_out_stream);
try bit_stream.writeBits(u2(1), 1);
try bit_stream.writeBits(u5(2), 2);
try bit_stream.writeBits(u128(3), 3);
try bit_stream.writeBits(u8(4), 4);
try bit_stream.writeBits(u9(5), 5);
try bit_stream.writeBits(u1(1), 1);
try bit_stream.writeBits(@as(u2, 1), 1);
try bit_stream.writeBits(@as(u5, 2), 2);
try bit_stream.writeBits(@as(u128, 3), 3);
try bit_stream.writeBits(@as(u8, 4), 4);
try bit_stream.writeBits(@as(u9, 5), 5);
try bit_stream.writeBits(@as(u1, 1), 1);
try bit_stream.flushBits();
}
{
@ -345,8 +345,8 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
inline while (i <= max_test_bitsize) : (i += 1) {
const U = @IntType(false, i);
const S = @IntType(true, i);
try serializer.serializeInt(U(i));
if (i != 0) try serializer.serializeInt(S(-1)) else try serializer.serialize(S(0));
try serializer.serializeInt(@as(U, i));
if (i != 0) try serializer.serializeInt(@as(S, -1)) else try serializer.serialize(@as(S, 0));
}
try serializer.flush();
@ -356,8 +356,8 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
const S = @IntType(true, i);
const x = try deserializer.deserializeInt(U);
const y = try deserializer.deserializeInt(S);
expect(x == U(i));
if (i != 0) expect(y == S(-1)) else expect(y == 0);
expect(x == @as(U, i));
if (i != 0) expect(y == @as(S, -1)) else expect(y == 0);
}
const u8_bit_count = comptime meta.bitCount(u8);
@ -577,11 +577,11 @@ fn testBadData(comptime endian: builtin.Endian, comptime packing: io.Packing) !v
var in_stream = &in.stream;
var deserializer = io.Deserializer(endian, packing, InError).init(in_stream);
try serializer.serialize(u14(3));
try serializer.serialize(@as(u14, 3));
expectError(error.InvalidEnumTag, deserializer.deserialize(A));
out.pos = 0;
try serializer.serialize(u14(3));
try serializer.serialize(u14(88));
try serializer.serialize(@as(u14, 3));
try serializer.serialize(@as(u14, 88));
expectError(error.InvalidEnumTag, deserializer.deserialize(C));
}
@ -603,7 +603,7 @@ test "c out stream" {
}
const out_stream = &io.COutStream.init(out_file).stream;
try out_stream.print("hi: {}\n", i32(123));
try out_stream.print("hi: {}\n", @as(i32, 123));
}
test "File seek ops" {

View File

@ -1012,119 +1012,39 @@ pub const Value = union(enum) {
Object: ObjectMap,
pub fn dump(self: Value) void {
switch (self) {
Value.Null => {
debug.warn("null");
},
Value.Bool => |inner| {
debug.warn("{}", inner);
},
Value.Integer => |inner| {
debug.warn("{}", inner);
},
Value.Float => |inner| {
debug.warn("{:.5}", inner);
},
Value.String => |inner| {
debug.warn("\"{}\"", inner);
},
Value.Array => |inner| {
var not_first = false;
debug.warn("[");
for (inner.toSliceConst()) |value| {
if (not_first) {
debug.warn(",");
}
not_first = true;
value.dump();
}
debug.warn("]");
},
Value.Object => |inner| {
var not_first = false;
debug.warn("{{");
var it = inner.iterator();
var held = std.debug.getStderrMutex().acquire();
defer held.release();
while (it.next()) |entry| {
if (not_first) {
debug.warn(",");
}
not_first = true;
debug.warn("\"{}\":", entry.key);
entry.value.dump();
}
debug.warn("}}");
},
}
const stderr = std.debug.getStderrStream();
self.dumpStream(stderr, 1024) catch return;
}
pub fn dumpIndent(self: Value, indent: usize) void {
pub fn dumpIndent(self: Value, comptime indent: usize) void {
if (indent == 0) {
self.dump();
} else {
self.dumpIndentLevel(indent, 0);
var held = std.debug.getStderrMutex().acquire();
defer held.release();
const stderr = std.debug.getStderrStream();
self.dumpStreamIndent(indent, stderr, 1024) catch return;
}
}
fn dumpIndentLevel(self: Value, indent: usize, level: usize) void {
switch (self) {
Value.Null => {
debug.warn("null");
},
Value.Bool => |inner| {
debug.warn("{}", inner);
},
Value.Integer => |inner| {
debug.warn("{}", inner);
},
Value.Float => |inner| {
debug.warn("{:.5}", inner);
},
Value.String => |inner| {
debug.warn("\"{}\"", inner);
},
Value.Array => |inner| {
var not_first = false;
debug.warn("[\n");
for (inner.toSliceConst()) |value| {
if (not_first) {
debug.warn(",\n");
}
not_first = true;
padSpace(level + indent);
value.dumpIndentLevel(indent, level + indent);
}
debug.warn("\n");
padSpace(level);
debug.warn("]");
},
Value.Object => |inner| {
var not_first = false;
debug.warn("{{\n");
var it = inner.iterator();
while (it.next()) |entry| {
if (not_first) {
debug.warn(",\n");
}
not_first = true;
padSpace(level + indent);
debug.warn("\"{}\": ", entry.key);
entry.value.dumpIndentLevel(indent, level + indent);
}
debug.warn("\n");
padSpace(level);
debug.warn("}}");
},
}
pub fn dumpStream(self: @This(), stream: var, comptime max_depth: usize) !void {
var w = std.json.WriteStream(@typeOf(stream).Child, max_depth).init(stream);
w.newline = "";
w.one_indent = "";
w.space = "";
try w.emitJson(self);
}
fn padSpace(indent: usize) void {
var i: usize = 0;
while (i < indent) : (i += 1) {
debug.warn(" ");
}
pub fn dumpStreamIndent(self: @This(), comptime indent: usize, stream: var, comptime max_depth: usize) !void {
var one_indent = " " ** indent;
var w = std.json.WriteStream(@typeOf(stream).Child, max_depth).init(stream);
w.one_indent = one_indent;
try w.emitJson(self);
}
};
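`Value.dump` and `Value.dumpIndent` now delegate to `std.json.WriteStream` instead of hand-rolled `debug.warn` loops. A rough sketch of driving `WriteStream` directly against a `SliceOutStream`; `beginObject`/`endObject` are assumed to exist alongside the `objectField`/`emitNumber` calls shown in the test below:

```zig
const std = @import("std");

test "WriteStream usage sketch" {
    var buf: [100]u8 = undefined;
    var slice_stream = std.io.SliceOutStream.init(buf[0..]);
    const out = &slice_stream.stream;

    var w = std.json.WriteStream(@typeOf(out).Child, 10).init(out);
    try w.beginObject();
    try w.objectField("int");
    try w.emitNumber(@as(i32, 1234));
    try w.endObject();

    // Something was written; the exact layout depends on newline/indent settings.
    std.testing.expect(slice_stream.pos > 0);
}
```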
@ -1423,7 +1343,7 @@ test "write json then parse it" {
try jw.emitBool(true);
try jw.objectField("int");
try jw.emitNumber(i32(1234));
try jw.emitNumber(@as(i32, 1234));
try jw.objectField("array");
try jw.beginArray();
@ -1432,7 +1352,7 @@ test "write json then parse it" {
try jw.emitNull();
try jw.arrayElem();
try jw.emitNumber(f64(12.34));
try jw.emitNumber(@as(f64, 12.34));
try jw.endArray();

View File

@ -27,6 +27,9 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
/// The string used as a newline character.
newline: []const u8 = "\n",
/// The string used as spacing.
space: []const u8 = " ",
stream: *OutStream,
state_index: usize,
state: [max_depth]State,
@ -87,7 +90,8 @@ pub fn WriteStream(comptime OutStream: type, comptime max_depth: usize) type {
self.pushState(.Value);
try self.indent();
try self.writeEscapedString(name);
try self.stream.write(": ");
try self.stream.write(":");
try self.stream.write(self.space);
},
}
}

View File

@ -1,24 +1,26 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
/// Thread-safe initialization of global data.
/// TODO use a mutex instead of a spinlock
pub fn lazyInit(comptime T: type) LazyInit(T) {
return LazyInit(T){
.data = undefined,
.state = 0,
};
}
fn LazyInit(comptime T: type) type {
return struct {
state: u8, // TODO make this an enum
state: State = .NotResolved,
data: Data,
const State = enum(u8) {
NotResolved,
Resolving,
Resolved,
};
const Self = @This();
// TODO this isn't working for void, investigate and then remove this special case
@ -30,16 +32,16 @@ fn LazyInit(comptime T: type) type {
/// perform the initialization and then call resolve().
pub fn get(self: *Self) ?Ptr {
while (true) {
var state = @cmpxchgWeak(u8, &self.state, 0, 1, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return null;
var state = @cmpxchgWeak(State, &self.state, .NotResolved, .Resolving, .SeqCst, .SeqCst) orelse return null;
switch (state) {
0 => continue,
1 => {
.NotResolved => continue,
.Resolving => {
// TODO mutex instead of a spinlock
continue;
},
2 => {
.Resolved => {
if (@sizeOf(T) == 0) {
return T(undefined);
return @as(T, undefined);
} else {
return &self.data;
}
@ -50,8 +52,8 @@ fn LazyInit(comptime T: type) type {
}
pub fn resolve(self: *Self) void {
const prev = @atomicRmw(u8, &self.state, AtomicRmwOp.Xchg, 2, AtomicOrder.SeqCst);
assert(prev == 1); // resolve() called twice
const prev = @atomicRmw(State, &self.state, .Xchg, .Resolved, .SeqCst);
assert(prev != .Resolved); // resolve() called twice
}
};
}
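A usage sketch for the reworked `LazyInit`: `get()` returns null exactly once, to the caller that wins the race and must initialize the data and call `resolve()`. This assumes the wrapper is exported as `std.lazyInit`; the names here are illustrative only:

```zig
const std = @import("std");

var global_number = std.lazyInit(u32);

test "lazyInit usage sketch" {
    if (global_number.get()) |ptr| {
        // Someone already resolved the value.
        std.testing.expectEqual(@as(u32, 42), ptr.*);
    } else {
        // We won the race: fill in the data, then publish it.
        global_number.data = 42;
        global_number.resolve();
    }
    std.testing.expectEqual(@as(u32, 42), global_number.get().?.*);
}
```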

View File

@ -10,6 +10,9 @@ pub const e = 2.71828182845904523536028747135266249775724709369995;
/// Archimedes' constant (π)
pub const pi = 3.14159265358979323846264338327950288419716939937510;
/// Circle constant (τ)
pub const tau = 2 * pi;
/// log2(e)
pub const log2e = 1.442695040888963407359924681001892137;
@ -44,10 +47,10 @@ pub const sqrt2 = 1.414213562373095048801688724209698079;
pub const sqrt1_2 = 0.707106781186547524400844362104849039;
// From a small c++ [program using boost float128](https://github.com/winksaville/cpp_boost_float128)
pub const f128_true_min = @bitCast(f128, u128(0x00000000000000000000000000000001));
pub const f128_min = @bitCast(f128, u128(0x00010000000000000000000000000000));
pub const f128_max = @bitCast(f128, u128(0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF));
pub const f128_epsilon = @bitCast(f128, u128(0x3F8F0000000000000000000000000000));
pub const f128_true_min = @bitCast(f128, @as(u128, 0x00000000000000000000000000000001));
pub const f128_min = @bitCast(f128, @as(u128, 0x00010000000000000000000000000000));
pub const f128_max = @bitCast(f128, @as(u128, 0x7FFEFFFFFFFFFFFFFFFFFFFFFFFFFFFF));
pub const f128_epsilon = @bitCast(f128, @as(u128, 0x3F8F0000000000000000000000000000));
pub const f128_toint = 1.0 / f128_epsilon;
// float.h details
@ -69,28 +72,28 @@ pub const f16_max = 65504;
pub const f16_epsilon = 0.0009765625; // 2**-10
pub const f16_toint = 1.0 / f16_epsilon;
pub const nan_u16 = u16(0x7C01);
pub const nan_u16 = @as(u16, 0x7C01);
pub const nan_f16 = @bitCast(f16, nan_u16);
pub const inf_u16 = u16(0x7C00);
pub const inf_u16 = @as(u16, 0x7C00);
pub const inf_f16 = @bitCast(f16, inf_u16);
pub const nan_u32 = u32(0x7F800001);
pub const nan_u32 = @as(u32, 0x7F800001);
pub const nan_f32 = @bitCast(f32, nan_u32);
pub const inf_u32 = u32(0x7F800000);
pub const inf_u32 = @as(u32, 0x7F800000);
pub const inf_f32 = @bitCast(f32, inf_u32);
pub const nan_u64 = u64(0x7FF << 52) | 1;
pub const nan_u64 = @as(u64, 0x7FF << 52) | 1;
pub const nan_f64 = @bitCast(f64, nan_u64);
pub const inf_u64 = u64(0x7FF << 52);
pub const inf_u64 = @as(u64, 0x7FF << 52);
pub const inf_f64 = @bitCast(f64, inf_u64);
pub const nan_u128 = u128(0x7fff0000000000000000000000000001);
pub const nan_u128 = @as(u128, 0x7fff0000000000000000000000000001);
pub const nan_f128 = @bitCast(f128, nan_u128);
pub const inf_u128 = u128(0x7fff0000000000000000000000000000);
pub const inf_u128 = @as(u128, 0x7fff0000000000000000000000000000);
pub const inf_f128 = @bitCast(f128, inf_u128);
pub const nan = @import("math/nan.zig").nan;
@ -248,7 +251,7 @@ pub fn Min(comptime A: type, comptime B: type) type {
},
else => {},
}
return @typeOf(A(0) + B(0));
return @typeOf(@as(A, 0) + @as(B, 0));
}
/// Returns the smaller number. When one of the parameter's type's full range fits in the other,
@ -273,7 +276,7 @@ pub fn min(x: var, y: var) Min(@typeOf(x), @typeOf(y)) {
}
test "math.min" {
testing.expect(min(i32(-1), i32(2)) == -1);
testing.expect(min(@as(i32, -1), @as(i32, 2)) == -1);
{
var a: u16 = 999;
var b: u32 = 10;
@ -309,7 +312,7 @@ pub fn max(x: var, y: var) @typeOf(x + y) {
}
test "math.max" {
testing.expect(max(i32(-1), i32(2)) == 2);
testing.expect(max(@as(i32, -1), @as(i32, 2)) == 2);
}
pub fn mul(comptime T: type, a: T, b: T) (error{Overflow}!T) {
@ -352,10 +355,10 @@ pub fn shl(comptime T: type, a: T, shift_amt: var) T {
}
test "math.shl" {
testing.expect(shl(u8, 0b11111111, usize(3)) == 0b11111000);
testing.expect(shl(u8, 0b11111111, usize(8)) == 0);
testing.expect(shl(u8, 0b11111111, usize(9)) == 0);
testing.expect(shl(u8, 0b11111111, isize(-2)) == 0b00111111);
testing.expect(shl(u8, 0b11111111, @as(usize, 3)) == 0b11111000);
testing.expect(shl(u8, 0b11111111, @as(usize, 8)) == 0);
testing.expect(shl(u8, 0b11111111, @as(usize, 9)) == 0);
testing.expect(shl(u8, 0b11111111, @as(isize, -2)) == 0b00111111);
testing.expect(shl(u8, 0b11111111, 3) == 0b11111000);
testing.expect(shl(u8, 0b11111111, 8) == 0);
testing.expect(shl(u8, 0b11111111, 9) == 0);
@ -380,10 +383,10 @@ pub fn shr(comptime T: type, a: T, shift_amt: var) T {
}
test "math.shr" {
testing.expect(shr(u8, 0b11111111, usize(3)) == 0b00011111);
testing.expect(shr(u8, 0b11111111, usize(8)) == 0);
testing.expect(shr(u8, 0b11111111, usize(9)) == 0);
testing.expect(shr(u8, 0b11111111, isize(-2)) == 0b11111100);
testing.expect(shr(u8, 0b11111111, @as(usize, 3)) == 0b00011111);
testing.expect(shr(u8, 0b11111111, @as(usize, 8)) == 0);
testing.expect(shr(u8, 0b11111111, @as(usize, 9)) == 0);
testing.expect(shr(u8, 0b11111111, @as(isize, -2)) == 0b11111100);
testing.expect(shr(u8, 0b11111111, 3) == 0b00011111);
testing.expect(shr(u8, 0b11111111, 8) == 0);
testing.expect(shr(u8, 0b11111111, 9) == 0);
@ -402,11 +405,11 @@ pub fn rotr(comptime T: type, x: T, r: var) T {
}
test "math.rotr" {
testing.expect(rotr(u8, 0b00000001, usize(0)) == 0b00000001);
testing.expect(rotr(u8, 0b00000001, usize(9)) == 0b10000000);
testing.expect(rotr(u8, 0b00000001, usize(8)) == 0b00000001);
testing.expect(rotr(u8, 0b00000001, usize(4)) == 0b00010000);
testing.expect(rotr(u8, 0b00000001, isize(-1)) == 0b00000010);
testing.expect(rotr(u8, 0b00000001, @as(usize, 0)) == 0b00000001);
testing.expect(rotr(u8, 0b00000001, @as(usize, 9)) == 0b10000000);
testing.expect(rotr(u8, 0b00000001, @as(usize, 8)) == 0b00000001);
testing.expect(rotr(u8, 0b00000001, @as(usize, 4)) == 0b00010000);
testing.expect(rotr(u8, 0b00000001, @as(isize, -1)) == 0b00000010);
}
/// Rotates left. Only unsigned values can be rotated.
@ -421,11 +424,11 @@ pub fn rotl(comptime T: type, x: T, r: var) T {
}
test "math.rotl" {
testing.expect(rotl(u8, 0b00000001, usize(0)) == 0b00000001);
testing.expect(rotl(u8, 0b00000001, usize(9)) == 0b00000010);
testing.expect(rotl(u8, 0b00000001, usize(8)) == 0b00000001);
testing.expect(rotl(u8, 0b00000001, usize(4)) == 0b00010000);
testing.expect(rotl(u8, 0b00000001, isize(-1)) == 0b10000000);
testing.expect(rotl(u8, 0b00000001, @as(usize, 0)) == 0b00000001);
testing.expect(rotl(u8, 0b00000001, @as(usize, 9)) == 0b00000010);
testing.expect(rotl(u8, 0b00000001, @as(usize, 8)) == 0b00000001);
testing.expect(rotl(u8, 0b00000001, @as(usize, 4)) == 0b00010000);
testing.expect(rotl(u8, 0b00000001, @as(isize, -1)) == 0b10000000);
}
pub fn Log2Int(comptime T: type) type {
@ -532,8 +535,8 @@ test "math.absInt" {
comptime testAbsInt();
}
fn testAbsInt() void {
testing.expect((absInt(i32(-10)) catch unreachable) == 10);
testing.expect((absInt(i32(10)) catch unreachable) == 10);
testing.expect((absInt(@as(i32, -10)) catch unreachable) == 10);
testing.expect((absInt(@as(i32, 10)) catch unreachable) == 10);
}
pub const absFloat = fabs;
@ -543,8 +546,8 @@ test "math.absFloat" {
comptime testAbsFloat();
}
fn testAbsFloat() void {
testing.expect(absFloat(f32(-10.05)) == 10.05);
testing.expect(absFloat(f32(10.05)) == 10.05);
testing.expect(absFloat(@as(f32, -10.05)) == 10.05);
testing.expect(absFloat(@as(f32, 10.05)) == 10.05);
}
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@ -679,14 +682,14 @@ pub fn absCast(x: var) t: {
}
test "math.absCast" {
testing.expect(absCast(i32(-999)) == 999);
testing.expect(@typeOf(absCast(i32(-999))) == u32);
testing.expect(absCast(@as(i32, -999)) == 999);
testing.expect(@typeOf(absCast(@as(i32, -999))) == u32);
testing.expect(absCast(i32(999)) == 999);
testing.expect(@typeOf(absCast(i32(999))) == u32);
testing.expect(absCast(@as(i32, 999)) == 999);
testing.expect(@typeOf(absCast(@as(i32, 999))) == u32);
testing.expect(absCast(i32(minInt(i32))) == -minInt(i32));
testing.expect(@typeOf(absCast(i32(minInt(i32)))) == u32);
testing.expect(absCast(@as(i32, minInt(i32))) == -minInt(i32));
testing.expect(@typeOf(absCast(@as(i32, minInt(i32)))) == u32);
testing.expect(absCast(-999) == 999);
}
@ -705,13 +708,13 @@ pub fn negateCast(x: var) !@IntType(true, @typeOf(x).bit_count) {
}
test "math.negateCast" {
testing.expect((negateCast(u32(999)) catch unreachable) == -999);
testing.expect(@typeOf(negateCast(u32(999)) catch unreachable) == i32);
testing.expect((negateCast(@as(u32, 999)) catch unreachable) == -999);
testing.expect(@typeOf(negateCast(@as(u32, 999)) catch unreachable) == i32);
testing.expect((negateCast(u32(-minInt(i32))) catch unreachable) == minInt(i32));
testing.expect(@typeOf(negateCast(u32(-minInt(i32))) catch unreachable) == i32);
testing.expect((negateCast(@as(u32, -minInt(i32))) catch unreachable) == minInt(i32));
testing.expect(@typeOf(negateCast(@as(u32, -minInt(i32))) catch unreachable) == i32);
testing.expectError(error.Overflow, negateCast(u32(maxInt(i32) + 10)));
testing.expectError(error.Overflow, negateCast(@as(u32, maxInt(i32) + 10)));
}
/// Cast an integer to a different integer type. If the value doesn't fit,
@ -729,13 +732,13 @@ pub fn cast(comptime T: type, x: var) (error{Overflow}!T) {
}
test "math.cast" {
testing.expectError(error.Overflow, cast(u8, u32(300)));
testing.expectError(error.Overflow, cast(i8, i32(-200)));
testing.expectError(error.Overflow, cast(u8, i8(-1)));
testing.expectError(error.Overflow, cast(u64, i8(-1)));
testing.expectError(error.Overflow, cast(u8, @as(u32, 300)));
testing.expectError(error.Overflow, cast(i8, @as(i32, -200)));
testing.expectError(error.Overflow, cast(u8, @as(i8, -1)));
testing.expectError(error.Overflow, cast(u64, @as(i8, -1)));
testing.expect((try cast(u8, u32(255))) == u8(255));
testing.expect(@typeOf(try cast(u8, u32(255))) == u8);
testing.expect((try cast(u8, @as(u32, 255))) == @as(u8, 255));
testing.expect(@typeOf(try cast(u8, @as(u32, 255))) == u8);
}
pub const AlignCastError = error{UnalignedMemory};
@ -786,9 +789,9 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) @IntType(T.is_signed, T
comptime assert(@typeId(T) == builtin.TypeId.Int);
comptime assert(!T.is_signed);
assert(value != 0);
comptime const promotedType = @IntType(T.is_signed, T.bit_count + 1);
comptime const shiftType = std.math.Log2Int(promotedType);
return promotedType(1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
comptime const PromotedType = @IntType(T.is_signed, T.bit_count + 1);
comptime const shiftType = std.math.Log2Int(PromotedType);
return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
}
/// Returns the next power of two (if the value is not already a power of two).
@ -797,8 +800,8 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) @IntType(T.is_signed, T
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
comptime assert(@typeId(T) == builtin.TypeId.Int);
comptime assert(!T.is_signed);
comptime const promotedType = @IntType(T.is_signed, T.bit_count + 1);
comptime const overflowBit = promotedType(1) << T.bit_count;
comptime const PromotedType = @IntType(T.is_signed, T.bit_count + 1);
comptime const overflowBit = @as(PromotedType, 1) << T.bit_count;
var x = ceilPowerOfTwoPromote(T, value);
if (overflowBit & x != 0) {
return error.Overflow;
@ -812,15 +815,15 @@ test "math.ceilPowerOfTwoPromote" {
}
fn testCeilPowerOfTwoPromote() void {
testing.expectEqual(u33(1), ceilPowerOfTwoPromote(u32, 1));
testing.expectEqual(u33(2), ceilPowerOfTwoPromote(u32, 2));
testing.expectEqual(u33(64), ceilPowerOfTwoPromote(u32, 63));
testing.expectEqual(u33(64), ceilPowerOfTwoPromote(u32, 64));
testing.expectEqual(u33(128), ceilPowerOfTwoPromote(u32, 65));
testing.expectEqual(u6(8), ceilPowerOfTwoPromote(u5, 7));
testing.expectEqual(u6(8), ceilPowerOfTwoPromote(u5, 8));
testing.expectEqual(u6(16), ceilPowerOfTwoPromote(u5, 9));
testing.expectEqual(u5(16), ceilPowerOfTwoPromote(u4, 9));
testing.expectEqual(@as(u33, 1), ceilPowerOfTwoPromote(u32, 1));
testing.expectEqual(@as(u33, 2), ceilPowerOfTwoPromote(u32, 2));
testing.expectEqual(@as(u33, 64), ceilPowerOfTwoPromote(u32, 63));
testing.expectEqual(@as(u33, 64), ceilPowerOfTwoPromote(u32, 64));
testing.expectEqual(@as(u33, 128), ceilPowerOfTwoPromote(u32, 65));
testing.expectEqual(@as(u6, 8), ceilPowerOfTwoPromote(u5, 7));
testing.expectEqual(@as(u6, 8), ceilPowerOfTwoPromote(u5, 8));
testing.expectEqual(@as(u6, 16), ceilPowerOfTwoPromote(u5, 9));
testing.expectEqual(@as(u5, 16), ceilPowerOfTwoPromote(u4, 9));
}
test "math.ceilPowerOfTwo" {
@ -829,14 +832,14 @@ test "math.ceilPowerOfTwo" {
}
fn testCeilPowerOfTwo() !void {
testing.expectEqual(u32(1), try ceilPowerOfTwo(u32, 1));
testing.expectEqual(u32(2), try ceilPowerOfTwo(u32, 2));
testing.expectEqual(u32(64), try ceilPowerOfTwo(u32, 63));
testing.expectEqual(u32(64), try ceilPowerOfTwo(u32, 64));
testing.expectEqual(u32(128), try ceilPowerOfTwo(u32, 65));
testing.expectEqual(u5(8), try ceilPowerOfTwo(u5, 7));
testing.expectEqual(u5(8), try ceilPowerOfTwo(u5, 8));
testing.expectEqual(u5(16), try ceilPowerOfTwo(u5, 9));
testing.expectEqual(@as(u32, 1), try ceilPowerOfTwo(u32, 1));
testing.expectEqual(@as(u32, 2), try ceilPowerOfTwo(u32, 2));
testing.expectEqual(@as(u32, 64), try ceilPowerOfTwo(u32, 63));
testing.expectEqual(@as(u32, 64), try ceilPowerOfTwo(u32, 64));
testing.expectEqual(@as(u32, 128), try ceilPowerOfTwo(u32, 65));
testing.expectEqual(@as(u5, 8), try ceilPowerOfTwo(u5, 7));
testing.expectEqual(@as(u5, 8), try ceilPowerOfTwo(u5, 8));
testing.expectEqual(@as(u5, 16), try ceilPowerOfTwo(u5, 9));
testing.expectError(error.Overflow, ceilPowerOfTwo(u4, 9));
}
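
The formula behind these tests is @as(PromotedType, 1) << (T.bit_count - @clz(T, value - 1)). For value = 65 as a u32, value - 1 = 64, @clz(u32, 64) = 25, the shift amount is 32 - 25 = 7, and the result is 128. A restatement of that one case (not part of this diff):

const std = @import("std");
const math = std.math;

test "ceilPowerOfTwo worked example" {
    // 65 -> next power of two is 1 << 7 == 128.
    std.testing.expectEqual(@as(u32, 128), try math.ceilPowerOfTwo(u32, 65));
}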
@ -848,7 +851,7 @@ pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
assert(x != 0);
const log2_val = log2_int(T, x);
if (T(1) << log2_val == x)
if (@as(T, 1) << log2_val == x)
return log2_val;
return log2_val + 1;
}
@ -870,8 +873,8 @@ pub fn lossyCast(comptime T: type, value: var) T {
switch (@typeInfo(@typeOf(value))) {
builtin.TypeId.Int => return @intToFloat(T, value),
builtin.TypeId.Float => return @floatCast(T, value),
builtin.TypeId.ComptimeInt => return T(value),
builtin.TypeId.ComptimeFloat => return T(value),
builtin.TypeId.ComptimeInt => return @as(T, value),
builtin.TypeId.ComptimeFloat => return @as(T, value),
else => @compileError("bad type"),
}
}
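
In the branches shown above, lossyCast sends runtime integers through @intToFloat, runtime floats through @floatCast, and comptime values through @as(T, value), accepting any loss of precision. A brief usage sketch (not part of this diff); both results are exactly representable in f32:

const std = @import("std");
const math = std.math;

test "lossyCast usage" {
    std.testing.expect(math.lossyCast(f32, @as(u64, 300)) == 300.0);
    std.testing.expect(math.lossyCast(f32, @as(f64, 1.5)) == 1.5);
}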
@ -944,7 +947,7 @@ test "max value type" {
pub fn mulWide(comptime T: type, a: T, b: T) @IntType(T.is_signed, T.bit_count * 2) {
const ResultInt = @IntType(T.is_signed, T.bit_count * 2);
return ResultInt(a) * ResultInt(b);
return @as(ResultInt, a) * @as(ResultInt, b);
}
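
mulWide now builds its doubled-width operands with @as(ResultInt, a) instead of the old ResultInt(a) call syntax; the widening is what keeps the multiplication overflow-free. A small check (not part of this diff):

const std = @import("std");
const math = std.math;

test "mulWide widening" {
    // 200 * 200 would overflow u8, but both operands are widened to u16 first.
    const product = math.mulWide(u8, 200, 200);
    std.testing.expect(@typeOf(product) == u16);
    std.testing.expect(product == 40000);
}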
test "math.mulWide" {

View File

@ -149,8 +149,8 @@ fn acos64(x: f64) f64 {
}
test "math.acos" {
expect(acos(f32(0.0)) == acos32(0.0));
expect(acos(f64(0.0)) == acos64(0.0));
expect(acos(@as(f32, 0.0)) == acos32(0.0));
expect(acos(@as(f64, 0.0)) == acos64(0.0));
}
test "math.acos32" {

View File

@ -61,8 +61,8 @@ fn acosh64(x: f64) f64 {
}
test "math.acosh" {
expect(acosh(f32(1.5)) == acosh32(1.5));
expect(acosh(f64(1.5)) == acosh64(1.5));
expect(acosh(@as(f32, 1.5)) == acosh32(1.5));
expect(acosh(@as(f64, 1.5)) == acosh64(1.5));
}
test "math.acosh32" {

View File

@ -142,8 +142,8 @@ fn asin64(x: f64) f64 {
}
test "math.asin" {
expect(asin(f32(0.0)) == asin32(0.0));
expect(asin(f64(0.0)) == asin64(0.0));
expect(asin(@as(f32, 0.0)) == asin32(0.0));
expect(asin(@as(f64, 0.0)) == asin64(0.0));
}
test "math.asin32" {

View File

@ -89,8 +89,8 @@ fn asinh64(x: f64) f64 {
}
test "math.asinh" {
expect(asinh(f32(0.0)) == asinh32(0.0));
expect(asinh(f64(0.0)) == asinh64(0.0));
expect(asinh(@as(f32, 0.0)) == asinh32(0.0));
expect(asinh(@as(f64, 0.0)) == asinh64(0.0));
}
test "math.asinh32" {

View File

@ -212,8 +212,8 @@ fn atan64(x_: f64) f64 {
}
test "math.atan" {
expect(@bitCast(u32, atan(f32(0.2))) == @bitCast(u32, atan32(0.2)));
expect(atan(f64(0.2)) == atan64(0.2));
expect(@bitCast(u32, atan(@as(f32, 0.2))) == @bitCast(u32, atan32(0.2)));
expect(atan(@as(f64, 0.2)) == atan64(0.2));
}
test "math.atan32" {

View File

@ -84,8 +84,8 @@ fn atanh_64(x: f64) f64 {
}
test "math.atanh" {
expect(atanh(f32(0.0)) == atanh_32(0.0));
expect(atanh(f64(0.0)) == atanh_64(0.0));
expect(atanh(@as(f32, 0.0)) == atanh_32(0.0));
expect(atanh(@as(f64, 0.0)) == atanh_64(0.0));
}
test "math.atanh_32" {

View File

@ -261,7 +261,7 @@ pub const Int = struct {
/// the minus sign. This is used for determining the number of characters needed to print the
/// value. It is inexact and may exceed the given value by ~1-2 bytes.
pub fn sizeInBase(self: Int, base: usize) usize {
const bit_count = usize(@boolToInt(!self.isPositive())) + self.bitCountAbs();
const bit_count = @as(usize, @boolToInt(!self.isPositive())) + self.bitCountAbs();
return (bit_count / math.log2(base)) + 1;
}
@ -281,7 +281,7 @@ pub const Int = struct {
var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value);
if (info.bits <= Limb.bit_count) {
self.limbs[0] = Limb(w_value);
self.limbs[0] = @as(Limb, w_value);
self.metadata += 1;
} else {
var i: usize = 0;
@ -453,7 +453,7 @@ pub const Int = struct {
for (self.limbs[0..self.len()]) |limb| {
var shift: usize = 0;
while (shift < Limb.bit_count) : (shift += base_shift) {
const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & Limb(base - 1));
const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1));
const ch = try digitToChar(r, base);
try digits.append(ch);
}
@ -560,7 +560,7 @@ pub const Int = struct {
/// Returns -1, 0, 1 if a < b, a == b or a > b respectively.
pub fn cmp(a: Int, b: Int) i8 {
if (a.isPositive() != b.isPositive()) {
return if (a.isPositive()) i8(1) else -1;
return if (a.isPositive()) @as(i8, 1) else -1;
} else {
const r = cmpAbs(a, b);
return if (a.isPositive()) r else -r;
@ -785,7 +785,7 @@ pub const Int = struct {
const c1: Limb = @boolToInt(@addWithOverflow(Limb, a, carry.*, &r1));
// r2 = b * c
const bc = DoubleLimb(math.mulWide(Limb, b, c));
const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
const r2 = @truncate(Limb, bc);
const c2 = @truncate(Limb, bc >> Limb.bit_count);
@ -1084,7 +1084,7 @@ pub const Int = struct {
rem.* = 0;
for (a) |_, ri| {
const i = a.len - ri - 1;
const pdiv = ((DoubleLimb(rem.*) << Limb.bit_count) | a[i]);
const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]);
if (pdiv == 0) {
quo[i] = 0;
@ -1143,9 +1143,9 @@ pub const Int = struct {
if (x.limbs[i] == y.limbs[t]) {
q.limbs[i - t - 1] = maxInt(Limb);
} else {
const num = (DoubleLimb(x.limbs[i]) << Limb.bit_count) | DoubleLimb(x.limbs[i - 1]);
const z = @intCast(Limb, num / DoubleLimb(y.limbs[t]));
q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else Limb(z);
const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]);
const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t]));
q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z);
}
// 3.2
@ -1362,7 +1362,7 @@ test "big.int comptime_int set" {
comptime var i: usize = 0;
inline while (i < s_limb_count) : (i += 1) {
const result = Limb(s & maxInt(Limb));
const result = @as(Limb, s & maxInt(Limb));
s >>= Limb.bit_count / 2;
s >>= Limb.bit_count / 2;
testing.expect(a.limbs[i] == result);
@ -1377,7 +1377,7 @@ test "big.int comptime_int set negative" {
}
test "big.int int set unaligned small" {
var a = try Int.initSet(al, u7(45));
var a = try Int.initSet(al, @as(u7, 45));
testing.expect(a.limbs[0] == 45);
testing.expect(a.isPositive() == true);

View File

@ -54,11 +54,11 @@ fn cbrt32(x: f32) f32 {
// first step newton to 16 bits
var t: f64 = @bitCast(f32, u);
var r: f64 = t * t * t;
t = t * (f64(x) + x + r) / (x + r + r);
t = t * (@as(f64, x) + x + r) / (x + r + r);
// second step newton to 47 bits
r = t * t * t;
t = t * (f64(x) + x + r) / (x + r + r);
t = t * (@as(f64, x) + x + r) / (x + r + r);
return @floatCast(f32, t);
}
@ -97,7 +97,7 @@ fn cbrt64(x: f64) f64 {
}
u &= 1 << 63;
u |= u64(hx) << 32;
u |= @as(u64, hx) << 32;
var t = @bitCast(f64, u);
// cbrt to 23 bits
@ -120,8 +120,8 @@ fn cbrt64(x: f64) f64 {
}
test "math.cbrt" {
expect(cbrt(f32(0.0)) == cbrt32(0.0));
expect(cbrt(f64(0.0)) == cbrt64(0.0));
expect(cbrt(@as(f32, 0.0)) == cbrt32(0.0));
expect(cbrt(@as(f64, 0.0)) == cbrt64(0.0));
}
test "math.cbrt32" {

View File

@ -37,7 +37,7 @@ fn ceil32(x: f32) f32 {
if (e >= 23) {
return x;
} else if (e >= 0) {
m = u32(0x007FFFFF) >> @intCast(u5, e);
m = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
if (u & m == 0) {
return x;
}
@ -87,8 +87,8 @@ fn ceil64(x: f64) f64 {
}
test "math.ceil" {
expect(ceil(f32(0.0)) == ceil32(0.0));
expect(ceil(f64(0.0)) == ceil64(0.0));
expect(ceil(@as(f32, 0.0)) == ceil32(0.0));
expect(ceil(@as(f64, 0.0)) == ceil64(0.0));
}
test "math.ceil32" {

View File

@ -133,8 +133,8 @@ test "complex.div" {
const b = Complex(f32).new(2, 7);
const c = a.div(b);
testing.expect(math.approxEq(f32, c.re, f32(31) / 53, epsilon) and
math.approxEq(f32, c.im, f32(-29) / 53, epsilon));
testing.expect(math.approxEq(f32, c.re, @as(f32, 31) / 53, epsilon) and
math.approxEq(f32, c.im, @as(f32, -29) / 53, epsilon));
}
test "complex.conjugate" {
@ -148,8 +148,8 @@ test "complex.reciprocal" {
const a = Complex(f32).new(5, 3);
const c = a.reciprocal();
testing.expect(math.approxEq(f32, c.re, f32(5) / 34, epsilon) and
math.approxEq(f32, c.im, f32(-3) / 34, epsilon));
testing.expect(math.approxEq(f32, c.re, @as(f32, 5) / 34, epsilon) and
math.approxEq(f32, c.im, @as(f32, -3) / 34, epsilon));
}
test "complex.magnitude" {

View File

@ -8,7 +8,7 @@ const Complex = cmath.Complex;
pub fn acos(z: var) Complex(@typeOf(z.re)) {
const T = @typeOf(z.re);
const q = cmath.asin(z);
return Complex(T).new(T(math.pi) / 2 - q.re, -q.im);
return Complex(T).new(@as(T, math.pi) / 2 - q.re, -q.im);
}
const epsilon = 0.0001;

View File

@ -59,13 +59,13 @@ fn frexp_exp64(x: f64, expt: *i32) f64 {
expt.* = @intCast(i32, hx >> 20) - (0x3ff + 1023) + k;
const high_word = (hx & 0xfffff) | ((0x3ff + 1023) << 20);
return @bitCast(f64, (u64(high_word) << 32) | lx);
return @bitCast(f64, (@as(u64, high_word) << 32) | lx);
}
fn ldexp_cexp64(z: Complex(f64), expt: i32) Complex(f64) {
var ex_expt: i32 = undefined;
const exp_x = frexp_exp64(z.re, &ex_expt);
const exptf = i64(expt + ex_expt);
const exptf = @as(i64, expt + ex_expt);
const half_expt1 = @divTrunc(exptf, 2);
const scale1 = @bitCast(f64, (0x3ff + half_expt1) << 20);

View File

@ -52,8 +52,8 @@ fn sqrt32(z: Complex(f32)) Complex(f32) {
// y = nan special case is handled fine below
// double-precision avoids overflow with correct rounding.
const dx = f64(x);
const dy = f64(y);
const dx = @as(f64, x);
const dy = @as(f64, y);
if (dx >= 0) {
const t = math.sqrt((dx + math.hypot(f64, dx, dy)) * 0.5);

View File

@ -76,7 +76,7 @@ fn tanh64(z: Complex(f64)) Complex(f64) {
return Complex(f64).new(x, r);
}
const xx = @bitCast(f64, (u64(hx - 0x40000000) << 32) | lx);
const xx = @bitCast(f64, (@as(u64, hx - 0x40000000) << 32) | lx);
const r = if (math.isInf(y)) y else math.sin(y) * math.cos(y);
return Complex(f64).new(xx, math.copysign(f64, 0, r));
}

View File

@ -24,7 +24,7 @@ fn copysign16(x: f16, y: f16) f16 {
const uy = @bitCast(u16, y);
const h1 = ux & (maxInt(u16) / 2);
const h2 = uy & (u16(1) << 15);
const h2 = uy & (@as(u16, 1) << 15);
return @bitCast(f16, h1 | h2);
}
@ -33,7 +33,7 @@ fn copysign32(x: f32, y: f32) f32 {
const uy = @bitCast(u32, y);
const h1 = ux & (maxInt(u32) / 2);
const h2 = uy & (u32(1) << 31);
const h2 = uy & (@as(u32, 1) << 31);
return @bitCast(f32, h1 | h2);
}
@ -42,7 +42,7 @@ fn copysign64(x: f64, y: f64) f64 {
const uy = @bitCast(u64, y);
const h1 = ux & (maxInt(u64) / 2);
const h2 = uy & (u64(1) << 63);
const h2 = uy & (@as(u64, 1) << 63);
return @bitCast(f64, h1 | h2);
}
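
Each copysign variant isolates the sign bit with @as(uN, 1) << (N - 1) and ORs it onto the magnitude bits of the first argument. A brief usage sketch (not part of this diff):

const std = @import("std");
const math = std.math;

test "copysign usage" {
    // Magnitude comes from the first argument, the sign from the second.
    std.testing.expect(math.copysign(f32, 1.5, -2.0) == -1.5);
    std.testing.expect(math.copysign(f64, -1.5, 2.0) == 1.5);
}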

View File

@ -83,8 +83,8 @@ fn cos_(comptime T: type, x_: T) T {
}
test "math.cos" {
expect(cos(f32(0.0)) == cos_(f32, 0.0));
expect(cos(f64(0.0)) == cos_(f64, 0.0));
expect(cos(@as(f32, 0.0)) == cos_(f32, 0.0));
expect(cos(@as(f64, 0.0)) == cos_(f64, 0.0));
}
test "math.cos32" {

View File

@ -88,8 +88,8 @@ fn cosh64(x: f64) f64 {
}
test "math.cosh" {
expect(cosh(f32(1.5)) == cosh32(1.5));
expect(cosh(f64(1.5)) == cosh64(1.5));
expect(cosh(@as(f32, 1.5)) == cosh32(1.5));
expect(cosh(@as(f64, 1.5)) == cosh64(1.5));
}
test "math.cosh32" {

View File

@ -134,7 +134,7 @@ fn exp64(x_: f64) f64 {
}
if (x < -708.39641853226410622) {
// underflow if x != -inf
// math.forceEval(f32(-0x1.0p-149 / x));
// math.forceEval(@as(f32, -0x1.0p-149 / x));
if (x < -745.13321910194110842) {
return 0;
}
@ -183,8 +183,8 @@ fn exp64(x_: f64) f64 {
}
test "math.exp" {
assert(exp(f32(0.0)) == exp32(0.0));
assert(exp(f64(0.0)) == exp64(0.0));
assert(exp(@as(f32, 0.0)) == exp32(0.0));
assert(exp(@as(f64, 0.0)) == exp64(0.0));
}
test "math.exp32" {

View File

@ -85,7 +85,7 @@ fn exp2_32(x: f32) f32 {
const k = i_0 / tblsiz;
// NOTE: musl relies on undefined overflow shift behaviour. Appears that this produces the
// intended result but should confirm how GCC/Clang handle this to ensure.
const uk = @bitCast(f64, u64(0x3FF + k) << 52);
const uk = @bitCast(f64, @as(u64, 0x3FF + k) << 52);
i_0 &= tblsiz - 1;
uf -= redux;
@ -421,8 +421,8 @@ fn exp2_64(x: f64) f64 {
}
test "math.exp2" {
expect(exp2(f32(0.8923)) == exp2_32(0.8923));
expect(exp2(f64(0.8923)) == exp2_64(0.8923));
expect(exp2(@as(f32, 0.8923)) == exp2_32(0.8923));
expect(exp2(@as(f64, 0.8923)) == exp2_64(0.8923));
}
test "math.exp2_32" {

View File

@ -287,8 +287,8 @@ fn expm1_64(x_: f64) f64 {
}
test "math.exp1m" {
expect(expm1(f32(0.0)) == expm1_32(0.0));
expect(expm1(f64(0.0)) == expm1_64(0.0));
expect(expm1(@as(f32, 0.0)) == expm1_32(0.0));
expect(expm1(@as(f64, 0.0)) == expm1_64(0.0));
}
test "math.expm1_32" {

View File

@ -30,6 +30,6 @@ fn expo2d(x: f64) f64 {
const kln2 = 0x1.62066151ADD8BP+10;
const u = (0x3FF + k / 2) << 20;
const scale = @bitCast(f64, u64(u) << 32);
const scale = @bitCast(f64, @as(u64, u) << 32);
return math.exp(x - kln2) * scale * scale;
}

View File

@ -50,10 +50,10 @@ fn fabs128(x: f128) f128 {
}
test "math.fabs" {
expect(fabs(f16(1.0)) == fabs16(1.0));
expect(fabs(f32(1.0)) == fabs32(1.0));
expect(fabs(f64(1.0)) == fabs64(1.0));
expect(fabs(f128(1.0)) == fabs128(1.0));
expect(fabs(@as(f16, 1.0)) == fabs16(1.0));
expect(fabs(@as(f32, 1.0)) == fabs32(1.0));
expect(fabs(@as(f64, 1.0)) == fabs64(1.0));
expect(fabs(@as(f128, 1.0)) == fabs128(1.0));
}
test "math.fabs16" {

View File

@ -40,7 +40,7 @@ fn floor16(x: f16) f16 {
}
if (e >= 0) {
m = u16(1023) >> @intCast(u4, e);
m = @as(u16, 1023) >> @intCast(u4, e);
if (u & m == 0) {
return x;
}
@ -74,7 +74,7 @@ fn floor32(x: f32) f32 {
}
if (e >= 0) {
m = u32(0x007FFFFF) >> @intCast(u5, e);
m = @as(u32, 0x007FFFFF) >> @intCast(u5, e);
if (u & m == 0) {
return x;
}
@ -123,9 +123,9 @@ fn floor64(x: f64) f64 {
}
test "math.floor" {
expect(floor(f16(1.3)) == floor16(1.3));
expect(floor(f32(1.3)) == floor32(1.3));
expect(floor(f64(1.3)) == floor64(1.3));
expect(floor(@as(f16, 1.3)) == floor16(1.3));
expect(floor(@as(f32, 1.3)) == floor32(1.3));
expect(floor(@as(f64, 1.3)) == floor64(1.3));
}
test "math.floor16" {

View File

@ -18,7 +18,7 @@ pub fn fma(comptime T: type, x: T, y: T, z: T) T {
}
fn fma32(x: f32, y: f32, z: f32) f32 {
const xy = f64(x) * y;
const xy = @as(f64, x) * y;
const xy_z = xy + z;
const u = @bitCast(u64, xy_z);
const e = (u >> 52) & 0x7FF;

View File

@ -108,11 +108,11 @@ fn frexp64(x: f64) frexp64_result {
}
test "math.frexp" {
const a = frexp(f32(1.3));
const a = frexp(@as(f32, 1.3));
const b = frexp32(1.3);
expect(a.significand == b.significand and a.exponent == b.exponent);
const c = frexp(f64(1.3));
const c = frexp(@as(f64, 1.3));
const d = frexp64(1.3);
expect(c.significand == d.significand and c.exponent == d.exponent);
}
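
frexp splits a float into a significand in [0.5, 1) and a power-of-two exponent, which is why the updated test compares the two results field by field. A small sketch of the decomposition (not part of this diff):

const std = @import("std");
const math = std.math;

test "frexp decomposition" {
    // 1.3 == 0.65 * 2^1: the exponent is 1, and doubling the significand
    // reproduces the input exactly.
    const r = math.frexp(@as(f64, 1.3));
    std.testing.expect(r.exponent == 1);
    std.testing.expect(r.significand * 2.0 == @as(f64, 1.3));
}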

View File

@ -56,7 +56,7 @@ fn hypot32(x: f32, y: f32) f32 {
yy *= 0x1.0p-90;
}
return z * math.sqrt(@floatCast(f32, f64(x) * x + f64(y) * y));
return z * math.sqrt(@floatCast(f32, @as(f64, x) * x + @as(f64, y) * y));
}
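
hypot32 widens both inputs to f64 before squaring (@as(f64, x) * x), so the intermediate squares cannot overflow an f32. A brief usage check with an exact 3-4-5 triangle (not part of this diff):

const std = @import("std");
const math = std.math;

test "hypot usage" {
    std.testing.expect(math.hypot(f32, 3.0, 4.0) == 5.0);
}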
fn sq(hi: *f64, lo: *f64, x: f64) void {

View File

@ -26,7 +26,7 @@ pub fn ilogb(x: var) i32 {
}
// NOTE: Should these be exposed publicly?
const fp_ilogbnan = -1 - i32(maxInt(u32) >> 1);
const fp_ilogbnan = -1 - @as(i32, maxInt(u32) >> 1);
const fp_ilogb0 = fp_ilogbnan;
fn ilogb32(x: f32) i32 {
@ -101,8 +101,8 @@ fn ilogb64(x: f64) i32 {
}
test "math.ilogb" {
expect(ilogb(f32(0.2)) == ilogb32(0.2));
expect(ilogb(f64(0.2)) == ilogb64(0.2));
expect(ilogb(@as(f32, 0.2)) == ilogb32(0.2));
expect(ilogb(@as(f64, 0.2)) == ilogb64(0.2));
}
test "math.ilogb32" {

View File

@ -26,12 +26,12 @@ pub fn isFinite(x: var) bool {
}
test "math.isFinite" {
expect(isFinite(f16(0.0)));
expect(isFinite(f16(-0.0)));
expect(isFinite(f32(0.0)));
expect(isFinite(f32(-0.0)));
expect(isFinite(f64(0.0)));
expect(isFinite(f64(-0.0)));
expect(isFinite(@as(f16, 0.0)));
expect(isFinite(@as(f16, -0.0)));
expect(isFinite(@as(f32, 0.0)));
expect(isFinite(@as(f32, -0.0)));
expect(isFinite(@as(f64, 0.0)));
expect(isFinite(@as(f64, -0.0)));
expect(!isFinite(math.inf(f16)));
expect(!isFinite(-math.inf(f16)));
expect(!isFinite(math.inf(f32)));

Some files were not shown because too many files have changed in this diff