Merge pull request #1032 from ziglang/pointer-reform

use * for pointer type instead of &
This commit is contained in:
Andrew Kelley 2018-06-01 11:49:25 -04:00 committed by GitHub
commit 3918e7699d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
150 changed files with 2437 additions and 2349 deletions

View File

@ -10,7 +10,7 @@ const ArrayList = std.ArrayList;
const Buffer = std.Buffer;
const io = std.io;
pub fn build(b: &Builder) !void {
pub fn build(b: *Builder) !void {
const mode = b.standardReleaseOptions();
var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
@ -132,7 +132,7 @@ pub fn build(b: &Builder) !void {
test_step.dependOn(tests.addGenHTests(b, test_filter));
}
fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) void {
fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) void {
for (dep.libdirs.toSliceConst()) |lib_dir| {
lib_exe_obj.addLibPath(lib_dir);
}
@ -147,7 +147,7 @@ fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) vo
}
}
fn addCppLib(b: &Builder, lib_exe_obj: &std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
fn addCppLib(b: *Builder, lib_exe_obj: *std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
}
@ -159,7 +159,7 @@ const LibraryDep = struct {
includes: ArrayList([]const u8),
};
fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
const libs_output = try b.exec([][]const u8{
llvm_config_exe,
"--libs",
@ -217,7 +217,7 @@ fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
return result;
}
pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
pub fn installStdLib(b: *Builder, stdlib_files: []const u8) void {
var it = mem.split(stdlib_files, ";");
while (it.next()) |stdlib_file| {
const src_path = os.path.join(b.allocator, "std", stdlib_file) catch unreachable;
@ -226,7 +226,7 @@ pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
}
}
pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
pub fn installCHeaders(b: *Builder, c_header_files: []const u8) void {
var it = mem.split(c_header_files, ";");
while (it.next()) |c_header_file| {
const src_path = os.path.join(b.allocator, "c_headers", c_header_file) catch unreachable;
@ -235,7 +235,7 @@ pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
}
}
fn nextValue(index: &usize, build_info: []const u8) []const u8 {
fn nextValue(index: *usize, build_info: []const u8) []const u8 {
const start = index.*;
while (true) : (index.* += 1) {
switch (build_info[index.*]) {

View File

@ -104,7 +104,7 @@ const Tokenizer = struct {
};
}
fn next(self: &Tokenizer) Token {
fn next(self: *Tokenizer) Token {
var result = Token{
.id = Token.Id.Eof,
.start = self.index,
@ -196,7 +196,7 @@ const Tokenizer = struct {
line_end: usize,
};
fn getTokenLocation(self: &Tokenizer, token: &const Token) Location {
fn getTokenLocation(self: *Tokenizer, token: *const Token) Location {
var loc = Location{
.line = 0,
.column = 0,
@ -221,7 +221,7 @@ const Tokenizer = struct {
}
};
fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const u8, args: ...) error {
fn parseError(tokenizer: *Tokenizer, token: *const Token, comptime fmt: []const u8, args: ...) error {
const loc = tokenizer.getTokenLocation(token);
warn("{}:{}:{}: error: " ++ fmt ++ "\n", tokenizer.source_file_name, loc.line + 1, loc.column + 1, args);
if (loc.line_start <= loc.line_end) {
@ -244,13 +244,13 @@ fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const
return error.ParseError;
}
fn assertToken(tokenizer: &Tokenizer, token: &const Token, id: Token.Id) !void {
fn assertToken(tokenizer: *Tokenizer, token: *const Token, id: Token.Id) !void {
if (token.id != id) {
return parseError(tokenizer, token, "expected {}, found {}", @tagName(id), @tagName(token.id));
}
}
fn eatToken(tokenizer: &Tokenizer, id: Token.Id) !Token {
fn eatToken(tokenizer: *Tokenizer, id: Token.Id) !Token {
const token = tokenizer.next();
try assertToken(tokenizer, token, id);
return token;
@ -317,7 +317,7 @@ const Action = enum {
Close,
};
fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
var urls = std.HashMap([]const u8, Token, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator);
errdefer urls.deinit();
@ -546,7 +546,7 @@ fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
};
}
fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
fn urlize(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@ -566,7 +566,7 @@ fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}
fn escapeHtml(allocator: &mem.Allocator, input: []const u8) ![]u8 {
fn escapeHtml(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@ -608,7 +608,7 @@ test "term color" {
assert(mem.eql(u8, result, "A<span class=\"t32\">green</span>B"));
}
fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
fn termColor(allocator: *mem.Allocator, input: []const u8) ![]u8 {
var buf = try std.Buffer.initSize(allocator, 0);
defer buf.deinit();
@ -688,7 +688,7 @@ fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
return buf.toOwnedSlice();
}
fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var, zig_exe: []const u8) !void {
fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var, zig_exe: []const u8) !void {
var code_progress_index: usize = 0;
for (toc.nodes) |node| {
switch (node) {
@ -1036,7 +1036,7 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
}
}
fn exec(allocator: &mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
fn exec(allocator: *mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
const result = try os.ChildProcess.exec(allocator, args, null, null, max_doc_file_size);
switch (result.term) {
os.ChildProcess.Term.Exited => |exit_code| {

View File

@ -458,7 +458,7 @@ test "string literals" {
// A C string literal is a null terminated pointer.
const null_terminated_bytes = c"hello";
assert(@typeOf(null_terminated_bytes) == &const u8);
assert(@typeOf(null_terminated_bytes) == *const u8);
assert(null_terminated_bytes[5] == 0);
}
{#code_end#}
@ -547,7 +547,7 @@ const c_string_literal =
;
{#code_end#}
<p>
In this example the variable <code>c_string_literal</code> has type <code>&amp;const char</code> and
In this example the variable <code>c_string_literal</code> has type <code>*const char</code> and
has a terminating null byte.
</p>
{#see_also|@embedFile#}
@ -1403,12 +1403,12 @@ test "address of syntax" {
assert(x_ptr.* == 1234);
// When you get the address of a const variable, you get a const pointer.
assert(@typeOf(x_ptr) == &const i32);
assert(@typeOf(x_ptr) == *const i32);
// If you want to mutate the value, you'd need an address of a mutable variable:
var y: i32 = 5678;
const y_ptr = &y;
assert(@typeOf(y_ptr) == &i32);
assert(@typeOf(y_ptr) == *i32);
y_ptr.* += 1;
assert(y_ptr.* == 5679);
}
@ -1455,7 +1455,7 @@ comptime {
test "@ptrToInt and @intToPtr" {
// To convert an integer address into a pointer, use @intToPtr:
const ptr = @intToPtr(&i32, 0xdeadbeef);
const ptr = @intToPtr(*i32, 0xdeadbeef);
// To convert a pointer to an integer, use @ptrToInt:
const addr = @ptrToInt(ptr);
@ -1467,7 +1467,7 @@ test "@ptrToInt and @intToPtr" {
comptime {
// Zig is able to do this at compile-time, as long as
// ptr is never dereferenced.
const ptr = @intToPtr(&i32, 0xdeadbeef);
const ptr = @intToPtr(*i32, 0xdeadbeef);
const addr = @ptrToInt(ptr);
assert(@typeOf(addr) == usize);
assert(addr == 0xdeadbeef);
@ -1477,17 +1477,17 @@ test "volatile" {
// In Zig, loads and stores are assumed to not have side effects.
// If a given load or store should have side effects, such as
// Memory Mapped Input/Output (MMIO), use `volatile`:
const mmio_ptr = @intToPtr(&volatile u8, 0x12345678);
const mmio_ptr = @intToPtr(*volatile u8, 0x12345678);
// Now loads and stores with mmio_ptr are guaranteed to all happen
// and in the same order as in source code.
assert(@typeOf(mmio_ptr) == &volatile u8);
assert(@typeOf(mmio_ptr) == *volatile u8);
}
test "nullable pointers" {
// Pointers cannot be null. If you want a null pointer, use the nullable
// prefix `?` to make the pointer type nullable.
var ptr: ?&i32 = null;
var ptr: ?*i32 = null;
var x: i32 = 1;
ptr = &x;
@ -1496,7 +1496,7 @@ test "nullable pointers" {
// Nullable pointers are the same size as normal pointers, because pointer
// value 0 is used as the null value.
assert(@sizeOf(?&i32) == @sizeOf(&i32));
assert(@sizeOf(?*i32) == @sizeOf(*i32));
}
test "pointer casting" {
@ -1504,7 +1504,7 @@ test "pointer casting" {
// operation that Zig cannot protect you against. Use @ptrCast only when other
// conversions are not possible.
const bytes align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12};
const u32_ptr = @ptrCast(&const u32, &bytes[0]);
const u32_ptr = @ptrCast(*const u32, &bytes[0]);
assert(u32_ptr.* == 0x12121212);
// Even this example is contrived - there are better ways to do the above than
@ -1518,7 +1518,7 @@ test "pointer casting" {
test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
assert((&u32).Child == u32);
assert((*u32).Child == u32);
}
{#code_end#}
{#header_open|Alignment#}
@ -1543,15 +1543,15 @@ const builtin = @import("builtin");
test "variable alignment" {
var x: i32 = 1234;
const align_of_i32 = @alignOf(@typeOf(x));
assert(@typeOf(&x) == &i32);
assert(&i32 == &align(align_of_i32) i32);
assert(@typeOf(&x) == *i32);
assert(*i32 == *align(align_of_i32) i32);
if (builtin.arch == builtin.Arch.x86_64) {
assert((&i32).alignment == 4);
assert((*i32).alignment == 4);
}
}
{#code_end#}
<p>In the same way that a <code>&amp;i32</code> can be implicitly cast to a
<code>&amp;const i32</code>, a pointer with a larger alignment can be implicitly
<p>In the same way that a <code>*i32</code> can be implicitly cast to a
<code>*const i32</code>, a pointer with a larger alignment can be implicitly
cast to a pointer with a smaller alignment, but not vice versa.
</p>
<p>
@ -1565,7 +1565,7 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@typeOf(&foo).alignment == 4);
assert(@typeOf(&foo) == &align(4) u8);
assert(@typeOf(&foo) == *align(4) u8);
const slice = (&foo)[0..1];
assert(@typeOf(slice) == []align(4) u8);
}
@ -1610,7 +1610,7 @@ fn foo(bytes: []u8) u32 {
<code>u8</code> can alias any memory.
</p>
<p>As an example, this code produces undefined behavior:</p>
<pre><code class="zig">@ptrCast(&amp;u32, f32(12.34)).*</code></pre>
<pre><code class="zig">@ptrCast(*u32, f32(12.34)).*</code></pre>
<p>Instead, use {#link|@bitCast#}:
<pre><code class="zig">@bitCast(u32, f32(12.34))</code></pre>
<p>As an added benefit, the <code>@bitcast</code> version works at compile-time.</p>
@ -1736,7 +1736,7 @@ const Vec3 = struct {
};
}
pub fn dot(self: &const Vec3, other: &const Vec3) f32 {
pub fn dot(self: *const Vec3, other: *const Vec3) f32 {
return self.x * other.x + self.y * other.y + self.z * other.z;
}
};
@ -1768,7 +1768,7 @@ test "struct namespaced variable" {
// struct field order is determined by the compiler for optimal performance.
// however, you can still calculate a struct base pointer given a field pointer:
fn setYBasedOnX(x: &f32, y: f32) void {
fn setYBasedOnX(x: *f32, y: f32) void {
const point = @fieldParentPtr(Point, "x", x);
point.y = y;
}
@ -1786,13 +1786,13 @@ test "field parent pointer" {
fn LinkedList(comptime T: type) type {
return struct {
pub const Node = struct {
prev: ?&Node,
next: ?&Node,
prev: ?*Node,
next: ?*Node,
data: T,
};
first: ?&Node,
last: ?&Node,
first: ?*Node,
last: ?*Node,
len: usize,
};
}
@ -2039,7 +2039,7 @@ const Variant = union(enum) {
Int: i32,
Bool: bool,
fn truthy(self: &const Variant) bool {
fn truthy(self: *const Variant) bool {
return switch (self.*) {
Variant.Int => |x_int| x_int != 0,
Variant.Bool => |x_bool| x_bool,
@ -2786,7 +2786,7 @@ test "pass aggregate type by value to function" {
}
{#code_end#}
<p>
Instead, one must use <code>&amp;const</code>. Zig allows implicitly casting something
Instead, one must use <code>*const</code>. Zig allows implicitly casting something
to a const pointer to it:
</p>
{#code_begin|test#}
@ -2794,7 +2794,7 @@ const Foo = struct {
x: i32,
};
fn bar(foo: &const Foo) void {}
fn bar(foo: *const Foo) void {}
test "implicitly cast to const pointer" {
bar(Foo {.x = 12,});
@ -3208,16 +3208,16 @@ struct Foo *do_a_thing(void) {
<p>Zig code</p>
{#code_begin|syntax#}
// malloc prototype included for reference
extern fn malloc(size: size_t) ?&u8;
extern fn malloc(size: size_t) ?*u8;
fn doAThing() ?&Foo {
fn doAThing() ?*Foo {
const ptr = malloc(1234) ?? return null;
// ...
}
{#code_end#}
<p>
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
is <code>&u8</code> <em>not</em> <code>?&u8</code>. The <code>??</code> operator
is <code>*u8</code> <em>not</em> <code>?*u8</code>. The <code>??</code> operator
unwrapped the nullable type and therefore <code>ptr</code> is guaranteed to be non-null everywhere
it is used in the function.
</p>
@ -3237,7 +3237,7 @@ fn doAThing() ?&Foo {
In Zig you can accomplish the same thing:
</p>
{#code_begin|syntax#}
fn doAThing(nullable_foo: ?&Foo) void {
fn doAThing(nullable_foo: ?*Foo) void {
// do some stuff
if (nullable_foo) |foo| {
@ -3713,7 +3713,7 @@ fn List(comptime T: type) type {
</p>
{#code_begin|syntax#}
const Node = struct {
next: &Node,
next: *Node,
name: []u8,
};
{#code_end#}
@ -3745,7 +3745,7 @@ pub fn main() void {
{#code_begin|syntax#}
/// Calls print and then flushes the buffer.
pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!void {
pub fn printf(self: *OutStream, comptime format: []const u8, args: ...) error!void {
const State = enum {
Start,
OpenBrace,
@ -3817,7 +3817,7 @@ pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!vo
and emits a function that actually looks like this:
</p>
{#code_begin|syntax#}
pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void {
pub fn printf(self: *OutStream, arg0: i32, arg1: []const u8) !void {
try self.write("here is a string: '");
try self.printValue(arg0);
try self.write("' here is a number: ");
@ -3831,7 +3831,7 @@ pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void {
on the type:
</p>
{#code_begin|syntax#}
pub fn printValue(self: &OutStream, value: var) !void {
pub fn printValue(self: *OutStream, value: var) !void {
const T = @typeOf(value);
if (@isInteger(T)) {
return self.printInt(T, value);
@ -3911,7 +3911,7 @@ pub fn main() void {
at compile time.
</p>
{#header_open|@addWithOverflow#}
<pre><code class="zig">@addWithOverflow(comptime T: type, a: T, b: T, result: &T) -&gt; bool</code></pre>
<pre><code class="zig">@addWithOverflow(comptime T: type, a: T, b: T, result: *T) bool</code></pre>
<p>
Performs <code>result.* = a + b</code>. If overflow or underflow occurs,
stores the overflowed bits in <code>result</code> and returns <code>true</code>.
@ -3919,7 +3919,7 @@ pub fn main() void {
</p>
{#header_close#}
{#header_open|@ArgType#}
<pre><code class="zig">@ArgType(comptime T: type, comptime n: usize) -&gt; type</code></pre>
<pre><code class="zig">@ArgType(comptime T: type, comptime n: usize) type</code></pre>
<p>
This builtin function takes a function type and returns the type of the parameter at index <code>n</code>.
</p>
@ -3931,7 +3931,7 @@ pub fn main() void {
</p>
{#header_close#}
{#header_open|@atomicLoad#}
<pre><code class="zig">@atomicLoad(comptime T: type, ptr: &amp;const T, comptime ordering: builtin.AtomicOrder) -&gt; T</code></pre>
<pre><code class="zig">@atomicLoad(comptime T: type, ptr: *const T, comptime ordering: builtin.AtomicOrder) T</code></pre>
<p>
This builtin function atomically dereferences a pointer and returns the value.
</p>
@ -3950,7 +3950,7 @@ pub fn main() void {
</p>
{#header_close#}
{#header_open|@atomicRmw#}
<pre><code class="zig">@atomicRmw(comptime T: type, ptr: &amp;T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) -&gt; T</code></pre>
<pre><code class="zig">@atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T</code></pre>
<p>
This builtin function atomically modifies memory and then returns the previous value.
</p>
@ -3969,7 +3969,7 @@ pub fn main() void {
</p>
{#header_close#}
{#header_open|@bitCast#}
<pre><code class="zig">@bitCast(comptime DestType: type, value: var) -&gt; DestType</code></pre>
<pre><code class="zig">@bitCast(comptime DestType: type, value: var) DestType</code></pre>
<p>
Converts a value of one type to another type.
</p>
@ -4002,9 +4002,9 @@ pub fn main() void {
{#header_close#}
{#header_open|@alignCast#}
<pre><code class="zig">@alignCast(comptime alignment: u29, ptr: var) -&gt; var</code></pre>
<pre><code class="zig">@alignCast(comptime alignment: u29, ptr: var) var</code></pre>
<p>
<code>ptr</code> can be <code>&amp;T</code>, <code>fn()</code>, <code>?&amp;T</code>,
<code>ptr</code> can be <code>*T</code>, <code>fn()</code>, <code>?*T</code>,
<code>?fn()</code>, or <code>[]T</code>. It returns the same type as <code>ptr</code>
except with the alignment adjusted to the new value.
</p>
@ -4013,7 +4013,7 @@ pub fn main() void {
{#header_close#}
{#header_open|@alignOf#}
<pre><code class="zig">@alignOf(comptime T: type) -&gt; (number literal)</code></pre>
<pre><code class="zig">@alignOf(comptime T: type) (number literal)</code></pre>
<p>
This function returns the number of bytes that this type should be aligned to
for the current target to match the C ABI. When the child type of a pointer has
@ -4021,7 +4021,7 @@ pub fn main() void {
</p>
<pre><code class="zig">const assert = @import("std").debug.assert;
comptime {
assert(&u32 == &align(@alignOf(u32)) u32);
assert(*u32 == *align(@alignOf(u32)) u32);
}</code></pre>
<p>
The result is a target-specific compile time constant. It is guaranteed to be
@ -4049,7 +4049,7 @@ comptime {
{#see_also|Import from C Header File|@cInclude|@cImport|@cUndef|void#}
{#header_close#}
{#header_open|@cImport#}
<pre><code class="zig">@cImport(expression) -&gt; (namespace)</code></pre>
<pre><code class="zig">@cImport(expression) (namespace)</code></pre>
<p>
This function parses C code and imports the functions, types, variables, and
compatible macro definitions into the result namespace.
@ -4095,13 +4095,13 @@ comptime {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#}
{#header_close#}
{#header_open|@canImplicitCast#}
<pre><code class="zig">@canImplicitCast(comptime T: type, value) -&gt; bool</code></pre>
<pre><code class="zig">@canImplicitCast(comptime T: type, value) bool</code></pre>
<p>
Returns whether a value can be implicitly casted to a given type.
</p>
{#header_close#}
{#header_open|@clz#}
<pre><code class="zig">@clz(x: T) -&gt; U</code></pre>
<pre><code class="zig">@clz(x: T) U</code></pre>
<p>
This function counts the number of leading zeroes in <code>x</code> which is an integer
type <code>T</code>.
@ -4116,13 +4116,13 @@ comptime {
{#header_close#}
{#header_open|@cmpxchgStrong#}
<pre><code class="zig">@cmpxchgStrong(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -&gt; ?T</code></pre>
<pre><code class="zig">@cmpxchgStrong(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T</code></pre>
<p>
This function performs a strong atomic compare exchange operation. It's the equivalent of this code,
except atomic:
</p>
{#code_begin|syntax#}
fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T {
fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value) {
ptr.* = new_value;
@ -4143,13 +4143,13 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_v
{#see_also|Compile Variables|cmpxchgWeak#}
{#header_close#}
{#header_open|@cmpxchgWeak#}
<pre><code class="zig">@cmpxchgWeak(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -&gt; ?T</code></pre>
<pre><code class="zig">@cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T</code></pre>
<p>
This function performs a weak atomic compare exchange operation. It's the equivalent of this code,
except atomic:
</p>
{#code_begin|syntax#}
fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T {
fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T {
const old_value = ptr.*;
if (old_value == expected_value and usuallyTrueButSometimesFalse()) {
ptr.* = new_value;
@ -4237,7 +4237,7 @@ test "main" {
{#code_end#}
{#header_close#}
{#header_open|@ctz#}
<pre><code class="zig">@ctz(x: T) -&gt; U</code></pre>
<pre><code class="zig">@ctz(x: T) U</code></pre>
<p>
This function counts the number of trailing zeroes in <code>x</code> which is an integer
type <code>T</code>.
@ -4251,7 +4251,7 @@ test "main" {
</p>
{#header_close#}
{#header_open|@divExact#}
<pre><code class="zig">@divExact(numerator: T, denominator: T) -&gt; T</code></pre>
<pre><code class="zig">@divExact(numerator: T, denominator: T) T</code></pre>
<p>
Exact division. Caller guarantees <code>denominator != 0</code> and
<code>@divTrunc(numerator, denominator) * denominator == numerator</code>.
@ -4264,7 +4264,7 @@ test "main" {
{#see_also|@divTrunc|@divFloor#}
{#header_close#}
{#header_open|@divFloor#}
<pre><code class="zig">@divFloor(numerator: T, denominator: T) -&gt; T</code></pre>
<pre><code class="zig">@divFloor(numerator: T, denominator: T) T</code></pre>
<p>
Floored division. Rounds toward negative infinity. For unsigned integers it is
the same as <code>numerator / denominator</code>. Caller guarantees <code>denominator != 0</code> and
@ -4278,7 +4278,7 @@ test "main" {
{#see_also|@divTrunc|@divExact#}
{#header_close#}
{#header_open|@divTrunc#}
<pre><code class="zig">@divTrunc(numerator: T, denominator: T) -&gt; T</code></pre>
<pre><code class="zig">@divTrunc(numerator: T, denominator: T) T</code></pre>
<p>
Truncated division. Rounds toward zero. For unsigned integers it is
the same as <code>numerator / denominator</code>. Caller guarantees <code>denominator != 0</code> and
@ -4292,7 +4292,7 @@ test "main" {
{#see_also|@divFloor|@divExact#}
{#header_close#}
{#header_open|@embedFile#}
<pre><code class="zig">@embedFile(comptime path: []const u8) -&gt; [X]u8</code></pre>
<pre><code class="zig">@embedFile(comptime path: []const u8) [X]u8</code></pre>
<p>
This function returns a compile time constant fixed-size array with length
equal to the byte count of the file given by <code>path</code>. The contents of the array
@ -4304,19 +4304,19 @@ test "main" {
{#see_also|@import#}
{#header_close#}
{#header_open|@export#}
<pre><code class="zig">@export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) -&gt; []const u8</code></pre>
<pre><code class="zig">@export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) []const u8</code></pre>
<p>
Creates a symbol in the output object file.
</p>
{#header_close#}
{#header_open|@tagName#}
<pre><code class="zig">@tagName(value: var) -&gt; []const u8</code></pre>
<pre><code class="zig">@tagName(value: var) []const u8</code></pre>
<p>
Converts an enum value or union value to a slice of bytes representing the name.
</p>
{#header_close#}
{#header_open|@TagType#}
<pre><code class="zig">@TagType(T: type) -&gt; type</code></pre>
<pre><code class="zig">@TagType(T: type) type</code></pre>
<p>
For an enum, returns the integer type that is used to store the enumeration value.
</p>
@ -4325,7 +4325,7 @@ test "main" {
</p>
{#header_close#}
{#header_open|@errorName#}
<pre><code class="zig">@errorName(err: error) -&gt; []u8</code></pre>
<pre><code class="zig">@errorName(err: error) []u8</code></pre>
<p>
This function returns the string representation of an error. If an error
declaration is:
@ -4341,7 +4341,7 @@ test "main" {
</p>
{#header_close#}
{#header_open|@errorReturnTrace#}
<pre><code class="zig">@errorReturnTrace() -&gt; ?&builtin.StackTrace</code></pre>
<pre><code class="zig">@errorReturnTrace() ?*builtin.StackTrace</code></pre>
<p>
If the binary is built with error return tracing, and this function is invoked in a
function that calls a function with an error or error union return type, returns a
@ -4360,7 +4360,7 @@ test "main" {
{#header_close#}
{#header_open|@fieldParentPtr#}
<pre><code class="zig">@fieldParentPtr(comptime ParentType: type, comptime field_name: []const u8,
field_ptr: &T) -&gt; &ParentType</code></pre>
field_ptr: *T) *ParentType</code></pre>
<p>
Given a pointer to a field, returns the base pointer of a struct.
</p>
@ -4380,7 +4380,7 @@ test "main" {
</p>
{#header_close#}
{#header_open|@import#}
<pre><code class="zig">@import(comptime path: []u8) -&gt; (namespace)</code></pre>
<pre><code class="zig">@import(comptime path: []u8) (namespace)</code></pre>
<p>
This function finds a zig file corresponding to <code>path</code> and imports all the
public top level declarations into the resulting namespace.
@ -4400,7 +4400,7 @@ test "main" {
{#see_also|Compile Variables|@embedFile#}
{#header_close#}
{#header_open|@inlineCall#}
<pre><code class="zig">@inlineCall(function: X, args: ...) -&gt; Y</code></pre>
<pre><code class="zig">@inlineCall(function: X, args: ...) Y</code></pre>
<p>
This calls a function, in the same way that invoking an expression with parentheses does:
</p>
@ -4420,19 +4420,19 @@ fn add(a: i32, b: i32) i32 { return a + b; }
{#see_also|@noInlineCall#}
{#header_close#}
{#header_open|@intToPtr#}
<pre><code class="zig">@intToPtr(comptime DestType: type, int: usize) -&gt; DestType</code></pre>
<pre><code class="zig">@intToPtr(comptime DestType: type, int: usize) DestType</code></pre>
<p>
Converts an integer to a pointer. To convert the other way, use {#link|@ptrToInt#}.
</p>
{#header_close#}
{#header_open|@IntType#}
<pre><code class="zig">@IntType(comptime is_signed: bool, comptime bit_count: u8) -&gt; type</code></pre>
<pre><code class="zig">@IntType(comptime is_signed: bool, comptime bit_count: u8) type</code></pre>
<p>
This function returns an integer type with the given signedness and bit count.
</p>
{#header_close#}
{#header_open|@maxValue#}
<pre><code class="zig">@maxValue(comptime T: type) -&gt; (number literal)</code></pre>
<pre><code class="zig">@maxValue(comptime T: type) (number literal)</code></pre>
<p>
This function returns the maximum value of the integer type <code>T</code>.
</p>
@ -4441,7 +4441,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
</p>
{#header_close#}
{#header_open|@memberCount#}
<pre><code class="zig">@memberCount(comptime T: type) -&gt; (number literal)</code></pre>
<pre><code class="zig">@memberCount(comptime T: type) (number literal)</code></pre>
<p>
This function returns the number of members in a struct, enum, or union type.
</p>
@ -4453,7 +4453,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
</p>
{#header_close#}
{#header_open|@memberName#}
<pre><code class="zig">@memberName(comptime T: type, comptime index: usize) -&gt; [N]u8</code></pre>
<pre><code class="zig">@memberName(comptime T: type, comptime index: usize) [N]u8</code></pre>
<p>Returns the field name of a struct, union, or enum.</p>
<p>
The result is a compile time constant.
@ -4463,15 +4463,15 @@ fn add(a: i32, b: i32) i32 { return a + b; }
</p>
{#header_close#}
{#header_open|@field#}
<pre><code class="zig">@field(lhs: var, comptime field_name: []const u8) -&gt; (field)</code></pre>
<pre><code class="zig">@field(lhs: var, comptime field_name: []const u8) (field)</code></pre>
<p>Performs field access equivalent to <code>lhs.&lt;field_name&gt;</code>.</p>
{#header_close#}
{#header_open|@memberType#}
<pre><code class="zig">@memberType(comptime T: type, comptime index: usize) -&gt; type</code></pre>
<pre><code class="zig">@memberType(comptime T: type, comptime index: usize) type</code></pre>
<p>Returns the field type of a struct or union.</p>
{#header_close#}
{#header_open|@memcpy#}
<pre><code class="zig">@memcpy(noalias dest: &u8, noalias source: &const u8, byte_count: usize)</code></pre>
<pre><code class="zig">@memcpy(noalias dest: *u8, noalias source: *const u8, byte_count: usize)</code></pre>
<p>
This function copies bytes from one region of memory to another. <code>dest</code> and
<code>source</code> are both pointers and must not overlap.
@ -4489,7 +4489,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }
mem.copy(u8, dest[0...byte_count], source[0...byte_count]);</code></pre>
{#header_close#}
{#header_open|@memset#}
<pre><code class="zig">@memset(dest: &u8, c: u8, byte_count: usize)</code></pre>
<pre><code class="zig">@memset(dest: *u8, c: u8, byte_count: usize)</code></pre>
<p>
This function sets a region of memory to <code>c</code>. <code>dest</code> is a pointer.
</p>
@ -4506,7 +4506,7 @@ mem.copy(u8, dest[0...byte_count], source[0...byte_count]);</code></pre>
mem.set(u8, dest, c);</code></pre>
{#header_close#}
{#header_open|@minValue#}
<pre><code class="zig">@minValue(comptime T: type) -&gt; (number literal)</code></pre>
<pre><code class="zig">@minValue(comptime T: type) (number literal)</code></pre>
<p>
This function returns the minimum value of the integer type T.
</p>
@ -4515,7 +4515,7 @@ mem.set(u8, dest, c);</code></pre>
</p>
{#header_close#}
{#header_open|@mod#}
<pre><code class="zig">@mod(numerator: T, denominator: T) -&gt; T</code></pre>
<pre><code class="zig">@mod(numerator: T, denominator: T) T</code></pre>
<p>
Modulus division. For unsigned integers this is the same as
<code>numerator % denominator</code>. Caller guarantees <code>denominator &gt; 0</code>.
@ -4528,7 +4528,7 @@ mem.set(u8, dest, c);</code></pre>
{#see_also|@rem#}
{#header_close#}
{#header_open|@mulWithOverflow#}
<pre><code class="zig">@mulWithOverflow(comptime T: type, a: T, b: T, result: &T) -&gt; bool</code></pre>
<pre><code class="zig">@mulWithOverflow(comptime T: type, a: T, b: T, result: *T) bool</code></pre>
<p>
Performs <code>result.* = a * b</code>. If overflow or underflow occurs,
stores the overflowed bits in <code>result</code> and returns <code>true</code>.
@ -4536,7 +4536,7 @@ mem.set(u8, dest, c);</code></pre>
</p>
{#header_close#}
{#header_open|@newStackCall#}
<pre><code class="zig">@newStackCall(new_stack: []u8, function: var, args: ...) -&gt; var</code></pre>
<pre><code class="zig">@newStackCall(new_stack: []u8, function: var, args: ...) var</code></pre>
<p>
This calls a function, in the same way that invoking an expression with parentheses does. However,
instead of using the same stack as the caller, the function uses the stack provided in the <code>new_stack</code>
@ -4572,7 +4572,7 @@ fn targetFunction(x: i32) usize {
{#code_end#}
{#header_close#}
{#header_open|@noInlineCall#}
<pre><code class="zig">@noInlineCall(function: var, args: ...) -&gt; var</code></pre>
<pre><code class="zig">@noInlineCall(function: var, args: ...) var</code></pre>
<p>
This calls a function, in the same way that invoking an expression with parentheses does:
</p>
@ -4594,13 +4594,13 @@ fn add(a: i32, b: i32) i32 {
{#see_also|@inlineCall#}
{#header_close#}
{#header_open|@offsetOf#}
<pre><code class="zig">@offsetOf(comptime T: type, comptime field_name: [] const u8) -&gt; (number literal)</code></pre>
<pre><code class="zig">@offsetOf(comptime T: type, comptime field_name: [] const u8) (number literal)</code></pre>
<p>
This function returns the byte offset of a field relative to its containing struct.
</p>
{#header_close#}
{#header_open|@OpaqueType#}
<pre><code class="zig">@OpaqueType() -&gt; type</code></pre>
<pre><code class="zig">@OpaqueType() type</code></pre>
<p>
Creates a new type with an unknown size and alignment.
</p>
@ -4608,12 +4608,12 @@ fn add(a: i32, b: i32) i32 {
This is typically used for type safety when interacting with C code that does not expose struct details.
Example:
</p>
{#code_begin|test_err|expected type '&Derp', found '&Wat'#}
{#code_begin|test_err|expected type '*Derp', found '*Wat'#}
const Derp = @OpaqueType();
const Wat = @OpaqueType();
extern fn bar(d: &Derp) void;
export fn foo(w: &Wat) void {
extern fn bar(d: *Derp) void;
export fn foo(w: *Wat) void {
bar(w);
}
@ -4623,7 +4623,7 @@ test "call foo" {
{#code_end#}
{#header_close#}
{#header_open|@panic#}
<pre><code class="zig">@panic(message: []const u8) -&gt; noreturn</code></pre>
<pre><code class="zig">@panic(message: []const u8) noreturn</code></pre>
<p>
Invokes the panic handler function. By default the panic handler function
calls the public <code>panic</code> function exposed in the root source file, or
@ -4639,19 +4639,19 @@ test "call foo" {
{#see_also|Root Source File#}
{#header_close#}
{#header_open|@ptrCast#}
<pre><code class="zig">@ptrCast(comptime DestType: type, value: var) -&gt; DestType</code></pre>
<pre><code class="zig">@ptrCast(comptime DestType: type, value: var) DestType</code></pre>
<p>
Converts a pointer of one type to a pointer of another type.
</p>
{#header_close#}
{#header_open|@ptrToInt#}
<pre><code class="zig">@ptrToInt(value: var) -&gt; usize</code></pre>
<pre><code class="zig">@ptrToInt(value: var) usize</code></pre>
<p>
Converts <code>value</code> to a <code>usize</code> which is the address of the pointer. <code>value</code> can be one of these types:
</p>
<ul>
<li><code>&amp;T</code></li>
<li><code>?&amp;T</code></li>
<li><code>*T</code></li>
<li><code>?*T</code></li>
<li><code>fn()</code></li>
<li><code>?fn()</code></li>
</ul>
@ -4659,7 +4659,7 @@ test "call foo" {
{#header_close#}
{#header_open|@rem#}
<pre><code class="zig">@rem(numerator: T, denominator: T) -&gt; T</code></pre>
<pre><code class="zig">@rem(numerator: T, denominator: T) T</code></pre>
<p>
Remainder division. For unsigned integers this is the same as
<code>numerator % denominator</code>. Caller guarantees <code>denominator &gt; 0</code>.
@ -4776,13 +4776,13 @@ pub const FloatMode = enum {
{#see_also|Compile Variables#}
{#header_close#}
{#header_open|@setGlobalSection#}
<pre><code class="zig">@setGlobalSection(global_variable_name, comptime section_name: []const u8) -&gt; bool</code></pre>
<pre><code class="zig">@setGlobalSection(global_variable_name, comptime section_name: []const u8) bool</code></pre>
<p>
Puts the global variable in the specified section.
</p>
{#header_close#}
{#header_open|@shlExact#}
<pre><code class="zig">@shlExact(value: T, shift_amt: Log2T) -&gt; T</code></pre>
<pre><code class="zig">@shlExact(value: T, shift_amt: Log2T) T</code></pre>
<p>
Performs the left shift operation (<code>&lt;&lt;</code>). Caller guarantees
that the shift will not shift any 1 bits out.
@ -4794,7 +4794,7 @@ pub const FloatMode = enum {
{#see_also|@shrExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@shlWithOverflow#}
<pre><code class="zig">@shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: &T) -&gt; bool</code></pre>
<pre><code class="zig">@shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: *T) bool</code></pre>
<p>
Performs <code>result.* = a &lt;&lt; b</code>. If overflow or underflow occurs,
stores the overflowed bits in <code>result</code> and returns <code>true</code>.
@ -4807,7 +4807,7 @@ pub const FloatMode = enum {
{#see_also|@shlExact|@shrExact#}
{#header_close#}
{#header_open|@shrExact#}
<pre><code class="zig">@shrExact(value: T, shift_amt: Log2T) -&gt; T</code></pre>
<pre><code class="zig">@shrExact(value: T, shift_amt: Log2T) T</code></pre>
<p>
Performs the right shift operation (<code>&gt;&gt;</code>). Caller guarantees
that the shift will not shift any 1 bits out.
@ -4819,7 +4819,7 @@ pub const FloatMode = enum {
{#see_also|@shlExact|@shlWithOverflow#}
{#header_close#}
{#header_open|@sizeOf#}
<pre><code class="zig">@sizeOf(comptime T: type) -&gt; (number literal)</code></pre>
<pre><code class="zig">@sizeOf(comptime T: type) (number literal)</code></pre>
<p>
This function returns the number of bytes it takes to store <code>T</code> in memory.
</p>
@ -4828,7 +4828,7 @@ pub const FloatMode = enum {
</p>
{#header_close#}
{#header_open|@sqrt#}
<pre><code class="zig">@sqrt(comptime T: type, value: T) -&gt; T</code></pre>
<pre><code class="zig">@sqrt(comptime T: type, value: T) T</code></pre>
<p>
Performs the square root of a floating point number. Uses a dedicated hardware instruction
when available. Currently only supports f32 and f64 at runtime. f128 at runtime is TODO.
@ -4838,7 +4838,7 @@ pub const FloatMode = enum {
</p>
{#header_close#}
{#header_open|@subWithOverflow#}
<pre><code class="zig">@subWithOverflow(comptime T: type, a: T, b: T, result: &T) -&gt; bool</code></pre>
<pre><code class="zig">@subWithOverflow(comptime T: type, a: T, b: T, result: *T) bool</code></pre>
<p>
Performs <code>result.* = a - b</code>. If overflow or underflow occurs,
stores the overflowed bits in <code>result</code> and returns <code>true</code>.
@ -4846,7 +4846,7 @@ pub const FloatMode = enum {
</p>
{#header_close#}
{#header_open|@truncate#}
<pre><code class="zig">@truncate(comptime T: type, integer) -&gt; T</code></pre>
<pre><code class="zig">@truncate(comptime T: type, integer) T</code></pre>
<p>
This function truncates bits from an integer type, resulting in a smaller
integer type.
@ -4870,7 +4870,7 @@ const b: u8 = @truncate(u8, a);
{#header_close#}
{#header_open|@typeId#}
<pre><code class="zig">@typeId(comptime T: type) -&gt; @import("builtin").TypeId</code></pre>
<pre><code class="zig">@typeId(comptime T: type) @import("builtin").TypeId</code></pre>
<p>
Returns which kind of type something is. Possible values:
</p>
@ -4904,7 +4904,7 @@ pub const TypeId = enum {
{#code_end#}
{#header_close#}
{#header_open|@typeInfo#}
<pre><code class="zig">@typeInfo(comptime T: type) -&gt; @import("builtin").TypeInfo</code></pre>
<pre><code class="zig">@typeInfo(comptime T: type) @import("builtin").TypeInfo</code></pre>
<p>
Returns information on the type. Returns a value of the following union:
</p>
@ -5080,14 +5080,14 @@ pub const TypeInfo = union(TypeId) {
{#code_end#}
{#header_close#}
{#header_open|@typeName#}
<pre><code class="zig">@typeName(T: type) -&gt; []u8</code></pre>
<pre><code class="zig">@typeName(T: type) []u8</code></pre>
<p>
This function returns the string representation of a type.
</p>
{#header_close#}
{#header_open|@typeOf#}
<pre><code class="zig">@typeOf(expression) -&gt; type</code></pre>
<pre><code class="zig">@typeOf(expression) type</code></pre>
<p>
This function returns a compile-time constant, which is the type of the
expression passed as an argument. The expression is evaluated.
@ -5937,7 +5937,7 @@ pub const __zig_test_fn_slice = {}; // overwritten later
{#header_open|C String Literals#}
{#code_begin|exe#}
{#link_libc#}
extern fn puts(&const u8) void;
extern fn puts(*const u8) void;
pub fn main() void {
puts(c"this has a null terminator");
@ -5996,8 +5996,8 @@ const c = @cImport({
{#code_begin|syntax#}
const base64 = @import("std").base64;
export fn decode_base_64(dest_ptr: &u8, dest_len: usize,
source_ptr: &const u8, source_len: usize) usize
export fn decode_base_64(dest_ptr: *u8, dest_len: usize,
source_ptr: *const u8, source_len: usize) usize
{
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
@ -6028,7 +6028,7 @@ int main(int argc, char **argv) {
{#code_begin|syntax#}
const Builder = @import("std").build.Builder;
pub fn build(b: &Builder) void {
pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
const exe = b.addCExecutable("test");

View File

@ -41,7 +41,7 @@ fn usage(exe: []const u8) !void {
return error.Invalid;
}
fn cat_file(stdout: &os.File, file: &os.File) !void {
fn cat_file(stdout: *os.File, file: *os.File) !void {
var buf: [1024 * 4]u8 = undefined;
while (true) {

View File

@ -7,7 +7,7 @@ const c = @cImport({
const msg = c"Hello, world!\n";
export fn main(argc: c_int, argv: &&u8) c_int {
export fn main(argc: c_int, argv: **u8) c_int {
if (c.printf(msg) != c_int(c.strlen(msg))) return -1;
return 0;

View File

@ -1,6 +1,6 @@
const base64 = @import("std").base64;
export fn decode_base_64(dest_ptr: &u8, dest_len: usize, source_ptr: &const u8, source_len: usize) usize {
export fn decode_base_64(dest_ptr: *u8, dest_len: usize, source_ptr: *const u8, source_len: usize) usize {
const src = source_ptr[0..source_len];
const dest = dest_ptr[0..dest_len];
const base64_decoder = base64.standard_decoder_unsafe;

View File

@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
pub fn build(b: &Builder) void {
pub fn build(b: *Builder) void {
const obj = b.addObject("base64", "base64.zig");
const exe = b.addCExecutable("test");

View File

@ -1,6 +1,6 @@
const Builder = @import("std").build.Builder;
pub fn build(b: &Builder) void {
pub fn build(b: *Builder) void {
const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0));
const exe = b.addCExecutable("test");

View File

@ -30,7 +30,7 @@ fn argInAllowedSet(maybe_set: ?[]const []const u8, arg: []const u8) bool {
}
// Modifies the current argument index during iteration
fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: &usize) !FlagArg {
fn readFlagArguments(allocator: *Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg {
switch (required) {
0 => return FlagArg{ .None = undefined }, // TODO: Required to force non-tag but value?
1 => {
@ -79,7 +79,7 @@ pub const Args = struct {
flags: HashMapFlags,
positionals: ArrayList([]const u8),
pub fn parse(allocator: &Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
pub fn parse(allocator: *Allocator, comptime spec: []const Flag, args: []const []const u8) !Args {
var parsed = Args{
.flags = HashMapFlags.init(allocator),
.positionals = ArrayList([]const u8).init(allocator),
@ -143,18 +143,18 @@ pub const Args = struct {
return parsed;
}
pub fn deinit(self: &Args) void {
pub fn deinit(self: *Args) void {
self.flags.deinit();
self.positionals.deinit();
}
// e.g. --help
pub fn present(self: &Args, name: []const u8) bool {
pub fn present(self: *Args, name: []const u8) bool {
return self.flags.contains(name);
}
// e.g. --name value
pub fn single(self: &Args, name: []const u8) ?[]const u8 {
pub fn single(self: *Args, name: []const u8) ?[]const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Single => |inner| {
@ -168,7 +168,7 @@ pub const Args = struct {
}
// e.g. --names value1 value2 value3
pub fn many(self: &Args, name: []const u8) ?[]const []const u8 {
pub fn many(self: *Args, name: []const u8) ?[]const []const u8 {
if (self.flags.get(name)) |entry| {
switch (entry.value) {
FlagArg.Many => |inner| {

View File

@ -16,18 +16,18 @@ pub const Msg = struct {
text: []u8,
first_token: TokenIndex,
last_token: TokenIndex,
tree: &ast.Tree,
tree: *ast.Tree,
};
/// `path` must outlive the returned Msg
/// `tree` must outlive the returned Msg
/// Caller owns returned Msg and must free with `allocator`
pub fn createFromParseError(
allocator: &mem.Allocator,
parse_error: &const ast.Error,
tree: &ast.Tree,
allocator: *mem.Allocator,
parse_error: *const ast.Error,
tree: *ast.Tree,
path: []const u8,
) !&Msg {
) !*Msg {
const loc_token = parse_error.loc();
var text_buf = try std.Buffer.initSize(allocator, 0);
defer text_buf.deinit();
@ -47,7 +47,7 @@ pub fn createFromParseError(
return msg;
}
pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void {
pub fn printToStream(stream: var, msg: *const Msg, color_on: bool) !void {
const first_token = msg.tree.tokens.at(msg.first_token);
const last_token = msg.tree.tokens.at(msg.last_token);
const start_loc = msg.tree.tokenLocationPtr(0, first_token);
@ -76,7 +76,7 @@ pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void {
try stream.write("\n");
}
pub fn printToFile(file: &os.File, msg: &const Msg, color: Color) !void {
pub fn printToFile(file: *os.File, msg: *const Msg, color: Color) !void {
const color_on = switch (color) {
Color.Auto => file.isTty(),
Color.On => true,

View File

@ -7,7 +7,7 @@ const os = std.os;
const warn = std.debug.warn;
/// Caller must free result
pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![]u8 {
pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 {
const test_zig_dir = try os.path.join(allocator, test_path, "lib", "zig");
errdefer allocator.free(test_zig_dir);
@ -21,7 +21,7 @@ pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![
}
/// Caller must free result
pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
const self_exe_path = try os.selfExeDirPath(allocator);
defer allocator.free(self_exe_path);
@ -42,7 +42,7 @@ pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 {
return error.FileNotFound;
}
pub fn resolveZigLibDir(allocator: &mem.Allocator) ![]u8 {
pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 {
return findZigLibDir(allocator) catch |err| {
warn(
\\Unable to find zig lib directory: {}.

View File

@ -2,7 +2,7 @@ const Scope = @import("scope.zig").Scope;
pub const Instruction = struct {
id: Id,
scope: &Scope,
scope: *Scope,
pub const Id = enum {
Br,

View File

@ -18,8 +18,8 @@ const Target = @import("target.zig").Target;
const errmsg = @import("errmsg.zig");
var stderr_file: os.File = undefined;
var stderr: &io.OutStream(io.FileOutStream.Error) = undefined;
var stdout: &io.OutStream(io.FileOutStream.Error) = undefined;
var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
const usage =
\\usage: zig [command] [options]
@ -43,7 +43,7 @@ const usage =
const Command = struct {
name: []const u8,
exec: fn (&Allocator, []const []const u8) error!void,
exec: fn (*Allocator, []const []const u8) error!void,
};
pub fn main() !void {
@ -191,7 +191,7 @@ const missing_build_file =
\\
;
fn cmdBuild(allocator: &Allocator, args: []const []const u8) !void {
fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
@ -426,7 +426,7 @@ const args_build_generic = []Flag{
Flag.Arg1("--ver-patch"),
};
fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Module.Kind) !void {
fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Module.Kind) !void {
var flags = try Args.parse(allocator, args_build_generic, args);
defer flags.deinit();
@ -661,19 +661,19 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo
try stderr.print("building {}: {}\n", @tagName(out_type), in_file);
}
fn cmdBuildExe(allocator: &Allocator, args: []const []const u8) !void {
fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Exe);
}
// cmd:build-lib ///////////////////////////////////////////////////////////////////////////////////
fn cmdBuildLib(allocator: &Allocator, args: []const []const u8) !void {
fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Lib);
}
// cmd:build-obj ///////////////////////////////////////////////////////////////////////////////////
fn cmdBuildObj(allocator: &Allocator, args: []const []const u8) !void {
fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void {
try buildOutputType(allocator, args, Module.Kind.Obj);
}
@ -700,7 +700,7 @@ const args_fmt_spec = []Flag{
}),
};
fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_fmt_spec, args);
defer flags.deinit();
@ -768,7 +768,7 @@ fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void {
// cmd:targets /////////////////////////////////////////////////////////////////////////////////////
fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write("Architectures:\n");
{
comptime var i: usize = 0;
@ -810,7 +810,7 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void {
// cmd:version /////////////////////////////////////////////////////////////////////////////////////
fn cmdVersion(allocator: &Allocator, args: []const []const u8) !void {
fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING));
}
@ -827,7 +827,7 @@ const usage_test =
const args_test_spec = []Flag{Flag.Bool("--help")};
fn cmdTest(allocator: &Allocator, args: []const []const u8) !void {
fn cmdTest(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_build_spec, args);
defer flags.deinit();
@ -862,7 +862,7 @@ const usage_run =
const args_run_spec = []Flag{Flag.Bool("--help")};
fn cmdRun(allocator: &Allocator, args: []const []const u8) !void {
fn cmdRun(allocator: *Allocator, args: []const []const u8) !void {
var compile_args = args;
var runtime_args: []const []const u8 = []const []const u8{};
@ -912,7 +912,7 @@ const args_translate_c_spec = []Flag{
Flag.Arg1("--output"),
};
fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
fn cmdTranslateC(allocator: *Allocator, args: []const []const u8) !void {
var flags = try Args.parse(allocator, args_translate_c_spec, args);
defer flags.deinit();
@ -958,7 +958,7 @@ fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void {
// cmd:help ////////////////////////////////////////////////////////////////////////////////////////
fn cmdHelp(allocator: &Allocator, args: []const []const u8) !void {
fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void {
try stderr.write(usage);
}
@ -981,7 +981,7 @@ const info_zen =
\\
;
fn cmdZen(allocator: &Allocator, args: []const []const u8) !void {
fn cmdZen(allocator: *Allocator, args: []const []const u8) !void {
try stdout.write(info_zen);
}
@ -996,7 +996,7 @@ const usage_internal =
\\
;
fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void {
if (args.len == 0) {
try stderr.write(usage_internal);
os.exit(1);
@ -1018,7 +1018,7 @@ fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void {
try stderr.write(usage_internal);
}
fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void {
fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void {
try stdout.print(
\\ZIG_CMAKE_BINARY_DIR {}
\\ZIG_CXX_COMPILER {}

View File

@ -13,7 +13,7 @@ const ArrayList = std.ArrayList;
const errmsg = @import("errmsg.zig");
pub const Module = struct {
allocator: &mem.Allocator,
allocator: *mem.Allocator,
name: Buffer,
root_src_path: ?[]const u8,
module: llvm.ModuleRef,
@ -53,8 +53,8 @@ pub const Module = struct {
windows_subsystem_windows: bool,
windows_subsystem_console: bool,
link_libs_list: ArrayList(&LinkLib),
libc_link_lib: ?&LinkLib,
link_libs_list: ArrayList(*LinkLib),
libc_link_lib: ?*LinkLib,
err_color: errmsg.Color,
@ -106,19 +106,19 @@ pub const Module = struct {
pub const CliPkg = struct {
name: []const u8,
path: []const u8,
children: ArrayList(&CliPkg),
parent: ?&CliPkg,
children: ArrayList(*CliPkg),
parent: ?*CliPkg,
pub fn init(allocator: &mem.Allocator, name: []const u8, path: []const u8, parent: ?&CliPkg) !&CliPkg {
pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg {
var pkg = try allocator.create(CliPkg);
pkg.name = name;
pkg.path = path;
pkg.children = ArrayList(&CliPkg).init(allocator);
pkg.children = ArrayList(*CliPkg).init(allocator);
pkg.parent = parent;
return pkg;
}
pub fn deinit(self: &CliPkg) void {
pub fn deinit(self: *CliPkg) void {
for (self.children.toSliceConst()) |child| {
child.deinit();
}
@ -126,7 +126,7 @@ pub const Module = struct {
}
};
pub fn create(allocator: &mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: &const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !&Module {
pub fn create(allocator: *mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: *const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !*Module {
var name_buffer = try Buffer.init(allocator, name);
errdefer name_buffer.deinit();
@ -188,7 +188,7 @@ pub const Module = struct {
.link_objects = [][]const u8{},
.windows_subsystem_windows = false,
.windows_subsystem_console = false,
.link_libs_list = ArrayList(&LinkLib).init(allocator),
.link_libs_list = ArrayList(*LinkLib).init(allocator),
.libc_link_lib = null,
.err_color = errmsg.Color.Auto,
.darwin_frameworks = [][]const u8{},
@ -200,11 +200,11 @@ pub const Module = struct {
return module_ptr;
}
fn dump(self: &Module) void {
fn dump(self: *Module) void {
c.LLVMDumpModule(self.module);
}
pub fn destroy(self: &Module) void {
pub fn destroy(self: *Module) void {
c.LLVMDisposeBuilder(self.builder);
c.LLVMDisposeModule(self.module);
c.LLVMContextDispose(self.context);
@ -213,7 +213,7 @@ pub const Module = struct {
self.allocator.destroy(self);
}
pub fn build(self: &Module) !void {
pub fn build(self: *Module) !void {
if (self.llvm_argv.len != 0) {
var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator, [][]const []const u8{
[][]const u8{"zig (LLVM option parsing)"},
@ -259,12 +259,12 @@ pub const Module = struct {
self.dump();
}
pub fn link(self: &Module, out_file: ?[]const u8) !void {
pub fn link(self: *Module, out_file: ?[]const u8) !void {
warn("TODO link");
return error.Todo;
}
pub fn addLinkLib(self: &Module, name: []const u8, provided_explicitly: bool) !&LinkLib {
pub fn addLinkLib(self: *Module, name: []const u8, provided_explicitly: bool) !*LinkLib {
const is_libc = mem.eql(u8, name, "c");
if (is_libc) {

View File

@ -1,6 +1,6 @@
pub const Scope = struct {
id: Id,
parent: &Scope,
parent: *Scope,
pub const Id = enum {
Decls,

View File

@ -11,7 +11,7 @@ pub const Target = union(enum) {
Native,
Cross: CrossTarget,
pub fn oFileExt(self: &const Target) []const u8 {
pub fn oFileExt(self: *const Target) []const u8 {
const environ = switch (self.*) {
Target.Native => builtin.environ,
Target.Cross => |t| t.environ,
@ -22,28 +22,28 @@ pub const Target = union(enum) {
};
}
pub fn exeFileExt(self: &const Target) []const u8 {
pub fn exeFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
pub fn getOs(self: &const Target) builtin.Os {
pub fn getOs(self: *const Target) builtin.Os {
return switch (self.*) {
Target.Native => builtin.os,
Target.Cross => |t| t.os,
};
}
pub fn isDarwin(self: &const Target) bool {
pub fn isDarwin(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
pub fn isWindows(self: &const Target) bool {
pub fn isWindows(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,

View File

@ -374,7 +374,7 @@ enum NodeType {
NodeTypeCharLiteral,
NodeTypeSymbol,
NodeTypePrefixOpExpr,
NodeTypeAddrOfExpr,
NodeTypePointerType,
NodeTypeFnCallExpr,
NodeTypeArrayAccessExpr,
NodeTypeSliceExpr,
@ -616,6 +616,7 @@ enum PrefixOp {
PrefixOpNegationWrap,
PrefixOpMaybe,
PrefixOpUnwrapMaybe,
PrefixOpAddrOf,
};
struct AstNodePrefixOpExpr {
@ -623,7 +624,7 @@ struct AstNodePrefixOpExpr {
AstNode *primary_expr;
};
struct AstNodeAddrOfExpr {
struct AstNodePointerType {
AstNode *align_expr;
BigInt *bit_offset_start;
BigInt *bit_offset_end;
@ -899,7 +900,7 @@ struct AstNode {
AstNodeBinOpExpr bin_op_expr;
AstNodeCatchExpr unwrap_err_expr;
AstNodePrefixOpExpr prefix_op_expr;
AstNodeAddrOfExpr addr_of_expr;
AstNodePointerType pointer_type;
AstNodeFnCallExpr fn_call_expr;
AstNodeArrayAccessExpr array_access_expr;
AstNodeSliceExpr slice_expr;
@ -2053,7 +2054,7 @@ enum IrInstructionId {
IrInstructionIdTypeInfo,
IrInstructionIdTypeId,
IrInstructionIdSetEvalBranchQuota,
IrInstructionIdPtrTypeOf,
IrInstructionIdPtrType,
IrInstructionIdAlignCast,
IrInstructionIdOpaqueType,
IrInstructionIdSetAlignStack,
@ -2274,8 +2275,6 @@ struct IrInstructionVarPtr {
IrInstruction base;
VariableTableEntry *var;
bool is_const;
bool is_volatile;
};
struct IrInstructionCall {
@ -2412,6 +2411,17 @@ struct IrInstructionArrayType {
IrInstruction *child_type;
};
struct IrInstructionPtrType {
IrInstruction base;
IrInstruction *align_value;
IrInstruction *child_type;
uint32_t bit_offset_start;
uint32_t bit_offset_end;
bool is_const;
bool is_volatile;
};
struct IrInstructionPromiseType {
IrInstruction base;
@ -2891,17 +2901,6 @@ struct IrInstructionSetEvalBranchQuota {
IrInstruction *new_quota;
};
struct IrInstructionPtrTypeOf {
IrInstruction base;
IrInstruction *align_value;
IrInstruction *child_type;
uint32_t bit_offset_start;
uint32_t bit_offset_end;
bool is_const;
bool is_volatile;
};
struct IrInstructionAlignCast {
IrInstruction base;

View File

@ -418,12 +418,12 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
const char *volatile_str = is_volatile ? "volatile " : "";
buf_resize(&entry->name, 0);
if (unaligned_bit_count == 0 && byte_alignment == abi_alignment) {
buf_appendf(&entry->name, "&%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name));
buf_appendf(&entry->name, "*%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name));
} else if (unaligned_bit_count == 0) {
buf_appendf(&entry->name, "&align(%" PRIu32 ") %s%s%s", byte_alignment,
buf_appendf(&entry->name, "*align(%" PRIu32 ") %s%s%s", byte_alignment,
const_str, volatile_str, buf_ptr(&child_type->name));
} else {
buf_appendf(&entry->name, "&align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment,
buf_appendf(&entry->name, "*align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment,
bit_offset, bit_offset + unaligned_bit_count, const_str, volatile_str, buf_ptr(&child_type->name));
}
@ -3270,7 +3270,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeThisLiteral:
case NodeTypeSymbol:
case NodeTypePrefixOpExpr:
case NodeTypeAddrOfExpr:
case NodeTypePointerType:
case NodeTypeIfBoolExpr:
case NodeTypeWhileExpr:
case NodeTypeForExpr:

View File

@ -68,6 +68,7 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpBinNot: return "~";
case PrefixOpMaybe: return "?";
case PrefixOpUnwrapMaybe: return "??";
case PrefixOpAddrOf: return "&";
}
zig_unreachable();
}
@ -185,8 +186,6 @@ static const char *node_type_str(NodeType node_type) {
return "Symbol";
case NodeTypePrefixOpExpr:
return "PrefixOpExpr";
case NodeTypeAddrOfExpr:
return "AddrOfExpr";
case NodeTypeUse:
return "Use";
case NodeTypeBoolLiteral:
@ -251,6 +250,8 @@ static const char *node_type_str(NodeType node_type) {
return "Suspend";
case NodeTypePromiseType:
return "PromiseType";
case NodeTypePointerType:
return "PointerType";
}
zig_unreachable();
}
@ -616,41 +617,41 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, "%s", prefix_op_str(op));
AstNode *child_node = node->data.prefix_op_expr.primary_expr;
bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypeAddrOfExpr;
bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypePointerType;
render_node_extra(ar, child_node, new_grouped);
if (!grouped) fprintf(ar->f, ")");
break;
}
case NodeTypeAddrOfExpr:
case NodeTypePointerType:
{
if (!grouped) fprintf(ar->f, "(");
fprintf(ar->f, "&");
if (node->data.addr_of_expr.align_expr != nullptr) {
fprintf(ar->f, "*");
if (node->data.pointer_type.align_expr != nullptr) {
fprintf(ar->f, "align(");
render_node_grouped(ar, node->data.addr_of_expr.align_expr);
if (node->data.addr_of_expr.bit_offset_start != nullptr) {
assert(node->data.addr_of_expr.bit_offset_end != nullptr);
render_node_grouped(ar, node->data.pointer_type.align_expr);
if (node->data.pointer_type.bit_offset_start != nullptr) {
assert(node->data.pointer_type.bit_offset_end != nullptr);
Buf offset_start_buf = BUF_INIT;
buf_resize(&offset_start_buf, 0);
bigint_append_buf(&offset_start_buf, node->data.addr_of_expr.bit_offset_start, 10);
bigint_append_buf(&offset_start_buf, node->data.pointer_type.bit_offset_start, 10);
Buf offset_end_buf = BUF_INIT;
buf_resize(&offset_end_buf, 0);
bigint_append_buf(&offset_end_buf, node->data.addr_of_expr.bit_offset_end, 10);
bigint_append_buf(&offset_end_buf, node->data.pointer_type.bit_offset_end, 10);
fprintf(ar->f, ":%s:%s ", buf_ptr(&offset_start_buf), buf_ptr(&offset_end_buf));
}
fprintf(ar->f, ") ");
}
if (node->data.addr_of_expr.is_const) {
if (node->data.pointer_type.is_const) {
fprintf(ar->f, "const ");
}
if (node->data.addr_of_expr.is_volatile) {
if (node->data.pointer_type.is_volatile) {
fprintf(ar->f, "volatile ");
}
render_node_ungrouped(ar, node->data.addr_of_expr.op_expr);
render_node_ungrouped(ar, node->data.pointer_type.op_expr);
if (!grouped) fprintf(ar->f, ")");
break;
}
@ -669,7 +670,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, " ");
}
AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypeAddrOfExpr);
bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType);
render_node_extra(ar, fn_ref_node, grouped);
fprintf(ar->f, "(");
for (size_t i = 0; i < node->data.fn_call_expr.params.length; i += 1) {

View File

@ -4600,7 +4600,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdTypeInfo:
case IrInstructionIdTypeId:
case IrInstructionIdSetEvalBranchQuota:
case IrInstructionIdPtrTypeOf:
case IrInstructionIdPtrType:
case IrInstructionIdOpaqueType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdArgType:

View File

@ -41,10 +41,6 @@ struct IrAnalyze {
static const LVal LVAL_NONE = { false, false, false };
static const LVal LVAL_PTR = { true, false, false };
static LVal make_lval_addr(bool is_const, bool is_volatile) {
return { true, is_const, is_volatile };
}
enum ConstCastResultId {
ConstCastResultIdOk,
ConstCastResultIdErrSet,
@ -108,8 +104,7 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg);
static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name,
IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type);
static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr);
static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, VariableTableEntry *var);
static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op);
static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval);
@ -629,8 +624,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSetEvalBranchQuo
return IrInstructionIdSetEvalBranchQuota;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrTypeOf *) {
return IrInstructionIdPtrTypeOf;
static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrType *) {
return IrInstructionIdPtrType;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignCast *) {
@ -1004,13 +999,9 @@ static IrInstruction *ir_build_bin_op_from(IrBuilder *irb, IrInstruction *old_in
return new_instruction;
}
static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node,
VariableTableEntry *var, bool is_const, bool is_volatile)
{
static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, VariableTableEntry *var) {
IrInstructionVarPtr *instruction = ir_build_instruction<IrInstructionVarPtr>(irb, scope, source_node);
instruction->var = var;
instruction->is_const = is_const;
instruction->is_volatile = is_volatile;
ir_ref_var(var);
@ -1196,11 +1187,11 @@ static IrInstruction *ir_build_br_from(IrBuilder *irb, IrInstruction *old_instru
return new_instruction;
}
static IrInstruction *ir_build_ptr_type_of(IrBuilder *irb, Scope *scope, AstNode *source_node,
static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node,
IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value,
uint32_t bit_offset_start, uint32_t bit_offset_end)
{
IrInstructionPtrTypeOf *ptr_type_of_instruction = ir_build_instruction<IrInstructionPtrTypeOf>(irb, scope, source_node);
IrInstructionPtrType *ptr_type_of_instruction = ir_build_instruction<IrInstructionPtrType>(irb, scope, source_node);
ptr_type_of_instruction->align_value = align_value;
ptr_type_of_instruction->child_type = child_type;
ptr_type_of_instruction->is_const = is_const;
@ -3519,8 +3510,7 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node,
VariableTableEntry *var = find_variable(irb->codegen, scope, variable_name);
if (var) {
IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var,
!lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile);
IrInstruction *var_ptr = ir_build_var_ptr(irb, scope, node, var);
if (lval.is_ptr)
return var_ptr;
else
@ -4609,14 +4599,8 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode
}
static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) {
AstNode *expr_node;
if (node->type == NodeTypePrefixOpExpr) {
expr_node = node->data.prefix_op_expr.primary_expr;
} else if (node->type == NodeTypePtrDeref) {
expr_node = node->data.ptr_deref_expr.target;
} else {
zig_unreachable();
}
assert(node->type == NodeTypePrefixOpExpr);
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
if (value == irb->codegen->invalid_instruction)
@ -4640,16 +4624,12 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *
return ir_build_ref(irb, scope, value->source_node, value, lval.is_const, lval.is_volatile);
}
static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypeAddrOfExpr);
bool is_const = node->data.addr_of_expr.is_const;
bool is_volatile = node->data.addr_of_expr.is_volatile;
AstNode *expr_node = node->data.addr_of_expr.op_expr;
AstNode *align_expr = node->data.addr_of_expr.align_expr;
if (align_expr == nullptr && !is_const && !is_volatile) {
return ir_gen_node_extra(irb, expr_node, scope, make_lval_addr(is_const, is_volatile));
}
static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypePointerType);
bool is_const = node->data.pointer_type.is_const;
bool is_volatile = node->data.pointer_type.is_volatile;
AstNode *expr_node = node->data.pointer_type.op_expr;
AstNode *align_expr = node->data.pointer_type.align_expr;
IrInstruction *align_value;
if (align_expr != nullptr) {
@ -4665,27 +4645,27 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return child_type;
uint32_t bit_offset_start = 0;
if (node->data.addr_of_expr.bit_offset_start != nullptr) {
if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_start, 32, false)) {
if (node->data.pointer_type.bit_offset_start != nullptr) {
if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) {
Buf *val_buf = buf_alloc();
bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_start, 10);
bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
bit_offset_start = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_start);
bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start);
}
uint32_t bit_offset_end = 0;
if (node->data.addr_of_expr.bit_offset_end != nullptr) {
if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_end, 32, false)) {
if (node->data.pointer_type.bit_offset_end != nullptr) {
if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) {
Buf *val_buf = buf_alloc();
bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_end, 10);
bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10);
exec_add_error_node(irb->codegen, irb->exec, node,
buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf)));
return irb->codegen->invalid_instruction;
}
bit_offset_end = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_end);
bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end);
}
if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= bit_offset_end) {
@ -4694,7 +4674,7 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n
return irb->codegen->invalid_instruction;
}
return ir_build_ptr_type_of(irb, scope, node, child_type, is_const, is_volatile,
return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile,
align_value, bit_offset_start, bit_offset_end);
}
@ -4761,6 +4741,10 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval);
case PrefixOpUnwrapMaybe:
return ir_gen_maybe_assert_ok(irb, scope, node, lval);
case PrefixOpAddrOf: {
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval);
}
}
zig_unreachable();
}
@ -5150,7 +5134,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
IrInstruction *undefined_value = ir_build_const_undefined(irb, child_scope, elem_node);
ir_build_var_decl(irb, child_scope, elem_node, elem_var, elem_var_type, nullptr, undefined_value);
IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var, false, false);
IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var);
AstNode *index_var_source_node;
VariableTableEntry *index_var;
@ -5168,7 +5152,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo
IrInstruction *zero = ir_build_const_usize(irb, child_scope, node, 0);
IrInstruction *one = ir_build_const_usize(irb, child_scope, node, 1);
ir_build_var_decl(irb, child_scope, index_var_source_node, index_var, usize, nullptr, zero);
IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var, false, false);
IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var);
IrBasicBlock *cond_block = ir_create_basic_block(irb, child_scope, "ForCond");
@ -6397,7 +6381,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast
IrInstruction *promise_result_type = ir_build_promise_result_type(irb, parent_scope, node, target_promise_type);
ir_build_await_bookkeeping(irb, parent_scope, node, promise_result_type);
ir_build_var_decl(irb, parent_scope, node, result_var, promise_result_type, nullptr, undefined_value);
IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var, false, false);
IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var);
ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr);
IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle);
IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node,
@ -6568,8 +6552,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_lval_wrap(irb, scope, ir_gen_if_bool_expr(irb, scope, node), lval);
case NodeTypePrefixOpExpr:
return ir_gen_prefix_op_expr(irb, scope, node, lval);
case NodeTypeAddrOfExpr:
return ir_lval_wrap(irb, scope, ir_gen_address_of(irb, scope, node), lval);
case NodeTypeContainerInitExpr:
return ir_lval_wrap(irb, scope, ir_gen_container_init_expr(irb, scope, node), lval);
case NodeTypeVariableDeclaration:
@ -6592,14 +6574,23 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
}
case NodeTypePtrDeref:
return ir_gen_prefix_op_id_lval(irb, scope, node, IrUnOpDereference, lval);
case NodeTypePtrDeref: {
assert(node->type == NodeTypePtrDeref);
AstNode *expr_node = node->data.ptr_deref_expr.target;
IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
if (value == irb->codegen->invalid_instruction)
return value;
return ir_build_un_op(irb, scope, node, IrUnOpDereference, value);
}
case NodeTypeThisLiteral:
return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval);
case NodeTypeBoolLiteral:
return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval);
case NodeTypeArrayType:
return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval);
case NodeTypePointerType:
return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval);
case NodeTypePromiseType:
return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval);
case NodeTypeStringLiteral:
@ -6711,15 +6702,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type);
// TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa
ir_build_var_decl(irb, coro_scope, node, promise_var, coro_frame_type_value, nullptr, undef);
coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var, false, false);
coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var);
VariableTableEntry *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false);
IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node);
IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node,
get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise));
ir_build_var_decl(irb, coro_scope, node, await_handle_var, await_handle_type_val, nullptr, null_value);
irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node,
await_handle_var, false, false);
irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var);
u8_ptr_type = ir_build_const_type(irb, coro_scope, node,
get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false));
@ -6859,7 +6849,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_mem_ptr_maybe);
IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false);
IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var, true, false);
IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false);
size_t arg_count = 2;
@ -8961,34 +8951,15 @@ static IrInstruction *ir_get_const_ptr(IrAnalyze *ira, IrInstruction *instructio
ConstExprValue *pointee, TypeTableEntry *pointee_type,
ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align)
{
if (pointee_type->id == TypeTableEntryIdMetaType) {
TypeTableEntry *type_entry = pointee->data.x_type;
if (type_entry->id == TypeTableEntryIdUnreachable) {
ir_add_error(ira, instruction, buf_sprintf("pointer to noreturn not allowed"));
return ira->codegen->invalid_instruction;
}
IrInstruction *const_instr = ir_get_const(ira, instruction);
ConstExprValue *const_val = &const_instr->value;
const_val->type = pointee_type;
type_ensure_zero_bits_known(ira->codegen, type_entry);
if (type_is_invalid(type_entry)) {
return ira->codegen->invalid_instruction;
}
const_val->data.x_type = get_pointer_to_type_extra(ira->codegen, type_entry,
ptr_is_const, ptr_is_volatile, get_abi_alignment(ira->codegen, type_entry), 0, 0);
return const_instr;
} else {
TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
ptr_is_const, ptr_is_volatile, ptr_align, 0, 0);
IrInstruction *const_instr = ir_get_const(ira, instruction);
ConstExprValue *const_val = &const_instr->value;
const_val->type = ptr_type;
const_val->data.x_ptr.special = ConstPtrSpecialRef;
const_val->data.x_ptr.mut = ptr_mut;
const_val->data.x_ptr.data.ref.pointee = pointee;
return const_instr;
}
TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type,
ptr_is_const, ptr_is_volatile, ptr_align, 0, 0);
IrInstruction *const_instr = ir_get_const(ira, instruction);
ConstExprValue *const_val = &const_instr->value;
const_val->type = ptr_type;
const_val->data.x_ptr.special = ConstPtrSpecialRef;
const_val->data.x_ptr.mut = ptr_mut;
const_val->data.x_ptr.data.ref.pointee = pointee;
return const_instr;
}
static TypeTableEntry *ir_analyze_const_ptr(IrAnalyze *ira, IrInstruction *instruction,
@ -9316,9 +9287,8 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
ConstExprValue *val = ir_resolve_const(ira, value, UndefOk);
if (!val)
return ira->codegen->invalid_instruction;
bool final_is_const = (value->value.type->id == TypeTableEntryIdMetaType) ? is_const : true;
return ir_get_const_ptr(ira, source_instruction, val, value->value.type,
ConstPtrMutComptimeConst, final_is_const, is_volatile,
ConstPtrMutComptimeConst, is_const, is_volatile,
get_abi_alignment(ira->codegen, value->value.type));
}
@ -9463,6 +9433,8 @@ static IrInstruction *ir_analyze_enum_to_union(IrAnalyze *ira, IrInstruction *so
TypeUnionField *union_field = find_union_field_by_tag(wanted_type, &val->data.x_enum_tag);
assert(union_field != nullptr);
type_ensure_zero_bits_known(ira->codegen, union_field->type_entry);
if (type_is_invalid(union_field->type_entry))
return ira->codegen->invalid_instruction;
if (!union_field->type_entry->zero_bits) {
AstNode *field_node = wanted_type->data.unionation.decl_node->data.container_decl.fields.at(
union_field->enum_field->decl_index);
@ -10045,6 +10017,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
if (actual_type->id == TypeTableEntryIdNumLitFloat ||
actual_type->id == TypeTableEntryIdNumLitInt)
{
ensure_complete_type(ira->codegen, wanted_type);
if (type_is_invalid(wanted_type))
return ira->codegen->invalid_instruction;
if (wanted_type->id == TypeTableEntryIdEnum) {
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.enumeration.tag_int_type, value);
if (type_is_invalid(cast1->value.type))
@ -10247,21 +10222,6 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
source_instruction->source_node, ptr);
load_ptr_instruction->value.type = child_type;
return load_ptr_instruction;
} else if (type_entry->id == TypeTableEntryIdMetaType) {
ConstExprValue *ptr_val = ir_resolve_const(ira, ptr, UndefBad);
if (!ptr_val)
return ira->codegen->invalid_instruction;
TypeTableEntry *ptr_type = ptr_val->data.x_type;
if (ptr_type->id == TypeTableEntryIdPointer) {
TypeTableEntry *child_type = ptr_type->data.pointer.child_type;
return ir_create_const_type(&ira->new_irb, source_instruction->scope,
source_instruction->source_node, child_type);
} else {
ir_add_error(ira, source_instruction,
buf_sprintf("attempt to dereference non pointer type '%s'", buf_ptr(&ptr_type->name)));
return ira->codegen->invalid_instruction;
}
} else {
ir_add_error_node(ira, source_instruction->source_node,
buf_sprintf("attempt to dereference non pointer type '%s'",
@ -11968,7 +11928,7 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i
{
VariableTableEntry *coro_allocator_var = ira->old_irb.exec->coro_allocator_var;
assert(coro_allocator_var != nullptr);
IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var, true, false);
IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var);
IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst);
assert(result->value.type != nullptr);
return result;
@ -12149,7 +12109,7 @@ static VariableTableEntry *get_fn_var_by_index(FnTableEntry *fn_entry, size_t in
}
static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr)
VariableTableEntry *var)
{
if (var->mem_slot_index != SIZE_MAX && var->owner_exec->analysis == nullptr) {
assert(ira->codegen->errors.length != 0);
@ -12175,8 +12135,8 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
}
}
bool is_const = (var->value->type->id == TypeTableEntryIdMetaType) ? is_const_ptr : var->src_is_const;
bool is_volatile = (var->value->type->id == TypeTableEntryIdMetaType) ? is_volatile_ptr : false;
bool is_const = var->src_is_const;
bool is_volatile = false;
if (mem_slot != nullptr) {
switch (mem_slot->special) {
case ConstValSpecialRuntime:
@ -12202,7 +12162,7 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
no_mem_slot:
IrInstruction *var_ptr_instruction = ir_build_var_ptr(&ira->new_irb,
instruction->scope, instruction->source_node, var, is_const, is_volatile);
instruction->scope, instruction->source_node, var);
var_ptr_instruction->value.type = get_pointer_to_type_extra(ira->codegen, var->value->type,
var->src_is_const, is_volatile, var->align_bytes, 0, 0);
type_ensure_zero_bits_known(ira->codegen, var->value->type);
@ -12488,7 +12448,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
buf_sprintf("compiler bug: var args can't handle void. https://github.com/ziglang/zig/issues/557"));
return ira->codegen->builtin_types.entry_invalid;
}
IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var, true, false);
IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var);
if (type_is_invalid(arg_var_ptr_inst->value.type))
return ira->codegen->builtin_types.entry_invalid;
@ -12811,6 +12771,10 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
TypeTableEntry *type_entry = ir_resolve_type(ira, value);
if (type_is_invalid(type_entry))
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, type_entry);
if (type_is_invalid(type_entry))
return ira->codegen->builtin_types.entry_invalid;
switch (type_entry->id) {
case TypeTableEntryIdInvalid:
zig_unreachable();
@ -13122,17 +13086,16 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP
}
static TypeTableEntry *ir_analyze_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr)
VariableTableEntry *var)
{
IrInstruction *result = ir_get_var_ptr(ira, instruction, var, is_const_ptr, is_volatile_ptr);
IrInstruction *result = ir_get_var_ptr(ira, instruction, var);
ir_link_new_instruction(result, instruction);
return result->value.type;
}
static TypeTableEntry *ir_analyze_instruction_var_ptr(IrAnalyze *ira, IrInstructionVarPtr *var_ptr_instruction) {
VariableTableEntry *var = var_ptr_instruction->var;
return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var, var_ptr_instruction->is_const,
var_ptr_instruction->is_volatile);
return ir_analyze_var_ptr(ira, &var_ptr_instruction->base, var);
}
static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align) {
@ -13154,11 +13117,6 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *ptr_type = array_ptr->value.type;
if (ptr_type->id == TypeTableEntryIdMetaType) {
ir_add_error(ira, &elem_ptr_instruction->base,
buf_sprintf("array access of non-array type '%s'", buf_ptr(&ptr_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *array_type = ptr_type->data.pointer.child_type;
@ -13220,8 +13178,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
bool is_const = true;
bool is_volatile = false;
if (var) {
return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var,
is_const, is_volatile);
return ir_analyze_var_ptr(ira, &elem_ptr_instruction->base, var);
} else {
return ir_analyze_const_ptr(ira, &elem_ptr_instruction->base, &ira->codegen->const_void_val,
ira->codegen->builtin_types.entry_void, ConstPtrMutComptimeConst, is_const, is_volatile);
@ -13239,6 +13196,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc
bool safety_check_on = elem_ptr_instruction->safety_check_on;
ensure_complete_type(ira->codegen, return_type->data.pointer.child_type);
if (type_is_invalid(return_type->data.pointer.child_type))
return ira->codegen->builtin_types.entry_invalid;
uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type);
uint64_t ptr_align = return_type->data.pointer.alignment;
@ -13605,7 +13565,7 @@ static TypeTableEntry *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source
add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, source_instruction->source_node);
}
return ir_analyze_var_ptr(ira, source_instruction, var, false, false);
return ir_analyze_var_ptr(ira, source_instruction, var);
}
case TldIdFn:
{
@ -13654,14 +13614,8 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
if (type_is_invalid(container_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *container_type;
if (container_ptr->value.type->id == TypeTableEntryIdPointer) {
container_type = container_ptr->value.type->data.pointer.child_type;
} else if (container_ptr->value.type->id == TypeTableEntryIdMetaType) {
container_type = container_ptr->value.type;
} else {
zig_unreachable();
}
TypeTableEntry *container_type = container_ptr->value.type->data.pointer.child_type;
assert(container_ptr->value.type->id == TypeTableEntryIdPointer);
Buf *field_name = field_ptr_instruction->field_name_buffer;
if (!field_name) {
@ -13734,17 +13688,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
if (!container_ptr_val)
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *child_type;
if (container_ptr->value.type->id == TypeTableEntryIdMetaType) {
TypeTableEntry *ptr_type = container_ptr_val->data.x_type;
assert(ptr_type->id == TypeTableEntryIdPointer);
child_type = ptr_type->data.pointer.child_type;
} else if (container_ptr->value.type->id == TypeTableEntryIdPointer) {
ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val);
child_type = child_val->data.x_type;
} else {
zig_unreachable();
}
assert(container_ptr->value.type->id == TypeTableEntryIdPointer);
ConstExprValue *child_val = const_ptr_pointee(ira->codegen, container_ptr_val);
TypeTableEntry *child_type = child_val->data.x_type;
if (type_is_invalid(child_type)) {
return ira->codegen->builtin_types.entry_invalid;
@ -13762,7 +13708,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
}
if (child_type->id == TypeTableEntryIdEnum) {
ensure_complete_type(ira->codegen, child_type);
if (child_type->data.enumeration.is_invalid)
if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
TypeEnumField *field = find_enum_type_field(child_type, field_name);
@ -14635,27 +14581,27 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *ptr_type = value->value.type;
if (ptr_type->id == TypeTableEntryIdMetaType) {
// surprise! actually this is just ??T not an unwrap maybe instruction
TypeTableEntry *ptr_type_ptr = ir_resolve_type(ira, value);
assert(ptr_type_ptr->id == TypeTableEntryIdPointer);
TypeTableEntry *child_type = ptr_type_ptr->data.pointer.child_type;
type_ensure_zero_bits_known(ira->codegen, child_type);
TypeTableEntry *layer1 = get_maybe_type(ira->codegen, child_type);
TypeTableEntry *layer2 = get_maybe_type(ira->codegen, layer1);
TypeTableEntry *result_type = get_pointer_to_type(ira->codegen, layer2, true);
IrInstruction *const_instr = ir_build_const_type(&ira->new_irb, unwrap_maybe_instruction->base.scope,
unwrap_maybe_instruction->base.source_node, result_type);
ir_link_new_instruction(const_instr, &unwrap_maybe_instruction->base);
return const_instr->value.type;
}
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *type_entry = ptr_type->data.pointer.child_type;
if (type_is_invalid(type_entry)) {
return ira->codegen->builtin_types.entry_invalid;
} else if (type_entry->id == TypeTableEntryIdMetaType) {
// surprise! actually this is just ??T not an unwrap maybe instruction
ConstExprValue *ptr_val = const_ptr_pointee(ira->codegen, &value->value);
assert(ptr_val->type->id == TypeTableEntryIdMetaType);
TypeTableEntry *child_type = ptr_val->data.x_type;
type_ensure_zero_bits_known(ira->codegen, child_type);
TypeTableEntry *layer1 = get_maybe_type(ira->codegen, child_type);
TypeTableEntry *layer2 = get_maybe_type(ira->codegen, layer1);
IrInstruction *const_instr = ir_build_const_type(&ira->new_irb, unwrap_maybe_instruction->base.scope,
unwrap_maybe_instruction->base.source_node, layer2);
IrInstruction *result_instr = ir_get_ref(ira, &unwrap_maybe_instruction->base, const_instr,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile);
ir_link_new_instruction(result_instr, &unwrap_maybe_instruction->base);
return result_instr->value.type;
} else if (type_entry->id != TypeTableEntryIdMaybe) {
ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node,
buf_sprintf("expected nullable type, found '%s'", buf_ptr(&type_entry->name)));
@ -15181,6 +15127,8 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir
assert(container_type->id == TypeTableEntryIdUnion);
ensure_complete_type(ira->codegen, container_type);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
if (instr_field_count != 1) {
ir_add_error(ira, instruction,
@ -15248,6 +15196,8 @@ static TypeTableEntry *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstru
}
ensure_complete_type(ira->codegen, container_type);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
size_t actual_field_count = container_type->data.structure.src_field_count;
@ -15753,6 +15703,8 @@ static TypeTableEntry *ir_analyze_instruction_offset_of(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, container_type);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
IrInstruction *field_name_value = instruction->field_name->other;
Buf *field_name = ir_resolve_str(ira, field_name_value);
@ -15806,6 +15758,9 @@ static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_na
assert(type_info_var->type->id == TypeTableEntryIdMetaType);
ensure_complete_type(ira->codegen, type_info_var->data.x_type);
if (type_is_invalid(type_info_var->data.x_type))
return ira->codegen->builtin_types.entry_invalid;
type_info_type = type_info_var->data.x_type;
assert(type_info_type->id == TypeTableEntryIdUnion);
}
@ -15831,26 +15786,37 @@ static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_na
VariableTableEntry *var = tld->var;
ensure_complete_type(ira->codegen, var->value->type);
if (type_is_invalid(var->value->type))
return ira->codegen->builtin_types.entry_invalid;
assert(var->value->type->id == TypeTableEntryIdMetaType);
return var->value->data.x_type;
}
static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, ScopeDecls *decls_scope)
static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, ScopeDecls *decls_scope)
{
TypeTableEntry *type_info_definition_type = ir_type_info_get_type(ira, "Definition");
ensure_complete_type(ira->codegen, type_info_definition_type);
if (type_is_invalid(type_info_definition_type))
return false;
ensure_field_index(type_info_definition_type, "name", 0);
ensure_field_index(type_info_definition_type, "is_pub", 1);
ensure_field_index(type_info_definition_type, "data", 2);
TypeTableEntry *type_info_definition_data_type = ir_type_info_get_type(ira, "Data", type_info_definition_type);
ensure_complete_type(ira->codegen, type_info_definition_data_type);
if (type_is_invalid(type_info_definition_data_type))
return false;
TypeTableEntry *type_info_fn_def_type = ir_type_info_get_type(ira, "FnDef", type_info_definition_data_type);
ensure_complete_type(ira->codegen, type_info_fn_def_type);
if (type_is_invalid(type_info_fn_def_type))
return false;
TypeTableEntry *type_info_fn_def_inline_type = ir_type_info_get_type(ira, "Inline", type_info_fn_def_type);
ensure_complete_type(ira->codegen, type_info_fn_def_inline_type);
if (type_is_invalid(type_info_fn_def_inline_type))
return false;
// Loop through our definitions once to figure out how many definitions we will generate info for.
auto decl_it = decls_scope->decl_table.entry_iterator();
@ -15865,7 +15831,7 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
resolve_top_level_decl(ira->codegen, curr_entry->value, false, curr_entry->value->source_node);
if (curr_entry->value->resolution != TldResolutionOk)
{
return;
return false;
}
}
@ -15930,6 +15896,9 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
{
VariableTableEntry *var = ((TldVar *)curr_entry->value)->var;
ensure_complete_type(ira->codegen, var->value->type);
if (type_is_invalid(var->value->type))
return false;
if (var->value->type->id == TypeTableEntryIdMetaType)
{
// We have a variable of type 'type', so it's actually a type definition.
@ -16057,6 +16026,9 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
{
TypeTableEntry *type_entry = ((TldContainer *)curr_entry->value)->type_entry;
ensure_complete_type(ira->codegen, type_entry);
if (type_is_invalid(type_entry))
return false;
// This is a type.
bigint_init_unsigned(&inner_fields[2].data.x_union.tag, 0);
@ -16077,6 +16049,7 @@ static void ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
}
assert(definition_index == definition_count);
return true;
}
static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry)
@ -16085,6 +16058,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
assert(!type_is_invalid(type_entry));
ensure_complete_type(ira->codegen, type_entry);
if (type_is_invalid(type_entry))
return nullptr;
const auto make_enum_field_val = [ira](ConstExprValue *enum_field_val, TypeEnumField *enum_field,
TypeTableEntry *type_info_enum_field_type) {
@ -16312,7 +16287,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
// defs: []TypeInfo.Definition
ensure_field_index(result->type, "defs", 3);
ir_make_type_info_defs(ira, &fields[3], type_entry->data.enumeration.decls_scope);
if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.enumeration.decls_scope))
return nullptr;
break;
}
@ -16467,7 +16443,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
// defs: []TypeInfo.Definition
ensure_field_index(result->type, "defs", 3);
ir_make_type_info_defs(ira, &fields[3], type_entry->data.unionation.decls_scope);
if (!ir_make_type_info_defs(ira, &fields[3], type_entry->data.unionation.decls_scope))
return nullptr;
break;
}
@ -16478,6 +16455,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
buf_init_from_str(&ptr_field_name, "ptr");
TypeTableEntry *ptr_type = type_entry->data.structure.fields_by_name.get(&ptr_field_name)->type_entry;
ensure_complete_type(ira->codegen, ptr_type);
if (type_is_invalid(ptr_type))
return nullptr;
buf_deinit(&ptr_field_name);
result = create_ptr_like_type_info("Slice", ptr_type);
@ -16548,7 +16527,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
// defs: []TypeInfo.Definition
ensure_field_index(result->type, "defs", 2);
ir_make_type_info_defs(ira, &fields[2], type_entry->data.structure.decls_scope);
if (!ir_make_type_info_defs(ira, &fields[2], type_entry->data.structure.decls_scope))
return nullptr;
break;
}
@ -17339,8 +17319,11 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio
if (array_type->data.array.len == 0 && byte_alignment == 0) {
byte_alignment = get_abi_alignment(ira->codegen, array_type->data.array.child_type);
}
bool is_comptime_const = ptr_ptr->value.special == ConstValSpecialStatic &&
ptr_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst;
TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.array.child_type,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile,
ptr_type->data.pointer.is_const || is_comptime_const,
ptr_type->data.pointer.is_volatile,
byte_alignment, 0, 0);
return_type = get_slice_type(ira->codegen, slice_ptr_type);
} else if (array_type->id == TypeTableEntryIdPointer) {
@ -17527,6 +17510,10 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *container_type = ir_resolve_type(ira, container);
ensure_complete_type(ira->codegen, container_type);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
uint64_t result;
if (type_is_invalid(container_type)) {
return ira->codegen->builtin_types.entry_invalid;
@ -17561,6 +17548,11 @@ static TypeTableEntry *ir_analyze_instruction_member_type(IrAnalyze *ira, IrInst
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, container_type);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
uint64_t member_index;
IrInstruction *index_value = instruction->member_index->other;
if (!ir_resolve_usize(ira, index_value, &member_index))
@ -17603,6 +17595,10 @@ static TypeTableEntry *ir_analyze_instruction_member_name(IrAnalyze *ira, IrInst
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, container_type);
if (type_is_invalid(container_type))
return ira->codegen->builtin_types.entry_invalid;
uint64_t member_index;
IrInstruction *index_value = instruction->member_index->other;
if (!ir_resolve_usize(ira, index_value, &member_index))
@ -17914,15 +17910,6 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira,
return ira->codegen->builtin_types.entry_invalid;
TypeTableEntry *ptr_type = value->value.type;
// Because we don't have Pointer Reform yet, we can't have a pointer to a 'type'.
// Therefor, we have to check for type 'type' here, so we can output a correct error
// without asserting the assert below.
if (ptr_type->id == TypeTableEntryIdMetaType) {
ir_add_error(ira, value,
buf_sprintf("expected error union type, found '%s'", buf_ptr(&ptr_type->name)));
return ira->codegen->builtin_types.entry_invalid;
}
// This will be a pointer type because unwrap err payload IR instruction operates on a pointer to a thing.
assert(ptr_type->id == TypeTableEntryIdPointer);
@ -18553,7 +18540,12 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, dest_type);
if (type_is_invalid(dest_type))
return ira->codegen->builtin_types.entry_invalid;
ensure_complete_type(ira->codegen, src_type);
if (type_is_invalid(src_type))
return ira->codegen->builtin_types.entry_invalid;
if (get_codegen_ptr_type(src_type) != nullptr) {
ir_add_error(ira, value,
@ -18699,8 +18691,7 @@ static TypeTableEntry *ir_analyze_instruction_decl_ref(IrAnalyze *ira,
TldVar *tld_var = (TldVar *)tld;
VariableTableEntry *var = tld_var->var;
IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var,
!lval.is_ptr || lval.is_const, lval.is_ptr && lval.is_volatile);
IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var);
if (type_is_invalid(var_ptr->value.type))
return ira->codegen->builtin_types.entry_invalid;
@ -18778,16 +18769,24 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
return usize;
}
static TypeTableEntry *ir_analyze_instruction_ptr_type_of(IrAnalyze *ira, IrInstructionPtrTypeOf *instruction) {
static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstructionPtrType *instruction) {
TypeTableEntry *child_type = ir_resolve_type(ira, instruction->child_type->other);
if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
if (child_type->id == TypeTableEntryIdUnreachable) {
ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
return ira->codegen->builtin_types.entry_invalid;
}
uint32_t align_bytes;
if (instruction->align_value != nullptr) {
if (!ir_resolve_align(ira, instruction->align_value->other, &align_bytes))
return ira->codegen->builtin_types.entry_invalid;
} else {
type_ensure_zero_bits_known(ira->codegen, child_type);
if (type_is_invalid(child_type))
return ira->codegen->builtin_types.entry_invalid;
align_bytes = get_abi_alignment(ira->codegen, child_type);
}
@ -19606,8 +19605,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_type_id(ira, (IrInstructionTypeId *)instruction);
case IrInstructionIdSetEvalBranchQuota:
return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstructionSetEvalBranchQuota *)instruction);
case IrInstructionIdPtrTypeOf:
return ir_analyze_instruction_ptr_type_of(ira, (IrInstructionPtrTypeOf *)instruction);
case IrInstructionIdPtrType:
return ir_analyze_instruction_ptr_type(ira, (IrInstructionPtrType *)instruction);
case IrInstructionIdAlignCast:
return ir_analyze_instruction_align_cast(ira, (IrInstructionAlignCast *)instruction);
case IrInstructionIdOpaqueType:
@ -19783,7 +19782,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdCheckStatementIsVoid:
case IrInstructionIdPanic:
case IrInstructionIdSetEvalBranchQuota:
case IrInstructionIdPtrTypeOf:
case IrInstructionIdPtrType:
case IrInstructionIdSetAlignStack:
case IrInstructionIdExport:
case IrInstructionIdCancel:

View File

@ -921,7 +921,7 @@ static void ir_print_can_implicit_cast(IrPrint *irp, IrInstructionCanImplicitCas
fprintf(irp->f, ")");
}
static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instruction) {
static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) {
fprintf(irp->f, "&");
if (instruction->align_value != nullptr) {
fprintf(irp->f, "align(");
@ -1527,8 +1527,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdCanImplicitCast:
ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction);
break;
case IrInstructionIdPtrTypeOf:
ir_print_ptr_type_of(irp, (IrInstructionPtrTypeOf *)instruction);
case IrInstructionIdPtrType:
ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction);
break;
case IrInstructionIdDeclRef:
ir_print_decl_ref(irp, (IrInstructionDeclRef *)instruction);

View File

@ -1167,20 +1167,19 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdTilde: return PrefixOpBinNot;
case TokenIdMaybe: return PrefixOpMaybe;
case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe;
case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
}
static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
Token *ampersand_tok = ast_eat_token(pc, token_index, TokenIdAmpersand);
AstNode *node = ast_create_node(pc, NodeTypeAddrOfExpr, ampersand_tok);
static AstNode *ast_parse_pointer_type(ParseContext *pc, size_t *token_index, Token *star_tok) {
AstNode *node = ast_create_node(pc, NodeTypePointerType, star_tok);
Token *token = &pc->tokens->at(*token_index);
if (token->id == TokenIdKeywordAlign) {
*token_index += 1;
ast_eat_token(pc, token_index, TokenIdLParen);
node->data.addr_of_expr.align_expr = ast_parse_expression(pc, token_index, true);
node->data.pointer_type.align_expr = ast_parse_expression(pc, token_index, true);
token = &pc->tokens->at(*token_index);
if (token->id == TokenIdColon) {
@ -1189,24 +1188,24 @@ static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) {
ast_eat_token(pc, token_index, TokenIdColon);
Token *bit_offset_end_tok = ast_eat_token(pc, token_index, TokenIdIntLiteral);
node->data.addr_of_expr.bit_offset_start = token_bigint(bit_offset_start_tok);
node->data.addr_of_expr.bit_offset_end = token_bigint(bit_offset_end_tok);
node->data.pointer_type.bit_offset_start = token_bigint(bit_offset_start_tok);
node->data.pointer_type.bit_offset_end = token_bigint(bit_offset_end_tok);
}
ast_eat_token(pc, token_index, TokenIdRParen);
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordConst) {
*token_index += 1;
node->data.addr_of_expr.is_const = true;
node->data.pointer_type.is_const = true;
token = &pc->tokens->at(*token_index);
}
if (token->id == TokenIdKeywordVolatile) {
*token_index += 1;
node->data.addr_of_expr.is_volatile = true;
node->data.pointer_type.is_volatile = true;
}
node->data.addr_of_expr.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
node->data.pointer_type.op_expr = ast_parse_prefix_op_expr(pc, token_index, true);
return node;
}
@ -1216,8 +1215,17 @@ PrefixOp = "!" | "-" | "~" | ("*" option("align" "(" Expression option(":" Integ
*/
static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
Token *token = &pc->tokens->at(*token_index);
if (token->id == TokenIdAmpersand) {
return ast_parse_addr_of(pc, token_index);
if (token->id == TokenIdStar) {
*token_index += 1;
return ast_parse_pointer_type(pc, token_index, token);
}
if (token->id == TokenIdStarStar) {
*token_index += 1;
AstNode *child_node = ast_parse_pointer_type(pc, token_index, token);
child_node->column += 1;
AstNode *parent_node = ast_create_node(pc, NodeTypePointerType, token);
parent_node->data.pointer_type.op_expr = child_node;
return parent_node;
}
if (token->id == TokenIdKeywordTry) {
return ast_parse_try_expr(pc, token_index);
@ -1234,13 +1242,12 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index,
AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, token);
AstNode *parent_node = node;
AstNode *prefix_op_expr = ast_parse_error_set_expr(pc, token_index, true);
node->data.prefix_op_expr.primary_expr = prefix_op_expr;
node->data.prefix_op_expr.prefix_op = prefix_op;
return parent_node;
return node;
}
@ -3121,9 +3128,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypeErrorType:
// none
break;
case NodeTypeAddrOfExpr:
visit_field(&node->data.addr_of_expr.align_expr, visit, context);
visit_field(&node->data.addr_of_expr.op_expr, visit, context);
case NodeTypePointerType:
visit_field(&node->data.pointer_type.align_expr, visit, context);
visit_field(&node->data.pointer_type.op_expr, visit, context);
break;
case NodeTypeErrorSetDecl:
visit_node_list(&node->data.err_set_decl.decls, visit, context);

View File

@ -276,11 +276,18 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod
node);
}
static AstNode *trans_create_node_addr_of(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
AstNode *node = trans_create_node(c, NodeTypeAddrOfExpr);
node->data.addr_of_expr.is_const = is_const;
node->data.addr_of_expr.is_volatile = is_volatile;
node->data.addr_of_expr.op_expr = child_node;
static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
AstNode *node = trans_create_node(c, NodeTypePointerType);
node->data.pointer_type.is_const = is_const;
node->data.pointer_type.is_volatile = is_volatile;
node->data.pointer_type.op_expr = child_node;
return node;
}
static AstNode *trans_create_node_addr_of(Context *c, AstNode *child_node) {
AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr);
node->data.prefix_op_expr.prefix_op = PrefixOpAddrOf;
node->data.prefix_op_expr.primary_expr = child_node;
return node;
}
@ -848,7 +855,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node);
}
AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
child_qt.isVolatileQualified(), child_node);
return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node);
}
@ -1033,7 +1040,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
emit_warning(c, source_loc, "unresolved array element type");
return nullptr;
}
AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(),
AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
child_qt.isVolatileQualified(), child_type_node);
return pointer_node;
}
@ -1402,7 +1409,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@ -1476,7 +1483,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used,
// const _ref = &lhs;
AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue);
if (lhs == nullptr) return nullptr;
AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs);
AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs);
// TODO: avoid name collisions with generated variable names
Buf* tmp_var_name = buf_create_from_str("_ref");
AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs);
@ -1813,7 +1820,7 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@ -1868,7 +1875,7 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra
// const _ref = &expr;
AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue);
if (expr == nullptr) return nullptr;
AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr);
AstNode *addr_of_expr = trans_create_node_addr_of(c, expr);
// TODO: avoid name collisions with generated variable names
Buf* ref_var_name = buf_create_from_str("_ref");
AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr);
@ -1917,7 +1924,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
AstNode *value_node = trans_expr(c, result_used, scope, stmt->getSubExpr(), TransLValue);
if (value_node == nullptr)
return value_node;
return trans_create_node_addr_of(c, false, false, value_node);
return trans_create_node_addr_of(c, value_node);
}
case UO_Deref:
{
@ -4441,7 +4448,7 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t
} else if (first_tok->id == CTokIdAsterisk) {
*tok_i += 1;
node = trans_create_node_addr_of(c, false, false, node);
node = trans_create_node_ptr_type(c, false, false, node);
} else {
return node;
}

View File

@ -17,10 +17,10 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
/// you uninitialized memory.
items: []align(A) T,
len: usize,
allocator: &Allocator,
allocator: *Allocator,
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn init(allocator: &Allocator) Self {
pub fn init(allocator: *Allocator) Self {
return Self{
.items = []align(A) T{},
.len = 0,
@ -28,30 +28,30 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
};
}
pub fn deinit(l: &const Self) void {
pub fn deinit(l: *const Self) void {
l.allocator.free(l.items);
}
pub fn toSlice(l: &const Self) []align(A) T {
pub fn toSlice(l: *const Self) []align(A) T {
return l.items[0..l.len];
}
pub fn toSliceConst(l: &const Self) []align(A) const T {
pub fn toSliceConst(l: *const Self) []align(A) const T {
return l.items[0..l.len];
}
pub fn at(l: &const Self, n: usize) T {
pub fn at(l: *const Self, n: usize) T {
return l.toSliceConst()[n];
}
pub fn count(self: &const Self) usize {
pub fn count(self: *const Self) usize {
return self.len;
}
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSlice(allocator: &Allocator, slice: []align(A) T) Self {
pub fn fromOwnedSlice(allocator: *Allocator, slice: []align(A) T) Self {
return Self{
.items = slice,
.len = slice.len,
@ -60,14 +60,14 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
}
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSlice(self: &Self) []align(A) T {
pub fn toOwnedSlice(self: *Self) []align(A) T {
const allocator = self.allocator;
const result = allocator.alignedShrink(T, A, self.items, self.len);
self.* = init(allocator);
return result;
}
pub fn insert(l: &Self, n: usize, item: &const T) !void {
pub fn insert(l: *Self, n: usize, item: *const T) !void {
try l.ensureCapacity(l.len + 1);
l.len += 1;
@ -75,7 +75,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
l.items[n] = item.*;
}
pub fn insertSlice(l: &Self, n: usize, items: []align(A) const T) !void {
pub fn insertSlice(l: *Self, n: usize, items: []align(A) const T) !void {
try l.ensureCapacity(l.len + items.len);
l.len += items.len;
@ -83,28 +83,28 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
mem.copy(T, l.items[n .. n + items.len], items);
}
pub fn append(l: &Self, item: &const T) !void {
pub fn append(l: *Self, item: *const T) !void {
const new_item_ptr = try l.addOne();
new_item_ptr.* = item.*;
}
pub fn appendSlice(l: &Self, items: []align(A) const T) !void {
pub fn appendSlice(l: *Self, items: []align(A) const T) !void {
try l.ensureCapacity(l.len + items.len);
mem.copy(T, l.items[l.len..], items);
l.len += items.len;
}
pub fn resize(l: &Self, new_len: usize) !void {
pub fn resize(l: *Self, new_len: usize) !void {
try l.ensureCapacity(new_len);
l.len = new_len;
}
pub fn shrink(l: &Self, new_len: usize) void {
pub fn shrink(l: *Self, new_len: usize) void {
assert(new_len <= l.len);
l.len = new_len;
}
pub fn ensureCapacity(l: &Self, new_capacity: usize) !void {
pub fn ensureCapacity(l: *Self, new_capacity: usize) !void {
var better_capacity = l.items.len;
if (better_capacity >= new_capacity) return;
while (true) {
@ -114,7 +114,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
l.items = try l.allocator.alignedRealloc(T, A, l.items, better_capacity);
}
pub fn addOne(l: &Self) !&T {
pub fn addOne(l: *Self) !*T {
const new_length = l.len + 1;
try l.ensureCapacity(new_length);
const result = &l.items[l.len];
@ -122,34 +122,34 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return result;
}
pub fn pop(self: &Self) T {
pub fn pop(self: *Self) T {
self.len -= 1;
return self.items[self.len];
}
pub fn popOrNull(self: &Self) ?T {
pub fn popOrNull(self: *Self) ?T {
if (self.len == 0) return null;
return self.pop();
}
pub const Iterator = struct {
list: &const Self,
list: *const Self,
// how many items have we returned
count: usize,
pub fn next(it: &Iterator) ?T {
pub fn next(it: *Iterator) ?T {
if (it.count >= it.list.len) return null;
const val = it.list.at(it.count);
it.count += 1;
return val;
}
pub fn reset(it: &Iterator) void {
pub fn reset(it: *Iterator) void {
it.count = 0;
}
};
pub fn iterator(self: &const Self) Iterator {
pub fn iterator(self: *const Self) Iterator {
return Iterator{
.list = self,
.count = 0,

View File

@ -5,36 +5,36 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
pub fn Queue(comptime T: type) type {
return struct {
head: &Node,
tail: &Node,
head: *Node,
tail: *Node,
root: Node,
pub const Self = this;
pub const Node = struct {
next: ?&Node,
next: ?*Node,
data: T,
};
// TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
pub fn init(self: &Self) void {
pub fn init(self: *Self) void {
self.root.next = null;
self.head = &self.root;
self.tail = &self.root;
}
pub fn put(self: &Self, node: &Node) void {
pub fn put(self: *Self, node: *Node) void {
node.next = null;
const tail = @atomicRmw(&Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
_ = @atomicRmw(?&Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
_ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst);
}
pub fn get(self: &Self) ?&Node {
var head = @atomicLoad(&Node, &self.head, AtomicOrder.SeqCst);
pub fn get(self: *Self) ?*Node {
var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
while (true) {
const node = head.next ?? return null;
head = @cmpxchgWeak(&Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
}
}
};
@ -42,8 +42,8 @@ pub fn Queue(comptime T: type) type {
const std = @import("std");
const Context = struct {
allocator: &std.mem.Allocator,
queue: &Queue(i32),
allocator: *std.mem.Allocator,
queue: *Queue(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@ -79,11 +79,11 @@ test "std.atomic.queue" {
.get_count = 0,
};
var putters: [put_thread_count]&std.os.Thread = undefined;
var putters: [put_thread_count]*std.os.Thread = undefined;
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
var getters: [put_thread_count]&std.os.Thread = undefined;
var getters: [put_thread_count]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@ -98,7 +98,7 @@ test "std.atomic.queue" {
std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
}
fn startPuts(ctx: &Context) u8 {
fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
@ -112,7 +112,7 @@ fn startPuts(ctx: &Context) u8 {
return 0;
}
fn startGets(ctx: &Context) u8 {
fn startGets(ctx: *Context) u8 {
while (true) {
while (ctx.queue.get()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz

View File

@ -4,12 +4,12 @@ const AtomicOrder = builtin.AtomicOrder;
/// Many reader, many writer, non-allocating, thread-safe, lock-free
pub fn Stack(comptime T: type) type {
return struct {
root: ?&Node,
root: ?*Node,
pub const Self = this;
pub const Node = struct {
next: ?&Node,
next: ?*Node,
data: T,
};
@ -19,36 +19,36 @@ pub fn Stack(comptime T: type) type {
/// push operation, but only if you are the first item in the stack. if you did not succeed in
/// being the first item in the stack, returns the other item that was there.
pub fn pushFirst(self: &Self, node: &Node) ?&Node {
pub fn pushFirst(self: *Self, node: *Node) ?*Node {
node.next = null;
return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
}
pub fn push(self: &Self, node: &Node) void {
var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
pub fn push(self: *Self, node: *Node) void {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
node.next = root;
root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
}
}
pub fn pop(self: &Self) ?&Node {
var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
pub fn pop(self: *Self) ?*Node {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
}
}
pub fn isEmpty(self: &Self) bool {
return @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst) == null;
pub fn isEmpty(self: *Self) bool {
return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null;
}
};
}
const std = @import("std");
const Context = struct {
allocator: &std.mem.Allocator,
stack: &Stack(i32),
allocator: *std.mem.Allocator,
stack: *Stack(i32),
put_sum: isize,
get_sum: isize,
get_count: usize,
@ -82,11 +82,11 @@ test "std.atomic.stack" {
.get_count = 0,
};
var putters: [put_thread_count]&std.os.Thread = undefined;
var putters: [put_thread_count]*std.os.Thread = undefined;
for (putters) |*t| {
t.* = try std.os.spawnThread(&context, startPuts);
}
var getters: [put_thread_count]&std.os.Thread = undefined;
var getters: [put_thread_count]*std.os.Thread = undefined;
for (getters) |*t| {
t.* = try std.os.spawnThread(&context, startGets);
}
@ -101,7 +101,7 @@ test "std.atomic.stack" {
std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
}
fn startPuts(ctx: &Context) u8 {
fn startPuts(ctx: *Context) u8 {
var put_count: usize = puts_per_thread;
var r = std.rand.DefaultPrng.init(0xdeadbeef);
while (put_count != 0) : (put_count -= 1) {
@ -115,7 +115,7 @@ fn startPuts(ctx: &Context) u8 {
return 0;
}
fn startGets(ctx: &Context) u8 {
fn startGets(ctx: *Context) u8 {
while (true) {
while (ctx.stack.pop()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz

View File

@ -32,7 +32,7 @@ pub const Base64Encoder = struct {
}
/// dest.len must be what you get from ::calcSize.
pub fn encode(encoder: &const Base64Encoder, dest: []u8, source: []const u8) void {
pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) void {
assert(dest.len == Base64Encoder.calcSize(source.len));
var i: usize = 0;
@ -107,7 +107,7 @@ pub const Base64Decoder = struct {
}
/// If the encoded buffer is detected to be invalid, returns error.InvalidPadding.
pub fn calcSize(decoder: &const Base64Decoder, source: []const u8) !usize {
pub fn calcSize(decoder: *const Base64Decoder, source: []const u8) !usize {
if (source.len % 4 != 0) return error.InvalidPadding;
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
@ -115,7 +115,7 @@ pub const Base64Decoder = struct {
/// dest.len must be what you get from ::calcSize.
/// invalid characters result in error.InvalidCharacter.
/// invalid padding results in error.InvalidPadding.
pub fn decode(decoder: &const Base64Decoder, dest: []u8, source: []const u8) !void {
pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) !void {
assert(dest.len == (decoder.calcSize(source) catch unreachable));
assert(source.len % 4 == 0);
@ -181,7 +181,7 @@ pub const Base64DecoderWithIgnore = struct {
/// Invalid padding results in error.InvalidPadding.
/// Decoding more data than can fit in dest results in error.OutputTooSmall. See also ::calcSizeUpperBound.
/// Returns the number of bytes writen to dest.
pub fn decode(decoder_with_ignore: &const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize {
const decoder = &decoder_with_ignore.decoder;
var src_cursor: usize = 0;
@ -290,13 +290,13 @@ pub const Base64DecoderUnsafe = struct {
}
/// The source buffer must be valid.
pub fn calcSize(decoder: &const Base64DecoderUnsafe, source: []const u8) usize {
pub fn calcSize(decoder: *const Base64DecoderUnsafe, source: []const u8) usize {
return calcDecodedSizeExactUnsafe(source, decoder.pad_char);
}
/// dest.len must be what you get from ::calcDecodedSizeExactUnsafe.
/// invalid characters or padding will result in undefined values.
pub fn decode(decoder: &const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
pub fn decode(decoder: *const Base64DecoderUnsafe, dest: []u8, source: []const u8) void {
assert(dest.len == decoder.calcSize(source));
var src_index: usize = 0;

View File

@ -11,12 +11,12 @@ pub const BufMap = struct {
const BufMapHashMap = HashMap([]const u8, []const u8, mem.hash_slice_u8, mem.eql_slice_u8);
pub fn init(allocator: &Allocator) BufMap {
pub fn init(allocator: *Allocator) BufMap {
var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) };
return self;
}
pub fn deinit(self: &const BufMap) void {
pub fn deinit(self: *const BufMap) void {
var it = self.hash_map.iterator();
while (true) {
const entry = it.next() ?? break;
@ -27,7 +27,7 @@ pub const BufMap = struct {
self.hash_map.deinit();
}
pub fn set(self: &BufMap, key: []const u8, value: []const u8) !void {
pub fn set(self: *BufMap, key: []const u8, value: []const u8) !void {
self.delete(key);
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@ -36,30 +36,30 @@ pub const BufMap = struct {
_ = try self.hash_map.put(key_copy, value_copy);
}
pub fn get(self: &const BufMap, key: []const u8) ?[]const u8 {
pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 {
const entry = self.hash_map.get(key) ?? return null;
return entry.value;
}
pub fn delete(self: &BufMap, key: []const u8) void {
pub fn delete(self: *BufMap, key: []const u8) void {
const entry = self.hash_map.remove(key) ?? return;
self.free(entry.key);
self.free(entry.value);
}
pub fn count(self: &const BufMap) usize {
pub fn count(self: *const BufMap) usize {
return self.hash_map.count();
}
pub fn iterator(self: &const BufMap) BufMapHashMap.Iterator {
pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator {
return self.hash_map.iterator();
}
fn free(self: &const BufMap, value: []const u8) void {
fn free(self: *const BufMap, value: []const u8) void {
self.hash_map.allocator.free(value);
}
fn copy(self: &const BufMap, value: []const u8) ![]const u8 {
fn copy(self: *const BufMap, value: []const u8) ![]const u8 {
return mem.dupe(self.hash_map.allocator, u8, value);
}
};

View File

@ -9,12 +9,12 @@ pub const BufSet = struct {
const BufSetHashMap = HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8);
pub fn init(a: &Allocator) BufSet {
pub fn init(a: *Allocator) BufSet {
var self = BufSet{ .hash_map = BufSetHashMap.init(a) };
return self;
}
pub fn deinit(self: &const BufSet) void {
pub fn deinit(self: *const BufSet) void {
var it = self.hash_map.iterator();
while (true) {
const entry = it.next() ?? break;
@ -24,7 +24,7 @@ pub const BufSet = struct {
self.hash_map.deinit();
}
pub fn put(self: &BufSet, key: []const u8) !void {
pub fn put(self: *BufSet, key: []const u8) !void {
if (self.hash_map.get(key) == null) {
const key_copy = try self.copy(key);
errdefer self.free(key_copy);
@ -32,28 +32,28 @@ pub const BufSet = struct {
}
}
pub fn delete(self: &BufSet, key: []const u8) void {
pub fn delete(self: *BufSet, key: []const u8) void {
const entry = self.hash_map.remove(key) ?? return;
self.free(entry.key);
}
pub fn count(self: &const BufSet) usize {
pub fn count(self: *const BufSet) usize {
return self.hash_map.count();
}
pub fn iterator(self: &const BufSet) BufSetHashMap.Iterator {
pub fn iterator(self: *const BufSet) BufSetHashMap.Iterator {
return self.hash_map.iterator();
}
pub fn allocator(self: &const BufSet) &Allocator {
pub fn allocator(self: *const BufSet) *Allocator {
return self.hash_map.allocator;
}
fn free(self: &const BufSet, value: []const u8) void {
fn free(self: *const BufSet, value: []const u8) void {
self.hash_map.allocator.free(value);
}
fn copy(self: &const BufSet, value: []const u8) ![]const u8 {
fn copy(self: *const BufSet, value: []const u8) ![]const u8 {
const result = try self.hash_map.allocator.alloc(u8, value.len);
mem.copy(u8, result, value);
return result;

View File

@ -12,14 +12,14 @@ pub const Buffer = struct {
list: ArrayList(u8),
/// Must deinitialize with deinit.
pub fn init(allocator: &Allocator, m: []const u8) !Buffer {
pub fn init(allocator: *Allocator, m: []const u8) !Buffer {
var self = try initSize(allocator, m.len);
mem.copy(u8, self.list.items, m);
return self;
}
/// Must deinitialize with deinit.
pub fn initSize(allocator: &Allocator, size: usize) !Buffer {
pub fn initSize(allocator: *Allocator, size: usize) !Buffer {
var self = initNull(allocator);
try self.resize(size);
return self;
@ -30,19 +30,19 @@ pub const Buffer = struct {
/// * ::replaceContents
/// * ::replaceContentsBuffer
/// * ::resize
pub fn initNull(allocator: &Allocator) Buffer {
pub fn initNull(allocator: *Allocator) Buffer {
return Buffer{ .list = ArrayList(u8).init(allocator) };
}
/// Must deinitialize with deinit.
pub fn initFromBuffer(buffer: &const Buffer) !Buffer {
pub fn initFromBuffer(buffer: *const Buffer) !Buffer {
return Buffer.init(buffer.list.allocator, buffer.toSliceConst());
}
/// Buffer takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Must deinitialize with deinit.
pub fn fromOwnedSlice(allocator: &Allocator, slice: []u8) Buffer {
pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) Buffer {
var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) };
self.list.append(0);
return self;
@ -50,79 +50,79 @@ pub const Buffer = struct {
/// The caller owns the returned memory. The Buffer becomes null and
/// is safe to `deinit`.
pub fn toOwnedSlice(self: &Buffer) []u8 {
pub fn toOwnedSlice(self: *Buffer) []u8 {
const allocator = self.list.allocator;
const result = allocator.shrink(u8, self.list.items, self.len());
self.* = initNull(allocator);
return result;
}
pub fn deinit(self: &Buffer) void {
pub fn deinit(self: *Buffer) void {
self.list.deinit();
}
pub fn toSlice(self: &const Buffer) []u8 {
pub fn toSlice(self: *const Buffer) []u8 {
return self.list.toSlice()[0..self.len()];
}
pub fn toSliceConst(self: &const Buffer) []const u8 {
pub fn toSliceConst(self: *const Buffer) []const u8 {
return self.list.toSliceConst()[0..self.len()];
}
pub fn shrink(self: &Buffer, new_len: usize) void {
pub fn shrink(self: *Buffer, new_len: usize) void {
assert(new_len <= self.len());
self.list.shrink(new_len + 1);
self.list.items[self.len()] = 0;
}
pub fn resize(self: &Buffer, new_len: usize) !void {
pub fn resize(self: *Buffer, new_len: usize) !void {
try self.list.resize(new_len + 1);
self.list.items[self.len()] = 0;
}
pub fn isNull(self: &const Buffer) bool {
pub fn isNull(self: *const Buffer) bool {
return self.list.len == 0;
}
pub fn len(self: &const Buffer) usize {
pub fn len(self: *const Buffer) usize {
return self.list.len - 1;
}
pub fn append(self: &Buffer, m: []const u8) !void {
pub fn append(self: *Buffer, m: []const u8) !void {
const old_len = self.len();
try self.resize(old_len + m.len);
mem.copy(u8, self.list.toSlice()[old_len..], m);
}
pub fn appendByte(self: &Buffer, byte: u8) !void {
pub fn appendByte(self: *Buffer, byte: u8) !void {
const old_len = self.len();
try self.resize(old_len + 1);
self.list.toSlice()[old_len] = byte;
}
pub fn eql(self: &const Buffer, m: []const u8) bool {
pub fn eql(self: *const Buffer, m: []const u8) bool {
return mem.eql(u8, self.toSliceConst(), m);
}
pub fn startsWith(self: &const Buffer, m: []const u8) bool {
pub fn startsWith(self: *const Buffer, m: []const u8) bool {
if (self.len() < m.len) return false;
return mem.eql(u8, self.list.items[0..m.len], m);
}
pub fn endsWith(self: &const Buffer, m: []const u8) bool {
pub fn endsWith(self: *const Buffer, m: []const u8) bool {
const l = self.len();
if (l < m.len) return false;
const start = l - m.len;
return mem.eql(u8, self.list.items[start..l], m);
}
pub fn replaceContents(self: &const Buffer, m: []const u8) !void {
pub fn replaceContents(self: *const Buffer, m: []const u8) !void {
try self.resize(m.len);
mem.copy(u8, self.list.toSlice(), m);
}
/// For passing to C functions.
pub fn ptr(self: &const Buffer) &u8 {
pub fn ptr(self: *const Buffer) *u8 {
return self.list.items.ptr;
}
};

View File

@ -20,7 +20,7 @@ pub const Builder = struct {
install_tls: TopLevelStep,
have_uninstall_step: bool,
have_install_step: bool,
allocator: &Allocator,
allocator: *Allocator,
lib_paths: ArrayList([]const u8),
include_paths: ArrayList([]const u8),
rpaths: ArrayList([]const u8),
@ -36,9 +36,9 @@ pub const Builder = struct {
verbose_cimport: bool,
invalid_user_input: bool,
zig_exe: []const u8,
default_step: &Step,
default_step: *Step,
env_map: BufMap,
top_level_steps: ArrayList(&TopLevelStep),
top_level_steps: ArrayList(*TopLevelStep),
prefix: []const u8,
search_prefixes: ArrayList([]const u8),
lib_dir: []const u8,
@ -82,7 +82,7 @@ pub const Builder = struct {
description: []const u8,
};
pub fn init(allocator: &Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
pub fn init(allocator: *Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder {
var self = Builder{
.zig_exe = zig_exe,
.build_root = build_root,
@ -102,7 +102,7 @@ pub const Builder = struct {
.user_input_options = UserInputOptionsMap.init(allocator),
.available_options_map = AvailableOptionsMap.init(allocator),
.available_options_list = ArrayList(AvailableOption).init(allocator),
.top_level_steps = ArrayList(&TopLevelStep).init(allocator),
.top_level_steps = ArrayList(*TopLevelStep).init(allocator),
.default_step = undefined,
.env_map = os.getEnvMap(allocator) catch unreachable,
.prefix = undefined,
@ -127,7 +127,7 @@ pub const Builder = struct {
return self;
}
pub fn deinit(self: &Builder) void {
pub fn deinit(self: *Builder) void {
self.lib_paths.deinit();
self.include_paths.deinit();
self.rpaths.deinit();
@ -135,81 +135,81 @@ pub const Builder = struct {
self.top_level_steps.deinit();
}
pub fn setInstallPrefix(self: &Builder, maybe_prefix: ?[]const u8) void {
pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void {
self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default
self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable;
self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable;
}
pub fn addExecutable(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
pub fn addExecutable(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createExecutable(self, name, root_src);
}
pub fn addObject(self: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
pub fn addObject(self: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
return LibExeObjStep.createObject(self, name, root_src);
}
pub fn addSharedLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep {
pub fn addSharedLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createSharedLibrary(self, name, root_src, ver);
}
pub fn addStaticLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
pub fn addStaticLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
return LibExeObjStep.createStaticLibrary(self, name, root_src);
}
pub fn addTest(self: &Builder, root_src: []const u8) &TestStep {
pub fn addTest(self: *Builder, root_src: []const u8) *TestStep {
const test_step = self.allocator.create(TestStep) catch unreachable;
test_step.* = TestStep.init(self, root_src);
return test_step;
}
pub fn addAssemble(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
pub fn addAssemble(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const obj_step = LibExeObjStep.createObject(self, name, null);
obj_step.addAssemblyFile(src);
return obj_step;
}
pub fn addCStaticLibrary(self: &Builder, name: []const u8) &LibExeObjStep {
pub fn addCStaticLibrary(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCStaticLibrary(self, name);
}
pub fn addCSharedLibrary(self: &Builder, name: []const u8, ver: &const Version) &LibExeObjStep {
pub fn addCSharedLibrary(self: *Builder, name: []const u8, ver: *const Version) *LibExeObjStep {
return LibExeObjStep.createCSharedLibrary(self, name, ver);
}
pub fn addCExecutable(self: &Builder, name: []const u8) &LibExeObjStep {
pub fn addCExecutable(self: *Builder, name: []const u8) *LibExeObjStep {
return LibExeObjStep.createCExecutable(self, name);
}
pub fn addCObject(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
pub fn addCObject(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
return LibExeObjStep.createCObject(self, name, src);
}
/// ::argv is copied.
pub fn addCommand(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep {
pub fn addCommand(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
return CommandStep.create(self, cwd, env_map, argv);
}
pub fn addWriteFile(self: &Builder, file_path: []const u8, data: []const u8) &WriteFileStep {
pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep {
const write_file_step = self.allocator.create(WriteFileStep) catch unreachable;
write_file_step.* = WriteFileStep.init(self, file_path, data);
return write_file_step;
}
pub fn addLog(self: &Builder, comptime format: []const u8, args: ...) &LogStep {
pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep {
const data = self.fmt(format, args);
const log_step = self.allocator.create(LogStep) catch unreachable;
log_step.* = LogStep.init(self, data);
return log_step;
}
pub fn addRemoveDirTree(self: &Builder, dir_path: []const u8) &RemoveDirStep {
pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep {
const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable;
remove_dir_step.* = RemoveDirStep.init(self, dir_path);
return remove_dir_step;
}
pub fn version(self: &const Builder, major: u32, minor: u32, patch: u32) Version {
pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) Version {
return Version{
.major = major,
.minor = minor,
@ -217,20 +217,20 @@ pub const Builder = struct {
};
}
pub fn addCIncludePath(self: &Builder, path: []const u8) void {
pub fn addCIncludePath(self: *Builder, path: []const u8) void {
self.include_paths.append(path) catch unreachable;
}
pub fn addRPath(self: &Builder, path: []const u8) void {
pub fn addRPath(self: *Builder, path: []const u8) void {
self.rpaths.append(path) catch unreachable;
}
pub fn addLibPath(self: &Builder, path: []const u8) void {
pub fn addLibPath(self: *Builder, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
pub fn make(self: &Builder, step_names: []const []const u8) !void {
var wanted_steps = ArrayList(&Step).init(self.allocator);
pub fn make(self: *Builder, step_names: []const []const u8) !void {
var wanted_steps = ArrayList(*Step).init(self.allocator);
defer wanted_steps.deinit();
if (step_names.len == 0) {
@ -247,7 +247,7 @@ pub const Builder = struct {
}
}
pub fn getInstallStep(self: &Builder) &Step {
pub fn getInstallStep(self: *Builder) *Step {
if (self.have_install_step) return &self.install_tls.step;
self.top_level_steps.append(&self.install_tls) catch unreachable;
@ -255,7 +255,7 @@ pub const Builder = struct {
return &self.install_tls.step;
}
pub fn getUninstallStep(self: &Builder) &Step {
pub fn getUninstallStep(self: *Builder) *Step {
if (self.have_uninstall_step) return &self.uninstall_tls.step;
self.top_level_steps.append(&self.uninstall_tls) catch unreachable;
@ -263,7 +263,7 @@ pub const Builder = struct {
return &self.uninstall_tls.step;
}
fn makeUninstall(uninstall_step: &Step) error!void {
fn makeUninstall(uninstall_step: *Step) error!void {
const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step);
const self = @fieldParentPtr(Builder, "uninstall_tls", uninstall_tls);
@ -277,7 +277,7 @@ pub const Builder = struct {
// TODO remove empty directories
}
fn makeOneStep(self: &Builder, s: &Step) error!void {
fn makeOneStep(self: *Builder, s: *Step) error!void {
if (s.loop_flag) {
warn("Dependency loop detected:\n {}\n", s.name);
return error.DependencyLoopDetected;
@ -298,7 +298,7 @@ pub const Builder = struct {
try s.make();
}
fn getTopLevelStepByName(self: &Builder, name: []const u8) !&Step {
fn getTopLevelStepByName(self: *Builder, name: []const u8) !*Step {
for (self.top_level_steps.toSliceConst()) |top_level_step| {
if (mem.eql(u8, top_level_step.step.name, name)) {
return &top_level_step.step;
@ -308,7 +308,7 @@ pub const Builder = struct {
return error.InvalidStepName;
}
fn processNixOSEnvVars(self: &Builder) void {
fn processNixOSEnvVars(self: *Builder) void {
if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
var it = mem.split(nix_cflags_compile, " ");
while (true) {
@ -350,7 +350,7 @@ pub const Builder = struct {
}
}
pub fn option(self: &Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
pub fn option(self: *Builder, comptime T: type, name: []const u8, description: []const u8) ?T {
const type_id = comptime typeToEnum(T);
const available_option = AvailableOption{
.name = name,
@ -403,7 +403,7 @@ pub const Builder = struct {
}
}
pub fn step(self: &Builder, name: []const u8, description: []const u8) &Step {
pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step {
const step_info = self.allocator.create(TopLevelStep) catch unreachable;
step_info.* = TopLevelStep{
.step = Step.initNoOp(name, self.allocator),
@ -413,7 +413,7 @@ pub const Builder = struct {
return &step_info.step;
}
pub fn standardReleaseOptions(self: &Builder) builtin.Mode {
pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false;
@ -429,7 +429,7 @@ pub const Builder = struct {
return mode;
}
pub fn addUserInputOption(self: &Builder, name: []const u8, value: []const u8) bool {
pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool {
if (self.user_input_options.put(name, UserInputOption{
.name = name,
.value = UserValue{ .Scalar = value },
@ -466,7 +466,7 @@ pub const Builder = struct {
return false;
}
pub fn addUserInputFlag(self: &Builder, name: []const u8) bool {
pub fn addUserInputFlag(self: *Builder, name: []const u8) bool {
if (self.user_input_options.put(name, UserInputOption{
.name = name,
.value = UserValue{ .Flag = {} },
@ -500,7 +500,7 @@ pub const Builder = struct {
};
}
fn markInvalidUserInput(self: &Builder) void {
fn markInvalidUserInput(self: *Builder) void {
self.invalid_user_input = true;
}
@ -514,7 +514,7 @@ pub const Builder = struct {
};
}
pub fn validateUserInputDidItFail(self: &Builder) bool {
pub fn validateUserInputDidItFail(self: *Builder) bool {
// make sure all args are used
var it = self.user_input_options.iterator();
while (true) {
@ -528,7 +528,7 @@ pub const Builder = struct {
return self.invalid_user_input;
}
fn spawnChild(self: &Builder, argv: []const []const u8) !void {
fn spawnChild(self: *Builder, argv: []const []const u8) !void {
return self.spawnChildEnvMap(null, &self.env_map, argv);
}
@ -540,7 +540,7 @@ pub const Builder = struct {
warn("\n");
}
fn spawnChildEnvMap(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) !void {
fn spawnChildEnvMap(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) !void {
if (self.verbose) {
printCmd(cwd, argv);
}
@ -573,28 +573,28 @@ pub const Builder = struct {
}
}
pub fn makePath(self: &Builder, path: []const u8) !void {
pub fn makePath(self: *Builder, path: []const u8) !void {
os.makePath(self.allocator, self.pathFromRoot(path)) catch |err| {
warn("Unable to create path {}: {}\n", path, @errorName(err));
return err;
};
}
pub fn installArtifact(self: &Builder, artifact: &LibExeObjStep) void {
pub fn installArtifact(self: *Builder, artifact: *LibExeObjStep) void {
self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step);
}
pub fn addInstallArtifact(self: &Builder, artifact: &LibExeObjStep) &InstallArtifactStep {
pub fn addInstallArtifact(self: *Builder, artifact: *LibExeObjStep) *InstallArtifactStep {
return InstallArtifactStep.create(self, artifact);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
pub fn installFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) void {
pub fn installFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void {
self.getInstallStep().dependOn(&self.addInstallFile(src_path, dest_rel_path).step);
}
///::dest_rel_path is relative to prefix path or it can be an absolute path
pub fn addInstallFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) &InstallFileStep {
pub fn addInstallFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep {
const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable;
self.pushInstalledFile(full_dest_path);
@ -603,16 +603,16 @@ pub const Builder = struct {
return install_step;
}
pub fn pushInstalledFile(self: &Builder, full_path: []const u8) void {
pub fn pushInstalledFile(self: *Builder, full_path: []const u8) void {
_ = self.getUninstallStep();
self.installed_files.append(full_path) catch unreachable;
}
fn copyFile(self: &Builder, source_path: []const u8, dest_path: []const u8) !void {
fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void {
return self.copyFileMode(source_path, dest_path, os.default_file_mode);
}
fn copyFileMode(self: &Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void {
if (self.verbose) {
warn("cp {} {}\n", source_path, dest_path);
}
@ -629,15 +629,15 @@ pub const Builder = struct {
};
}
fn pathFromRoot(self: &Builder, rel_path: []const u8) []u8 {
fn pathFromRoot(self: *Builder, rel_path: []const u8) []u8 {
return os.path.resolve(self.allocator, self.build_root, rel_path) catch unreachable;
}
pub fn fmt(self: &Builder, comptime format: []const u8, args: ...) []u8 {
pub fn fmt(self: *Builder, comptime format: []const u8, args: ...) []u8 {
return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable;
}
fn getCCExe(self: &Builder) []const u8 {
fn getCCExe(self: *Builder) []const u8 {
if (builtin.environ == builtin.Environ.msvc) {
return "cl.exe";
} else {
@ -645,7 +645,7 @@ pub const Builder = struct {
}
}
pub fn findProgram(self: &Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
pub fn findProgram(self: *Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 {
// TODO report error for ambiguous situations
const exe_extension = (Target{ .Native = {} }).exeFileExt();
for (self.search_prefixes.toSliceConst()) |search_prefix| {
@ -693,7 +693,7 @@ pub const Builder = struct {
return error.FileNotFound;
}
pub fn exec(self: &Builder, argv: []const []const u8) ![]u8 {
pub fn exec(self: *Builder, argv: []const []const u8) ![]u8 {
const max_output_size = 100 * 1024;
const result = try os.ChildProcess.exec(self.allocator, argv, null, null, max_output_size);
switch (result.term) {
@ -715,7 +715,7 @@ pub const Builder = struct {
}
}
pub fn addSearchPrefix(self: &Builder, search_prefix: []const u8) void {
pub fn addSearchPrefix(self: *Builder, search_prefix: []const u8) void {
self.search_prefixes.append(search_prefix) catch unreachable;
}
};
@ -736,7 +736,7 @@ pub const Target = union(enum) {
Native: void,
Cross: CrossTarget,
pub fn oFileExt(self: &const Target) []const u8 {
pub fn oFileExt(self: *const Target) []const u8 {
const environ = switch (self.*) {
Target.Native => builtin.environ,
Target.Cross => |t| t.environ,
@ -747,49 +747,49 @@ pub const Target = union(enum) {
};
}
pub fn exeFileExt(self: &const Target) []const u8 {
pub fn exeFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".exe",
else => "",
};
}
pub fn libFileExt(self: &const Target) []const u8 {
pub fn libFileExt(self: *const Target) []const u8 {
return switch (self.getOs()) {
builtin.Os.windows => ".lib",
else => ".a",
};
}
pub fn getOs(self: &const Target) builtin.Os {
pub fn getOs(self: *const Target) builtin.Os {
return switch (self.*) {
Target.Native => builtin.os,
Target.Cross => |t| t.os,
};
}
pub fn isDarwin(self: &const Target) bool {
pub fn isDarwin(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.ios, builtin.Os.macosx => true,
else => false,
};
}
pub fn isWindows(self: &const Target) bool {
pub fn isWindows(self: *const Target) bool {
return switch (self.getOs()) {
builtin.Os.windows => true,
else => false,
};
}
pub fn wantSharedLibSymLinks(self: &const Target) bool {
pub fn wantSharedLibSymLinks(self: *const Target) bool {
return !self.isWindows();
}
};
pub const LibExeObjStep = struct {
step: Step,
builder: &Builder,
builder: *Builder,
name: []const u8,
target: Target,
link_libs: BufSet,
@ -836,56 +836,56 @@ pub const LibExeObjStep = struct {
Obj,
};
pub fn createSharedLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep {
pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver);
return self;
}
pub fn createCSharedLibrary(builder: &Builder, name: []const u8, version: &const Version) &LibExeObjStep {
pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, version, false);
return self;
}
pub fn createStaticLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0));
return self;
}
pub fn createCStaticLibrary(builder: &Builder, name: []const u8) &LibExeObjStep {
pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true);
return self;
}
pub fn createObject(builder: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep {
pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0));
return self;
}
pub fn createCObject(builder: &Builder, name: []const u8, src: []const u8) &LibExeObjStep {
pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false);
self.object_src = src;
return self;
}
pub fn createExecutable(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep {
pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0));
return self;
}
pub fn createCExecutable(builder: &Builder, name: []const u8) &LibExeObjStep {
pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep {
const self = builder.allocator.create(LibExeObjStep) catch unreachable;
self.* = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false);
return self;
}
fn initExtraArgs(builder: &Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: &const Version) LibExeObjStep {
fn initExtraArgs(builder: *Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: *const Version) LibExeObjStep {
var self = LibExeObjStep{
.strip = false,
.builder = builder,
@ -924,7 +924,7 @@ pub const LibExeObjStep = struct {
return self;
}
fn initC(builder: &Builder, name: []const u8, kind: Kind, version: &const Version, static: bool) LibExeObjStep {
fn initC(builder: *Builder, name: []const u8, kind: Kind, version: *const Version, static: bool) LibExeObjStep {
var self = LibExeObjStep{
.builder = builder,
.name = name,
@ -964,7 +964,7 @@ pub const LibExeObjStep = struct {
return self;
}
fn computeOutFileNames(self: &LibExeObjStep) void {
fn computeOutFileNames(self: *LibExeObjStep) void {
switch (self.kind) {
Kind.Obj => {
self.out_filename = self.builder.fmt("{}{}", self.name, self.target.oFileExt());
@ -996,7 +996,7 @@ pub const LibExeObjStep = struct {
}
}
pub fn setTarget(self: &LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
pub fn setTarget(self: *LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
.arch = target_arch,
@ -1008,16 +1008,16 @@ pub const LibExeObjStep = struct {
}
// TODO respect this in the C args
pub fn setLinkerScriptPath(self: &LibExeObjStep, path: []const u8) void {
pub fn setLinkerScriptPath(self: *LibExeObjStep, path: []const u8) void {
self.linker_script = path;
}
pub fn linkFramework(self: &LibExeObjStep, framework_name: []const u8) void {
pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void {
assert(self.target.isDarwin());
self.frameworks.put(framework_name) catch unreachable;
}
pub fn linkLibrary(self: &LibExeObjStep, lib: &LibExeObjStep) void {
pub fn linkLibrary(self: *LibExeObjStep, lib: *LibExeObjStep) void {
assert(self.kind != Kind.Obj);
assert(lib.kind == Kind.Lib);
@ -1038,26 +1038,26 @@ pub const LibExeObjStep = struct {
}
}
pub fn linkSystemLibrary(self: &LibExeObjStep, name: []const u8) void {
pub fn linkSystemLibrary(self: *LibExeObjStep, name: []const u8) void {
assert(self.kind != Kind.Obj);
self.link_libs.put(name) catch unreachable;
}
pub fn addSourceFile(self: &LibExeObjStep, file: []const u8) void {
pub fn addSourceFile(self: *LibExeObjStep, file: []const u8) void {
assert(self.kind != Kind.Obj);
assert(!self.is_zig);
self.source_files.append(file) catch unreachable;
}
pub fn setVerboseLink(self: &LibExeObjStep, value: bool) void {
pub fn setVerboseLink(self: *LibExeObjStep, value: bool) void {
self.verbose_link = value;
}
pub fn setBuildMode(self: &LibExeObjStep, mode: builtin.Mode) void {
pub fn setBuildMode(self: *LibExeObjStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
pub fn setOutputPath(self: &LibExeObjStep, file_path: []const u8) void {
pub fn setOutputPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_path = file_path;
// catch a common mistake
@ -1066,11 +1066,11 @@ pub const LibExeObjStep = struct {
}
}
pub fn getOutputPath(self: &LibExeObjStep) []const u8 {
pub fn getOutputPath(self: *LibExeObjStep) []const u8 {
return if (self.output_path) |output_path| output_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_filename) catch unreachable;
}
pub fn setOutputHPath(self: &LibExeObjStep, file_path: []const u8) void {
pub fn setOutputHPath(self: *LibExeObjStep, file_path: []const u8) void {
self.output_h_path = file_path;
// catch a common mistake
@ -1079,21 +1079,21 @@ pub const LibExeObjStep = struct {
}
}
pub fn getOutputHPath(self: &LibExeObjStep) []const u8 {
pub fn getOutputHPath(self: *LibExeObjStep) []const u8 {
return if (self.output_h_path) |output_h_path| output_h_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_h_filename) catch unreachable;
}
pub fn addAssemblyFile(self: &LibExeObjStep, path: []const u8) void {
pub fn addAssemblyFile(self: *LibExeObjStep, path: []const u8) void {
self.assembly_files.append(path) catch unreachable;
}
pub fn addObjectFile(self: &LibExeObjStep, path: []const u8) void {
pub fn addObjectFile(self: *LibExeObjStep, path: []const u8) void {
assert(self.kind != Kind.Obj);
self.object_files.append(path) catch unreachable;
}
pub fn addObject(self: &LibExeObjStep, obj: &LibExeObjStep) void {
pub fn addObject(self: *LibExeObjStep, obj: *LibExeObjStep) void {
assert(obj.kind == Kind.Obj);
assert(self.kind != Kind.Obj);
@ -1110,15 +1110,15 @@ pub const LibExeObjStep = struct {
self.include_dirs.append(self.builder.cache_root) catch unreachable;
}
pub fn addIncludeDir(self: &LibExeObjStep, path: []const u8) void {
pub fn addIncludeDir(self: *LibExeObjStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
pub fn addLibPath(self: &LibExeObjStep, path: []const u8) void {
pub fn addLibPath(self: *LibExeObjStep, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
}
pub fn addPackagePath(self: &LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
assert(self.is_zig);
self.packages.append(Pkg{
@ -1127,23 +1127,23 @@ pub const LibExeObjStep = struct {
}) catch unreachable;
}
pub fn addCompileFlags(self: &LibExeObjStep, flags: []const []const u8) void {
pub fn addCompileFlags(self: *LibExeObjStep, flags: []const []const u8) void {
for (flags) |flag| {
self.cflags.append(flag) catch unreachable;
}
}
pub fn setNoStdLib(self: &LibExeObjStep, disable: bool) void {
pub fn setNoStdLib(self: *LibExeObjStep, disable: bool) void {
assert(!self.is_zig);
self.disable_libc = disable;
}
fn make(step: &Step) !void {
fn make(step: *Step) !void {
const self = @fieldParentPtr(LibExeObjStep, "step", step);
return if (self.is_zig) self.makeZig() else self.makeC();
}
fn makeZig(self: &LibExeObjStep) !void {
fn makeZig(self: *LibExeObjStep) !void {
const builder = self.builder;
assert(self.is_zig);
@ -1309,7 +1309,7 @@ pub const LibExeObjStep = struct {
}
}
fn appendCompileFlags(self: &LibExeObjStep, args: &ArrayList([]const u8)) void {
fn appendCompileFlags(self: *LibExeObjStep, args: *ArrayList([]const u8)) void {
if (!self.strip) {
args.append("-g") catch unreachable;
}
@ -1354,7 +1354,7 @@ pub const LibExeObjStep = struct {
}
}
fn makeC(self: &LibExeObjStep) !void {
fn makeC(self: *LibExeObjStep) !void {
const builder = self.builder;
const cc = builder.getCCExe();
@ -1580,7 +1580,7 @@ pub const LibExeObjStep = struct {
pub const TestStep = struct {
step: Step,
builder: &Builder,
builder: *Builder,
root_src: []const u8,
build_mode: builtin.Mode,
verbose: bool,
@ -1591,7 +1591,7 @@ pub const TestStep = struct {
exec_cmd_args: ?[]const ?[]const u8,
include_dirs: ArrayList([]const u8),
pub fn init(builder: &Builder, root_src: []const u8) TestStep {
pub fn init(builder: *Builder, root_src: []const u8) TestStep {
const step_name = builder.fmt("test {}", root_src);
return TestStep{
.step = Step.init(step_name, builder.allocator, make),
@ -1608,31 +1608,31 @@ pub const TestStep = struct {
};
}
pub fn setVerbose(self: &TestStep, value: bool) void {
pub fn setVerbose(self: *TestStep, value: bool) void {
self.verbose = value;
}
pub fn addIncludeDir(self: &TestStep, path: []const u8) void {
pub fn addIncludeDir(self: *TestStep, path: []const u8) void {
self.include_dirs.append(path) catch unreachable;
}
pub fn setBuildMode(self: &TestStep, mode: builtin.Mode) void {
pub fn setBuildMode(self: *TestStep, mode: builtin.Mode) void {
self.build_mode = mode;
}
pub fn linkSystemLibrary(self: &TestStep, name: []const u8) void {
pub fn linkSystemLibrary(self: *TestStep, name: []const u8) void {
self.link_libs.put(name) catch unreachable;
}
pub fn setNamePrefix(self: &TestStep, text: []const u8) void {
pub fn setNamePrefix(self: *TestStep, text: []const u8) void {
self.name_prefix = text;
}
pub fn setFilter(self: &TestStep, text: ?[]const u8) void {
pub fn setFilter(self: *TestStep, text: ?[]const u8) void {
self.filter = text;
}
pub fn setTarget(self: &TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
pub fn setTarget(self: *TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void {
self.target = Target{
.Cross = CrossTarget{
.arch = target_arch,
@ -1642,11 +1642,11 @@ pub const TestStep = struct {
};
}
pub fn setExecCmd(self: &TestStep, args: []const ?[]const u8) void {
pub fn setExecCmd(self: *TestStep, args: []const ?[]const u8) void {
self.exec_cmd_args = args;
}
fn make(step: &Step) !void {
fn make(step: *Step) !void {
const self = @fieldParentPtr(TestStep, "step", step);
const builder = self.builder;
@ -1739,13 +1739,13 @@ pub const TestStep = struct {
pub const CommandStep = struct {
step: Step,
builder: &Builder,
builder: *Builder,
argv: [][]const u8,
cwd: ?[]const u8,
env_map: &const BufMap,
env_map: *const BufMap,
/// ::argv is copied.
pub fn create(builder: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep {
pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep {
const self = builder.allocator.create(CommandStep) catch unreachable;
self.* = CommandStep{
.builder = builder,
@ -1759,7 +1759,7 @@ pub const CommandStep = struct {
return self;
}
fn make(step: &Step) !void {
fn make(step: *Step) !void {
const self = @fieldParentPtr(CommandStep, "step", step);
const cwd = if (self.cwd) |cwd| self.builder.pathFromRoot(cwd) else self.builder.build_root;
@ -1769,13 +1769,13 @@ pub const CommandStep = struct {
const InstallArtifactStep = struct {
step: Step,
builder: &Builder,
artifact: &LibExeObjStep,
builder: *Builder,
artifact: *LibExeObjStep,
dest_file: []const u8,
const Self = this;
pub fn create(builder: &Builder, artifact: &LibExeObjStep) &Self {
pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self {
const self = builder.allocator.create(Self) catch unreachable;
const dest_dir = switch (artifact.kind) {
LibExeObjStep.Kind.Obj => unreachable,
@ -1797,7 +1797,7 @@ const InstallArtifactStep = struct {
return self;
}
fn make(step: &Step) !void {
fn make(step: *Step) !void {
const self = @fieldParentPtr(Self, "step", step);
const builder = self.builder;
@ -1818,11 +1818,11 @@ const InstallArtifactStep = struct {
pub const InstallFileStep = struct {
step: Step,
builder: &Builder,
builder: *Builder,
src_path: []const u8,
dest_path: []const u8,
pub fn init(builder: &Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
pub fn init(builder: *Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep {
return InstallFileStep{
.builder = builder,
.step = Step.init(builder.fmt("install {}", src_path), builder.allocator, make),
@ -1831,7 +1831,7 @@ pub const InstallFileStep = struct {
};
}
fn make(step: &Step) !void {
fn make(step: *Step) !void {
const self = @fieldParentPtr(InstallFileStep, "step", step);
try self.builder.copyFile(self.src_path, self.dest_path);
}
@ -1839,11 +1839,11 @@ pub const InstallFileStep = struct {
pub const WriteFileStep = struct {
step: Step,
builder: &Builder,
builder: *Builder,
file_path: []const u8,
data: []const u8,
pub fn init(builder: &Builder, file_path: []const u8, data: []const u8) WriteFileStep {
pub fn init(builder: *Builder, file_path: []const u8, data: []const u8) WriteFileStep {
return WriteFileStep{
.builder = builder,
.step = Step.init(builder.fmt("writefile {}", file_path), builder.allocator, make),
@ -1852,7 +1852,7 @@ pub const WriteFileStep = struct {
};
}
fn make(step: &Step) !void {
fn make(step: *Step) !void {
const self = @fieldParentPtr(WriteFileStep, "step", step);
const full_path = self.builder.pathFromRoot(self.file_path);
const full_path_dir = os.path.dirname(full_path);
@ -1869,10 +1869,10 @@ pub const WriteFileStep = struct {
pub const LogStep = struct {
step: Step,
builder: &Builder,
builder: *Builder,
data: []const u8,
pub fn init(builder: &Builder, data: []const u8) LogStep {
pub fn init(builder: *Builder, data: []const u8) LogStep {
return LogStep{
.builder = builder,
.step = Step.init(builder.fmt("log {}", data), builder.allocator, make),
@ -1880,7 +1880,7 @@ pub const LogStep = struct {
};
}
fn make(step: &Step) error!void {
fn make(step: *Step) error!void {
const self = @fieldParentPtr(LogStep, "step", step);
warn("{}", self.data);
}
@ -1888,10 +1888,10 @@ pub const LogStep = struct {
pub const RemoveDirStep = struct {
step: Step,
builder: &Builder,
builder: *Builder,
dir_path: []const u8,
pub fn init(builder: &Builder, dir_path: []const u8) RemoveDirStep {
pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep {
return RemoveDirStep{
.builder = builder,
.step = Step.init(builder.fmt("RemoveDir {}", dir_path), builder.allocator, make),
@ -1899,7 +1899,7 @@ pub const RemoveDirStep = struct {
};
}
fn make(step: &Step) !void {
fn make(step: *Step) !void {
const self = @fieldParentPtr(RemoveDirStep, "step", step);
const full_path = self.builder.pathFromRoot(self.dir_path);
@ -1912,39 +1912,39 @@ pub const RemoveDirStep = struct {
pub const Step = struct {
name: []const u8,
makeFn: fn (self: &Step) error!void,
dependencies: ArrayList(&Step),
makeFn: fn (self: *Step) error!void,
dependencies: ArrayList(*Step),
loop_flag: bool,
done_flag: bool,
pub fn init(name: []const u8, allocator: &Allocator, makeFn: fn (&Step) error!void) Step {
pub fn init(name: []const u8, allocator: *Allocator, makeFn: fn (*Step) error!void) Step {
return Step{
.name = name,
.makeFn = makeFn,
.dependencies = ArrayList(&Step).init(allocator),
.dependencies = ArrayList(*Step).init(allocator),
.loop_flag = false,
.done_flag = false,
};
}
pub fn initNoOp(name: []const u8, allocator: &Allocator) Step {
pub fn initNoOp(name: []const u8, allocator: *Allocator) Step {
return init(name, allocator, makeNoOp);
}
pub fn make(self: &Step) !void {
pub fn make(self: *Step) !void {
if (self.done_flag) return;
try self.makeFn(self);
self.done_flag = true;
}
pub fn dependOn(self: &Step, other: &Step) void {
pub fn dependOn(self: *Step, other: *Step) void {
self.dependencies.append(other) catch unreachable;
}
fn makeNoOp(self: &Step) error!void {}
fn makeNoOp(self: *Step) error!void {}
};
fn doAtomicSymLinks(allocator: &Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
const out_dir = os.path.dirname(output_path);
const out_basename = os.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3

View File

@ -1,10 +1,10 @@
extern "c" fn __error() &c_int;
pub extern "c" fn _NSGetExecutablePath(buf: &u8, bufsize: &u32) c_int;
extern "c" fn __error() *c_int;
pub extern "c" fn _NSGetExecutablePath(buf: *u8, bufsize: *u32) c_int;
pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: &u8, buf_len: usize, basep: &i64) usize;
pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: *u8, buf_len: usize, basep: *i64) usize;
pub extern "c" fn mach_absolute_time() u64;
pub extern "c" fn mach_timebase_info(tinfo: ?&mach_timebase_info_data) void;
pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void;
pub use @import("../os/darwin_errno.zig");

View File

@ -13,49 +13,49 @@ pub extern "c" fn abort() noreturn;
pub extern "c" fn exit(code: c_int) noreturn;
pub extern "c" fn isatty(fd: c_int) c_int;
pub extern "c" fn close(fd: c_int) c_int;
pub extern "c" fn fstat(fd: c_int, buf: &Stat) c_int;
pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: &Stat) c_int;
pub extern "c" fn fstat(fd: c_int, buf: *Stat) c_int;
pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int;
pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
pub extern "c" fn open(path: &const u8, oflag: c_int, ...) c_int;
pub extern "c" fn open(path: *const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
pub extern "c" fn read(fd: c_int, buf: &c_void, nbyte: usize) isize;
pub extern "c" fn stat(noalias path: &const u8, noalias buf: &Stat) c_int;
pub extern "c" fn write(fd: c_int, buf: &const c_void, nbyte: usize) isize;
pub extern "c" fn mmap(addr: ?&c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?&c_void;
pub extern "c" fn munmap(addr: &c_void, len: usize) c_int;
pub extern "c" fn unlink(path: &const u8) c_int;
pub extern "c" fn getcwd(buf: &u8, size: usize) ?&u8;
pub extern "c" fn waitpid(pid: c_int, stat_loc: &c_int, options: c_int) c_int;
pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
pub extern "c" fn stat(noalias path: *const u8, noalias buf: *Stat) c_int;
pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
pub extern "c" fn unlink(path: *const u8) c_int;
pub extern "c" fn getcwd(buf: *u8, size: usize) ?*u8;
pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int;
pub extern "c" fn fork() c_int;
pub extern "c" fn access(path: &const u8, mode: c_uint) c_int;
pub extern "c" fn pipe(fds: &c_int) c_int;
pub extern "c" fn mkdir(path: &const u8, mode: c_uint) c_int;
pub extern "c" fn symlink(existing: &const u8, new: &const u8) c_int;
pub extern "c" fn rename(old: &const u8, new: &const u8) c_int;
pub extern "c" fn chdir(path: &const u8) c_int;
pub extern "c" fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) c_int;
pub extern "c" fn access(path: *const u8, mode: c_uint) c_int;
pub extern "c" fn pipe(fds: *c_int) c_int;
pub extern "c" fn mkdir(path: *const u8, mode: c_uint) c_int;
pub extern "c" fn symlink(existing: *const u8, new: *const u8) c_int;
pub extern "c" fn rename(old: *const u8, new: *const u8) c_int;
pub extern "c" fn chdir(path: *const u8) c_int;
pub extern "c" fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) c_int;
pub extern "c" fn dup(fd: c_int) c_int;
pub extern "c" fn dup2(old_fd: c_int, new_fd: c_int) c_int;
pub extern "c" fn readlink(noalias path: &const u8, noalias buf: &u8, bufsize: usize) isize;
pub extern "c" fn realpath(noalias file_name: &const u8, noalias resolved_name: &u8) ?&u8;
pub extern "c" fn sigprocmask(how: c_int, noalias set: &const sigset_t, noalias oset: ?&sigset_t) c_int;
pub extern "c" fn gettimeofday(tv: ?&timeval, tz: ?&timezone) c_int;
pub extern "c" fn sigaction(sig: c_int, noalias act: &const Sigaction, noalias oact: ?&Sigaction) c_int;
pub extern "c" fn nanosleep(rqtp: &const timespec, rmtp: ?&timespec) c_int;
pub extern "c" fn readlink(noalias path: *const u8, noalias buf: *u8, bufsize: usize) isize;
pub extern "c" fn realpath(noalias file_name: *const u8, noalias resolved_name: *u8) ?*u8;
pub extern "c" fn sigprocmask(how: c_int, noalias set: *const sigset_t, noalias oset: ?*sigset_t) c_int;
pub extern "c" fn gettimeofday(tv: ?*timeval, tz: ?*timezone) c_int;
pub extern "c" fn sigaction(sig: c_int, noalias act: *const Sigaction, noalias oact: ?*Sigaction) c_int;
pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int;
pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int;
pub extern "c" fn rmdir(path: &const u8) c_int;
pub extern "c" fn rmdir(path: *const u8) c_int;
pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?&c_void;
pub extern "c" fn malloc(usize) ?&c_void;
pub extern "c" fn realloc(&c_void, usize) ?&c_void;
pub extern "c" fn free(&c_void) void;
pub extern "c" fn posix_memalign(memptr: &&c_void, alignment: usize, size: usize) c_int;
pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
pub extern "c" fn malloc(usize) ?*c_void;
pub extern "c" fn realloc(*c_void, usize) ?*c_void;
pub extern "c" fn free(*c_void) void;
pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
pub extern "pthread" fn pthread_create(noalias newthread: &pthread_t, noalias attr: ?&const pthread_attr_t, start_routine: extern fn (?&c_void) ?&c_void, noalias arg: ?&c_void) c_int;
pub extern "pthread" fn pthread_attr_init(attr: &pthread_attr_t) c_int;
pub extern "pthread" fn pthread_attr_setstack(attr: &pthread_attr_t, stackaddr: &c_void, stacksize: usize) c_int;
pub extern "pthread" fn pthread_attr_destroy(attr: &pthread_attr_t) c_int;
pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?&?&c_void) c_int;
pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int;
pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
pub const pthread_t = &@OpaqueType();
pub const pthread_t = *@OpaqueType();

View File

@ -1,7 +1,7 @@
pub use @import("../os/linux/errno.zig");
pub extern "c" fn getrandom(buf_ptr: &u8, buf_len: usize, flags: c_uint) c_int;
extern "c" fn __errno_location() &c_int;
pub extern "c" fn getrandom(buf_ptr: *u8, buf_len: usize, flags: c_uint) c_int;
extern "c" fn __errno_location() *c_int;
pub const _errno = __errno_location;
pub const pthread_attr_t = extern struct {

View File

@ -1 +1 @@
pub extern "c" fn _errno() &c_int;
pub extern "c" fn _errno() *c_int;

View File

@ -75,7 +75,7 @@ fn Blake2s(comptime out_len: usize) type {
return s;
}
pub fn reset(d: &Self) void {
pub fn reset(d: *Self) void {
mem.copy(u32, d.h[0..], iv[0..]);
// No key plus default parameters
@ -90,7 +90,7 @@ fn Blake2s(comptime out_len: usize) type {
d.final(out);
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@ -113,7 +113,7 @@ fn Blake2s(comptime out_len: usize) type {
d.buf_len += u8(b[off..].len);
}
pub fn final(d: &Self, out: []u8) void {
pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= out_len / 8);
mem.set(u8, d.buf[d.buf_len..], 0);
@ -127,7 +127,7 @@ fn Blake2s(comptime out_len: usize) type {
}
}
fn round(d: &Self, b: []const u8, last: bool) void {
fn round(d: *Self, b: []const u8, last: bool) void {
debug.assert(b.len == 64);
var m: [16]u32 = undefined;
@ -310,7 +310,7 @@ fn Blake2b(comptime out_len: usize) type {
return s;
}
pub fn reset(d: &Self) void {
pub fn reset(d: *Self) void {
mem.copy(u64, d.h[0..], iv[0..]);
// No key plus default parameters
@ -325,7 +325,7 @@ fn Blake2b(comptime out_len: usize) type {
d.final(out);
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@ -348,7 +348,7 @@ fn Blake2b(comptime out_len: usize) type {
d.buf_len += u8(b[off..].len);
}
pub fn final(d: &Self, out: []u8) void {
pub fn final(d: *Self, out: []u8) void {
mem.set(u8, d.buf[d.buf_len..], 0);
d.t += d.buf_len;
d.round(d.buf[0..], true);
@ -360,7 +360,7 @@ fn Blake2b(comptime out_len: usize) type {
}
}
fn round(d: &Self, b: []const u8, last: bool) void {
fn round(d: *Self, b: []const u8, last: bool) void {
debug.assert(b.len == 128);
var m: [16]u64 = undefined;

View File

@ -44,7 +44,7 @@ pub const Md5 = struct {
return d;
}
pub fn reset(d: &Self) void {
pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@ -59,7 +59,7 @@ pub const Md5 = struct {
d.final(out);
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@ -84,7 +84,7 @@ pub const Md5 = struct {
d.total_len +%= b.len;
}
pub fn final(d: &Self, out: []u8) void {
pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 16);
// The buffer here will never be completely full.
@ -116,7 +116,7 @@ pub const Md5 = struct {
}
}
fn round(d: &Self, b: []const u8) void {
fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;

View File

@ -43,7 +43,7 @@ pub const Sha1 = struct {
return d;
}
pub fn reset(d: &Self) void {
pub fn reset(d: *Self) void {
d.s[0] = 0x67452301;
d.s[1] = 0xEFCDAB89;
d.s[2] = 0x98BADCFE;
@ -59,7 +59,7 @@ pub const Sha1 = struct {
d.final(out);
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@ -83,7 +83,7 @@ pub const Sha1 = struct {
d.total_len += b.len;
}
pub fn final(d: &Self, out: []u8) void {
pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= 20);
// The buffer here will never be completely full.
@ -115,7 +115,7 @@ pub const Sha1 = struct {
}
}
fn round(d: &Self, b: []const u8) void {
fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [16]u32 = undefined;

View File

@ -93,7 +93,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
return d;
}
pub fn reset(d: &Self) void {
pub fn reset(d: *Self) void {
d.s[0] = params.iv0;
d.s[1] = params.iv1;
d.s[2] = params.iv2;
@ -112,7 +112,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
d.final(out);
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@ -136,7 +136,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
d.total_len += b.len;
}
pub fn final(d: &Self, out: []u8) void {
pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= params.out_len / 8);
// The buffer here will never be completely full.
@ -171,7 +171,7 @@ fn Sha2_32(comptime params: Sha2Params32) type {
}
}
fn round(d: &Self, b: []const u8) void {
fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 64);
var s: [64]u32 = undefined;
@ -434,7 +434,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
return d;
}
pub fn reset(d: &Self) void {
pub fn reset(d: *Self) void {
d.s[0] = params.iv0;
d.s[1] = params.iv1;
d.s[2] = params.iv2;
@ -453,7 +453,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
d.final(out);
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial buffer exists from previous update. Copy into buffer then hash.
@ -477,7 +477,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
d.total_len += b.len;
}
pub fn final(d: &Self, out: []u8) void {
pub fn final(d: *Self, out: []u8) void {
debug.assert(out.len >= params.out_len / 8);
// The buffer here will never be completely full.
@ -512,7 +512,7 @@ fn Sha2_64(comptime params: Sha2Params64) type {
}
}
fn round(d: &Self, b: []const u8) void {
fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 128);
var s: [80]u64 = undefined;

View File

@ -26,7 +26,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
return d;
}
pub fn reset(d: &Self) void {
pub fn reset(d: *Self) void {
mem.set(u8, d.s[0..], 0);
d.offset = 0;
d.rate = 200 - (bits / 4);
@ -38,7 +38,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
d.final(out);
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var ip: usize = 0;
var len = b.len;
var rate = d.rate - d.offset;
@ -63,7 +63,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
d.offset = offset + len;
}
pub fn final(d: &Self, out: []u8) void {
pub fn final(d: *Self, out: []u8) void {
// padding
d.s[d.offset] ^= delim;
d.s[d.rate - 1] ^= 0x80;

View File

@ -15,8 +15,8 @@ const BytesToHash = 1024 * MiB;
pub fn main() !void {
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = std.io.FileOutStream.init(&stdout_file);
const stdout = &stdout_out_stream.stream;
var stdout_out_stream = std.io.FileOutStream.init(*stdout_file);
const stdout = *stdout_out_stream.stream;
var block: [HashFunction.block_size]u8 = undefined;
std.mem.set(u8, block[0..], 0);

View File

@ -9,13 +9,13 @@ pub const line_sep = switch (builtin.os) {
else => "\n",
};
pub fn len(ptr: &const u8) usize {
pub fn len(ptr: *const u8) usize {
var count: usize = 0;
while (ptr[count] != 0) : (count += 1) {}
return count;
}
pub fn cmp(a: &const u8, b: &const u8) i8 {
pub fn cmp(a: *const u8, b: *const u8) i8 {
var index: usize = 0;
while (a[index] == b[index] and a[index] != 0) : (index += 1) {}
if (a[index] > b[index]) {
@ -27,11 +27,11 @@ pub fn cmp(a: &const u8, b: &const u8) i8 {
}
}
pub fn toSliceConst(str: &const u8) []const u8 {
pub fn toSliceConst(str: *const u8) []const u8 {
return str[0..len(str)];
}
pub fn toSlice(str: &u8) []u8 {
pub fn toSlice(str: *u8) []u8 {
return str[0..len(str)];
}
@ -47,7 +47,7 @@ fn testCStrFnsImpl() void {
/// Returns a mutable slice with 1 more byte of length which is a null byte.
/// Caller owns the returned memory.
pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 {
const result = try allocator.alloc(u8, slice.len + 1);
mem.copy(u8, result, slice);
result[slice.len] = 0;
@ -55,13 +55,13 @@ pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 {
}
pub const NullTerminated2DArray = struct {
allocator: &mem.Allocator,
allocator: *mem.Allocator,
byte_count: usize,
ptr: ?&?&u8,
ptr: ?*?*u8,
/// Takes N lists of strings, concatenates the lists together, and adds a null terminator
/// Caller must deinit result
pub fn fromSlices(allocator: &mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray {
var new_len: usize = 1; // 1 for the list null
var byte_count: usize = 0;
for (slices) |slice| {
@ -75,11 +75,11 @@ pub const NullTerminated2DArray = struct {
const index_size = @sizeOf(usize) * new_len; // size of the ptrs
byte_count += index_size;
const buf = try allocator.alignedAlloc(u8, @alignOf(?&u8), byte_count);
const buf = try allocator.alignedAlloc(u8, @alignOf(?*u8), byte_count);
errdefer allocator.free(buf);
var write_index = index_size;
const index_buf = ([]?&u8)(buf);
const index_buf = ([]?*u8)(buf);
var i: usize = 0;
for (slices) |slice| {
@ -97,12 +97,12 @@ pub const NullTerminated2DArray = struct {
return NullTerminated2DArray{
.allocator = allocator,
.byte_count = byte_count,
.ptr = @ptrCast(?&?&u8, buf.ptr),
.ptr = @ptrCast(?*?*u8, buf.ptr),
};
}
pub fn deinit(self: &NullTerminated2DArray) void {
const buf = @ptrCast(&u8, self.ptr);
pub fn deinit(self: *NullTerminated2DArray) void {
const buf = @ptrCast(*u8, self.ptr);
self.allocator.free(buf[0..self.byte_count]);
}
};

View File

@ -7,12 +7,12 @@ pub const FailingAllocator = struct {
allocator: mem.Allocator,
index: usize,
fail_index: usize,
internal_allocator: &mem.Allocator,
internal_allocator: *mem.Allocator,
allocated_bytes: usize,
freed_bytes: usize,
deallocations: usize,
pub fn init(allocator: &mem.Allocator, fail_index: usize) FailingAllocator {
pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator {
return FailingAllocator{
.internal_allocator = allocator,
.fail_index = fail_index,
@ -28,7 +28,7 @@ pub const FailingAllocator = struct {
};
}
fn alloc(allocator: &mem.Allocator, n: usize, alignment: u29) ![]u8 {
fn alloc(allocator: *mem.Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (self.index == self.fail_index) {
return error.OutOfMemory;
@ -39,7 +39,7 @@ pub const FailingAllocator = struct {
return result;
}
fn realloc(allocator: &mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
if (new_size <= old_mem.len) {
self.freed_bytes += old_mem.len - new_size;
@ -55,7 +55,7 @@ pub const FailingAllocator = struct {
return result;
}
fn free(allocator: &mem.Allocator, bytes: []u8) void {
fn free(allocator: *mem.Allocator, bytes: []u8) void {
const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
self.freed_bytes += bytes.len;
self.deallocations += 1;

View File

@ -16,12 +16,12 @@ pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator;
/// TODO atomic/multithread support
var stderr_file: os.File = undefined;
var stderr_file_out_stream: io.FileOutStream = undefined;
var stderr_stream: ?&io.OutStream(io.FileOutStream.Error) = null;
var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null;
pub fn warn(comptime fmt: []const u8, args: ...) void {
const stderr = getStderrStream() catch return;
stderr.print(fmt, args) catch return;
}
fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) {
if (stderr_stream) |st| {
return st;
} else {
@ -33,8 +33,8 @@ fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) {
}
}
var self_debug_info: ?&ElfStackTrace = null;
pub fn getSelfDebugInfo() !&ElfStackTrace {
var self_debug_info: ?*ElfStackTrace = null;
pub fn getSelfDebugInfo() !*ElfStackTrace {
if (self_debug_info) |info| {
return info;
} else {
@ -58,7 +58,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
}
/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned.
pub fn dumpStackTrace(stack_trace: &const builtin.StackTrace) void {
pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void {
const stderr = getStderrStream() catch return;
const debug_info = getSelfDebugInfo() catch |err| {
stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", @errorName(err)) catch return;
@ -104,7 +104,7 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn {
var panicking: u8 = 0; // TODO make this a bool
pub fn panicExtra(trace: ?&const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn {
@setCold(true);
if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) {
@ -130,7 +130,7 @@ const WHITE = "\x1b[37;1m";
const DIM = "\x1b[2m";
const RESET = "\x1b[0m";
pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool) !void {
pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool) !void {
var frame_index: usize = undefined;
var frames_left: usize = undefined;
if (stack_trace.index < stack_trace.instruction_addresses.len) {
@ -150,7 +150,7 @@ pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var,
}
}
pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void {
const AddressState = union(enum) {
NotLookingForStartAddress,
LookingForStartAddress: usize,
@ -166,8 +166,8 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_
}
var fp = @ptrToInt(@frameAddress());
while (fp != 0) : (fp = @intToPtr(&const usize, fp).*) {
const return_address = @intToPtr(&const usize, fp + @sizeOf(usize)).*;
while (fp != 0) : (fp = @intToPtr(*const usize, fp).*) {
const return_address = @intToPtr(*const usize, fp + @sizeOf(usize)).*;
switch (addr_state) {
AddressState.NotLookingForStartAddress => {},
@ -183,7 +183,7 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_
}
}
fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: usize) !void {
fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize) !void {
const ptr_hex = "0x{x}";
switch (builtin.os) {
@ -236,7 +236,7 @@ fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: us
}
}
pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
switch (builtin.object_format) {
builtin.ObjectFormat.elf => {
const st = try allocator.create(ElfStackTrace);
@ -289,7 +289,7 @@ pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace {
}
}
fn printLineFromFile(allocator: &mem.Allocator, out_stream: var, line_info: &const LineInfo) !void {
fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *const LineInfo) !void {
var f = try os.File.openRead(allocator, line_info.file_name);
defer f.close();
// TODO fstat and make sure that the file has the correct size
@ -325,32 +325,32 @@ pub const ElfStackTrace = switch (builtin.os) {
builtin.Os.macosx => struct {
symbol_table: macho.SymbolTable,
pub fn close(self: &ElfStackTrace) void {
pub fn close(self: *ElfStackTrace) void {
self.symbol_table.deinit();
}
},
else => struct {
self_exe_file: os.File,
elf: elf.Elf,
debug_info: &elf.SectionHeader,
debug_abbrev: &elf.SectionHeader,
debug_str: &elf.SectionHeader,
debug_line: &elf.SectionHeader,
debug_ranges: ?&elf.SectionHeader,
debug_info: *elf.SectionHeader,
debug_abbrev: *elf.SectionHeader,
debug_str: *elf.SectionHeader,
debug_line: *elf.SectionHeader,
debug_ranges: ?*elf.SectionHeader,
abbrev_table_list: ArrayList(AbbrevTableHeader),
compile_unit_list: ArrayList(CompileUnit),
pub fn allocator(self: &const ElfStackTrace) &mem.Allocator {
pub fn allocator(self: *const ElfStackTrace) *mem.Allocator {
return self.abbrev_table_list.allocator;
}
pub fn readString(self: &ElfStackTrace) ![]u8 {
pub fn readString(self: *ElfStackTrace) ![]u8 {
var in_file_stream = io.FileInStream.init(&self.self_exe_file);
const in_stream = &in_file_stream.stream;
return readStringRaw(self.allocator(), in_stream);
}
pub fn close(self: &ElfStackTrace) void {
pub fn close(self: *ElfStackTrace) void {
self.self_exe_file.close();
self.elf.close();
}
@ -365,7 +365,7 @@ const PcRange = struct {
const CompileUnit = struct {
version: u16,
is_64: bool,
die: &Die,
die: *Die,
index: usize,
pc_range: ?PcRange,
};
@ -408,7 +408,7 @@ const Constant = struct {
payload: []u8,
signed: bool,
fn asUnsignedLe(self: &const Constant) !u64 {
fn asUnsignedLe(self: *const Constant) !u64 {
if (self.payload.len > @sizeOf(u64)) return error.InvalidDebugInfo;
if (self.signed) return error.InvalidDebugInfo;
return mem.readInt(self.payload, u64, builtin.Endian.Little);
@ -425,14 +425,14 @@ const Die = struct {
value: FormValue,
};
fn getAttr(self: &const Die, id: u64) ?&const FormValue {
fn getAttr(self: *const Die, id: u64) ?*const FormValue {
for (self.attrs.toSliceConst()) |*attr| {
if (attr.id == id) return &attr.value;
}
return null;
}
fn getAttrAddr(self: &const Die, id: u64) !u64 {
fn getAttrAddr(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Address => |value| value,
@ -440,7 +440,7 @@ const Die = struct {
};
}
fn getAttrSecOffset(self: &const Die, id: u64) !u64 {
fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
@ -449,7 +449,7 @@ const Die = struct {
};
}
fn getAttrUnsignedLe(self: &const Die, id: u64) !u64 {
fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
@ -457,7 +457,7 @@ const Die = struct {
};
}
fn getAttrString(self: &const Die, st: &ElfStackTrace, id: u64) ![]u8 {
fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.String => |value| value,
@ -478,9 +478,9 @@ const LineInfo = struct {
line: usize,
column: usize,
file_name: []u8,
allocator: &mem.Allocator,
allocator: *mem.Allocator,
fn deinit(self: &const LineInfo) void {
fn deinit(self: *const LineInfo) void {
self.allocator.free(self.file_name);
}
};
@ -496,7 +496,7 @@ const LineNumberProgram = struct {
target_address: usize,
include_dirs: []const []const u8,
file_entries: &ArrayList(FileEntry),
file_entries: *ArrayList(FileEntry),
prev_address: usize,
prev_file: usize,
@ -506,7 +506,7 @@ const LineNumberProgram = struct {
prev_basic_block: bool,
prev_end_sequence: bool,
pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: &ArrayList(FileEntry), target_address: usize) LineNumberProgram {
pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: *ArrayList(FileEntry), target_address: usize) LineNumberProgram {
return LineNumberProgram{
.address = 0,
.file = 1,
@ -528,7 +528,7 @@ const LineNumberProgram = struct {
};
}
pub fn checkLineMatch(self: &LineNumberProgram) !?LineInfo {
pub fn checkLineMatch(self: *LineNumberProgram) !?LineInfo {
if (self.target_address >= self.prev_address and self.target_address < self.address) {
const file_entry = if (self.prev_file == 0) {
return error.MissingDebugInfo;
@ -562,7 +562,7 @@ const LineNumberProgram = struct {
}
};
fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 {
var buf = ArrayList(u8).init(allocator);
while (true) {
const byte = try in_stream.readByte();
@ -572,30 +572,30 @@ fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 {
return buf.toSlice();
}
fn getString(st: &ElfStackTrace, offset: u64) ![]u8 {
fn getString(st: *ElfStackTrace, offset: u64) ![]u8 {
const pos = st.debug_str.offset + offset;
try st.self_exe_file.seekTo(pos);
return st.readString();
}
fn readAllocBytes(allocator: &mem.Allocator, in_stream: var, size: usize) ![]u8 {
fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 {
const buf = try allocator.alloc(u8, size);
errdefer allocator.free(buf);
if ((try in_stream.read(buf)) < size) return error.EndOfFile;
return buf;
}
fn parseFormValueBlockLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Block = buf };
}
fn parseFormValueBlock(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const block_len = try in_stream.readVarInt(builtin.Endian.Little, usize, size);
return parseFormValueBlockLen(allocator, in_stream, block_len);
}
fn parseFormValueConstant(allocator: &mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue {
return FormValue{
.Const = Constant{
.signed = signed,
@ -612,12 +612,12 @@ fn parseFormValueTargetAddrSize(in_stream: var) !u64 {
return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLe(u32)) else if (@sizeOf(usize) == 8) try in_stream.readIntLe(u64) else unreachable;
}
fn parseFormValueRefLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue {
fn parseFormValueRefLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue {
const buf = try readAllocBytes(allocator, in_stream, size);
return FormValue{ .Ref = buf };
}
fn parseFormValueRef(allocator: &mem.Allocator, in_stream: var, comptime T: type) !FormValue {
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type) !FormValue {
const block_len = try in_stream.readIntLe(T);
return parseFormValueRefLen(allocator, in_stream, block_len);
}
@ -632,7 +632,7 @@ const ParseFormValueError = error{
OutOfMemory,
};
fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue {
return switch (form_id) {
DW.FORM_addr => FormValue{ .Address = try parseFormValueTargetAddrSize(in_stream) },
DW.FORM_block1 => parseFormValueBlock(allocator, in_stream, 1),
@ -682,7 +682,7 @@ fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64
};
}
fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@ -712,7 +712,7 @@ fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable {
/// Gets an already existing AbbrevTable given the abbrev_offset, or if not found,
/// seeks in the stream and parses it.
fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable {
fn getAbbrevTable(st: *ElfStackTrace, abbrev_offset: u64) !*const AbbrevTable {
for (st.abbrev_table_list.toSlice()) |*header| {
if (header.offset == abbrev_offset) {
return &header.table;
@ -726,14 +726,14 @@ fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable {
return &st.abbrev_table_list.items[st.abbrev_table_list.len - 1].table;
}
fn getAbbrevTableEntry(abbrev_table: &const AbbrevTable, abbrev_code: u64) ?&const AbbrevTableEntry {
fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*const AbbrevTableEntry {
for (abbrev_table.toSliceConst()) |*table_entry| {
if (table_entry.abbrev_code == abbrev_code) return table_entry;
}
return null;
}
fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !Die {
fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !Die {
const in_file = &st.self_exe_file;
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
@ -755,7 +755,7 @@ fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !
return result;
}
fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, target_address: usize) !LineInfo {
fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, target_address: usize) !LineInfo {
const compile_unit_cwd = try compile_unit.die.getAttrString(st, DW.AT_comp_dir);
const in_file = &st.self_exe_file;
@ -934,7 +934,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe
return error.MissingDebugInfo;
}
fn scanAllCompileUnits(st: &ElfStackTrace) !void {
fn scanAllCompileUnits(st: *ElfStackTrace) !void {
const debug_info_end = st.debug_info.offset + st.debug_info.size;
var this_unit_offset = st.debug_info.offset;
var cu_index: usize = 0;
@ -1005,7 +1005,7 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void {
}
}
fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit {
fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit {
var in_file_stream = io.FileInStream.init(&st.self_exe_file);
const in_stream = &in_file_stream.stream;
for (st.compile_unit_list.toSlice()) |*compile_unit| {
@ -1039,7 +1039,7 @@ fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit
return error.MissingDebugInfo;
}
fn readInitialLength(comptime E: type, in_stream: &io.InStream(E), is_64: &bool) !u64 {
fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 {
const first_32_bits = try in_stream.readIntLe(u32);
is_64.* = (first_32_bits == 0xffffffff);
if (is_64.*) {
@ -1096,10 +1096,10 @@ var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator
var global_allocator_mem: [100 * 1024]u8 = undefined;
// TODO make thread safe
var debug_info_allocator: ?&mem.Allocator = null;
var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_direct_allocator: std.heap.DirectAllocator = undefined;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
fn getDebugInfoAllocator() &mem.Allocator {
fn getDebugInfoAllocator() *mem.Allocator {
if (debug_info_allocator) |a| return a;
debug_info_direct_allocator = std.heap.DirectAllocator.init();

View File

@ -338,7 +338,7 @@ pub const SectionHeader = struct {
};
pub const Elf = struct {
in_file: &os.File,
in_file: *os.File,
auto_close_stream: bool,
is_64: bool,
endian: builtin.Endian,
@ -348,20 +348,20 @@ pub const Elf = struct {
program_header_offset: u64,
section_header_offset: u64,
string_section_index: u64,
string_section: &SectionHeader,
string_section: *SectionHeader,
section_headers: []SectionHeader,
allocator: &mem.Allocator,
allocator: *mem.Allocator,
prealloc_file: os.File,
/// Call close when done.
pub fn openPath(elf: &Elf, allocator: &mem.Allocator, path: []const u8) !void {
pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void {
try elf.prealloc_file.open(path);
try elf.openFile(allocator, &elf.prealloc_file);
try elf.openFile(allocator, *elf.prealloc_file);
elf.auto_close_stream = true;
}
/// Call close when done.
pub fn openFile(elf: &Elf, allocator: &mem.Allocator, file: &os.File) !void {
pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: *os.File) !void {
elf.allocator = allocator;
elf.in_file = file;
elf.auto_close_stream = false;
@ -503,13 +503,13 @@ pub const Elf = struct {
}
}
pub fn close(elf: &Elf) void {
pub fn close(elf: *Elf) void {
elf.allocator.free(elf.section_headers);
if (elf.auto_close_stream) elf.in_file.close();
}
pub fn findSection(elf: &Elf, name: []const u8) !?&SectionHeader {
pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader {
var file_stream = io.FileInStream.init(elf.in_file);
const in = &file_stream.stream;
@ -533,7 +533,7 @@ pub const Elf = struct {
return null;
}
pub fn seekToSection(elf: &Elf, elf_section: &SectionHeader) !void {
pub fn seekToSection(elf: *Elf, elf_section: *SectionHeader) !void {
try elf.in_file.seekTo(elf_section.offset);
}
};

View File

@ -6,9 +6,9 @@ const mem = std.mem;
const posix = std.os.posix;
pub const TcpServer = struct {
handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void,
handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void,
loop: &Loop,
loop: *Loop,
sockfd: i32,
accept_coro: ?promise,
listen_address: std.net.Address,
@ -17,7 +17,7 @@ pub const TcpServer = struct {
const PromiseNode = std.LinkedList(promise).Node;
pub fn init(loop: &Loop) !TcpServer {
pub fn init(loop: *Loop) !TcpServer {
const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
errdefer std.os.close(sockfd);
@ -32,7 +32,7 @@ pub const TcpServer = struct {
};
}
pub fn listen(self: &TcpServer, address: &const std.net.Address, handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void) !void {
pub fn listen(self: *TcpServer, address: *const std.net.Address, handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void) !void {
self.handleRequestFn = handleRequestFn;
try std.os.posixBind(self.sockfd, &address.os_addr);
@ -46,13 +46,13 @@ pub const TcpServer = struct {
errdefer self.loop.removeFd(self.sockfd);
}
pub fn deinit(self: &TcpServer) void {
pub fn deinit(self: *TcpServer) void {
self.loop.removeFd(self.sockfd);
if (self.accept_coro) |accept_coro| cancel accept_coro;
std.os.close(self.sockfd);
}
pub async fn handler(self: &TcpServer) void {
pub async fn handler(self: *TcpServer) void {
while (true) {
var accepted_addr: std.net.Address = undefined;
if (std.os.posixAccept(self.sockfd, &accepted_addr.os_addr, posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd| {
@ -92,11 +92,11 @@ pub const TcpServer = struct {
};
pub const Loop = struct {
allocator: &mem.Allocator,
allocator: *mem.Allocator,
epollfd: i32,
keep_running: bool,
fn init(allocator: &mem.Allocator) !Loop {
fn init(allocator: *mem.Allocator) !Loop {
const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC);
return Loop{
.keep_running = true,
@ -105,7 +105,7 @@ pub const Loop = struct {
};
}
pub fn addFd(self: &Loop, fd: i32, prom: promise) !void {
pub fn addFd(self: *Loop, fd: i32, prom: promise) !void {
var ev = std.os.linux.epoll_event{
.events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET,
.data = std.os.linux.epoll_data{ .ptr = @ptrToInt(prom) },
@ -113,23 +113,23 @@ pub const Loop = struct {
try std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev);
}
pub fn removeFd(self: &Loop, fd: i32) void {
pub fn removeFd(self: *Loop, fd: i32) void {
std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {};
}
async fn waitFd(self: &Loop, fd: i32) !void {
async fn waitFd(self: *Loop, fd: i32) !void {
defer self.removeFd(fd);
suspend |p| {
try self.addFd(fd, p);
}
}
pub fn stop(self: &Loop) void {
pub fn stop(self: *Loop) void {
// TODO make atomic
self.keep_running = false;
// TODO activate an fd in the epoll set
}
pub fn run(self: &Loop) void {
pub fn run(self: *Loop) void {
while (self.keep_running) {
var events: [16]std.os.linux.epoll_event = undefined;
const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1);
@ -141,7 +141,7 @@ pub const Loop = struct {
}
};
pub async fn connect(loop: &Loop, _address: &const std.net.Address) !std.os.File {
pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File {
var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
@ -163,7 +163,7 @@ test "listen on a port, send bytes, receive bytes" {
tcp_server: TcpServer,
const Self = this;
async<&mem.Allocator> fn handler(tcp_server: &TcpServer, _addr: &const std.net.Address, _socket: &const std.os.File) void {
async<*mem.Allocator> fn handler(tcp_server: *TcpServer, _addr: *const std.net.Address, _socket: *const std.os.File) void {
const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
defer socket.close();
@ -177,7 +177,7 @@ test "listen on a port, send bytes, receive bytes" {
cancel p;
}
}
async fn errorableHandler(self: &Self, _addr: &const std.net.Address, _socket: &const std.os.File) !void {
async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: *const std.os.File) !void {
const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/733
var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
@ -199,7 +199,7 @@ test "listen on a port, send bytes, receive bytes" {
defer cancel p;
loop.run();
}
async fn doAsyncTest(loop: &Loop, address: &const std.net.Address) void {
async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void {
errdefer @panic("test failure");
var socket_file = try await try async event.connect(loop, address);

View File

@ -21,7 +21,7 @@ pub const RoundMode = enum {
/// Round a FloatDecimal as returned by errol3 to the specified fractional precision.
/// All digits after the specified precision should be considered invalid.
pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: RoundMode) void {
pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: RoundMode) void {
// The round digit refers to the index which we should look at to determine
// whether we need to round to match the specified precision.
var round_digit: usize = 0;
@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: Ro
float_decimal.exp += 1;
// Re-size the buffer to use the reserved leading byte.
const one_before = @intToPtr(&u8, @ptrToInt(&float_decimal.digits[0]) - 1);
const one_before = @intToPtr(*u8, @ptrToInt(&float_decimal.digits[0]) - 1);
float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1];
float_decimal.digits[0] = '1';
return;
@ -217,7 +217,7 @@ fn tableLowerBound(k: u64) usize {
/// @in: The HP number.
/// @val: The double.
/// &returns: The HP number.
fn hpProd(in: &const HP, val: f64) HP {
fn hpProd(in: *const HP, val: f64) HP {
var hi: f64 = undefined;
var lo: f64 = undefined;
split(in.val, &hi, &lo);
@ -239,7 +239,7 @@ fn hpProd(in: &const HP, val: f64) HP {
/// @val: The double.
/// @hi: The high bits.
/// @lo: The low bits.
fn split(val: f64, hi: &f64, lo: &f64) void {
fn split(val: f64, hi: *f64, lo: *f64) void {
hi.* = gethi(val);
lo.* = val - hi.*;
}
@ -252,7 +252,7 @@ fn gethi(in: f64) f64 {
/// Normalize the number by factoring in the error.
/// @hp: The float pair.
fn hpNormalize(hp: &HP) void {
fn hpNormalize(hp: *HP) void {
// Required to avoid segfaults causing buffer overrun during errol3 digit output termination.
@setFloatMode(this, @import("builtin").FloatMode.Strict);
@ -264,7 +264,7 @@ fn hpNormalize(hp: &HP) void {
/// Divide the high-precision number by ten.
/// @hp: The high-precision number
fn hpDiv10(hp: &HP) void {
fn hpDiv10(hp: *HP) void {
var val = hp.val;
hp.val /= 10.0;
@ -280,7 +280,7 @@ fn hpDiv10(hp: &HP) void {
/// Multiply the high-precision number by ten.
/// @hp: The high-precision number
fn hpMul10(hp: &HP) void {
fn hpMul10(hp: *HP) void {
const val = hp.val;
hp.val *= 10.0;

View File

@ -679,7 +679,7 @@ const FormatIntBuf = struct {
out_buf: []u8,
index: usize,
};
fn formatIntCallback(context: &FormatIntBuf, bytes: []const u8) (error{}!void) {
fn formatIntCallback(context: *FormatIntBuf, bytes: []const u8) (error{}!void) {
mem.copy(u8, context.out_buf[context.index..], bytes);
context.index += bytes.len;
}
@ -751,7 +751,7 @@ const BufPrintContext = struct {
remaining: []u8,
};
fn bufPrintWrite(context: &BufPrintContext, bytes: []const u8) !void {
fn bufPrintWrite(context: *BufPrintContext, bytes: []const u8) !void {
if (context.remaining.len < bytes.len) return error.BufferTooSmall;
mem.copy(u8, context.remaining, bytes);
context.remaining = context.remaining[bytes.len..];
@ -763,14 +763,14 @@ pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: ...) ![]u8 {
return buf[0 .. buf.len - context.remaining.len];
}
pub fn allocPrint(allocator: &mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {
pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 {
var size: usize = 0;
format(&size, error{}, countSize, fmt, args) catch |err| switch (err) {};
const buf = try allocator.alloc(u8, size);
return bufPrint(buf, fmt, args);
}
fn countSize(size: &usize, bytes: []const u8) (error{}!void) {
fn countSize(size: *usize, bytes: []const u8) (error{}!void) {
size.* += bytes.len;
}

View File

@ -18,7 +18,7 @@ pub const Adler32 = struct {
// This fast variant is taken from zlib. It reduces the required modulos and unrolls longer
// buffer inputs and should be much quicker.
pub fn update(self: &Adler32, input: []const u8) void {
pub fn update(self: *Adler32, input: []const u8) void {
var s1 = self.adler & 0xffff;
var s2 = (self.adler >> 16) & 0xffff;
@ -77,7 +77,7 @@ pub const Adler32 = struct {
self.adler = s1 | (s2 << 16);
}
pub fn final(self: &Adler32) u32 {
pub fn final(self: *Adler32) u32 {
return self.adler;
}

View File

@ -58,7 +58,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
return Self{ .crc = 0xffffffff };
}
pub fn update(self: &Self, input: []const u8) void {
pub fn update(self: *Self, input: []const u8) void {
var i: usize = 0;
while (i + 8 <= input.len) : (i += 8) {
const p = input[i .. i + 8];
@ -86,7 +86,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type {
}
}
pub fn final(self: &Self) u32 {
pub fn final(self: *Self) u32 {
return ~self.crc;
}
@ -143,14 +143,14 @@ pub fn Crc32SmallWithPoly(comptime poly: u32) type {
return Self{ .crc = 0xffffffff };
}
pub fn update(self: &Self, input: []const u8) void {
pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4);
self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4);
}
}
pub fn final(self: &Self) u32 {
pub fn final(self: *Self) u32 {
return ~self.crc;
}

View File

@ -21,14 +21,14 @@ fn Fnv1a(comptime T: type, comptime prime: T, comptime offset: T) type {
return Self{ .value = offset };
}
pub fn update(self: &Self, input: []const u8) void {
pub fn update(self: *Self, input: []const u8) void {
for (input) |b| {
self.value ^= b;
self.value *%= prime;
}
}
pub fn final(self: &Self) T {
pub fn final(self: *Self) T {
return self.value;
}

View File

@ -63,7 +63,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return d;
}
pub fn update(d: &Self, b: []const u8) void {
pub fn update(d: *Self, b: []const u8) void {
var off: usize = 0;
// Partial from previous.
@ -85,7 +85,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
d.msg_len +%= @truncate(u8, b.len);
}
pub fn final(d: &Self) T {
pub fn final(d: *Self) T {
// Padding
mem.set(u8, d.buf[d.buf_len..], 0);
d.buf[7] = d.msg_len;
@ -118,7 +118,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
return (u128(b2) << 64) | b1;
}
fn round(d: &Self, b: []const u8) void {
fn round(d: *Self, b: []const u8) void {
debug.assert(b.len == 8);
const m = mem.readInt(b[0..], u64, Endian.Little);
@ -132,7 +132,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
d.v0 ^= m;
}
fn sipRound(d: &Self) void {
fn sipRound(d: *Self) void {
d.v0 +%= d.v1;
d.v1 = math.rotl(u64, d.v1, u64(13));
d.v1 ^= d.v0;

View File

@ -14,7 +14,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
entries: []Entry,
size: usize,
max_distance_from_start_index: usize,
allocator: &Allocator,
allocator: *Allocator,
// this is used to detect bugs where a hashtable is edited while an iterator is running.
modification_count: debug_u32,
@ -28,7 +28,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
pub const Iterator = struct {
hm: &const Self,
hm: *const Self,
// how many items have we returned
count: usize,
// iterator through the entry array
@ -36,7 +36,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
// used to detect concurrent modification
initial_modification_count: debug_u32,
pub fn next(it: &Iterator) ?&Entry {
pub fn next(it: *Iterator) ?*Entry {
if (want_modification_safety) {
assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
}
@ -53,7 +53,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
// Reset the iterator to the initial index
pub fn reset(it: &Iterator) void {
pub fn reset(it: *Iterator) void {
it.count = 0;
it.index = 0;
// Resetting the modification count too
@ -61,7 +61,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
};
pub fn init(allocator: &Allocator) Self {
pub fn init(allocator: *Allocator) Self {
return Self{
.entries = []Entry{},
.allocator = allocator,
@ -71,11 +71,11 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
pub fn deinit(hm: &const Self) void {
pub fn deinit(hm: *const Self) void {
hm.allocator.free(hm.entries);
}
pub fn clear(hm: &Self) void {
pub fn clear(hm: *Self) void {
for (hm.entries) |*entry| {
entry.used = false;
}
@ -84,12 +84,12 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
hm.incrementModificationCount();
}
pub fn count(hm: &const Self) usize {
pub fn count(hm: *const Self) usize {
return hm.size;
}
/// Returns the value that was already there.
pub fn put(hm: &Self, key: K, value: &const V) !?V {
pub fn put(hm: *Self, key: K, value: *const V) !?V {
if (hm.entries.len == 0) {
try hm.initCapacity(16);
}
@ -111,18 +111,18 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return hm.internalPut(key, value);
}
pub fn get(hm: &const Self, key: K) ?&Entry {
pub fn get(hm: *const Self, key: K) ?*Entry {
if (hm.entries.len == 0) {
return null;
}
return hm.internalGet(key);
}
pub fn contains(hm: &const Self, key: K) bool {
pub fn contains(hm: *const Self, key: K) bool {
return hm.get(key) != null;
}
pub fn remove(hm: &Self, key: K) ?&Entry {
pub fn remove(hm: *Self, key: K) ?*Entry {
if (hm.entries.len == 0) return null;
hm.incrementModificationCount();
const start_index = hm.keyToIndex(key);
@ -154,7 +154,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return null;
}
pub fn iterator(hm: &const Self) Iterator {
pub fn iterator(hm: *const Self) Iterator {
return Iterator{
.hm = hm,
.count = 0,
@ -163,7 +163,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
};
}
fn initCapacity(hm: &Self, capacity: usize) !void {
fn initCapacity(hm: *Self, capacity: usize) !void {
hm.entries = try hm.allocator.alloc(Entry, capacity);
hm.size = 0;
hm.max_distance_from_start_index = 0;
@ -172,14 +172,14 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
}
fn incrementModificationCount(hm: &Self) void {
fn incrementModificationCount(hm: *Self) void {
if (want_modification_safety) {
hm.modification_count +%= 1;
}
}
/// Returns the value that was already there.
fn internalPut(hm: &Self, orig_key: K, orig_value: &const V) ?V {
fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V {
var key = orig_key;
var value = orig_value.*;
const start_index = hm.keyToIndex(key);
@ -231,7 +231,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
unreachable; // put into a full map
}
fn internalGet(hm: &const Self, key: K) ?&Entry {
fn internalGet(hm: *const Self, key: K) ?*Entry {
const start_index = hm.keyToIndex(key);
{
var roll_over: usize = 0;
@ -246,7 +246,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return null;
}
fn keyToIndex(hm: &const Self, key: K) usize {
fn keyToIndex(hm: *const Self, key: K) usize {
return usize(hash(key)) % hm.entries.len;
}
};

View File

@ -16,15 +16,15 @@ var c_allocator_state = Allocator{
.freeFn = cFree,
};
fn cAlloc(self: &Allocator, n: usize, alignment: u29) ![]u8 {
fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
assert(alignment <= @alignOf(c_longdouble));
return if (c.malloc(n)) |buf| @ptrCast(&u8, buf)[0..n] else error.OutOfMemory;
return if (c.malloc(n)) |buf| @ptrCast(*u8, buf)[0..n] else error.OutOfMemory;
}
fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const old_ptr = @ptrCast(&c_void, old_mem.ptr);
fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const old_ptr = @ptrCast(*c_void, old_mem.ptr);
if (c.realloc(old_ptr, new_size)) |buf| {
return @ptrCast(&u8, buf)[0..new_size];
return @ptrCast(*u8, buf)[0..new_size];
} else if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@ -32,8 +32,8 @@ fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![
}
}
fn cFree(self: &Allocator, old_mem: []u8) void {
const old_ptr = @ptrCast(&c_void, old_mem.ptr);
fn cFree(self: *Allocator, old_mem: []u8) void {
const old_ptr = @ptrCast(*c_void, old_mem.ptr);
c.free(old_ptr);
}
@ -55,7 +55,7 @@ pub const DirectAllocator = struct {
};
}
pub fn deinit(self: &DirectAllocator) void {
pub fn deinit(self: *DirectAllocator) void {
switch (builtin.os) {
Os.windows => if (self.heap_handle) |heap_handle| {
_ = os.windows.HeapDestroy(heap_handle);
@ -64,7 +64,7 @@ pub const DirectAllocator = struct {
}
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@ -74,7 +74,7 @@ pub const DirectAllocator = struct {
const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0);
if (addr == p.MAP_FAILED) return error.OutOfMemory;
if (alloc_size == n) return @intToPtr(&u8, addr)[0..n];
if (alloc_size == n) return @intToPtr(*u8, addr)[0..n];
var aligned_addr = addr & ~usize(alignment - 1);
aligned_addr += alignment;
@ -93,7 +93,7 @@ pub const DirectAllocator = struct {
//It is impossible that there is an unoccupied page at the top of our
// mmap.
return @intToPtr(&u8, aligned_addr)[0..n];
return @intToPtr(*u8, aligned_addr)[0..n];
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
@ -108,14 +108,14 @@ pub const DirectAllocator = struct {
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_addr = root_addr + march_forward_bytes;
const record_addr = adjusted_addr + n;
@intToPtr(&align(1) usize, record_addr).* = root_addr;
return @intToPtr(&u8, adjusted_addr)[0..n];
@intToPtr(*align(1) usize, record_addr).* = root_addr;
return @intToPtr(*u8, adjusted_addr)[0..n];
},
else => @compileError("Unsupported OS"),
}
}
fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@ -139,13 +139,13 @@ pub const DirectAllocator = struct {
Os.windows => {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
const root_addr = @intToPtr(&align(1) usize, old_record_addr).*;
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
const old_ptr = @intToPtr(os.windows.LPVOID, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
@intToPtr(&align(1) usize, new_record_addr).* = root_addr;
@intToPtr(*align(1) usize, new_record_addr).* = root_addr;
return old_mem[0..new_size];
};
const offset = old_adjusted_addr - root_addr;
@ -153,14 +153,14 @@ pub const DirectAllocator = struct {
const new_adjusted_addr = new_root_addr + offset;
assert(new_adjusted_addr % alignment == 0);
const new_record_addr = new_adjusted_addr + new_size;
@intToPtr(&align(1) usize, new_record_addr).* = new_root_addr;
return @intToPtr(&u8, new_adjusted_addr)[0..new_size];
@intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
return @intToPtr(*u8, new_adjusted_addr)[0..new_size];
},
else => @compileError("Unsupported OS"),
}
}
fn free(allocator: &Allocator, bytes: []u8) void {
fn free(allocator: *Allocator, bytes: []u8) void {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
@ -169,7 +169,7 @@ pub const DirectAllocator = struct {
},
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
const root_addr = @intToPtr(&align(1) usize, record_addr).*;
const root_addr = @intToPtr(*align(1) usize, record_addr).*;
const ptr = @intToPtr(os.windows.LPVOID, root_addr);
_ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
},
@ -183,13 +183,13 @@ pub const DirectAllocator = struct {
pub const ArenaAllocator = struct {
pub allocator: Allocator,
child_allocator: &Allocator,
child_allocator: *Allocator,
buffer_list: std.LinkedList([]u8),
end_index: usize,
const BufNode = std.LinkedList([]u8).Node;
pub fn init(child_allocator: &Allocator) ArenaAllocator {
pub fn init(child_allocator: *Allocator) ArenaAllocator {
return ArenaAllocator{
.allocator = Allocator{
.allocFn = alloc,
@ -202,7 +202,7 @@ pub const ArenaAllocator = struct {
};
}
pub fn deinit(self: &ArenaAllocator) void {
pub fn deinit(self: *ArenaAllocator) void {
var it = self.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
@ -212,7 +212,7 @@ pub const ArenaAllocator = struct {
}
}
fn createNode(self: &ArenaAllocator, prev_len: usize, minimum_size: usize) !&BufNode {
fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
@ -233,7 +233,7 @@ pub const ArenaAllocator = struct {
return buf_node;
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
@ -254,7 +254,7 @@ pub const ArenaAllocator = struct {
}
}
fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@ -264,7 +264,7 @@ pub const ArenaAllocator = struct {
}
}
fn free(allocator: &Allocator, bytes: []u8) void {}
fn free(allocator: *Allocator, bytes: []u8) void {}
};
pub const FixedBufferAllocator = struct {
@ -284,7 +284,7 @@ pub const FixedBufferAllocator = struct {
};
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
const rem = @rem(addr, alignment);
@ -300,7 +300,7 @@ pub const FixedBufferAllocator = struct {
return result;
}
fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@ -310,7 +310,7 @@ pub const FixedBufferAllocator = struct {
}
}
fn free(allocator: &Allocator, bytes: []u8) void {}
fn free(allocator: *Allocator, bytes: []u8) void {}
};
/// lock free
@ -331,7 +331,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
};
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {
@ -347,7 +347,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
}
}
fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
@ -357,7 +357,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
}
}
fn free(allocator: &Allocator, bytes: []u8) void {}
fn free(allocator: *Allocator, bytes: []u8) void {}
};
test "c_allocator" {
@ -403,8 +403,8 @@ test "ThreadSafeFixedBufferAllocator" {
try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
}
fn testAllocator(allocator: &mem.Allocator) !void {
var slice = try allocator.alloc(&i32, 100);
fn testAllocator(allocator: *mem.Allocator) !void {
var slice = try allocator.alloc(*i32, 100);
for (slice) |*item, i| {
item.* = try allocator.create(i32);
@ -415,15 +415,15 @@ fn testAllocator(allocator: &mem.Allocator) !void {
allocator.destroy(item);
}
slice = try allocator.realloc(&i32, slice, 20000);
slice = try allocator.realloc(&i32, slice, 50);
slice = try allocator.realloc(&i32, slice, 25);
slice = try allocator.realloc(&i32, slice, 10);
slice = try allocator.realloc(*i32, slice, 20000);
slice = try allocator.realloc(*i32, slice, 50);
slice = try allocator.realloc(*i32, slice, 25);
slice = try allocator.realloc(*i32, slice, 10);
allocator.free(slice);
}
fn testAllocatorLargeAlignment(allocator: &mem.Allocator) mem.Allocator.Error!void {
fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
//Maybe a platform's page_size is actually the same as or
// very near usize?
if (os.page_size << 2 > @maxValue(usize)) return;

View File

@ -34,20 +34,20 @@ pub fn getStdIn() GetStdIoErrs!File {
/// Implementation of InStream trait for File
pub const FileInStream = struct {
file: &File,
file: *File,
stream: Stream,
pub const Error = @typeOf(File.read).ReturnType.ErrorSet;
pub const Stream = InStream(Error);
pub fn init(file: &File) FileInStream {
pub fn init(file: *File) FileInStream {
return FileInStream{
.file = file,
.stream = Stream{ .readFn = readFn },
};
}
fn readFn(in_stream: &Stream, buffer: []u8) Error!usize {
fn readFn(in_stream: *Stream, buffer: []u8) Error!usize {
const self = @fieldParentPtr(FileInStream, "stream", in_stream);
return self.file.read(buffer);
}
@ -55,20 +55,20 @@ pub const FileInStream = struct {
/// Implementation of OutStream trait for File
pub const FileOutStream = struct {
file: &File,
file: *File,
stream: Stream,
pub const Error = File.WriteError;
pub const Stream = OutStream(Error);
pub fn init(file: &File) FileOutStream {
pub fn init(file: *File) FileOutStream {
return FileOutStream{
.file = file,
.stream = Stream{ .writeFn = writeFn },
};
}
fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(FileOutStream, "stream", out_stream);
return self.file.write(bytes);
}
@ -82,12 +82,12 @@ pub fn InStream(comptime ReadError: type) type {
/// Return the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
readFn: fn (self: &Self, buffer: []u8) Error!usize,
readFn: fn (self: *Self, buffer: []u8) Error!usize,
/// Replaces `buffer` contents by reading from the stream until it is finished.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and
/// the contents read from the stream are lost.
pub fn readAllBuffer(self: &Self, buffer: &Buffer, max_size: usize) !void {
pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void {
try buffer.resize(0);
var actual_buf_len: usize = 0;
@ -111,7 +111,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: &Self, allocator: &mem.Allocator, max_size: usize) ![]u8 {
pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@ -123,7 +123,7 @@ pub fn InStream(comptime ReadError: type) type {
/// Does not include the delimiter in the result.
/// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents
/// read from the stream so far are lost.
pub fn readUntilDelimiterBuffer(self: &Self, buffer: &Buffer, delimiter: u8, max_size: usize) !void {
pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void {
try buffer.resize(0);
while (true) {
@ -145,7 +145,7 @@ pub fn InStream(comptime ReadError: type) type {
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(self: &Self, allocator: &mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 {
var buf = Buffer.initNull(allocator);
defer buf.deinit();
@ -156,43 +156,43 @@ pub fn InStream(comptime ReadError: type) type {
/// Returns the number of bytes read. If the number read is smaller than buf.len, it
/// means the stream reached the end. Reaching the end of a stream is not an error
/// condition.
pub fn read(self: &Self, buffer: []u8) !usize {
pub fn read(self: *Self, buffer: []u8) !usize {
return self.readFn(self, buffer);
}
/// Same as `read` but end of stream returns `error.EndOfStream`.
pub fn readNoEof(self: &Self, buf: []u8) !void {
pub fn readNoEof(self: *Self, buf: []u8) !void {
const amt_read = try self.read(buf);
if (amt_read < buf.len) return error.EndOfStream;
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn readByte(self: &Self) !u8 {
pub fn readByte(self: *Self) !u8 {
var result: [1]u8 = undefined;
try self.readNoEof(result[0..]);
return result[0];
}
/// Same as `readByte` except the returned byte is signed.
pub fn readByteSigned(self: &Self) !i8 {
pub fn readByteSigned(self: *Self) !i8 {
return @bitCast(i8, try self.readByte());
}
pub fn readIntLe(self: &Self, comptime T: type) !T {
pub fn readIntLe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Little, T);
}
pub fn readIntBe(self: &Self, comptime T: type) !T {
pub fn readIntBe(self: *Self, comptime T: type) !T {
return self.readInt(builtin.Endian.Big, T);
}
pub fn readInt(self: &Self, endian: builtin.Endian, comptime T: type) !T {
pub fn readInt(self: *Self, endian: builtin.Endian, comptime T: type) !T {
var bytes: [@sizeOf(T)]u8 = undefined;
try self.readNoEof(bytes[0..]);
return mem.readInt(bytes, T, endian);
}
pub fn readVarInt(self: &Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
pub fn readVarInt(self: *Self, endian: builtin.Endian, comptime T: type, size: usize) !T {
assert(size <= @sizeOf(T));
assert(size <= 8);
var input_buf: [8]u8 = undefined;
@ -208,22 +208,22 @@ pub fn OutStream(comptime WriteError: type) type {
const Self = this;
pub const Error = WriteError;
writeFn: fn (self: &Self, bytes: []const u8) Error!void,
writeFn: fn (self: *Self, bytes: []const u8) Error!void,
pub fn print(self: &Self, comptime format: []const u8, args: ...) !void {
pub fn print(self: *Self, comptime format: []const u8, args: ...) !void {
return std.fmt.format(self, Error, self.writeFn, format, args);
}
pub fn write(self: &Self, bytes: []const u8) !void {
pub fn write(self: *Self, bytes: []const u8) !void {
return self.writeFn(self, bytes);
}
pub fn writeByte(self: &Self, byte: u8) !void {
pub fn writeByte(self: *Self, byte: u8) !void {
const slice = (&byte)[0..1];
return self.writeFn(self, slice);
}
pub fn writeByteNTimes(self: &Self, byte: u8, n: usize) !void {
pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) !void {
const slice = (&byte)[0..1];
var i: usize = 0;
while (i < n) : (i += 1) {
@ -234,14 +234,14 @@ pub fn OutStream(comptime WriteError: type) type {
}
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
pub fn writeFile(allocator: &mem.Allocator, path: []const u8, data: []const u8) !void {
pub fn writeFile(allocator: *mem.Allocator, path: []const u8, data: []const u8) !void {
var file = try File.openWrite(allocator, path);
defer file.close();
try file.write(data);
}
/// On success, caller owns returned buffer.
pub fn readFileAlloc(allocator: &mem.Allocator, path: []const u8) ![]u8 {
pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 {
var file = try File.openRead(allocator, path);
defer file.close();
@ -265,13 +265,13 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
pub stream: Stream,
unbuffered_in_stream: &Stream,
unbuffered_in_stream: *Stream,
buffer: [buffer_size]u8,
start_index: usize,
end_index: usize,
pub fn init(unbuffered_in_stream: &Stream) Self {
pub fn init(unbuffered_in_stream: *Stream) Self {
return Self{
.unbuffered_in_stream = unbuffered_in_stream,
.buffer = undefined,
@ -287,7 +287,7 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type)
};
}
fn readFn(in_stream: &Stream, dest: []u8) !usize {
fn readFn(in_stream: *Stream, dest: []u8) !usize {
const self = @fieldParentPtr(Self, "stream", in_stream);
var dest_index: usize = 0;
@ -338,12 +338,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
pub stream: Stream,
unbuffered_out_stream: &Stream,
unbuffered_out_stream: *Stream,
buffer: [buffer_size]u8,
index: usize,
pub fn init(unbuffered_out_stream: &Stream) Self {
pub fn init(unbuffered_out_stream: *Stream) Self {
return Self{
.unbuffered_out_stream = unbuffered_out_stream,
.buffer = undefined,
@ -352,12 +352,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
};
}
pub fn flush(self: &Self) !void {
pub fn flush(self: *Self) !void {
try self.unbuffered_out_stream.write(self.buffer[0..self.index]);
self.index = 0;
}
fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(Self, "stream", out_stream);
if (bytes.len >= self.buffer.len) {
@ -383,20 +383,20 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr
/// Implementation of OutStream trait for Buffer
pub const BufferOutStream = struct {
buffer: &Buffer,
buffer: *Buffer,
stream: Stream,
pub const Error = error{OutOfMemory};
pub const Stream = OutStream(Error);
pub fn init(buffer: &Buffer) BufferOutStream {
pub fn init(buffer: *Buffer) BufferOutStream {
return BufferOutStream{
.buffer = buffer,
.stream = Stream{ .writeFn = writeFn },
};
}
fn writeFn(out_stream: &Stream, bytes: []const u8) !void {
fn writeFn(out_stream: *Stream, bytes: []const u8) !void {
const self = @fieldParentPtr(BufferOutStream, "stream", out_stream);
return self.buffer.append(bytes);
}
@ -407,7 +407,7 @@ pub const BufferedAtomicFile = struct {
file_stream: FileOutStream,
buffered_stream: BufferedOutStream(FileOutStream.Error),
pub fn create(allocator: &mem.Allocator, dest_path: []const u8) !&BufferedAtomicFile {
pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile {
// TODO with well defined copy elision we don't need this allocation
var self = try allocator.create(BufferedAtomicFile);
errdefer allocator.destroy(self);
@ -427,18 +427,18 @@ pub const BufferedAtomicFile = struct {
}
/// always call destroy, even after successful finish()
pub fn destroy(self: &BufferedAtomicFile) void {
pub fn destroy(self: *BufferedAtomicFile) void {
const allocator = self.atomic_file.allocator;
self.atomic_file.deinit();
allocator.destroy(self);
}
pub fn finish(self: &BufferedAtomicFile) !void {
pub fn finish(self: *BufferedAtomicFile) !void {
try self.buffered_stream.flush();
try self.atomic_file.finish();
}
pub fn stream(self: &BufferedAtomicFile) &OutStream(FileOutStream.Error) {
pub fn stream(self: *BufferedAtomicFile) *OutStream(FileOutStream.Error) {
return &self.buffered_stream.stream;
}
};

View File

@ -76,7 +76,7 @@ pub const Token = struct {
}
// Slice into the underlying input string.
pub fn slice(self: &const Token, input: []const u8, i: usize) []const u8 {
pub fn slice(self: *const Token, input: []const u8, i: usize) []const u8 {
return input[i + self.offset - self.count .. i + self.offset];
}
};
@ -115,7 +115,7 @@ pub const StreamingJsonParser = struct {
return p;
}
pub fn reset(p: &StreamingJsonParser) void {
pub fn reset(p: *StreamingJsonParser) void {
p.state = State.TopLevelBegin;
p.count = 0;
// Set before ever read in main transition function
@ -205,7 +205,7 @@ pub const StreamingJsonParser = struct {
// tokens. token2 is always null if token1 is null.
//
// There is currently no error recovery on a bad stream.
pub fn feed(p: &StreamingJsonParser, c: u8, token1: &?Token, token2: &?Token) Error!void {
pub fn feed(p: *StreamingJsonParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
token1.* = null;
token2.* = null;
p.count += 1;
@ -217,7 +217,7 @@ pub const StreamingJsonParser = struct {
}
// Perform a single transition on the state machine and return any possible token.
fn transition(p: &StreamingJsonParser, c: u8, token: &?Token) Error!bool {
fn transition(p: *StreamingJsonParser, c: u8, token: *?Token) Error!bool {
switch (p.state) {
State.TopLevelBegin => switch (c) {
'{' => {
@ -861,7 +861,7 @@ pub fn validate(s: []const u8) bool {
var token1: ?Token = undefined;
var token2: ?Token = undefined;
p.feed(c, &token1, &token2) catch |err| {
            p.feed(c, &token1, &token2) catch |err| {
return false;
};
}
@ -878,7 +878,7 @@ pub const ValueTree = struct {
arena: ArenaAllocator,
root: Value,
pub fn deinit(self: &ValueTree) void {
pub fn deinit(self: *ValueTree) void {
self.arena.deinit();
}
};
@ -894,7 +894,7 @@ pub const Value = union(enum) {
Array: ArrayList(Value),
Object: ObjectMap,
pub fn dump(self: &const Value) void {
pub fn dump(self: *const Value) void {
switch (self.*) {
Value.Null => {
std.debug.warn("null");
@ -941,7 +941,7 @@ pub const Value = union(enum) {
}
}
pub fn dumpIndent(self: &const Value, indent: usize) void {
pub fn dumpIndent(self: *const Value, indent: usize) void {
if (indent == 0) {
self.dump();
} else {
@ -949,7 +949,7 @@ pub const Value = union(enum) {
}
}
fn dumpIndentLevel(self: &const Value, indent: usize, level: usize) void {
fn dumpIndentLevel(self: *const Value, indent: usize, level: usize) void {
switch (self.*) {
Value.Null => {
std.debug.warn("null");
@ -1013,7 +1013,7 @@ pub const Value = union(enum) {
// A non-stream JSON parser which constructs a tree of Value's.
pub const JsonParser = struct {
allocator: &Allocator,
allocator: *Allocator,
state: State,
copy_strings: bool,
// Stores parent nodes and un-combined Values.
@ -1026,7 +1026,7 @@ pub const JsonParser = struct {
Simple,
};
pub fn init(allocator: &Allocator, copy_strings: bool) JsonParser {
pub fn init(allocator: *Allocator, copy_strings: bool) JsonParser {
return JsonParser{
.allocator = allocator,
.state = State.Simple,
@ -1035,16 +1035,16 @@ pub const JsonParser = struct {
};
}
pub fn deinit(p: &JsonParser) void {
pub fn deinit(p: *JsonParser) void {
p.stack.deinit();
}
pub fn reset(p: &JsonParser) void {
pub fn reset(p: *JsonParser) void {
p.state = State.Simple;
p.stack.shrink(0);
}
pub fn parse(p: &JsonParser, input: []const u8) !ValueTree {
pub fn parse(p: *JsonParser, input: []const u8) !ValueTree {
var mp = StreamingJsonParser.init();
var arena = ArenaAllocator.init(p.allocator);
@ -1090,7 +1090,7 @@ pub const JsonParser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
fn transition(p: &JsonParser, allocator: &Allocator, input: []const u8, i: usize, token: &const Token) !void {
fn transition(p: *JsonParser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
switch (p.state) {
State.ObjectKey => switch (token.id) {
Token.Id.ObjectEnd => {
@ -1223,7 +1223,7 @@ pub const JsonParser = struct {
}
}
fn pushToParent(p: &JsonParser, value: &const Value) !void {
fn pushToParent(p: *JsonParser, value: *const Value) !void {
switch (p.stack.at(p.stack.len - 1)) {
// Object Parent -> [ ..., object, <key>, value ]
Value.String => |key| {
@ -1244,14 +1244,14 @@ pub const JsonParser = struct {
}
}
fn parseString(p: &JsonParser, allocator: &Allocator, token: &const Token, input: []const u8, i: usize) !Value {
fn parseString(p: *JsonParser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
// TODO: We don't strictly have to copy values which do not contain any escape
// characters if flagged with the option.
const slice = token.slice(input, i);
return Value{ .String = try mem.dupe(p.allocator, u8, slice) };
}
fn parseNumber(p: &JsonParser, token: &const Token, input: []const u8, i: usize) !Value {
fn parseNumber(p: *JsonParser, token: *const Token, input: []const u8, i: usize) !Value {
return if (token.number_is_integer)
Value{ .Integer = try std.fmt.parseInt(i64, token.slice(input, i), 10) }
else

View File

@ -21,11 +21,11 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Node inside the linked list wrapping the actual data.
pub const Node = struct {
prev: ?&Node,
next: ?&Node,
prev: ?*Node,
next: ?*Node,
data: T,
pub fn init(value: &const T) Node {
pub fn init(value: *const T) Node {
return Node{
.prev = null,
.next = null,
@ -38,14 +38,14 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
return Node.init({});
}
pub fn toData(node: &Node) &ParentType {
pub fn toData(node: *Node) *ParentType {
comptime assert(isIntrusive());
return @fieldParentPtr(ParentType, field_name, node);
}
};
first: ?&Node,
last: ?&Node,
first: ?*Node,
last: ?*Node,
len: usize,
/// Initialize a linked list.
@ -69,7 +69,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
pub fn insertAfter(list: &Self, node: &Node, new_node: &Node) void {
pub fn insertAfter(list: *Self, node: *Node, new_node: *Node) void {
new_node.prev = node;
if (node.next) |next_node| {
// Intermediate node.
@ -90,7 +90,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to a node in the list.
/// new_node: Pointer to the new node to insert.
pub fn insertBefore(list: &Self, node: &Node, new_node: &Node) void {
pub fn insertBefore(list: *Self, node: *Node, new_node: *Node) void {
new_node.next = node;
if (node.prev) |prev_node| {
// Intermediate node.
@ -110,7 +110,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
pub fn append(list: &Self, new_node: &Node) void {
pub fn append(list: *Self, new_node: *Node) void {
if (list.last) |last| {
// Insert after last.
list.insertAfter(last, new_node);
@ -124,7 +124,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// new_node: Pointer to the new node to insert.
pub fn prepend(list: &Self, new_node: &Node) void {
pub fn prepend(list: *Self, new_node: *Node) void {
if (list.first) |first| {
// Insert before first.
list.insertBefore(first, new_node);
@ -143,7 +143,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Arguments:
/// node: Pointer to the node to be removed.
pub fn remove(list: &Self, node: &Node) void {
pub fn remove(list: *Self, node: *Node) void {
if (node.prev) |prev_node| {
// Intermediate node.
prev_node.next = node.next;
@ -168,7 +168,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the last node in the list.
pub fn pop(list: &Self) ?&Node {
pub fn pop(list: *Self) ?*Node {
const last = list.last ?? return null;
list.remove(last);
return last;
@ -178,7 +178,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the first node in the list.
pub fn popFirst(list: &Self) ?&Node {
pub fn popFirst(list: *Self) ?*Node {
const first = list.first ?? return null;
list.remove(first);
return first;
@ -191,7 +191,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
pub fn allocateNode(list: &Self, allocator: &Allocator) !&Node {
pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
return allocator.create(Node);
}
@ -201,7 +201,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Arguments:
/// node: Pointer to the node to deallocate.
/// allocator: Dynamic memory allocator.
pub fn destroyNode(list: &Self, node: &Node, allocator: &Allocator) void {
pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void {
comptime assert(!isIntrusive());
allocator.destroy(node);
}
@ -214,7 +214,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
///
/// Returns:
/// A pointer to the new node.
pub fn createNode(list: &Self, data: &const T, allocator: &Allocator) !&Node {
pub fn createNode(list: *Self, data: *const T, allocator: *Allocator) !*Node {
comptime assert(!isIntrusive());
var node = try list.allocateNode(allocator);
node.* = Node.init(data);

View File

@ -42,13 +42,13 @@ pub const Symbol = struct {
name: []const u8,
address: u64,
fn addressLessThan(lhs: &const Symbol, rhs: &const Symbol) bool {
fn addressLessThan(lhs: *const Symbol, rhs: *const Symbol) bool {
return lhs.address < rhs.address;
}
};
pub const SymbolTable = struct {
allocator: &mem.Allocator,
allocator: *mem.Allocator,
symbols: []const Symbol,
strings: []const u8,
@ -56,7 +56,7 @@ pub const SymbolTable = struct {
// Ideally we'd use _mh_execute_header because it's always at 0x100000000
// in the image but as it's located in a different section than executable
// code, its displacement is different.
pub fn deinit(self: &SymbolTable) void {
pub fn deinit(self: *SymbolTable) void {
self.allocator.free(self.symbols);
self.symbols = []const Symbol{};
@ -64,7 +64,7 @@ pub const SymbolTable = struct {
self.strings = []const u8{};
}
pub fn search(self: &const SymbolTable, address: usize) ?&const Symbol {
pub fn search(self: *const SymbolTable, address: usize) ?*const Symbol {
var min: usize = 0;
var max: usize = self.symbols.len - 1; // Exclude sentinel.
while (min < max) {
@ -83,7 +83,7 @@ pub const SymbolTable = struct {
}
};
pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable {
pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable {
var file = in.file;
try file.seekTo(0);
@ -160,13 +160,13 @@ pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable
};
}
fn readNoEof(in: &io.FileInStream, comptime T: type, result: []T) !void {
fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void {
return in.stream.readNoEof(([]u8)(result));
}
fn readOneNoEof(in: &io.FileInStream, comptime T: type, result: &T) !void {
fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void {
return readNoEof(in, T, result[0..1]);
}
fn isSymbol(sym: &const Nlist64) bool {
fn isSymbol(sym: *const Nlist64) bool {
return sym.n_value != 0 and sym.n_desc == 0;
}

View File

@ -29,7 +29,7 @@ fn redupif32(x: f32) f32 {
return ((x - u * DP1) - u * DP2) - t * DP3;
}
fn atan32(z: &const Complex(f32)) Complex(f32) {
fn atan32(z: *const Complex(f32)) Complex(f32) {
const maxnum = 1.0e38;
const x = z.re;
@ -78,7 +78,7 @@ fn redupif64(x: f64) f64 {
return ((x - u * DP1) - u * DP2) - t * DP3;
}
fn atan64(z: &const Complex(f64)) Complex(f64) {
fn atan64(z: *const Complex(f64)) Complex(f64) {
const maxnum = 1.0e308;
const x = z.re;

View File

@ -15,7 +15,7 @@ pub fn cosh(z: var) Complex(@typeOf(z.re)) {
};
}
fn cosh32(z: &const Complex(f32)) Complex(f32) {
fn cosh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@ -78,7 +78,7 @@ fn cosh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
}
fn cosh64(z: &const Complex(f64)) Complex(f64) {
fn cosh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;

View File

@ -16,7 +16,7 @@ pub fn exp(z: var) Complex(@typeOf(z.re)) {
};
}
fn exp32(z: &const Complex(f32)) Complex(f32) {
fn exp32(z: *const Complex(f32)) Complex(f32) {
@setFloatMode(this, @import("builtin").FloatMode.Strict);
const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955
@ -63,7 +63,7 @@ fn exp32(z: &const Complex(f32)) Complex(f32) {
}
}
fn exp64(z: &const Complex(f64)) Complex(f64) {
fn exp64(z: *const Complex(f64)) Complex(f64) {
const exp_overflow = 0x40862e42; // high bits of max_exp * ln2 ~= 710
const cexp_overflow = 0x4096b8e4; // (max_exp - min_denorm_exp) * ln2

View File

@ -37,28 +37,28 @@ pub fn Complex(comptime T: type) type {
};
}
pub fn add(self: &const Self, other: &const Self) Self {
pub fn add(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re + other.re,
.im = self.im + other.im,
};
}
pub fn sub(self: &const Self, other: &const Self) Self {
pub fn sub(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re - other.re,
.im = self.im - other.im,
};
}
pub fn mul(self: &const Self, other: &const Self) Self {
pub fn mul(self: *const Self, other: *const Self) Self {
return Self{
.re = self.re * other.re - self.im * other.im,
.im = self.im * other.re + self.re * other.im,
};
}
pub fn div(self: &const Self, other: &const Self) Self {
pub fn div(self: *const Self, other: *const Self) Self {
const re_num = self.re * other.re + self.im * other.im;
const im_num = self.im * other.re - self.re * other.im;
const den = other.re * other.re + other.im * other.im;
@ -69,14 +69,14 @@ pub fn Complex(comptime T: type) type {
};
}
pub fn conjugate(self: &const Self) Self {
pub fn conjugate(self: *const Self) Self {
return Self{
.re = self.re,
.im = -self.im,
};
}
pub fn reciprocal(self: &const Self) Self {
pub fn reciprocal(self: *const Self) Self {
const m = self.re * self.re + self.im * self.im;
return Self{
.re = self.re / m,
@ -84,7 +84,7 @@ pub fn Complex(comptime T: type) type {
};
}
pub fn magnitude(self: &const Self) T {
pub fn magnitude(self: *const Self) T {
return math.sqrt(self.re * self.re + self.im * self.im);
}
};

View File

@ -14,7 +14,7 @@ pub fn ldexp_cexp(z: var, expt: i32) Complex(@typeOf(z.re)) {
};
}
fn frexp_exp32(x: f32, expt: &i32) f32 {
fn frexp_exp32(x: f32, expt: *i32) f32 {
const k = 235; // reduction constant
const kln2 = 162.88958740; // k * ln2
@ -24,7 +24,7 @@ fn frexp_exp32(x: f32, expt: &i32) f32 {
return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23));
}
fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) {
fn ldexp_cexp32(z: *const Complex(f32), expt: i32) Complex(f32) {
var ex_expt: i32 = undefined;
const exp_x = frexp_exp32(z.re, &ex_expt);
const exptf = expt + ex_expt;
@ -38,7 +38,7 @@ fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) {
return Complex(f32).new(math.cos(z.im) * exp_x * scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2);
}
fn frexp_exp64(x: f64, expt: &i32) f64 {
fn frexp_exp64(x: f64, expt: *i32) f64 {
const k = 1799; // reduction constant
const kln2 = 1246.97177782734161156; // k * ln2
@ -54,7 +54,7 @@ fn frexp_exp64(x: f64, expt: &i32) f64 {
return @bitCast(f64, (u64(high_word) << 32) | lx);
}
fn ldexp_cexp64(z: &const Complex(f64), expt: i32) Complex(f64) {
fn ldexp_cexp64(z: *const Complex(f64), expt: i32) Complex(f64) {
var ex_expt: i32 = undefined;
const exp_x = frexp_exp64(z.re, &ex_expt);
const exptf = i64(expt + ex_expt);

View File

@ -4,7 +4,7 @@ const math = std.math;
const cmath = math.complex;
const Complex = cmath.Complex;
pub fn pow(comptime T: type, z: &const T, c: &const T) T {
pub fn pow(comptime T: type, z: *const T, c: *const T) T {
const p = cmath.log(z);
const q = c.mul(p);
return cmath.exp(q);

View File

@ -15,7 +15,7 @@ pub fn sinh(z: var) Complex(@typeOf(z.re)) {
};
}
fn sinh32(z: &const Complex(f32)) Complex(f32) {
fn sinh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@ -78,7 +78,7 @@ fn sinh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y));
}
fn sinh64(z: &const Complex(f64)) Complex(f64) {
fn sinh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;

View File

@ -15,7 +15,7 @@ pub fn sqrt(z: var) Complex(@typeOf(z.re)) {
};
}
fn sqrt32(z: &const Complex(f32)) Complex(f32) {
fn sqrt32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@ -57,7 +57,7 @@ fn sqrt32(z: &const Complex(f32)) Complex(f32) {
}
}
fn sqrt64(z: &const Complex(f64)) Complex(f64) {
fn sqrt64(z: *const Complex(f64)) Complex(f64) {
// may encounter overflow for im,re >= DBL_MAX / (1 + sqrt(2))
const threshold = 0x1.a827999fcef32p+1022;

View File

@ -13,7 +13,7 @@ pub fn tanh(z: var) Complex(@typeOf(z.re)) {
};
}
fn tanh32(z: &const Complex(f32)) Complex(f32) {
fn tanh32(z: *const Complex(f32)) Complex(f32) {
const x = z.re;
const y = z.im;
@ -51,7 +51,7 @@ fn tanh32(z: &const Complex(f32)) Complex(f32) {
return Complex(f32).new((beta * rho * s) / den, t / den);
}
fn tanh64(z: &const Complex(f64)) Complex(f64) {
fn tanh64(z: *const Complex(f64)) Complex(f64) {
const x = z.re;
const y = z.im;

View File

@ -52,7 +52,7 @@ fn hypot32(x: f32, y: f32) f32 {
return z * math.sqrt(f32(f64(x) * x + f64(y) * y));
}
fn sq(hi: &f64, lo: &f64, x: f64) void {
fn sq(hi: *f64, lo: *f64, x: f64) void {
const split: f64 = 0x1.0p27 + 1.0;
const xc = x * split;
const xh = x - xc + xc;

View File

@ -46,12 +46,12 @@ pub fn forceEval(value: var) void {
switch (T) {
f32 => {
var x: f32 = undefined;
const p = @ptrCast(&volatile f32, &x);
const p = @ptrCast(*volatile f32, &x);
p.* = x;
},
f64 => {
var x: f64 = undefined;
const p = @ptrCast(&volatile f64, &x);
const p = @ptrCast(*volatile f64, &x);
p.* = x;
},
else => {

View File

@ -13,7 +13,7 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Error![]u8,
allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8,
/// If `new_byte_count > old_mem.len`:
/// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
@ -26,22 +26,22 @@ pub const Allocator = struct {
/// The returned newly allocated memory is undefined.
/// `alignment` is guaranteed to be >= 1
/// `alignment` is guaranteed to be a power of 2
reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
/// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
freeFn: fn (self: &Allocator, old_mem: []u8) void,
freeFn: fn (self: *Allocator, old_mem: []u8) void,
fn create(self: &Allocator, comptime T: type) !&T {
if (@sizeOf(T) == 0) return &{};
fn create(self: *Allocator, comptime T: type) !*T {
if (@sizeOf(T) == 0) return &{};
const slice = try self.alloc(T, 1);
return &slice[0];
}
// TODO once #733 is solved, this will replace create
fn construct(self: &Allocator, init: var) t: {
fn construct(self: *Allocator, init: var) t: {
// TODO this is a workaround for type getting parsed as Error!&const T
const T = @typeOf(init).Child;
break :t Error!&T;
break :t Error!*T;
} {
const T = @typeOf(init).Child;
if (@sizeOf(T) == 0) return &{};
@ -51,17 +51,17 @@ pub const Allocator = struct {
return ptr;
}
fn destroy(self: &Allocator, ptr: var) void {
fn destroy(self: *Allocator, ptr: var) void {
self.free(ptr[0..1]);
}
fn alloc(self: &Allocator, comptime T: type, n: usize) ![]T {
fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
return self.alignedAlloc(T, @alignOf(T), n);
}
fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
if (n == 0) {
return (&align(alignment) T)(undefined)[0..0];
return (*align(alignment) T)(undefined)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
@ -73,17 +73,17 @@ pub const Allocator = struct {
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
}
fn realloc(self: &Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
fn alignedRealloc(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
if (old_mem.len == 0) {
return self.alloc(T, n);
}
if (n == 0) {
self.free(old_mem);
return (&align(alignment) T)(undefined)[0..0];
return (*align(alignment) T)(undefined)[0..0];
}
const old_byte_slice = ([]u8)(old_mem);
@ -102,11 +102,11 @@ pub const Allocator = struct {
/// Reallocate, but `n` must be less than or equal to `old_mem.len`.
/// Unlike `realloc`, this function cannot fail.
/// Shrinking to 0 is the same as calling `free`.
fn shrink(self: &Allocator, comptime T: type, old_mem: []T, n: usize) []T {
fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
}
fn alignedShrink(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
if (n == 0) {
self.free(old_mem);
return old_mem[0..0];
@ -123,10 +123,10 @@ pub const Allocator = struct {
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
}
fn free(self: &Allocator, memory: var) void {
fn free(self: *Allocator, memory: var) void {
const bytes = ([]const u8)(memory);
if (bytes.len == 0) return;
const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr));
const non_const_ptr = @intToPtr(*u8, @ptrToInt(bytes.ptr));
self.freeFn(self, non_const_ptr[0..bytes.len]);
}
};
@ -186,7 +186,7 @@ pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool {
}
/// Copies ::m to newly allocated memory. Caller is responsible to free it.
pub fn dupe(allocator: &Allocator, comptime T: type, m: []const T) ![]T {
pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T {
const new_buf = try allocator.alloc(T, m.len);
copy(T, new_buf, m);
return new_buf;
@ -457,7 +457,7 @@ pub const SplitIterator = struct {
split_bytes: []const u8,
index: usize,
pub fn next(self: &SplitIterator) ?[]const u8 {
pub fn next(self: *SplitIterator) ?[]const u8 {
// move to beginning of token
while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {}
const start = self.index;
@ -473,14 +473,14 @@ pub const SplitIterator = struct {
}
/// Returns a slice of the remaining bytes. Does not affect iterator state.
pub fn rest(self: &const SplitIterator) []const u8 {
pub fn rest(self: *const SplitIterator) []const u8 {
// move to beginning of token
var index: usize = self.index;
while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {}
return self.buffer[index..];
}
fn isSplitByte(self: &const SplitIterator, byte: u8) bool {
fn isSplitByte(self: *const SplitIterator, byte: u8) bool {
for (self.split_bytes) |split_byte| {
if (byte == split_byte) {
return true;
@ -492,7 +492,7 @@ pub const SplitIterator = struct {
/// Naively combines a series of strings with a separator.
/// Allocates memory for the result, which must be freed by the caller.
pub fn join(allocator: &Allocator, sep: u8, strings: ...) ![]u8 {
pub fn join(allocator: *Allocator, sep: u8, strings: ...) ![]u8 {
comptime assert(strings.len >= 1);
var total_strings_len: usize = strings.len; // 1 sep per string
{
@ -649,7 +649,7 @@ test "mem.max" {
assert(max(u8, "abcdefg") == 'g');
}
pub fn swap(comptime T: type, a: &T, b: &T) void {
pub fn swap(comptime T: type, a: *T, b: *T) void {
const tmp = a.*;
a.* = b.*;
b.* = tmp;

View File

@ -31,7 +31,7 @@ pub const Address = struct {
};
}
pub fn initIp6(ip6: &const Ip6Addr, port: u16) Address {
pub fn initIp6(ip6: *const Ip6Addr, port: u16) Address {
return Address{
.family = posix.AF_INET6,
.os_addr = posix.sockaddr{
@ -46,15 +46,15 @@ pub const Address = struct {
};
}
pub fn initPosix(addr: &const posix.sockaddr) Address {
pub fn initPosix(addr: *const posix.sockaddr) Address {
return Address{ .os_addr = addr.* };
}
pub fn format(self: &const Address, out_stream: var) !void {
pub fn format(self: *const Address, out_stream: var) !void {
switch (self.os_addr.in.family) {
posix.AF_INET => {
const native_endian_port = std.mem.endianSwapIfLe(u16, self.os_addr.in.port);
const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]);
try out_stream.print("{}.{}.{}.{}:{}", bytes[0], bytes[1], bytes[2], bytes[3], native_endian_port);
},
posix.AF_INET6 => {

View File

@ -20,7 +20,7 @@ pub const ChildProcess = struct {
pub handle: if (is_windows) windows.HANDLE else void,
pub thread_handle: if (is_windows) windows.HANDLE else void,
pub allocator: &mem.Allocator,
pub allocator: *mem.Allocator,
pub stdin: ?os.File,
pub stdout: ?os.File,
@ -31,7 +31,7 @@ pub const ChildProcess = struct {
pub argv: []const []const u8,
/// Leave as null to use the current env map using the supplied allocator.
pub env_map: ?&const BufMap,
pub env_map: ?*const BufMap,
pub stdin_behavior: StdIo,
pub stdout_behavior: StdIo,
@ -47,7 +47,7 @@ pub const ChildProcess = struct {
pub cwd: ?[]const u8,
err_pipe: if (is_windows) void else [2]i32,
llnode: if (is_windows) void else LinkedList(&ChildProcess).Node,
llnode: if (is_windows) void else LinkedList(*ChildProcess).Node,
pub const SpawnError = error{
ProcessFdQuotaExceeded,
@ -84,7 +84,7 @@ pub const ChildProcess = struct {
/// First argument in argv is the executable.
/// On success must call deinit.
pub fn init(argv: []const []const u8, allocator: &mem.Allocator) !&ChildProcess {
pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess {
const child = try allocator.create(ChildProcess);
errdefer allocator.destroy(child);
@ -114,14 +114,14 @@ pub const ChildProcess = struct {
return child;
}
pub fn setUserName(self: &ChildProcess, name: []const u8) !void {
pub fn setUserName(self: *ChildProcess, name: []const u8) !void {
const user_info = try os.getUserInfo(name);
self.uid = user_info.uid;
self.gid = user_info.gid;
}
/// On success must call `kill` or `wait`.
pub fn spawn(self: &ChildProcess) !void {
pub fn spawn(self: *ChildProcess) !void {
if (is_windows) {
return self.spawnWindows();
} else {
@ -129,13 +129,13 @@ pub const ChildProcess = struct {
}
}
pub fn spawnAndWait(self: &ChildProcess) !Term {
pub fn spawnAndWait(self: *ChildProcess) !Term {
try self.spawn();
return self.wait();
}
/// Forcibly terminates child process and then cleans up all resources.
pub fn kill(self: &ChildProcess) !Term {
pub fn kill(self: *ChildProcess) !Term {
if (is_windows) {
return self.killWindows(1);
} else {
@ -143,7 +143,7 @@ pub const ChildProcess = struct {
}
}
pub fn killWindows(self: &ChildProcess, exit_code: windows.UINT) !Term {
pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@ -159,7 +159,7 @@ pub const ChildProcess = struct {
return ??self.term;
}
pub fn killPosix(self: &ChildProcess) !Term {
pub fn killPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@ -179,7 +179,7 @@ pub const ChildProcess = struct {
}
/// Blocks until child process terminates and then cleans up all resources.
pub fn wait(self: &ChildProcess) !Term {
pub fn wait(self: *ChildProcess) !Term {
if (is_windows) {
return self.waitWindows();
} else {
@ -195,7 +195,7 @@ pub const ChildProcess = struct {
/// Spawns a child process, waits for it, collecting stdout and stderr, and then returns.
/// If it succeeds, the caller owns result.stdout and result.stderr memory.
pub fn exec(allocator: &mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?&const BufMap, max_output_size: usize) !ExecResult {
pub fn exec(allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult {
const child = try ChildProcess.init(argv, allocator);
defer child.deinit();
@ -225,7 +225,7 @@ pub const ChildProcess = struct {
};
}
fn waitWindows(self: &ChildProcess) !Term {
fn waitWindows(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@ -235,7 +235,7 @@ pub const ChildProcess = struct {
return ??self.term;
}
fn waitPosix(self: &ChildProcess) !Term {
fn waitPosix(self: *ChildProcess) !Term {
if (self.term) |term| {
self.cleanupStreams();
return term;
@ -245,11 +245,11 @@ pub const ChildProcess = struct {
return ??self.term;
}
pub fn deinit(self: &ChildProcess) void {
pub fn deinit(self: *ChildProcess) void {
self.allocator.destroy(self);
}
fn waitUnwrappedWindows(self: &ChildProcess) !void {
fn waitUnwrappedWindows(self: *ChildProcess) !void {
const result = os.windowsWaitSingle(self.handle, windows.INFINITE);
self.term = (SpawnError!Term)(x: {
@ -267,7 +267,7 @@ pub const ChildProcess = struct {
return result;
}
fn waitUnwrapped(self: &ChildProcess) void {
fn waitUnwrapped(self: *ChildProcess) void {
var status: i32 = undefined;
while (true) {
const err = posix.getErrno(posix.waitpid(self.pid, &status, 0));
@ -283,11 +283,11 @@ pub const ChildProcess = struct {
}
}
fn handleWaitResult(self: &ChildProcess, status: i32) void {
fn handleWaitResult(self: *ChildProcess, status: i32) void {
self.term = self.cleanupAfterWait(status);
}
fn cleanupStreams(self: &ChildProcess) void {
fn cleanupStreams(self: *ChildProcess) void {
if (self.stdin) |*stdin| {
stdin.close();
self.stdin = null;
@ -302,7 +302,7 @@ pub const ChildProcess = struct {
}
}
fn cleanupAfterWait(self: &ChildProcess, status: i32) !Term {
fn cleanupAfterWait(self: *ChildProcess, status: i32) !Term {
defer {
os.close(self.err_pipe[0]);
os.close(self.err_pipe[1]);
@ -335,7 +335,7 @@ pub const ChildProcess = struct {
Term{ .Unknown = status };
}
fn spawnPosix(self: &ChildProcess) !void {
fn spawnPosix(self: *ChildProcess) !void {
const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try makePipe() else undefined;
errdefer if (self.stdin_behavior == StdIo.Pipe) {
destroyPipe(stdin_pipe);
@ -432,7 +432,7 @@ pub const ChildProcess = struct {
self.pid = pid;
self.err_pipe = err_pipe;
self.llnode = LinkedList(&ChildProcess).Node.init(self);
self.llnode = LinkedList(*ChildProcess).Node.init(self);
self.term = null;
if (self.stdin_behavior == StdIo.Pipe) {
@ -446,7 +446,7 @@ pub const ChildProcess = struct {
}
}
fn spawnWindows(self: &ChildProcess) !void {
fn spawnWindows(self: *ChildProcess) !void {
const saAttr = windows.SECURITY_ATTRIBUTES{
.nLength = @sizeOf(windows.SECURITY_ATTRIBUTES),
.bInheritHandle = windows.TRUE,
@ -639,8 +639,8 @@ pub const ChildProcess = struct {
}
};
fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?&u8, lpStartupInfo: &windows.STARTUPINFOA, lpProcessInformation: &windows.PROCESS_INFORMATION) !void {
if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?&c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) {
fn windowsCreateProcess(app_name: *u8, cmd_line: *u8, envp_ptr: ?*u8, cwd_ptr: ?*u8, lpStartupInfo: *windows.STARTUPINFOA, lpProcessInformation: *windows.PROCESS_INFORMATION) !void {
if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?*c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.FILE_NOT_FOUND, windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
@ -653,7 +653,7 @@ fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?
/// Caller must dealloc.
/// Guarantees a null byte at result[result.len].
fn windowsCreateCommandLine(allocator: &mem.Allocator, argv: []const []const u8) ![]u8 {
fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@ -698,7 +698,7 @@ fn windowsDestroyPipe(rd: ?windows.HANDLE, wr: ?windows.HANDLE) void {
// a namespace field lookup
const SECURITY_ATTRIBUTES = windows.SECURITY_ATTRIBUTES;
fn windowsMakePipe(rd: &windows.HANDLE, wr: &windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
fn windowsMakePipe(rd: *windows.HANDLE, wr: *windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
if (windows.CreatePipe(rd, wr, sattr, 0) == 0) {
const err = windows.GetLastError();
return switch (err) {
@ -716,7 +716,7 @@ fn windowsSetHandleInfo(h: windows.HANDLE, mask: windows.DWORD, flags: windows.D
}
}
fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
fn windowsMakePipeIn(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
@ -726,7 +726,7 @@ fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const S
wr.* = wr_h;
}
fn windowsMakePipeOut(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void {
fn windowsMakePipeOut(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void {
var rd_h: windows.HANDLE = undefined;
var wr_h: windows.HANDLE = undefined;
try windowsMakePipe(&rd_h, &wr_h, sattr);
@ -748,7 +748,7 @@ fn makePipe() ![2]i32 {
return fds;
}
fn destroyPipe(pipe: &const [2]i32) void {
fn destroyPipe(pipe: *const [2]i32) void {
os.close((pipe.*)[0]);
os.close((pipe.*)[1]);
}

View File

@ -309,7 +309,7 @@ pub fn isatty(fd: i32) bool {
return c.isatty(fd) != 0;
}
pub fn fstat(fd: i32, buf: &c.Stat) usize {
pub fn fstat(fd: i32, buf: *c.Stat) usize {
return errnoWrap(c.@"fstat$INODE64"(fd, buf));
}
@ -317,7 +317,7 @@ pub fn lseek(fd: i32, offset: isize, whence: c_int) usize {
return errnoWrap(c.lseek(fd, offset, whence));
}
pub fn open(path: &const u8, flags: u32, mode: usize) usize {
pub fn open(path: *const u8, flags: u32, mode: usize) usize {
return errnoWrap(c.open(path, @bitCast(c_int, flags), mode));
}
@ -325,79 +325,79 @@ pub fn raise(sig: i32) usize {
return errnoWrap(c.raise(sig));
}
pub fn read(fd: i32, buf: &u8, nbyte: usize) usize {
return errnoWrap(c.read(fd, @ptrCast(&c_void, buf), nbyte));
pub fn read(fd: i32, buf: *u8, nbyte: usize) usize {
return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
pub fn stat(noalias path: &const u8, noalias buf: &stat) usize {
pub fn stat(noalias path: *const u8, noalias buf: *stat) usize {
return errnoWrap(c.stat(path, buf));
}
pub fn write(fd: i32, buf: &const u8, nbyte: usize) usize {
return errnoWrap(c.write(fd, @ptrCast(&const c_void, buf), nbyte));
pub fn write(fd: i32, buf: *const u8, nbyte: usize) usize {
return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
const ptr_result = c.mmap(@ptrCast(&c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
const isize_result = @bitCast(isize, @ptrToInt(ptr_result));
return errnoWrap(isize_result);
}
pub fn munmap(address: usize, length: usize) usize {
return errnoWrap(c.munmap(@intToPtr(&c_void, address), length));
return errnoWrap(c.munmap(@intToPtr(*c_void, address), length));
}
pub fn unlink(path: &const u8) usize {
pub fn unlink(path: *const u8) usize {
return errnoWrap(c.unlink(path));
}
pub fn getcwd(buf: &u8, size: usize) usize {
pub fn getcwd(buf: *u8, size: usize) usize {
return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
pub fn waitpid(pid: i32, status: &i32, options: u32) usize {
pub fn waitpid(pid: i32, status: *i32, options: u32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
return errnoWrap(c.waitpid(pid, @ptrCast(&c_int, status), @bitCast(c_int, options)));
return errnoWrap(c.waitpid(pid, @ptrCast(*c_int, status), @bitCast(c_int, options)));
}
pub fn fork() usize {
return errnoWrap(c.fork());
}
pub fn access(path: &const u8, mode: u32) usize {
pub fn access(path: *const u8, mode: u32) usize {
return errnoWrap(c.access(path, mode));
}
pub fn pipe(fds: &[2]i32) usize {
pub fn pipe(fds: *[2]i32) usize {
comptime assert(i32.bit_count == c_int.bit_count);
return errnoWrap(c.pipe(@ptrCast(&c_int, fds)));
return errnoWrap(c.pipe(@ptrCast(*c_int, fds)));
}
pub fn getdirentries64(fd: i32, buf_ptr: &u8, buf_len: usize, basep: &i64) usize {
pub fn getdirentries64(fd: i32, buf_ptr: *u8, buf_len: usize, basep: *i64) usize {
return errnoWrap(@bitCast(isize, c.__getdirentries64(fd, buf_ptr, buf_len, basep)));
}
pub fn mkdir(path: &const u8, mode: u32) usize {
pub fn mkdir(path: *const u8, mode: u32) usize {
return errnoWrap(c.mkdir(path, mode));
}
pub fn symlink(existing: &const u8, new: &const u8) usize {
pub fn symlink(existing: *const u8, new: *const u8) usize {
return errnoWrap(c.symlink(existing, new));
}
pub fn rename(old: &const u8, new: &const u8) usize {
pub fn rename(old: *const u8, new: *const u8) usize {
return errnoWrap(c.rename(old, new));
}
pub fn rmdir(path: &const u8) usize {
pub fn rmdir(path: *const u8) usize {
return errnoWrap(c.rmdir(path));
}
pub fn chdir(path: &const u8) usize {
pub fn chdir(path: *const u8) usize {
return errnoWrap(c.chdir(path));
}
pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize {
return errnoWrap(c.execve(path, argv, envp));
}
@ -405,19 +405,19 @@ pub fn dup2(old: i32, new: i32) usize {
return errnoWrap(c.dup2(old, new));
}
pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize {
return errnoWrap(c.readlink(path, buf_ptr, buf_len));
}
pub fn gettimeofday(tv: ?&timeval, tz: ?&timezone) usize {
pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) usize {
return errnoWrap(c.gettimeofday(tv, tz));
}
pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize {
pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return errnoWrap(c.nanosleep(req, rem));
}
pub fn realpath(noalias filename: &const u8, noalias resolved_name: &u8) usize {
pub fn realpath(noalias filename: *const u8, noalias resolved_name: *u8) usize {
return if (c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(c._errno().*)) else 0;
}
@ -429,11 +429,11 @@ pub fn setregid(rgid: u32, egid: u32) usize {
return errnoWrap(c.setregid(rgid, egid));
}
pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return errnoWrap(c.sigprocmask(@bitCast(c_int, flags), set, oldset));
}
pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
var cact = c.Sigaction{
@ -442,7 +442,7 @@ pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigacti
.sa_mask = act.mask,
};
var coact: c.Sigaction = undefined;
const result = errnoWrap(c.sigaction(sig, &cact, &coact));
const result = errnoWrap(c.sigaction(sig, &cact, &coact));
if (result != 0) {
return result;
}
@ -473,7 +473,7 @@ pub const Sigaction = struct {
flags: u32,
};
pub fn sigaddset(set: &sigset_t, signo: u5) void {
pub fn sigaddset(set: *sigset_t, signo: u5) void {
set.* |= u32(1) << (signo - 1);
}

View File

@ -19,7 +19,7 @@ pub const File = struct {
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
pub fn openRead(allocator: &mem.Allocator, path: []const u8) OpenError!File {
pub fn openRead(allocator: *mem.Allocator, path: []const u8) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_RDONLY;
const fd = try os.posixOpen(allocator, path, flags, 0);
@ -40,7 +40,7 @@ pub const File = struct {
}
/// Calls `openWriteMode` with os.default_file_mode for the mode.
pub fn openWrite(allocator: &mem.Allocator, path: []const u8) OpenError!File {
pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File {
return openWriteMode(allocator, path, os.default_file_mode);
}
@ -48,7 +48,7 @@ pub const File = struct {
/// If a file already exists in the destination it will be truncated.
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
pub fn openWriteMode(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@ -72,7 +72,7 @@ pub const File = struct {
/// If a file already exists in the destination this returns OpenError.PathAlreadyExists
/// `path` needs to be copied in memory to add a null terminating byte, hence the allocator.
/// Call close to clean up.
pub fn openWriteNoClobber(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File {
if (is_posix) {
const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL;
const fd = try os.posixOpen(allocator, path, flags, file_mode);
@ -96,7 +96,7 @@ pub const File = struct {
return File{ .handle = handle };
}
pub fn access(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
const path_with_null = try std.cstr.addNullByte(allocator, path);
defer allocator.free(path_with_null);
@ -140,17 +140,17 @@ pub const File = struct {
/// Upon success, the stream is in an uninitialized state. To continue using it,
/// you must use the open() function.
pub fn close(self: &File) void {
pub fn close(self: *File) void {
os.close(self.handle);
self.handle = undefined;
}
/// Calls `os.isTty` on `self.handle`.
pub fn isTty(self: &File) bool {
pub fn isTty(self: *File) bool {
return os.isTty(self.handle);
}
pub fn seekForward(self: &File, amount: isize) !void {
pub fn seekForward(self: *File, amount: isize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, amount, posix.SEEK_CUR);
@ -179,7 +179,7 @@ pub const File = struct {
}
}
pub fn seekTo(self: &File, pos: usize) !void {
pub fn seekTo(self: *File, pos: usize) !void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const ipos = try math.cast(isize, pos);
@ -210,7 +210,7 @@ pub const File = struct {
}
}
pub fn getPos(self: &File) !usize {
pub fn getPos(self: *File) !usize {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const result = posix.lseek(self.handle, 0, posix.SEEK_CUR);
@ -229,7 +229,7 @@ pub const File = struct {
},
Os.windows => {
var pos: windows.LARGE_INTEGER = undefined;
if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_PARAMETER => error.BadFd,
@ -250,7 +250,7 @@ pub const File = struct {
}
}
pub fn getEndPos(self: &File) !usize {
pub fn getEndPos(self: *File) !usize {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@ -285,7 +285,7 @@ pub const File = struct {
Unexpected,
};
fn mode(self: &File) ModeError!os.FileMode {
fn mode(self: *File) ModeError!os.FileMode {
if (is_posix) {
var stat: posix.Stat = undefined;
const err = posix.getErrno(posix.fstat(self.handle, &stat));
@ -309,7 +309,7 @@ pub const File = struct {
pub const ReadError = error{};
pub fn read(self: &File, buffer: []u8) !usize {
pub fn read(self: *File, buffer: []u8) !usize {
if (is_posix) {
var index: usize = 0;
while (index < buffer.len) {
@ -334,7 +334,7 @@ pub const File = struct {
while (index < buffer.len) {
const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
if (windows.ReadFile(self.handle, @ptrCast(&c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) {
if (windows.ReadFile(self.handle, @ptrCast(*c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
@ -353,7 +353,7 @@ pub const File = struct {
pub const WriteError = os.WindowsWriteError || os.PosixWriteError;
fn write(self: &File, bytes: []const u8) WriteError!void {
fn write(self: *File, bytes: []const u8) WriteError!void {
if (is_posix) {
try os.posixWrite(self.handle, bytes);
} else if (is_windows) {

View File

@ -77,8 +77,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile;
if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile;
if (@mulWithOverflow(u32, uid, 10, *uid)) return error.CorruptPasswordFile;
if (@addWithOverflow(u32, uid, digit, *uid)) return error.CorruptPasswordFile;
},
},
State.ReadGroupId => switch (byte) {
@ -93,8 +93,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
'0'...'9' => byte - '0',
else => return error.CorruptPasswordFile,
};
if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile;
if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile;
if (@mulWithOverflow(u32, gid, 10, *gid)) return error.CorruptPasswordFile;
if (@addWithOverflow(u32, gid, digit, *gid)) return error.CorruptPasswordFile;
},
},
}

View File

@ -321,14 +321,14 @@ pub const PosixOpenError = error{
/// ::file_path needs to be copied in memory to add a null terminating byte.
/// Calls POSIX open, keeps trying if it gets interrupted, and translates
/// the return value into zig errors.
pub fn posixOpen(allocator: &Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
pub fn posixOpen(allocator: *Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 {
const path_with_null = try cstr.addNullByte(allocator, file_path);
defer allocator.free(path_with_null);
return posixOpenC(path_with_null.ptr, flags, perm);
}
pub fn posixOpenC(file_path: &const u8, flags: u32, perm: usize) !i32 {
pub fn posixOpenC(file_path: *const u8, flags: u32, perm: usize) !i32 {
while (true) {
const result = posix.open(file_path, flags, perm);
const err = posix.getErrno(result);
@ -374,10 +374,10 @@ pub fn posixDup2(old_fd: i32, new_fd: i32) !void {
}
}
pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap) ![]?&u8 {
pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?*u8 {
const envp_count = env_map.count();
const envp_buf = try allocator.alloc(?&u8, envp_count + 1);
mem.set(?&u8, envp_buf, null);
const envp_buf = try allocator.alloc(?*u8, envp_count + 1);
mem.set(?*u8, envp_buf, null);
errdefer freeNullDelimitedEnvMap(allocator, envp_buf);
{
var it = env_map.iterator();
@ -397,7 +397,7 @@ pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap)
return envp_buf;
}
pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?*u8) void {
for (envp_buf) |env| {
const env_buf = if (env) |ptr| ptr[0 .. cstr.len(ptr) + 1] else break;
allocator.free(env_buf);
@ -410,9 +410,9 @@ pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void {
/// pointers after the args and after the environment variables.
/// `argv[0]` is the executable path.
/// This function also uses the PATH environment variable to get the full path to the executable.
pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap, allocator: &Allocator) !void {
const argv_buf = try allocator.alloc(?&u8, argv.len + 1);
mem.set(?&u8, argv_buf, null);
pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: *Allocator) !void {
const argv_buf = try allocator.alloc(?*u8, argv.len + 1);
mem.set(?*u8, argv_buf, null);
defer {
for (argv_buf) |arg| {
const arg_buf = if (arg) |ptr| cstr.toSlice(ptr) else break;
@ -494,10 +494,10 @@ fn posixExecveErrnoToErr(err: usize) PosixExecveError {
}
pub var linux_aux_raw = []usize{0} ** 38;
pub var posix_environ_raw: []&u8 = undefined;
pub var posix_environ_raw: []*u8 = undefined;
/// Caller must free result when done.
pub fn getEnvMap(allocator: &Allocator) !BufMap {
pub fn getEnvMap(allocator: *Allocator) !BufMap {
var result = BufMap.init(allocator);
errdefer result.deinit();
@ -557,7 +557,7 @@ pub fn getEnvPosix(key: []const u8) ?[]const u8 {
}
/// Caller must free returned memory.
pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 {
if (is_windows) {
const key_with_null = try cstr.addNullByte(allocator, key);
defer allocator.free(key_with_null);
@ -591,7 +591,7 @@ pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 {
}
/// Caller must free the returned memory.
pub fn getCwd(allocator: &Allocator) ![]u8 {
pub fn getCwd(allocator: *Allocator) ![]u8 {
switch (builtin.os) {
Os.windows => {
var buf = try allocator.alloc(u8, 256);
@ -640,7 +640,7 @@ test "os.getCwd" {
pub const SymLinkError = PosixSymLinkError || WindowsSymLinkError;
pub fn symLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
pub fn symLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void {
if (is_windows) {
return symLinkWindows(allocator, existing_path, new_path);
} else {
@ -653,7 +653,7 @@ pub const WindowsSymLinkError = error{
Unexpected,
};
pub fn symLinkWindows(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
pub fn symLinkWindows(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void {
const existing_with_null = try cstr.addNullByte(allocator, existing_path);
defer allocator.free(existing_with_null);
const new_with_null = try cstr.addNullByte(allocator, new_path);
@ -683,7 +683,7 @@ pub const PosixSymLinkError = error{
Unexpected,
};
pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
pub fn symLinkPosix(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void {
const full_buf = try allocator.alloc(u8, existing_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@ -718,7 +718,7 @@ pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path:
// here we replace the standard +/ with -_ so that it can be used in a file name
const b64_fs_encoder = base64.Base64Encoder.init("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", base64.standard_pad_char);
pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) !void {
pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
if (symLink(allocator, existing_path, new_path)) {
return;
} else |err| switch (err) {
@ -746,7 +746,7 @@ pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path:
}
}
pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
pub fn deleteFile(allocator: *Allocator, file_path: []const u8) !void {
if (builtin.os == Os.windows) {
return deleteFileWindows(allocator, file_path);
} else {
@ -754,7 +754,7 @@ pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void {
}
}
pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
pub fn deleteFileWindows(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@ -772,7 +772,7 @@ pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void {
}
}
pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
pub fn deleteFilePosix(allocator: *Allocator, file_path: []const u8) !void {
const buf = try allocator.alloc(u8, file_path.len + 1);
defer allocator.free(buf);
@ -803,7 +803,7 @@ pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void {
/// there is a possibility of power loss or application termination leaving temporary files present
/// in the same directory as dest_path.
/// Destination file will have the same mode as the source file.
pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []const u8) !void {
pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []const u8) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@ -825,7 +825,7 @@ pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []con
/// Guaranteed to be atomic. However until https://patchwork.kernel.org/patch/9636735/ is
/// merged and readily available,
/// there is a possibility of power loss or application termination leaving temporary files present
pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void {
var in_file = try os.File.openRead(allocator, source_path);
defer in_file.close();
@ -843,7 +843,7 @@ pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: [
}
pub const AtomicFile = struct {
allocator: &Allocator,
allocator: *Allocator,
file: os.File,
tmp_path: []u8,
dest_path: []const u8,
@ -851,7 +851,7 @@ pub const AtomicFile = struct {
/// dest_path must remain valid for the lifetime of AtomicFile
/// call finish to atomically replace dest_path with contents
pub fn init(allocator: &Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
pub fn init(allocator: *Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile {
const dirname = os.path.dirname(dest_path);
var rand_buf: [12]u8 = undefined;
@ -888,7 +888,7 @@ pub const AtomicFile = struct {
}
/// always call deinit, even after successful finish()
pub fn deinit(self: &AtomicFile) void {
pub fn deinit(self: *AtomicFile) void {
if (!self.finished) {
self.file.close();
deleteFile(self.allocator, self.tmp_path) catch {};
@ -897,7 +897,7 @@ pub const AtomicFile = struct {
}
}
pub fn finish(self: &AtomicFile) !void {
pub fn finish(self: *AtomicFile) !void {
assert(!self.finished);
self.file.close();
try rename(self.allocator, self.tmp_path, self.dest_path);
@ -906,7 +906,7 @@ pub const AtomicFile = struct {
}
};
pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8) !void {
pub fn rename(allocator: *Allocator, old_path: []const u8, new_path: []const u8) !void {
const full_buf = try allocator.alloc(u8, old_path.len + new_path.len + 2);
defer allocator.free(full_buf);
@ -951,7 +951,7 @@ pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8)
}
}
pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
pub fn makeDir(allocator: *Allocator, dir_path: []const u8) !void {
if (is_windows) {
return makeDirWindows(allocator, dir_path);
} else {
@ -959,7 +959,7 @@ pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void {
}
}
pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
pub fn makeDirWindows(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@ -973,7 +973,7 @@ pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void {
}
}
pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
pub fn makeDirPosix(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try cstr.addNullByte(allocator, dir_path);
defer allocator.free(path_buf);
@ -999,7 +999,7 @@ pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void {
/// Calls makeDir recursively to make an entire path. Returns success if the path
/// already exists and is a directory.
pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
const resolved_path = try path.resolve(allocator, full_path);
defer allocator.free(resolved_path);
@ -1033,7 +1033,7 @@ pub fn makePath(allocator: &Allocator, full_path: []const u8) !void {
/// Returns ::error.DirNotEmpty if the directory is not empty.
/// To delete a directory recursively, see ::deleteTree
pub fn deleteDir(allocator: &Allocator, dir_path: []const u8) !void {
pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
@ -1084,7 +1084,7 @@ const DeleteTreeError = error{
DirNotEmpty,
Unexpected,
};
pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!void {
pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void {
start_over: while (true) {
var got_access_denied = false;
// First, try deleting the item as a file. This way we don't follow sym links.
@ -1153,7 +1153,7 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!
pub const Dir = struct {
fd: i32,
darwin_seek: darwin_seek_t,
allocator: &Allocator,
allocator: *Allocator,
buf: []u8,
index: usize,
end_index: usize,
@ -1180,7 +1180,7 @@ pub const Dir = struct {
};
};
pub fn open(allocator: &Allocator, dir_path: []const u8) !Dir {
pub fn open(allocator: *Allocator, dir_path: []const u8) !Dir {
const fd = switch (builtin.os) {
Os.windows => @compileError("TODO support Dir.open for windows"),
Os.linux => try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0),
@ -1206,14 +1206,14 @@ pub const Dir = struct {
};
}
pub fn close(self: &Dir) void {
pub fn close(self: *Dir) void {
self.allocator.free(self.buf);
os.close(self.fd);
}
/// Memory such as file names referenced in this returned entry becomes invalid
/// with subsequent calls to next, as well as when this ::Dir is deinitialized.
pub fn next(self: &Dir) !?Entry {
pub fn next(self: *Dir) !?Entry {
switch (builtin.os) {
Os.linux => return self.nextLinux(),
Os.macosx, Os.ios => return self.nextDarwin(),
@ -1222,7 +1222,7 @@ pub const Dir = struct {
}
}
fn nextDarwin(self: &Dir) !?Entry {
fn nextDarwin(self: *Dir) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
if (self.buf.len == 0) {
@ -1248,7 +1248,7 @@ pub const Dir = struct {
break;
}
}
const darwin_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]);
const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
const next_index = self.index + darwin_entry.d_reclen;
self.index = next_index;
@ -1277,11 +1277,11 @@ pub const Dir = struct {
}
}
fn nextWindows(self: &Dir) !?Entry {
fn nextWindows(self: *Dir) !?Entry {
@compileError("TODO support Dir.next for windows");
}
fn nextLinux(self: &Dir) !?Entry {
fn nextLinux(self: *Dir) !?Entry {
start_over: while (true) {
if (self.index >= self.end_index) {
if (self.buf.len == 0) {
@ -1307,7 +1307,7 @@ pub const Dir = struct {
break;
}
}
const linux_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]);
const linux_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
const next_index = self.index + linux_entry.d_reclen;
self.index = next_index;
@ -1337,7 +1337,7 @@ pub const Dir = struct {
}
};
pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
pub fn changeCurDir(allocator: *Allocator, dir_path: []const u8) !void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
@ -1361,7 +1361,7 @@ pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void {
}
/// Read value of a symbolic link.
pub fn readLink(allocator: &Allocator, pathname: []const u8) ![]u8 {
pub fn readLink(allocator: *Allocator, pathname: []const u8) ![]u8 {
const path_buf = try allocator.alloc(u8, pathname.len + 1);
defer allocator.free(path_buf);
@ -1468,7 +1468,7 @@ pub const ArgIteratorPosix = struct {
};
}
pub fn next(self: &ArgIteratorPosix) ?[]const u8 {
pub fn next(self: *ArgIteratorPosix) ?[]const u8 {
if (self.index == self.count) return null;
const s = raw[self.index];
@ -1476,7 +1476,7 @@ pub const ArgIteratorPosix = struct {
return cstr.toSlice(s);
}
pub fn skip(self: &ArgIteratorPosix) bool {
pub fn skip(self: *ArgIteratorPosix) bool {
if (self.index == self.count) return false;
self.index += 1;
@ -1485,12 +1485,12 @@ pub const ArgIteratorPosix = struct {
/// This is marked as public but actually it's only meant to be used
/// internally by zig's startup code.
pub var raw: []&u8 = undefined;
pub var raw: []*u8 = undefined;
};
pub const ArgIteratorWindows = struct {
index: usize,
cmd_line: &const u8,
cmd_line: *const u8,
in_quote: bool,
quote_count: usize,
seen_quote_count: usize,
@ -1501,7 +1501,7 @@ pub const ArgIteratorWindows = struct {
return initWithCmdLine(windows.GetCommandLineA());
}
pub fn initWithCmdLine(cmd_line: &const u8) ArgIteratorWindows {
pub fn initWithCmdLine(cmd_line: *const u8) ArgIteratorWindows {
return ArgIteratorWindows{
.index = 0,
.cmd_line = cmd_line,
@ -1512,7 +1512,7 @@ pub const ArgIteratorWindows = struct {
}
/// You must free the returned memory when done.
pub fn next(self: &ArgIteratorWindows, allocator: &Allocator) ?(NextError![]u8) {
pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![]u8) {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@ -1526,7 +1526,7 @@ pub const ArgIteratorWindows = struct {
return self.internalNext(allocator);
}
pub fn skip(self: &ArgIteratorWindows) bool {
pub fn skip(self: *ArgIteratorWindows) bool {
// march forward over whitespace
while (true) : (self.index += 1) {
const byte = self.cmd_line[self.index];
@ -1565,7 +1565,7 @@ pub const ArgIteratorWindows = struct {
}
}
fn internalNext(self: &ArgIteratorWindows, allocator: &Allocator) NextError![]u8 {
fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![]u8 {
var buf = try Buffer.initSize(allocator, 0);
defer buf.deinit();
@ -1609,14 +1609,14 @@ pub const ArgIteratorWindows = struct {
}
}
fn emitBackslashes(self: &ArgIteratorWindows, buf: &Buffer, emit_count: usize) !void {
fn emitBackslashes(self: *ArgIteratorWindows, buf: *Buffer, emit_count: usize) !void {
var i: usize = 0;
while (i < emit_count) : (i += 1) {
try buf.appendByte('\\');
}
}
fn countQuotes(cmd_line: &const u8) usize {
fn countQuotes(cmd_line: *const u8) usize {
var result: usize = 0;
var backslash_count: usize = 0;
var index: usize = 0;
@ -1649,7 +1649,7 @@ pub const ArgIterator = struct {
pub const NextError = ArgIteratorWindows.NextError;
/// You must free the returned memory when done.
pub fn next(self: &ArgIterator, allocator: &Allocator) ?(NextError![]u8) {
pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![]u8) {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
@ -1658,13 +1658,13 @@ pub const ArgIterator = struct {
}
/// If you only are targeting posix you can call this and not need an allocator.
pub fn nextPosix(self: &ArgIterator) ?[]const u8 {
pub fn nextPosix(self: *ArgIterator) ?[]const u8 {
return self.inner.next();
}
/// Parse past 1 argument without capturing it.
/// Returns `true` if skipped an arg, `false` if we are at the end.
pub fn skip(self: &ArgIterator) bool {
pub fn skip(self: *ArgIterator) bool {
return self.inner.skip();
}
};
@ -1674,7 +1674,7 @@ pub fn args() ArgIterator {
}
/// Caller must call freeArgs on result.
pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 {
// TODO refactor to only make 1 allocation.
var it = args();
var contents = try Buffer.initSize(allocator, 0);
@ -1711,12 +1711,12 @@ pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 {
return result_slice_list;
}
pub fn argsFree(allocator: &mem.Allocator, args_alloc: []const []u8) void {
pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void {
var total_bytes: usize = 0;
for (args_alloc) |arg| {
total_bytes += @sizeOf([]u8) + arg.len;
}
const unaligned_allocated_buf = @ptrCast(&const u8, args_alloc.ptr)[0..total_bytes];
const unaligned_allocated_buf = @ptrCast(*const u8, args_alloc.ptr)[0..total_bytes];
const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf);
return allocator.free(aligned_allocated_buf);
}
@ -1765,7 +1765,7 @@ test "windows arg parsing" {
});
}
fn testWindowsCmdLine(input_cmd_line: &const u8, expected_args: []const []const u8) void {
fn testWindowsCmdLine(input_cmd_line: *const u8, expected_args: []const []const u8) void {
var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line);
for (expected_args) |expected_arg| {
const arg = ??it.next(debug.global_allocator) catch unreachable;
@ -1832,7 +1832,7 @@ test "openSelfExe" {
/// This function may return an error if the current executable
/// was deleted after spawning.
/// Caller owns returned memory.
pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
pub fn selfExePath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@ -1875,7 +1875,7 @@ pub fn selfExePath(allocator: &mem.Allocator) ![]u8 {
/// Get the directory path that contains the current executable.
/// Caller owns returned memory.
pub fn selfExeDirPath(allocator: &mem.Allocator) ![]u8 {
pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
switch (builtin.os) {
Os.linux => {
// If the currently executing binary has been deleted,
@ -2001,7 +2001,7 @@ pub const PosixBindError = error{
};
/// addr is `&const T` where T is one of the sockaddr
pub fn posixBind(fd: i32, addr: &const posix.sockaddr) PosixBindError!void {
pub fn posixBind(fd: i32, addr: *const posix.sockaddr) PosixBindError!void {
const rc = posix.bind(fd, addr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
switch (err) {
@ -2096,7 +2096,7 @@ pub const PosixAcceptError = error{
Unexpected,
};
pub fn posixAccept(fd: i32, addr: &posix.sockaddr, flags: u32) PosixAcceptError!i32 {
pub fn posixAccept(fd: i32, addr: *posix.sockaddr, flags: u32) PosixAcceptError!i32 {
while (true) {
var sockaddr_size = u32(@sizeOf(posix.sockaddr));
const rc = posix.accept4(fd, addr, &sockaddr_size, flags);
@ -2195,7 +2195,7 @@ pub const LinuxEpollCtlError = error{
Unexpected,
};
pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: &linux.epoll_event) LinuxEpollCtlError!void {
pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: *linux.epoll_event) LinuxEpollCtlError!void {
const rc = posix.epoll_ctl(epfd, op, fd, event);
const err = posix.getErrno(rc);
switch (err) {
@ -2288,7 +2288,7 @@ pub const PosixConnectError = error{
Unexpected,
};
pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
pub fn posixConnect(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@ -2319,7 +2319,7 @@ pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectEr
/// Same as posixConnect except it is for blocking socket file descriptors.
/// It expects to receive EINPROGRESS.
pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void {
pub fn posixConnectAsync(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void {
while (true) {
const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr));
const err = posix.getErrno(rc);
@ -2350,7 +2350,7 @@ pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConn
pub fn posixGetSockOptConnectError(sockfd: i32) PosixConnectError!void {
var err_code: i32 = undefined;
var size: u32 = @sizeOf(i32);
const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(&u8, &err_code), &size);
const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(*u8, &err_code), &size);
assert(size == 4);
const err = posix.getErrno(rc);
switch (err) {
@ -2401,13 +2401,13 @@ pub const Thread = struct {
},
builtin.Os.windows => struct {
handle: windows.HANDLE,
alloc_start: &c_void,
alloc_start: *c_void,
heap_handle: windows.HANDLE,
},
else => @compileError("Unsupported OS"),
};
pub fn wait(self: &const Thread) void {
pub fn wait(self: *const Thread) void {
if (use_pthreads) {
const err = c.pthread_join(self.data.handle, null);
switch (err) {
@ -2473,7 +2473,7 @@ pub const SpawnThreadError = error{
/// fn startFn(@typeOf(context)) T
/// where T is u8, noreturn, void, or !void
/// caller must call wait on the returned thread
pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread {
pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread {
// TODO compile-time call graph analysis to determine stack upper bound
// https://github.com/ziglang/zig/issues/157
const default_stack_size = 8 * 1024 * 1024;
@ -2491,7 +2491,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
if (@sizeOf(Context) == 0) {
return startFn({});
} else {
return startFn(@ptrCast(&Context, @alignCast(@alignOf(Context), arg)).*);
return startFn(@ptrCast(*Context, @alignCast(@alignOf(Context), arg)).*);
}
}
};
@ -2500,13 +2500,13 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext);
const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast(&u8, bytes_ptr)[0..byte_count];
const bytes = @ptrCast(*u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
outer_context.inner = context;
outer_context.thread.data.heap_handle = heap_handle;
outer_context.thread.data.alloc_start = bytes_ptr;
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(&c_void, &outer_context.inner);
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? {
const err = windows.GetLastError();
return switch (err) {
@ -2521,15 +2521,15 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
if (@sizeOf(Context) == 0) {
return startFn({});
} else {
return startFn(@intToPtr(&const Context, ctx_addr).*);
return startFn(@intToPtr(*const Context, ctx_addr).*);
}
}
extern fn posixThreadMain(ctx: ?&c_void) ?&c_void {
extern fn posixThreadMain(ctx: ?*c_void) ?*c_void {
if (@sizeOf(Context) == 0) {
_ = startFn({});
return null;
} else {
_ = startFn(@ptrCast(&const Context, @alignCast(@alignOf(Context), ctx)).*);
_ = startFn(@ptrCast(*const Context, @alignCast(@alignOf(Context), ctx)).*);
return null;
}
}
@ -2548,7 +2548,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
stack_end -= @sizeOf(Context);
stack_end -= stack_end % @alignOf(Context);
assert(stack_end >= stack_addr);
const context_ptr = @alignCast(@alignOf(Context), @intToPtr(&Context, stack_end));
const context_ptr = @alignCast(@alignOf(Context), @intToPtr(*Context, stack_end));
context_ptr.* = context;
arg = stack_end;
}
@ -2556,7 +2556,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
stack_end -= @sizeOf(Thread);
stack_end -= stack_end % @alignOf(Thread);
assert(stack_end >= stack_addr);
const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(&Thread, stack_end));
const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(*Thread, stack_end));
thread_ptr.data.stack_addr = stack_addr;
thread_ptr.data.stack_len = mmap_len;
@ -2572,9 +2572,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread
// align to page
stack_end -= stack_end % os.page_size;
assert(c.pthread_attr_setstack(&attr, @intToPtr(&c_void, stack_addr), stack_end - stack_addr) == 0);
assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0);
const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(&c_void, arg));
const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg));
switch (err) {
0 => return thread_ptr,
posix.EAGAIN => return SpawnThreadError.SystemResources,

View File

@ -665,15 +665,15 @@ pub fn dup2(old: i32, new: i32) usize {
return syscall2(SYS_dup2, usize(old), usize(new));
}
pub fn chdir(path: &const u8) usize {
pub fn chdir(path: *const u8) usize {
return syscall1(SYS_chdir, @ptrToInt(path));
}
pub fn chroot(path: &const u8) usize {
pub fn chroot(path: *const u8) usize {
return syscall1(SYS_chroot, @ptrToInt(path));
}
pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize {
pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize {
return syscall3(SYS_execve, @ptrToInt(path), @ptrToInt(argv), @ptrToInt(envp));
}
@ -681,15 +681,15 @@ pub fn fork() usize {
return syscall0(SYS_fork);
}
pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?&timespec) usize {
pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) usize {
return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout));
}
pub fn getcwd(buf: &u8, size: usize) usize {
pub fn getcwd(buf: *u8, size: usize) usize {
return syscall2(SYS_getcwd, @ptrToInt(buf), size);
}
pub fn getdents(fd: i32, dirp: &u8, count: usize) usize {
pub fn getdents(fd: i32, dirp: *u8, count: usize) usize {
return syscall3(SYS_getdents, usize(fd), @ptrToInt(dirp), count);
}
@ -698,27 +698,27 @@ pub fn isatty(fd: i32) bool {
return syscall3(SYS_ioctl, usize(fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0;
}
pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize {
pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize {
return syscall3(SYS_readlink, @ptrToInt(path), @ptrToInt(buf_ptr), buf_len);
}
pub fn mkdir(path: &const u8, mode: u32) usize {
pub fn mkdir(path: *const u8, mode: u32) usize {
return syscall2(SYS_mkdir, @ptrToInt(path), mode);
}
pub fn mount(special: &const u8, dir: &const u8, fstype: &const u8, flags: usize, data: usize) usize {
pub fn mount(special: *const u8, dir: *const u8, fstype: *const u8, flags: usize, data: usize) usize {
return syscall5(SYS_mount, @ptrToInt(special), @ptrToInt(dir), @ptrToInt(fstype), flags, data);
}
pub fn umount(special: &const u8) usize {
pub fn umount(special: *const u8) usize {
return syscall2(SYS_umount2, @ptrToInt(special), 0);
}
pub fn umount2(special: &const u8, flags: u32) usize {
pub fn umount2(special: *const u8, flags: u32) usize {
return syscall2(SYS_umount2, @ptrToInt(special), flags);
}
pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
return syscall6(SYS_mmap, @ptrToInt(address), length, prot, flags, usize(fd), @bitCast(usize, offset));
}
@ -726,60 +726,60 @@ pub fn munmap(address: usize, length: usize) usize {
return syscall2(SYS_munmap, address, length);
}
pub fn read(fd: i32, buf: &u8, count: usize) usize {
pub fn read(fd: i32, buf: *u8, count: usize) usize {
return syscall3(SYS_read, usize(fd), @ptrToInt(buf), count);
}
pub fn rmdir(path: &const u8) usize {
pub fn rmdir(path: *const u8) usize {
return syscall1(SYS_rmdir, @ptrToInt(path));
}
pub fn symlink(existing: &const u8, new: &const u8) usize {
pub fn symlink(existing: *const u8, new: *const u8) usize {
return syscall2(SYS_symlink, @ptrToInt(existing), @ptrToInt(new));
}
pub fn pread(fd: i32, buf: &u8, count: usize, offset: usize) usize {
pub fn pread(fd: i32, buf: *u8, count: usize, offset: usize) usize {
return syscall4(SYS_pread, usize(fd), @ptrToInt(buf), count, offset);
}
pub fn access(path: &const u8, mode: u32) usize {
pub fn access(path: *const u8, mode: u32) usize {
return syscall2(SYS_access, @ptrToInt(path), mode);
}
pub fn pipe(fd: &[2]i32) usize {
pub fn pipe(fd: *[2]i32) usize {
return pipe2(fd, 0);
}
pub fn pipe2(fd: &[2]i32, flags: usize) usize {
pub fn pipe2(fd: *[2]i32, flags: usize) usize {
return syscall2(SYS_pipe2, @ptrToInt(fd), flags);
}
pub fn write(fd: i32, buf: &const u8, count: usize) usize {
pub fn write(fd: i32, buf: *const u8, count: usize) usize {
return syscall3(SYS_write, usize(fd), @ptrToInt(buf), count);
}
pub fn pwrite(fd: i32, buf: &const u8, count: usize, offset: usize) usize {
pub fn pwrite(fd: i32, buf: *const u8, count: usize, offset: usize) usize {
return syscall4(SYS_pwrite, usize(fd), @ptrToInt(buf), count, offset);
}
pub fn rename(old: &const u8, new: &const u8) usize {
pub fn rename(old: *const u8, new: *const u8) usize {
return syscall2(SYS_rename, @ptrToInt(old), @ptrToInt(new));
}
pub fn open(path: &const u8, flags: u32, perm: usize) usize {
pub fn open(path: *const u8, flags: u32, perm: usize) usize {
return syscall3(SYS_open, @ptrToInt(path), flags, perm);
}
pub fn create(path: &const u8, perm: usize) usize {
pub fn create(path: *const u8, perm: usize) usize {
return syscall2(SYS_creat, @ptrToInt(path), perm);
}
pub fn openat(dirfd: i32, path: &const u8, flags: usize, mode: usize) usize {
pub fn openat(dirfd: i32, path: *const u8, flags: usize, mode: usize) usize {
return syscall4(SYS_openat, usize(dirfd), @ptrToInt(path), flags, mode);
}
/// See also `clone` (from the arch-specific include)
pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: &i32, child_tid: &i32, newtls: usize) usize {
pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: *i32, child_tid: *i32, newtls: usize) usize {
return syscall5(SYS_clone, flags, child_stack_ptr, @ptrToInt(parent_tid), @ptrToInt(child_tid), newtls);
}
@ -801,7 +801,7 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
pub fn getrandom(buf: &u8, count: usize, flags: u32) usize {
pub fn getrandom(buf: *u8, count: usize, flags: u32) usize {
return syscall3(SYS_getrandom, @ptrToInt(buf), count, usize(flags));
}
@ -809,15 +809,15 @@ pub fn kill(pid: i32, sig: i32) usize {
return syscall2(SYS_kill, @bitCast(usize, isize(pid)), usize(sig));
}
pub fn unlink(path: &const u8) usize {
pub fn unlink(path: *const u8) usize {
return syscall1(SYS_unlink, @ptrToInt(path));
}
pub fn waitpid(pid: i32, status: &i32, options: i32) usize {
pub fn waitpid(pid: i32, status: *i32, options: i32) usize {
return syscall4(SYS_wait4, @bitCast(usize, isize(pid)), @ptrToInt(status), @bitCast(usize, isize(options)), 0);
}
pub fn clock_gettime(clk_id: i32, tp: &timespec) usize {
pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
if (VDSO_CGT_SYM.len != 0) {
const f = @atomicLoad(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, builtin.AtomicOrder.Unordered);
if (@ptrToInt(f) != 0) {
@ -831,7 +831,7 @@ pub fn clock_gettime(clk_id: i32, tp: &timespec) usize {
return syscall2(SYS_clock_gettime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
var vdso_clock_gettime = init_vdso_clock_gettime;
extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize {
extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
const addr = vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM);
var f = @intToPtr(@typeOf(init_vdso_clock_gettime), addr);
_ = @cmpxchgStrong(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, init_vdso_clock_gettime, f, builtin.AtomicOrder.Monotonic, builtin.AtomicOrder.Monotonic);
@ -839,23 +839,23 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize {
return f(clk, ts);
}
pub fn clock_getres(clk_id: i32, tp: &timespec) usize {
pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
return syscall2(SYS_clock_getres, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
pub fn clock_settime(clk_id: i32, tp: &const timespec) usize {
pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
return syscall2(SYS_clock_settime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp));
}
pub fn gettimeofday(tv: &timeval, tz: &timezone) usize {
pub fn gettimeofday(tv: *timeval, tz: *timezone) usize {
return syscall2(SYS_gettimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
pub fn settimeofday(tv: &const timeval, tz: &const timezone) usize {
pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize {
return syscall2(SYS_settimeofday, @ptrToInt(tv), @ptrToInt(tz));
}
pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize {
pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(SYS_nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
@ -899,11 +899,11 @@ pub fn setegid(egid: u32) usize {
return syscall1(SYS_setegid, egid);
}
pub fn getresuid(ruid: &u32, euid: &u32, suid: &u32) usize {
pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
return syscall3(SYS_getresuid, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
}
pub fn getresgid(rgid: &u32, egid: &u32, sgid: &u32) usize {
pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
return syscall3(SYS_getresgid, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
}
@ -915,11 +915,11 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
return syscall3(SYS_setresgid, rgid, egid, sgid);
}
pub fn getgroups(size: usize, list: &u32) usize {
pub fn getgroups(size: usize, list: *u32) usize {
return syscall2(SYS_getgroups, size, @ptrToInt(list));
}
pub fn setgroups(size: usize, list: &const u32) usize {
pub fn setgroups(size: usize, list: *const u32) usize {
return syscall2(SYS_setgroups, size, @ptrToInt(list));
}
@ -927,11 +927,11 @@ pub fn getpid() i32 {
return @bitCast(i32, u32(syscall0(SYS_getpid)));
}
pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize {
pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize {
return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8);
}
pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize {
pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize {
assert(sig >= 1);
assert(sig != SIGKILL);
assert(sig != SIGSTOP);
@ -942,8 +942,8 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
.restorer = @ptrCast(extern fn () void, restore_rt),
};
var ksa_old: k_sigaction = undefined;
@memcpy(@ptrCast(&u8, &ksa.mask), @ptrCast(&const u8, &act.mask), 8);
const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask)));
@memcpy(@ptrCast(*u8, &ksa.mask), @ptrCast(*const u8, &act.mask), 8);
const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask)));
const err = getErrno(result);
if (err != 0) {
return result;
@ -951,7 +951,7 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti
if (oact) |old| {
old.handler = ksa_old.handler;
old.flags = @truncate(u32, ksa_old.flags);
@memcpy(@ptrCast(&u8, &old.mask), @ptrCast(&const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
@memcpy(@ptrCast(*u8, &old.mask), @ptrCast(*const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask)));
}
return 0;
}
@ -989,24 +989,24 @@ pub fn raise(sig: i32) usize {
return ret;
}
fn blockAllSignals(set: &sigset_t) void {
fn blockAllSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&all_mask), @ptrToInt(set), NSIG / 8);
}
fn blockAppSignals(set: &sigset_t) void {
fn blockAppSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&app_mask), @ptrToInt(set), NSIG / 8);
}
fn restoreSignals(set: &sigset_t) void {
fn restoreSignals(set: *sigset_t) void {
_ = syscall4(SYS_rt_sigprocmask, SIG_SETMASK, @ptrToInt(set), 0, NSIG / 8);
}
pub fn sigaddset(set: &sigset_t, sig: u6) void {
pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
(set.*)[usize(s) / usize.bit_count] |= usize(1) << (s & (usize.bit_count - 1));
}
pub fn sigismember(set: &const sigset_t, sig: u6) bool {
pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
return ((set.*)[usize(s) / usize.bit_count] & (usize(1) << (s & (usize.bit_count - 1)))) != 0;
}
@ -1036,15 +1036,15 @@ pub const sockaddr_in6 = extern struct {
};
pub const iovec = extern struct {
iov_base: &u8,
iov_base: *u8,
iov_len: usize,
};
pub fn getsockname(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getsockname, usize(fd), @ptrToInt(addr), @ptrToInt(len));
}
pub fn getpeername(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return syscall3(SYS_getpeername, usize(fd), @ptrToInt(addr), @ptrToInt(len));
}
@ -1052,27 +1052,27 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize {
return syscall3(SYS_socket, domain, socket_type, protocol);
}
pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: &const u8, optlen: socklen_t) usize {
pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: *const u8, optlen: socklen_t) usize {
return syscall5(SYS_setsockopt, usize(fd), level, optname, usize(optval), @ptrToInt(optlen));
}
pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: &u8, noalias optlen: &socklen_t) usize {
pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: *u8, noalias optlen: *socklen_t) usize {
return syscall5(SYS_getsockopt, usize(fd), level, optname, @ptrToInt(optval), @ptrToInt(optlen));
}
pub fn sendmsg(fd: i32, msg: &const msghdr, flags: u32) usize {
pub fn sendmsg(fd: i32, msg: *const msghdr, flags: u32) usize {
return syscall3(SYS_sendmsg, usize(fd), @ptrToInt(msg), flags);
}
pub fn connect(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
pub fn connect(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return syscall3(SYS_connect, usize(fd), @ptrToInt(addr), usize(len));
}
pub fn recvmsg(fd: i32, msg: &msghdr, flags: u32) usize {
pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize {
return syscall3(SYS_recvmsg, usize(fd), @ptrToInt(msg), flags);
}
pub fn recvfrom(fd: i32, noalias buf: &u8, len: usize, flags: u32, noalias addr: ?&sockaddr, noalias alen: ?&socklen_t) usize {
pub fn recvfrom(fd: i32, noalias buf: *u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize {
return syscall6(SYS_recvfrom, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen));
}
@ -1080,7 +1080,7 @@ pub fn shutdown(fd: i32, how: i32) usize {
return syscall2(SYS_shutdown, usize(fd), usize(how));
}
pub fn bind(fd: i32, addr: &const sockaddr, len: socklen_t) usize {
pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize {
return syscall3(SYS_bind, usize(fd), @ptrToInt(addr), usize(len));
}
@ -1088,79 +1088,79 @@ pub fn listen(fd: i32, backlog: u32) usize {
return syscall2(SYS_listen, usize(fd), backlog);
}
pub fn sendto(fd: i32, buf: &const u8, len: usize, flags: u32, addr: ?&const sockaddr, alen: socklen_t) usize {
pub fn sendto(fd: i32, buf: *const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize {
return syscall6(SYS_sendto, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), usize(alen));
}
pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: [2]i32) usize {
return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0]));
return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0]));
}
pub fn accept(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize {
pub fn accept(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
return accept4(fd, addr, len, 0);
}
pub fn accept4(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t, flags: u32) usize {
pub fn accept4(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t, flags: u32) usize {
return syscall4(SYS_accept4, usize(fd), @ptrToInt(addr), @ptrToInt(len), flags);
}
pub fn fstat(fd: i32, stat_buf: &Stat) usize {
pub fn fstat(fd: i32, stat_buf: *Stat) usize {
return syscall2(SYS_fstat, usize(fd), @ptrToInt(stat_buf));
}
pub fn stat(pathname: &const u8, statbuf: &Stat) usize {
pub fn stat(pathname: *const u8, statbuf: *Stat) usize {
return syscall2(SYS_stat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
pub fn lstat(pathname: &const u8, statbuf: &Stat) usize {
pub fn lstat(pathname: *const u8, statbuf: *Stat) usize {
return syscall2(SYS_lstat, @ptrToInt(pathname), @ptrToInt(statbuf));
}
pub fn listxattr(path: &const u8, list: &u8, size: usize) usize {
pub fn listxattr(path: *const u8, list: *u8, size: usize) usize {
return syscall3(SYS_listxattr, @ptrToInt(path), @ptrToInt(list), size);
}
pub fn llistxattr(path: &const u8, list: &u8, size: usize) usize {
pub fn llistxattr(path: *const u8, list: *u8, size: usize) usize {
return syscall3(SYS_llistxattr, @ptrToInt(path), @ptrToInt(list), size);
}
pub fn flistxattr(fd: usize, list: &u8, size: usize) usize {
pub fn flistxattr(fd: usize, list: *u8, size: usize) usize {
return syscall3(SYS_flistxattr, fd, @ptrToInt(list), size);
}
pub fn getxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
pub fn getxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_getxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
pub fn lgetxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize {
pub fn lgetxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_lgetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size);
}
pub fn fgetxattr(fd: usize, name: &const u8, value: &void, size: usize) usize {
pub fn fgetxattr(fd: usize, name: *const u8, value: *void, size: usize) usize {
return syscall4(SYS_lgetxattr, fd, @ptrToInt(name), @ptrToInt(value), size);
}
pub fn setxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize {
pub fn setxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
pub fn lsetxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize {
pub fn lsetxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags);
}
pub fn fsetxattr(fd: usize, name: &const u8, value: &const void, size: usize, flags: usize) usize {
pub fn fsetxattr(fd: usize, name: *const u8, value: *const void, size: usize, flags: usize) usize {
return syscall5(SYS_fsetxattr, fd, @ptrToInt(name), @ptrToInt(value), size, flags);
}
pub fn removexattr(path: &const u8, name: &const u8) usize {
pub fn removexattr(path: *const u8, name: *const u8) usize {
return syscall2(SYS_removexattr, @ptrToInt(path), @ptrToInt(name));
}
pub fn lremovexattr(path: &const u8, name: &const u8) usize {
pub fn lremovexattr(path: *const u8, name: *const u8) usize {
return syscall2(SYS_lremovexattr, @ptrToInt(path), @ptrToInt(name));
}
pub fn fremovexattr(fd: usize, name: &const u8) usize {
pub fn fremovexattr(fd: usize, name: *const u8) usize {
return syscall2(SYS_fremovexattr, fd, @ptrToInt(name));
}
@ -1184,11 +1184,11 @@ pub fn epoll_create1(flags: usize) usize {
return syscall1(SYS_epoll_create1, flags);
}
pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: &epoll_event) usize {
pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: *epoll_event) usize {
return syscall4(SYS_epoll_ctl, usize(epoll_fd), usize(op), usize(fd), @ptrToInt(ev));
}
pub fn epoll_wait(epoll_fd: i32, events: &epoll_event, maxevents: u32, timeout: i32) usize {
pub fn epoll_wait(epoll_fd: i32, events: *epoll_event, maxevents: u32, timeout: i32) usize {
return syscall4(SYS_epoll_wait, usize(epoll_fd), @ptrToInt(events), usize(maxevents), usize(timeout));
}
@ -1201,11 +1201,11 @@ pub const itimerspec = extern struct {
it_value: timespec,
};
pub fn timerfd_gettime(fd: i32, curr_value: &itimerspec) usize {
pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize {
return syscall2(SYS_timerfd_gettime, usize(fd), @ptrToInt(curr_value));
}
pub fn timerfd_settime(fd: i32, flags: u32, new_value: &const itimerspec, old_value: ?&itimerspec) usize {
pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize {
return syscall4(SYS_timerfd_settime, usize(fd), usize(flags), @ptrToInt(new_value), @ptrToInt(old_value));
}
@ -1300,8 +1300,8 @@ pub fn CAP_TO_INDEX(cap: u8) u8 {
}
pub const cap_t = extern struct {
hdrp: &cap_user_header_t,
datap: &cap_user_data_t,
hdrp: *cap_user_header_t,
datap: *cap_user_data_t,
};
pub const cap_user_header_t = extern struct {
@ -1319,11 +1319,11 @@ pub fn unshare(flags: usize) usize {
return syscall1(SYS_unshare, usize(flags));
}
pub fn capget(hdrp: &cap_user_header_t, datap: &cap_user_data_t) usize {
pub fn capget(hdrp: *cap_user_header_t, datap: *cap_user_data_t) usize {
return syscall2(SYS_capget, @ptrToInt(hdrp), @ptrToInt(datap));
}
pub fn capset(hdrp: &cap_user_header_t, datap: &const cap_user_data_t) usize {
pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize {
return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap));
}

View File

@ -8,11 +8,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const vdso_addr = std.os.linux_aux_raw[std.elf.AT_SYSINFO_EHDR];
if (vdso_addr == 0) return 0;
const eh = @intToPtr(&elf.Ehdr, vdso_addr);
const eh = @intToPtr(*elf.Ehdr, vdso_addr);
var ph_addr: usize = vdso_addr + eh.e_phoff;
const ph = @intToPtr(&elf.Phdr, ph_addr);
const ph = @intToPtr(*elf.Phdr, ph_addr);
var maybe_dynv: ?&usize = null;
var maybe_dynv: ?*usize = null;
var base: usize = @maxValue(usize);
{
var i: usize = 0;
@ -20,10 +20,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
i += 1;
ph_addr += eh.e_phentsize;
}) {
const this_ph = @intToPtr(&elf.Phdr, ph_addr);
const this_ph = @intToPtr(*elf.Phdr, ph_addr);
switch (this_ph.p_type) {
elf.PT_LOAD => base = vdso_addr + this_ph.p_offset - this_ph.p_vaddr,
elf.PT_DYNAMIC => maybe_dynv = @intToPtr(&usize, vdso_addr + this_ph.p_offset),
elf.PT_DYNAMIC => maybe_dynv = @intToPtr(*usize, vdso_addr + this_ph.p_offset),
else => {},
}
}
@ -31,22 +31,22 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
const dynv = maybe_dynv ?? return 0;
if (base == @maxValue(usize)) return 0;
var maybe_strings: ?&u8 = null;
var maybe_syms: ?&elf.Sym = null;
var maybe_hashtab: ?&linux.Elf_Symndx = null;
var maybe_versym: ?&u16 = null;
var maybe_verdef: ?&elf.Verdef = null;
var maybe_strings: ?*u8 = null;
var maybe_syms: ?*elf.Sym = null;
var maybe_hashtab: ?*linux.Elf_Symndx = null;
var maybe_versym: ?*u16 = null;
var maybe_verdef: ?*elf.Verdef = null;
{
var i: usize = 0;
while (dynv[i] != 0) : (i += 2) {
const p = base + dynv[i + 1];
switch (dynv[i]) {
elf.DT_STRTAB => maybe_strings = @intToPtr(&u8, p),
elf.DT_SYMTAB => maybe_syms = @intToPtr(&elf.Sym, p),
elf.DT_HASH => maybe_hashtab = @intToPtr(&linux.Elf_Symndx, p),
elf.DT_VERSYM => maybe_versym = @intToPtr(&u16, p),
elf.DT_VERDEF => maybe_verdef = @intToPtr(&elf.Verdef, p),
elf.DT_STRTAB => maybe_strings = @intToPtr(*u8, p),
elf.DT_SYMTAB => maybe_syms = @intToPtr(*elf.Sym, p),
elf.DT_HASH => maybe_hashtab = @intToPtr(*linux.Elf_Symndx, p),
elf.DT_VERSYM => maybe_versym = @intToPtr(*u16, p),
elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p),
else => {},
}
}
@ -76,7 +76,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
return 0;
}
fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: &u8) bool {
fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: *u8) bool {
var def = def_arg;
const vsym = @bitCast(u32, vsym_arg) & 0x7fff;
while (true) {
@ -84,8 +84,8 @@ fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: &
break;
if (def.vd_next == 0)
return false;
def = @intToPtr(&elf.Verdef, @ptrToInt(def) + def.vd_next);
def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next);
}
const aux = @intToPtr(&elf.Verdaux, @ptrToInt(def) + def.vd_aux);
const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux);
return mem.eql(u8, vername, cstr.toSliceConst(&strings[aux.vda_name]));
}

View File

@ -463,7 +463,7 @@ pub fn syscall6(
}
/// This matches the libc clone function.
pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: &i32, tls: usize, ctid: &i32) usize;
pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize;
pub nakedcc fn restore_rt() void {
return asm volatile ("syscall"
@ -474,12 +474,12 @@ pub nakedcc fn restore_rt() void {
}
pub const msghdr = extern struct {
msg_name: &u8,
msg_name: *u8,
msg_namelen: socklen_t,
msg_iov: &iovec,
msg_iov: *iovec,
msg_iovlen: i32,
__pad1: i32,
msg_control: &u8,
msg_control: *u8,
msg_controllen: socklen_t,
__pad2: socklen_t,
msg_flags: i32,

View File

@ -32,7 +32,7 @@ pub fn isSep(byte: u8) bool {
/// Naively combines a series of paths with the native path seperator.
/// Allocates memory for the result, which must be freed by the caller.
pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
pub fn join(allocator: *Allocator, paths: ...) ![]u8 {
if (is_windows) {
return joinWindows(allocator, paths);
} else {
@ -40,11 +40,11 @@ pub fn join(allocator: &Allocator, paths: ...) ![]u8 {
}
}
pub fn joinWindows(allocator: &Allocator, paths: ...) ![]u8 {
pub fn joinWindows(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_windows, paths);
}
pub fn joinPosix(allocator: &Allocator, paths: ...) ![]u8 {
pub fn joinPosix(allocator: *Allocator, paths: ...) ![]u8 {
return mem.join(allocator, sep_posix, paths);
}
@ -310,7 +310,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool {
}
/// Converts the command line arguments into a slice and calls `resolveSlice`.
pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
pub fn resolve(allocator: *Allocator, args: ...) ![]u8 {
var paths: [args.len][]const u8 = undefined;
comptime var arg_i = 0;
inline while (arg_i < args.len) : (arg_i += 1) {
@ -320,7 +320,7 @@ pub fn resolve(allocator: &Allocator, args: ...) ![]u8 {
}
/// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`.
pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
pub fn resolveSlice(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (is_windows) {
return resolveWindows(allocator, paths);
} else {
@ -334,7 +334,7 @@ pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// If all paths are relative it uses the current working directory as a starting point.
/// Each drive has its own current working directory.
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(is_windows); // resolveWindows called on non windows can't use getCwd
return os.getCwd(allocator);
@ -513,7 +513,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 {
/// It resolves "." and "..".
/// The result does not have a trailing path separator.
/// If all paths are relative it uses the current working directory as a starting point.
pub fn resolvePosix(allocator: &Allocator, paths: []const []const u8) ![]u8 {
pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(!is_windows); // resolvePosix called on windows can't use getCwd
return os.getCwd(allocator);
@ -883,7 +883,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) void {
/// resolve to the same path (after calling `resolve` on each), a zero-length
/// string is returned.
/// On Windows this canonicalizes the drive to a capital letter and paths to `\\`.
pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
if (is_windows) {
return relativeWindows(allocator, from, to);
} else {
@ -891,7 +891,7 @@ pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
}
}
pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolveWindows(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@ -964,7 +964,7 @@ pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8)
return []u8{};
}
pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 {
pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 {
const resolved_from = try resolvePosix(allocator, [][]const u8{from});
defer allocator.free(resolved_from);
@ -1063,7 +1063,7 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons
/// Expands all symbolic links and resolves references to `.`, `..`, and
/// extra `/` characters in ::pathname.
/// Caller must deallocate result.
pub fn real(allocator: &Allocator, pathname: []const u8) ![]u8 {
pub fn real(allocator: *Allocator, pathname: []const u8) ![]u8 {
switch (builtin.os) {
Os.windows => {
const pathname_buf = try allocator.alloc(u8, pathname.len + 1);

View File

@ -63,7 +63,7 @@ fn start1(ctx: void) u8 {
return 0;
}
fn start2(ctx: &i32) u8 {
fn start2(ctx: *i32) u8 {
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
return 0;
}

View File

@ -200,7 +200,7 @@ pub const Timer = struct {
}
/// Reads the timer value since start or the last reset in nanoseconds
pub fn read(self: &Timer) u64 {
pub fn read(self: *Timer) u64 {
var clock = clockNative() - self.start_time;
return switch (builtin.os) {
Os.windows => @divFloor(clock * ns_per_s, self.frequency),
@ -211,12 +211,12 @@ pub const Timer = struct {
}
/// Resets the timer value to 0/now.
pub fn reset(self: &Timer) void {
pub fn reset(self: *Timer) void {
self.start_time = clockNative();
}
/// Returns the current value of the timer in nanoseconds, then resets it
pub fn lap(self: &Timer) u64 {
pub fn lap(self: *Timer) u64 {
var now = clockNative();
var lap_time = self.read();
self.start_time = now;

View File

@ -1,7 +1,7 @@
pub const ERROR = @import("error.zig");
pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
phProv: &HCRYPTPROV,
phProv: *HCRYPTPROV,
pszContainer: ?LPCSTR,
pszProvider: ?LPCSTR,
dwProvType: DWORD,
@ -10,13 +10,13 @@ pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL;
pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: &BYTE) BOOL;
pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: *BYTE) BOOL;
pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn CreateDirectoryA(
lpPathName: LPCSTR,
lpSecurityAttributes: ?&SECURITY_ATTRIBUTES,
lpSecurityAttributes: ?*SECURITY_ATTRIBUTES,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateFileA(
@ -30,23 +30,23 @@ pub extern "kernel32" stdcallcc fn CreateFileA(
) HANDLE;
pub extern "kernel32" stdcallcc fn CreatePipe(
hReadPipe: &HANDLE,
hWritePipe: &HANDLE,
lpPipeAttributes: &const SECURITY_ATTRIBUTES,
hReadPipe: *HANDLE,
hWritePipe: *HANDLE,
lpPipeAttributes: *const SECURITY_ATTRIBUTES,
nSize: DWORD,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateProcessA(
lpApplicationName: ?LPCSTR,
lpCommandLine: LPSTR,
lpProcessAttributes: ?&SECURITY_ATTRIBUTES,
lpThreadAttributes: ?&SECURITY_ATTRIBUTES,
lpProcessAttributes: ?*SECURITY_ATTRIBUTES,
lpThreadAttributes: ?*SECURITY_ATTRIBUTES,
bInheritHandles: BOOL,
dwCreationFlags: DWORD,
lpEnvironment: ?&c_void,
lpEnvironment: ?*c_void,
lpCurrentDirectory: ?LPCSTR,
lpStartupInfo: &STARTUPINFOA,
lpProcessInformation: &PROCESS_INFORMATION,
lpStartupInfo: *STARTUPINFOA,
lpProcessInformation: *PROCESS_INFORMATION,
) BOOL;
pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA(
@ -65,7 +65,7 @@ pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: LPCH) BOOL;
pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: &DWORD) BOOL;
pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL;
pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD;
@ -73,9 +73,9 @@ pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?LPCH;
pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD;
pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: &DWORD) BOOL;
pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) BOOL;
pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: &LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
@ -84,7 +84,7 @@ pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx(
in_hFile: HANDLE,
in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS,
out_lpFileInformation: &c_void,
out_lpFileInformation: *c_void,
in_dwBufferSize: DWORD,
) BOOL;
@ -97,21 +97,21 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?&FILETIME) void;
pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void;
pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void, dwBytes: SIZE_T) ?&c_void;
pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?&c_void;
pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void;
pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA(
lpExistingFileName: LPCSTR,
@ -119,24 +119,24 @@ pub extern "kernel32" stdcallcc fn MoveFileExA(
dwFlags: DWORD,
) BOOL;
pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: &LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: &LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL;
pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE,
out_lpBuffer: &c_void,
out_lpBuffer: *c_void,
in_nNumberOfBytesToRead: DWORD,
out_lpNumberOfBytesRead: &DWORD,
in_out_lpOverlapped: ?&OVERLAPPED,
out_lpNumberOfBytesRead: *DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
pub extern "kernel32" stdcallcc fn SetFilePointerEx(
in_fFile: HANDLE,
in_liDistanceToMove: LARGE_INTEGER,
out_opt_ldNewFilePointer: ?&LARGE_INTEGER,
out_opt_ldNewFilePointer: ?*LARGE_INTEGER,
in_dwMoveMethod: DWORD,
) BOOL;
@ -150,10 +150,10 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE,
in_lpBuffer: &const c_void,
in_lpBuffer: *const c_void,
in_nNumberOfBytesToWrite: DWORD,
out_lpNumberOfBytesWritten: ?&DWORD,
in_out_lpOverlapped: ?&OVERLAPPED,
out_lpNumberOfBytesWritten: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
//TODO: call unicode versions instead of relying on ANSI code page
@ -171,23 +171,23 @@ pub const BYTE = u8;
pub const CHAR = u8;
pub const DWORD = u32;
pub const FLOAT = f32;
pub const HANDLE = &c_void;
pub const HANDLE = *c_void;
pub const HCRYPTPROV = ULONG_PTR;
pub const HINSTANCE = &@OpaqueType();
pub const HMODULE = &@OpaqueType();
pub const HINSTANCE = *@OpaqueType();
pub const HMODULE = *@OpaqueType();
pub const INT = c_int;
pub const LPBYTE = &BYTE;
pub const LPCH = &CHAR;
pub const LPCSTR = &const CHAR;
pub const LPCTSTR = &const TCHAR;
pub const LPCVOID = &const c_void;
pub const LPDWORD = &DWORD;
pub const LPSTR = &CHAR;
pub const LPBYTE = *BYTE;
pub const LPCH = *CHAR;
pub const LPCSTR = *const CHAR;
pub const LPCTSTR = *const TCHAR;
pub const LPCVOID = *const c_void;
pub const LPDWORD = *DWORD;
pub const LPSTR = *CHAR;
pub const LPTSTR = if (UNICODE) LPWSTR else LPSTR;
pub const LPVOID = &c_void;
pub const LPWSTR = &WCHAR;
pub const PVOID = &c_void;
pub const PWSTR = &WCHAR;
pub const LPVOID = *c_void;
pub const LPWSTR = *WCHAR;
pub const PVOID = *c_void;
pub const PWSTR = *WCHAR;
pub const SIZE_T = usize;
pub const TCHAR = if (UNICODE) WCHAR else u8;
pub const UINT = c_uint;
@ -218,7 +218,7 @@ pub const OVERLAPPED = extern struct {
Pointer: PVOID,
hEvent: HANDLE,
};
pub const LPOVERLAPPED = &OVERLAPPED;
pub const LPOVERLAPPED = *OVERLAPPED;
pub const MAX_PATH = 260;
@ -271,11 +271,11 @@ pub const VOLUME_NAME_NT = 0x2;
pub const SECURITY_ATTRIBUTES = extern struct {
nLength: DWORD,
lpSecurityDescriptor: ?&c_void,
lpSecurityDescriptor: ?*c_void,
bInheritHandle: BOOL,
};
pub const PSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
pub const LPSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
pub const PSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
pub const LPSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES;
pub const GENERIC_READ = 0x80000000;
pub const GENERIC_WRITE = 0x40000000;

View File

@ -42,7 +42,7 @@ pub const WriteError = error{
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
if (windows.WriteFile(handle, @ptrCast(&const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
@ -68,11 +68,11 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool {
const size = @sizeOf(windows.FILE_NAME_INFO);
var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = []u8{0} ** (size + windows.MAX_PATH);
if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(&c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) {
if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(*c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) {
return true;
}
const name_info = @ptrCast(&const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]);
const name_bytes = name_info_bytes[size .. size + usize(name_info.FileNameLength)];
const name_wide = ([]u16)(name_bytes);
return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or
@ -91,7 +91,7 @@ pub const OpenError = error{
/// `file_path` needs to be copied in memory to add a null terminating byte, hence the allocator.
pub fn windowsOpen(
allocator: &mem.Allocator,
allocator: *mem.Allocator,
file_path: []const u8,
desired_access: windows.DWORD,
share_mode: windows.DWORD,
@ -119,7 +119,7 @@ pub fn windowsOpen(
}
/// Caller must free result.
pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap) ![]u8 {
pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u8 {
// count bytes needed
const bytes_needed = x: {
var bytes_needed: usize = 1; // 1 for the final null byte
@ -150,7 +150,7 @@ pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap)
return result;
}
pub fn windowsLoadDll(allocator: &mem.Allocator, dll_path: []const u8) !windows.HMODULE {
pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE {
const padded_buff = try cstr.addNullByte(allocator, dll_path);
defer allocator.free(padded_buff);
return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound;

View File

@ -8,7 +8,7 @@ pub const Message = struct {
type: usize,
payload: usize,
pub fn from(mailbox_id: &const MailboxId) Message {
pub fn from(mailbox_id: *const MailboxId) Message {
return Message{
.sender = MailboxId.Undefined,
.receiver = *mailbox_id,
@ -17,7 +17,7 @@ pub const Message = struct {
};
}
pub fn to(mailbox_id: &const MailboxId, msg_type: usize) Message {
pub fn to(mailbox_id: *const MailboxId, msg_type: usize) Message {
return Message{
.sender = MailboxId.This,
.receiver = *mailbox_id,
@ -26,7 +26,7 @@ pub const Message = struct {
};
}
pub fn withData(mailbox_id: &const MailboxId, msg_type: usize, payload: usize) Message {
pub fn withData(mailbox_id: *const MailboxId, msg_type: usize, payload: usize) Message {
return Message{
.sender = MailboxId.This,
.receiver = *mailbox_id,
@ -67,7 +67,7 @@ pub const getErrno = @import("linux/index.zig").getErrno;
use @import("linux/errno.zig");
// TODO: implement this correctly.
pub fn read(fd: i32, buf: &u8, count: usize) usize {
pub fn read(fd: i32, buf: *u8, count: usize) usize {
switch (fd) {
STDIN_FILENO => {
var i: usize = 0;
@ -75,7 +75,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
send(Message.to(Server.Keyboard, 0));
var message = Message.from(MailboxId.This);
receive(&message);
receive(*message);
buf[i] = u8(message.payload);
}
@ -86,7 +86,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize {
}
// TODO: implement this correctly.
pub fn write(fd: i32, buf: &const u8, count: usize) usize {
pub fn write(fd: i32, buf: *const u8, count: usize) usize {
switch (fd) {
STDOUT_FILENO, STDERR_FILENO => {
var i: usize = 0;
@ -126,22 +126,22 @@ pub fn exit(status: i32) noreturn {
unreachable;
}
pub fn createPort(mailbox_id: &const MailboxId) void {
pub fn createPort(mailbox_id: *const MailboxId) void {
_ = switch (*mailbox_id) {
MailboxId.Port => |id| syscall1(Syscall.createPort, id),
else => unreachable,
};
}
pub fn send(message: &const Message) void {
pub fn send(message: *const Message) void {
_ = syscall1(Syscall.send, @ptrToInt(message));
}
pub fn receive(destination: &Message) void {
pub fn receive(destination: *Message) void {
_ = syscall1(Syscall.receive, @ptrToInt(destination));
}
pub fn subscribeIRQ(irq: u8, mailbox_id: &const MailboxId) void {
pub fn subscribeIRQ(irq: u8, mailbox_id: *const MailboxId) void {
_ = syscall2(Syscall.subscribeIRQ, irq, @ptrToInt(mailbox_id));
}

View File

@ -28,15 +28,15 @@ pub const DefaultPrng = Xoroshiro128;
pub const DefaultCsprng = Isaac64;
pub const Random = struct {
fillFn: fn (r: &Random, buf: []u8) void,
fillFn: fn (r: *Random, buf: []u8) void,
/// Read random bytes into the specified buffer until fill.
pub fn bytes(r: &Random, buf: []u8) void {
pub fn bytes(r: *Random, buf: []u8) void {
r.fillFn(r, buf);
}
/// Return a random integer/boolean type.
pub fn scalar(r: &Random, comptime T: type) T {
pub fn scalar(r: *Random, comptime T: type) T {
var rand_bytes: [@sizeOf(T)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@ -50,7 +50,7 @@ pub const Random = struct {
/// Get a random unsigned integer with even distribution between `start`
/// inclusive and `end` exclusive.
pub fn range(r: &Random, comptime T: type, start: T, end: T) T {
pub fn range(r: *Random, comptime T: type, start: T, end: T) T {
assert(start <= end);
if (T.is_signed) {
const uint = @IntType(false, T.bit_count);
@ -92,7 +92,7 @@ pub const Random = struct {
}
/// Return a floating point value evenly distributed in the range [0, 1).
pub fn float(r: &Random, comptime T: type) T {
pub fn float(r: *Random, comptime T: type) T {
// Generate a uniform value between [1, 2) and scale down to [0, 1).
// Note: The lowest mantissa bit is always set to 0 so we only use half the available range.
switch (T) {
@ -113,7 +113,7 @@ pub const Random = struct {
/// Return a floating point value normally distributed with mean = 0, stddev = 1.
///
/// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean.
pub fn floatNorm(r: &Random, comptime T: type) T {
pub fn floatNorm(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.NormDist);
switch (T) {
f32 => return f32(value),
@ -125,7 +125,7 @@ pub const Random = struct {
/// Return an exponentially distributed float with a rate parameter of 1.
///
/// To use a different rate parameter, use: floatExp(...) / desiredRate.
pub fn floatExp(r: &Random, comptime T: type) T {
pub fn floatExp(r: *Random, comptime T: type) T {
const value = ziggurat.next_f64(r, ziggurat.ExpDist);
switch (T) {
f32 => return f32(value),
@ -135,7 +135,7 @@ pub const Random = struct {
}
/// Shuffle a slice into a random order.
pub fn shuffle(r: &Random, comptime T: type, buf: []T) void {
pub fn shuffle(r: *Random, comptime T: type, buf: []T) void {
if (buf.len < 2) {
return;
}
@ -159,7 +159,7 @@ const SplitMix64 = struct {
return SplitMix64{ .s = seed };
}
pub fn next(self: &SplitMix64) u64 {
pub fn next(self: *SplitMix64) u64 {
self.s +%= 0x9e3779b97f4a7c15;
var z = self.s;
@ -208,7 +208,7 @@ pub const Pcg = struct {
return pcg;
}
fn next(self: &Pcg) u32 {
fn next(self: *Pcg) u32 {
const l = self.s;
self.s = l *% default_multiplier +% (self.i | 1);
@ -218,13 +218,13 @@ pub const Pcg = struct {
return (xor_s >> u5(rot)) | (xor_s << u5((0 -% rot) & 31));
}
fn seed(self: &Pcg, init_s: u64) void {
fn seed(self: *Pcg, init_s: u64) void {
// Pcg requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
self.seedTwo(gen.next(), gen.next());
}
fn seedTwo(self: &Pcg, init_s: u64, init_i: u64) void {
fn seedTwo(self: *Pcg, init_s: u64, init_i: u64) void {
self.s = 0;
self.i = (init_s << 1) | 1;
self.s = self.s *% default_multiplier +% self.i;
@ -232,7 +232,7 @@ pub const Pcg = struct {
self.s = self.s *% default_multiplier +% self.i;
}
fn fill(r: &Random, buf: []u8) void {
fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Pcg, "random", r);
var i: usize = 0;
@ -297,7 +297,7 @@ pub const Xoroshiro128 = struct {
return x;
}
fn next(self: &Xoroshiro128) u64 {
fn next(self: *Xoroshiro128) u64 {
const s0 = self.s[0];
var s1 = self.s[1];
const r = s0 +% s1;
@ -310,7 +310,7 @@ pub const Xoroshiro128 = struct {
}
// Skip 2^64 places ahead in the sequence
fn jump(self: &Xoroshiro128) void {
fn jump(self: *Xoroshiro128) void {
var s0: u64 = 0;
var s1: u64 = 0;
@ -334,7 +334,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = s1;
}
fn seed(self: &Xoroshiro128, init_s: u64) void {
fn seed(self: *Xoroshiro128, init_s: u64) void {
// Xoroshiro requires 128-bits of seed.
var gen = SplitMix64.init(init_s);
@ -342,7 +342,7 @@ pub const Xoroshiro128 = struct {
self.s[1] = gen.next();
}
fn fill(r: &Random, buf: []u8) void {
fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Xoroshiro128, "random", r);
var i: usize = 0;
@ -435,7 +435,7 @@ pub const Isaac64 = struct {
return isaac;
}
fn step(self: &Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void {
const x = self.m[base + m1];
self.a = mix +% self.m[base + m2];
@ -446,7 +446,7 @@ pub const Isaac64 = struct {
self.r[self.r.len - 1 - base - m1] = self.b;
}
fn refill(self: &Isaac64) void {
fn refill(self: *Isaac64) void {
const midpoint = self.r.len / 2;
self.c +%= 1;
@ -475,7 +475,7 @@ pub const Isaac64 = struct {
self.i = 0;
}
fn next(self: &Isaac64) u64 {
fn next(self: *Isaac64) u64 {
if (self.i >= self.r.len) {
self.refill();
}
@ -485,7 +485,7 @@ pub const Isaac64 = struct {
return value;
}
fn seed(self: &Isaac64, init_s: u64, comptime rounds: usize) void {
fn seed(self: *Isaac64, init_s: u64, comptime rounds: usize) void {
// We ignore the multi-pass requirement since we don't currently expose full access to
// seeding the self.m array completely.
mem.set(u64, self.m[0..], 0);
@ -551,7 +551,7 @@ pub const Isaac64 = struct {
self.i = self.r.len; // trigger refill on first value
}
fn fill(r: &Random, buf: []u8) void {
fn fill(r: *Random, buf: []u8) void {
const self = @fieldParentPtr(Isaac64, "random", r);
var i: usize = 0;
@ -666,7 +666,7 @@ test "Random range" {
testRange(&prng.random, 10, 14);
}
fn testRange(r: &Random, start: i32, end: i32) void {
fn testRange(r: *Random, start: i32, end: i32) void {
const count = usize(end - start);
var values_buffer = []bool{false} ** 20;
const values = values_buffer[0..count];

View File

@ -12,7 +12,7 @@ const std = @import("../index.zig");
const math = std.math;
const Random = std.rand.Random;
pub fn next_f64(random: &Random, comptime tables: &const ZigTable) f64 {
pub fn next_f64(random: *Random, comptime tables: *const ZigTable) f64 {
while (true) {
// We manually construct a float from parts as we can avoid an extra random lookup here by
// using the unused exponent for the lookup table entry.
@ -60,7 +60,7 @@ pub const ZigTable = struct {
// whether the distribution is symmetric
is_symmetric: bool,
// fallback calculation in the case we are in the 0 block
zero_case: fn (&Random, f64) f64,
zero_case: fn (*Random, f64) f64,
};
// zigNorInit
@ -70,7 +70,7 @@ fn ZigTableGen(
comptime v: f64,
comptime f: fn (f64) f64,
comptime f_inv: fn (f64) f64,
comptime zero_case: fn (&Random, f64) f64,
comptime zero_case: fn (*Random, f64) f64,
) ZigTable {
var tables: ZigTable = undefined;
@ -110,7 +110,7 @@ fn norm_f(x: f64) f64 {
fn norm_f_inv(y: f64) f64 {
return math.sqrt(-2.0 * math.ln(y));
}
fn norm_zero_case(random: &Random, u: f64) f64 {
fn norm_zero_case(random: *Random, u: f64) f64 {
var x: f64 = 1;
var y: f64 = 0;
@ -149,7 +149,7 @@ fn exp_f(x: f64) f64 {
fn exp_f_inv(y: f64) f64 {
return -math.ln(y);
}
fn exp_zero_case(random: &Random, _: f64) f64 {
fn exp_zero_case(random: *Random, _: f64) f64 {
return exp_r - math.ln(random.float(f64));
}

View File

@ -87,49 +87,49 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
const ShelfIndex = std.math.Log2Int(usize);
prealloc_segment: [prealloc_item_count]T,
dynamic_segments: []&T,
allocator: &Allocator,
dynamic_segments: []*T,
allocator: *Allocator,
len: usize,
pub const prealloc_count = prealloc_item_count;
/// Deinitialize with `deinit`
pub fn init(allocator: &Allocator) Self {
pub fn init(allocator: *Allocator) Self {
return Self{
.allocator = allocator,
.len = 0,
.prealloc_segment = undefined,
.dynamic_segments = []&T{},
.dynamic_segments = []*T{},
};
}
pub fn deinit(self: &Self) void {
pub fn deinit(self: *Self) void {
self.freeShelves(ShelfIndex(self.dynamic_segments.len), 0);
self.allocator.free(self.dynamic_segments);
self.* = undefined;
}
pub fn at(self: &Self, i: usize) &T {
pub fn at(self: *Self, i: usize) *T {
assert(i < self.len);
return self.uncheckedAt(i);
}
pub fn count(self: &const Self) usize {
pub fn count(self: *const Self) usize {
return self.len;
}
pub fn push(self: &Self, item: &const T) !void {
pub fn push(self: *Self, item: *const T) !void {
const new_item_ptr = try self.addOne();
new_item_ptr.* = item.*;
}
pub fn pushMany(self: &Self, items: []const T) !void {
pub fn pushMany(self: *Self, items: []const T) !void {
for (items) |item| {
try self.push(item);
}
}
pub fn pop(self: &Self) ?T {
pub fn pop(self: *Self) ?T {
if (self.len == 0) return null;
const index = self.len - 1;
@ -138,7 +138,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return result;
}
pub fn addOne(self: &Self) !&T {
pub fn addOne(self: *Self) !*T {
const new_length = self.len + 1;
try self.growCapacity(new_length);
const result = self.uncheckedAt(self.len);
@ -147,7 +147,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Grows or shrinks capacity to match usage.
pub fn setCapacity(self: &Self, new_capacity: usize) !void {
pub fn setCapacity(self: *Self, new_capacity: usize) !void {
if (new_capacity <= usize(1) << (prealloc_exp + self.dynamic_segments.len)) {
return self.shrinkCapacity(new_capacity);
} else {
@ -156,15 +156,15 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Only grows capacity, or retains current capacity
pub fn growCapacity(self: &Self, new_capacity: usize) !void {
pub fn growCapacity(self: *Self, new_capacity: usize) !void {
const new_cap_shelf_count = shelfCount(new_capacity);
const old_shelf_count = ShelfIndex(self.dynamic_segments.len);
if (new_cap_shelf_count > old_shelf_count) {
self.dynamic_segments = try self.allocator.realloc(&T, self.dynamic_segments, new_cap_shelf_count);
self.dynamic_segments = try self.allocator.realloc(*T, self.dynamic_segments, new_cap_shelf_count);
var i = old_shelf_count;
errdefer {
self.freeShelves(i, old_shelf_count);
self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, old_shelf_count);
self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, old_shelf_count);
}
while (i < new_cap_shelf_count) : (i += 1) {
self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr;
@ -173,12 +173,12 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
/// Only shrinks capacity or retains current capacity
pub fn shrinkCapacity(self: &Self, new_capacity: usize) void {
pub fn shrinkCapacity(self: *Self, new_capacity: usize) void {
if (new_capacity <= prealloc_item_count) {
const len = ShelfIndex(self.dynamic_segments.len);
self.freeShelves(len, 0);
self.allocator.free(self.dynamic_segments);
self.dynamic_segments = []&T{};
self.dynamic_segments = []*T{};
return;
}
@ -190,10 +190,10 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
self.freeShelves(old_shelf_count, new_cap_shelf_count);
self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, new_cap_shelf_count);
self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, new_cap_shelf_count);
}
pub fn uncheckedAt(self: &Self, index: usize) &T {
pub fn uncheckedAt(self: *Self, index: usize) *T {
if (index < prealloc_item_count) {
return &self.prealloc_segment[index];
}
@ -230,7 +230,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return list_index + prealloc_item_count - (usize(1) << ((prealloc_exp + 1) + shelf_index));
}
fn freeShelves(self: &Self, from_count: ShelfIndex, to_count: ShelfIndex) void {
fn freeShelves(self: *Self, from_count: ShelfIndex, to_count: ShelfIndex) void {
var i = from_count;
while (i != to_count) {
i -= 1;
@ -239,13 +239,13 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
pub const Iterator = struct {
list: &Self,
list: *Self,
index: usize,
box_index: usize,
shelf_index: ShelfIndex,
shelf_size: usize,
pub fn next(it: &Iterator) ?&T {
pub fn next(it: *Iterator) ?*T {
if (it.index >= it.list.len) return null;
if (it.index < prealloc_item_count) {
const ptr = &it.list.prealloc_segment[it.index];
@ -269,7 +269,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return ptr;
}
pub fn prev(it: &Iterator) ?&T {
pub fn prev(it: *Iterator) ?*T {
if (it.index == 0) return null;
it.index -= 1;
@ -286,7 +286,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
}
pub fn peek(it: &Iterator) ?&T {
pub fn peek(it: *Iterator) ?*T {
if (it.index >= it.list.len)
return null;
if (it.index < prealloc_item_count)
@ -295,7 +295,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return &it.list.dynamic_segments[it.shelf_index][it.box_index];
}
pub fn set(it: &Iterator, index: usize) void {
pub fn set(it: *Iterator, index: usize) void {
it.index = index;
if (index < prealloc_item_count) return;
it.shelf_index = shelfIndex(index);
@ -304,7 +304,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
};
pub fn iterator(self: &Self, start_index: usize) Iterator {
pub fn iterator(self: *Self, start_index: usize) Iterator {
var it = Iterator{
.list = self,
.index = undefined,
@ -331,7 +331,7 @@ test "std.SegmentedList" {
try testSegmentedList(16, a);
}
fn testSegmentedList(comptime prealloc: usize, allocator: &Allocator) !void {
fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
var list = SegmentedList(i32, prealloc).init(allocator);
defer list.deinit();

View File

@ -5,7 +5,7 @@ const math = std.math;
const builtin = @import("builtin");
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
{
var i: usize = 1;
while (i < items.len) : (i += 1) {
@ -30,7 +30,7 @@ const Range = struct {
};
}
fn length(self: &const Range) usize {
fn length(self: *const Range) usize {
return self.end - self.start;
}
};
@ -58,12 +58,12 @@ const Iterator = struct {
};
}
fn begin(self: &Iterator) void {
fn begin(self: *Iterator) void {
self.numerator = 0;
self.decimal = 0;
}
fn nextRange(self: &Iterator) Range {
fn nextRange(self: *Iterator) Range {
const start = self.decimal;
self.decimal += self.decimal_step;
@ -79,11 +79,11 @@ const Iterator = struct {
};
}
fn finished(self: &Iterator) bool {
fn finished(self: *Iterator) bool {
return self.decimal >= self.size;
}
fn nextLevel(self: &Iterator) bool {
fn nextLevel(self: *Iterator) bool {
self.decimal_step += self.decimal_step;
self.numerator_step += self.numerator_step;
if (self.numerator_step >= self.denominator) {
@ -94,7 +94,7 @@ const Iterator = struct {
return (self.decimal_step < self.size);
}
fn length(self: &Iterator) usize {
fn length(self: *Iterator) usize {
return self.decimal_step;
}
};
@ -108,7 +108,7 @@ const Pull = struct {
/// Stable in-place sort. O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required).
/// Currently implemented as block sort.
pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void {
pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void {
// Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c
var cache: [512]T = undefined;
@ -741,7 +741,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &con
}
// merge operation without a buffer
fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const Range, lessThan: fn (&const T, &const T) bool) void {
fn mergeInPlace(comptime T: type, items: []T, A_arg: *const Range, B_arg: *const Range, lessThan: fn (*const T, *const T) bool) void {
if (A_arg.length() == 0 or B_arg.length() == 0) return;
// this just repeatedly binary searches into B and rotates A into position.
@ -783,7 +783,7 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const
}
// merge operation using an internal buffer
fn mergeInternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, buffer: &const Range) void {
fn mergeInternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, buffer: *const Range) void {
// whenever we find a value to add to the final array, swap it with the value that's already in that spot
// when this algorithm is finished, 'buffer' will contain its original contents, but in a different order
var A_count: usize = 0;
@ -819,7 +819,7 @@ fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_s
// combine a linear search with a binary search to reduce the number of comparisons in situations
// where have some idea as to how many unique values there are and where the next value might be
fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
fn findFirstForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -833,7 +833,7 @@ fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
fn findFirstBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -847,7 +847,7 @@ fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &cons
return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan);
}
fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
fn findLastForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -861,7 +861,7 @@ fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index - skip, index), lessThan);
}
fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize {
fn findLastBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize {
if (range.length() == 0) return range.start;
const skip = math.max(range.length() / unique, usize(1));
@ -875,7 +875,7 @@ fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const
return binaryLast(T, items, value, Range.init(index, index + skip), lessThan);
}
fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
fn binaryFirst(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@ -893,7 +893,7 @@ fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Rang
return start;
}
fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize {
fn binaryLast(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize {
var start = range.start;
var end = range.end - 1;
if (range.start >= range.end) return range.end;
@ -911,7 +911,7 @@ fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range
return start;
}
fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, into: []T) void {
fn mergeInto(comptime T: type, from: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, into: []T) void {
var A_index: usize = A.start;
var B_index: usize = B.start;
const A_last = A.end;
@ -941,7 +941,7 @@ fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, less
}
}
fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, cache: []T) void {
fn mergeExternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, cache: []T) void {
// A fits into the cache, so use that instead of the internal buffer
var A_index: usize = 0;
var B_index: usize = B.start;
@ -969,26 +969,26 @@ fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range,
mem.copy(T, items[insert_index..], cache[A_index..A_last]);
}
fn swap(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool, order: &[8]u8, x: usize, y: usize) void {
fn swap(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool, order: *[8]u8, x: usize, y: usize) void {
if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) {
mem.swap(T, &items[x], &items[y]);
mem.swap(u8, &(order.*)[x], &(order.*)[y]);
}
}
fn i32asc(lhs: &const i32, rhs: &const i32) bool {
fn i32asc(lhs: *const i32, rhs: *const i32) bool {
return lhs.* < rhs.*;
}
fn i32desc(lhs: &const i32, rhs: &const i32) bool {
fn i32desc(lhs: *const i32, rhs: *const i32) bool {
return rhs.* < lhs.*;
}
fn u8asc(lhs: &const u8, rhs: &const u8) bool {
fn u8asc(lhs: *const u8, rhs: *const u8) bool {
return lhs.* < rhs.*;
}
fn u8desc(lhs: &const u8, rhs: &const u8) bool {
fn u8desc(lhs: *const u8, rhs: *const u8) bool {
return rhs.* < lhs.*;
}
@ -1125,7 +1125,7 @@ const IdAndValue = struct {
id: usize,
value: i32,
};
fn cmpByValue(a: &const IdAndValue, b: &const IdAndValue) bool {
fn cmpByValue(a: *const IdAndValue, b: *const IdAndValue) bool {
return i32asc(a.value, b.value);
}
@ -1324,7 +1324,7 @@ test "sort fuzz testing" {
var fixed_buffer_mem: [100 * 1024]u8 = undefined;
fn fuzzTest(rng: &std.rand.Random) void {
fn fuzzTest(rng: *std.rand.Random) void {
const array_size = rng.range(usize, 0, 1000);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
@ -1345,7 +1345,7 @@ fn fuzzTest(rng: &std.rand.Random) void {
}
}
pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
var i: usize = 0;
var smallest = items[0];
for (items[1..]) |item| {
@ -1356,7 +1356,7 @@ pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &cons
return smallest;
}
pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T {
pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T {
var i: usize = 0;
var biggest = items[0];
for (items[1..]) |item| {

View File

@ -5,7 +5,7 @@ const root = @import("@root");
const std = @import("std");
const builtin = @import("builtin");
var argc_ptr: &usize = undefined;
var argc_ptr: *usize = undefined;
comptime {
const strong_linkage = builtin.GlobalLinkage.Strong;
@ -28,12 +28,12 @@ nakedcc fn _start() noreturn {
switch (builtin.arch) {
builtin.Arch.x86_64 => {
argc_ptr = asm ("lea (%%rsp), %[argc]"
: [argc] "=r" (-> &usize)
: [argc] "=r" (-> *usize)
);
},
builtin.Arch.i386 => {
argc_ptr = asm ("lea (%%esp), %[argc]"
: [argc] "=r" (-> &usize)
: [argc] "=r" (-> *usize)
);
},
else => @compileError("unsupported arch"),
@ -51,13 +51,13 @@ extern fn WinMainCRTStartup() noreturn {
fn posixCallMainAndExit() noreturn {
const argc = argc_ptr.*;
const argv = @ptrCast(&&u8, &argc_ptr[1]);
const envp_nullable = @ptrCast(&?&u8, &argv[argc + 1]);
const argv = @ptrCast(**u8, &argc_ptr[1]);
const envp_nullable = @ptrCast(*?*u8, &argv[argc + 1]);
var envp_count: usize = 0;
while (envp_nullable[envp_count]) |_| : (envp_count += 1) {}
const envp = @ptrCast(&&u8, envp_nullable)[0..envp_count];
const envp = @ptrCast(**u8, envp_nullable)[0..envp_count];
if (builtin.os == builtin.Os.linux) {
const auxv = &@ptrCast(&usize, envp.ptr)[envp_count + 1];
const auxv = &@ptrCast(*usize, envp.ptr)[envp_count + 1];
var i: usize = 0;
while (auxv[i] != 0) : (i += 2) {
if (auxv[i] < std.os.linux_aux_raw.len) std.os.linux_aux_raw[auxv[i]] = auxv[i + 1];
@ -68,16 +68,16 @@ fn posixCallMainAndExit() noreturn {
std.os.posix.exit(callMainWithArgs(argc, argv, envp));
}
fn callMainWithArgs(argc: usize, argv: &&u8, envp: []&u8) u8 {
/// Publishes the process arguments and environment into the std.os
/// globals, then transfers control to the user's main via callMain.
/// Returns the process exit code.
fn callMainWithArgs(argc: usize, argv: **u8, envp: []*u8) u8 {
    // The two global stores are independent of each other; both must
    // happen before user code runs.
    std.os.posix_environ_raw = envp;
    std.os.ArgIteratorPosix.raw = argv[0..argc];
    return callMain();
}
extern fn main(c_argc: i32, c_argv: &&u8, c_envp: &?&u8) i32 {
/// C-ABI entry point used when a libc startup routine calls main().
/// Counts the null-terminated c_envp vector, re-slices it with an
/// explicit length, and forwards everything to callMainWithArgs.
extern fn main(c_argc: i32, c_argv: **u8, c_envp: *?*u8) i32 {
    var count: usize = 0;
    while (c_envp[count] != null) : (count += 1) {}
    const envp = @ptrCast(**u8, c_envp)[0..count];
    return callMainWithArgs(usize(c_argc), c_argv, envp);
}

View File

@ -1,10 +1,10 @@
const Builder = @import("std").build.Builder;
pub fn build(b: &Builder) void {
/// Template build script for `zig init`-style projects: builds
/// src/main.zig into an executable with the user-selected release mode
/// and installs the resulting artifact.
pub fn build(b: *Builder) void {
    const mode = b.standardReleaseOptions();
    const exe = b.addExecutable("YOUR_NAME_HERE", "src/main.zig");
    exe.setBuildMode(mode);
    // BUG FIX: the pointer reform changed pointer *types* from `&T` to `*T`,
    // but the address-of operator is still `&`. `dependOn(*exe.step)` is a
    // syntax error (`*` only forms types); take the step's address with `&`,
    // matching every other dependOn call in this commit.
    b.default_step.dependOn(&exe.step);
    b.installArtifact(exe);
}

View File

@ -129,7 +129,7 @@ pub fn main() !void {
};
}
fn runBuild(builder: &Builder) error!void {
fn runBuild(builder: *Builder) error!void {
switch (@typeId(@typeOf(root.build).ReturnType)) {
builtin.TypeId.Void => root.build(builder),
builtin.TypeId.ErrorUnion => try root.build(builder),
@ -137,7 +137,7 @@ fn runBuild(builder: &Builder) error!void {
}
}
fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
// run the build script to collect the options
if (!already_ran_build) {
builder.setInstallPrefix(null);
@ -195,7 +195,7 @@ fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
);
}
fn usageAndErr(builder: &Builder, already_ran_build: bool, out_stream: var) error {
/// Prints the usage text (best effort — any write failure is deliberately
/// ignored, since we are already on the error path) and then reports
/// invalid command-line arguments to the caller.
fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) error {
    usage(builder, already_ran_build, out_stream) catch {};
    return error.InvalidArgs;
}

View File

@ -5,7 +5,7 @@ const builtin = @import("builtin");
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
if (builtin.is_test) {
@setCold(true);
@import("std").debug.panic("{}", msg);
@ -14,7 +14,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn
}
}
export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 {
export fn memset(dest: ?*u8, c: u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
var index: usize = 0;
@ -24,7 +24,7 @@ export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 {
return dest;
}
export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 {
export fn memcpy(noalias dest: ?*u8, noalias src: ?*const u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
var index: usize = 0;
@ -34,7 +34,7 @@ export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 {
return dest;
}
export fn memmove(dest: ?&u8, src: ?&const u8, n: usize) ?&u8 {
export fn memmove(dest: ?*u8, src: ?*const u8, n: usize) ?*u8 {
@setRuntimeSafety(false);
if (@ptrToInt(dest) < @ptrToInt(src)) {

View File

@ -78,7 +78,7 @@ const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4;
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn {
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
@setCold(true);
if (is_test) {
std.debug.panic("{}", msg);
@ -284,7 +284,7 @@ nakedcc fn ___chkstk_ms() align(4) void {
);
}
extern fn __udivmodsi4(a: u32, b: u32, rem: &u32) u32 {
extern fn __udivmodsi4(a: u32, b: u32, rem: *u32) u32 {
@setRuntimeSafety(is_test);
const d = __udivsi3(a, b);

View File

@ -7,15 +7,15 @@ const low = switch (builtin.endian) {
};
const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?&DoubleInt) DoubleInt {
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
const n = @ptrCast(&const [2]SingleInt, &a).*; // TODO issue #421
const d = @ptrCast(&const [2]SingleInt, &b).*; // TODO issue #421
const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
const d = @ptrCast(*const [2]SingleInt, &b).*; // TODO issue #421
var q: [2]SingleInt = undefined;
var r: [2]SingleInt = undefined;
var sr: c_uint = undefined;
@ -57,7 +57,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[high] = n[high] % d[high];
r[low] = 0;
rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] / d[high];
}
@ -69,7 +69,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
if (maybe_rem) |rem| {
r[low] = n[low];
r[high] = n[high] & (d[high] - 1);
rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
}
return n[high] >> Log2SingleInt(@ctz(d[high]));
}
@ -109,7 +109,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
sr = @ctz(d[low]);
q[high] = n[high] >> Log2SingleInt(sr);
q[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr));
return @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
@ -183,13 +183,13 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// r.all -= b;
// carry = 1;
// }
r_all = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
const s: SignedDoubleInt = SignedDoubleInt(b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
carry = u32(s & 1);
r_all -= b & @bitCast(DoubleInt, s);
r = @ptrCast(&[2]SingleInt, &r_all).*; // TODO issue #421
r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
}
const q_all = ((@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
const q_all = ((@ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421
if (maybe_rem) |rem| {
rem.* = r_all;
}

View File

@ -1,7 +1,7 @@
const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?&u64) u64 {
/// compiler-rt: unsigned 64-bit divide. Returns the quotient a / b; when
/// maybe_rem is non-null, the remainder is stored through it by udivmod.
pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) u64 {
    @setRuntimeSafety(builtin.is_test);
    return udivmod(u64, a, b, maybe_rem);
}

View File

@ -2,12 +2,12 @@ const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
const compiler_rt = @import("index.zig");
pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?&u128) u128 {
/// compiler-rt: unsigned 128-bit divide. Returns the quotient a / b; when
/// maybe_rem is non-null, the remainder is stored through it by udivmod.
pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) u128 {
    @setRuntimeSafety(builtin.is_test);
    return udivmod(u128, a, b, maybe_rem);
}
pub extern fn __udivmodti4_windows_x86_64(a: &const u128, b: &const u128, maybe_rem: ?&u128) void {
/// Windows x86-64 variant of __udivmodti4: the u128 operands arrive by
/// reference and the quotient is returned in XMM0 via compiler_rt.setXmm0
/// instead of a normal return value.
pub extern fn __udivmodti4_windows_x86_64(a: *const u128, b: *const u128, maybe_rem: ?*u128) void {
    @setRuntimeSafety(builtin.is_test);
    compiler_rt.setXmm0(u128, udivmod(u128, a.*, b.*, maybe_rem));
}

View File

@ -6,7 +6,7 @@ pub extern fn __udivti3(a: u128, b: u128) u128 {
return udivmodti4.__udivmodti4(a, b, null);
}
pub extern fn __udivti3_windows_x86_64(a: &const u128, b: &const u128) void {
/// Windows x86-64 variant of __udivti3 (128-bit unsigned division,
/// remainder discarded): operands by reference, result delivered through
/// __udivmodti4_windows_x86_64's XMM0 convention.
pub extern fn __udivti3_windows_x86_64(a: *const u128, b: *const u128) void {
    @setRuntimeSafety(builtin.is_test);
    udivmodti4.__udivmodti4_windows_x86_64(a, b, null);
}

View File

@ -9,7 +9,7 @@ pub extern fn __umodti3(a: u128, b: u128) u128 {
return r;
}
pub extern fn __umodti3_windows_x86_64(a: &const u128, b: &const u128) void {
/// Windows x86-64 variant of __umodti3 (128-bit unsigned remainder):
/// operands arrive by reference and the result is placed in XMM0 via
/// compiler_rt.setXmm0.
pub extern fn __umodti3_windows_x86_64(a: *const u128, b: *const u128) void {
    @setRuntimeSafety(builtin.is_test);
    compiler_rt.setXmm0(u128, __umodti3(a.*, b.*));
}

Some files were not shown because too many files have changed in this diff Show More