From 652f4bdf6242462182005f4c7149f13beaaa3259 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 5 Jun 2018 18:03:21 -0400
Subject: [PATCH 01/49] disallow unknown-length pointer to opaque
This also means that translate-c has to detect when a pointer to
opaque is happening, and use `*` instead of `[*]`.
See #1059
---
src/analyze.cpp | 1 +
src/ir.cpp | 10 +++++-----
src/tokenizer.hpp | 2 ++
src/translate_c.cpp | 37 +++++++++++++++++++++++++++++++++----
std/c/index.zig | 20 ++++++++++----------
std/heap.zig | 8 ++++----
std/os/darwin.zig | 8 ++++----
std/os/file.zig | 2 +-
std/os/index.zig | 4 ++--
std/os/windows/index.zig | 14 +++++++-------
std/os/windows/util.zig | 2 +-
test/compare_output.zig | 4 ++--
test/compile_errors.zig | 7 +++++++
test/translate_c.zig | 20 ++++++++++----------
14 files changed, 89 insertions(+), 50 deletions(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index b0f0196020..0adb992798 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -384,6 +384,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count)
{
assert(!type_is_invalid(child_type));
+ assert(ptr_len == PtrLenSingle || child_type->id != TypeTableEntryIdOpaque);
TypeId type_id = {};
TypeTableEntry **parent_pointer = nullptr;
diff --git a/src/ir.cpp b/src/ir.cpp
index 9578795fcc..5c44e7c0ff 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -4620,11 +4620,8 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *
static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypePointerType);
- // The null check here is for C imports which don't set a token on the AST node. We could potentially
- // update that code to create a fake token and then remove this check.
- PtrLen ptr_len = (node->data.pointer_type.star_token != nullptr &&
- (node->data.pointer_type.star_token->id == TokenIdStar ||
- node->data.pointer_type.star_token->id == TokenIdStarStar)) ? PtrLenSingle : PtrLenUnknown;
+ PtrLen ptr_len = (node->data.pointer_type.star_token->id == TokenIdStar ||
+ node->data.pointer_type.star_token->id == TokenIdStarStar) ? PtrLenSingle : PtrLenUnknown;
bool is_const = node->data.pointer_type.is_const;
bool is_volatile = node->data.pointer_type.is_volatile;
AstNode *expr_node = node->data.pointer_type.op_expr;
@@ -18973,6 +18970,9 @@ static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstruc
if (child_type->id == TypeTableEntryIdUnreachable) {
ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
return ira->codegen->builtin_types.entry_invalid;
+ } else if (child_type->id == TypeTableEntryIdOpaque && instruction->ptr_len == PtrLenUnknown) {
+ ir_add_error(ira, &instruction->base, buf_sprintf("unknown-length pointer to opaque"));
+ return ira->codegen->builtin_types.entry_invalid;
}
uint32_t align_bytes;
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index d659c0a772..d0089909cd 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -170,6 +170,8 @@ struct Token {
TokenCharLit char_lit;
} data;
};
+// work around conflicting name Token which is also found in libclang
+typedef Token ZigToken;
struct Tokenization {
ZigList<Token> *tokens;
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index db541d34f3..d78bd1fa70 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -276,8 +276,11 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod
node);
}
-static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
+static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node, PtrLen ptr_len) {
AstNode *node = trans_create_node(c, NodeTypePointerType);
+ node->data.pointer_type.star_token = allocate<ZigToken>(1);
+ node->data.pointer_type.star_token->id = (ptr_len == PtrLenSingle) ? TokenIdStar: TokenIdBracketStarBracket;
+ node->data.pointer_type.is_const = is_const;
node->data.pointer_type.is_const = is_const;
node->data.pointer_type.is_volatile = is_volatile;
node->data.pointer_type.op_expr = child_node;
@@ -731,6 +734,30 @@ static bool qual_type_has_wrapping_overflow(Context *c, QualType qt) {
}
}
+static bool type_is_opaque(Context *c, const Type *ty, const SourceLocation &source_loc) {
+ switch (ty->getTypeClass()) {
+ case Type::Builtin: {
+ const BuiltinType *builtin_ty = static_cast<const BuiltinType *>(ty);
+ return builtin_ty->getKind() == BuiltinType::Void;
+ }
+ case Type::Record: {
+ const RecordType *record_ty = static_cast<const RecordType *>(ty);
+ return record_ty->getDecl()->getDefinition() == nullptr;
+ }
+ case Type::Elaborated: {
+ const ElaboratedType *elaborated_ty = static_cast<const ElaboratedType *>(ty);
+ return type_is_opaque(c, elaborated_ty->getNamedType().getTypePtr(), source_loc);
+ }
+ case Type::Typedef: {
+ const TypedefType *typedef_ty = static_cast<const TypedefType *>(ty);
+ const TypedefNameDecl *typedef_decl = typedef_ty->getDecl();
+ return type_is_opaque(c, typedef_decl->getUnderlyingType().getTypePtr(), source_loc);
+ }
+ default:
+ return false;
+ }
+}
+
static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &source_loc) {
switch (ty->getTypeClass()) {
case Type::Builtin:
@@ -855,8 +882,10 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node);
}
+ PtrLen ptr_len = type_is_opaque(c, child_qt.getTypePtr(), source_loc) ? PtrLenSingle : PtrLenUnknown;
+
AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
- child_qt.isVolatileQualified(), child_node);
+ child_qt.isVolatileQualified(), child_node, ptr_len);
return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node);
}
case Type::Typedef:
@@ -1041,7 +1070,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
return nullptr;
}
AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
- child_qt.isVolatileQualified(), child_type_node);
+ child_qt.isVolatileQualified(), child_type_node, PtrLenUnknown);
return pointer_node;
}
case Type::BlockPointer:
@@ -4448,7 +4477,7 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t
} else if (first_tok->id == CTokIdAsterisk) {
*tok_i += 1;
- node = trans_create_node_ptr_type(c, false, false, node);
+ node = trans_create_node_ptr_type(c, false, false, node, PtrLenUnknown);
} else {
return node;
}
diff --git a/std/c/index.zig b/std/c/index.zig
index ade37f36c1..7de8634d07 100644
--- a/std/c/index.zig
+++ b/std/c/index.zig
@@ -20,11 +20,11 @@ pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int;
pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
-pub extern "c" fn read(fd: c_int, buf: [*]c_void, nbyte: usize) isize;
+pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
-pub extern "c" fn write(fd: c_int, buf: [*]const c_void, nbyte: usize) isize;
-pub extern "c" fn mmap(addr: ?[*]c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?[*]c_void;
-pub extern "c" fn munmap(addr: [*]c_void, len: usize) c_int;
+pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
+pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
+pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
pub extern "c" fn unlink(path: [*]const u8) c_int;
pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int;
@@ -48,15 +48,15 @@ pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int;
pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int;
pub extern "c" fn rmdir(path: [*]const u8) c_int;
-pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?[*]c_void;
-pub extern "c" fn malloc(usize) ?[*]c_void;
-pub extern "c" fn realloc([*]c_void, usize) ?[*]c_void;
-pub extern "c" fn free([*]c_void) void;
-pub extern "c" fn posix_memalign(memptr: *[*]c_void, alignment: usize, size: usize) c_int;
+pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
+pub extern "c" fn malloc(usize) ?*c_void;
+pub extern "c" fn realloc(*c_void, usize) ?*c_void;
+pub extern "c" fn free(*c_void) void;
+pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int;
pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
-pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: [*]c_void, stacksize: usize) c_int;
+pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
diff --git a/std/heap.zig b/std/heap.zig
index 4444a2307a..5d430bc761 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -22,7 +22,7 @@ fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
}
fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
- const old_ptr = @ptrCast([*]c_void, old_mem.ptr);
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
if (c.realloc(old_ptr, new_size)) |buf| {
return @ptrCast([*]u8, buf)[0..new_size];
} else if (new_size <= old_mem.len) {
@@ -33,7 +33,7 @@ fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![
}
fn cFree(self: *Allocator, old_mem: []u8) void {
- const old_ptr = @ptrCast([*]c_void, old_mem.ptr);
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
c.free(old_ptr);
}
@@ -140,7 +140,7 @@ pub const DirectAllocator = struct {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
- const old_ptr = @intToPtr([*]c_void, root_addr);
+ const old_ptr = @intToPtr(*c_void, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
@@ -170,7 +170,7 @@ pub const DirectAllocator = struct {
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
const root_addr = @intToPtr(*align(1) usize, record_addr).*;
- const ptr = @intToPtr([*]c_void, root_addr);
+ const ptr = @intToPtr(*c_void, root_addr);
_ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
},
else => @compileError("Unsupported OS"),
diff --git a/std/os/darwin.zig b/std/os/darwin.zig
index b8e18561cc..a835959103 100644
--- a/std/os/darwin.zig
+++ b/std/os/darwin.zig
@@ -327,7 +327,7 @@ pub fn raise(sig: i32) usize {
}
pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize {
- return errnoWrap(c.read(fd, @ptrCast([*]c_void, buf), nbyte));
+ return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
@@ -335,17 +335,17 @@ pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
}
pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize {
- return errnoWrap(c.write(fd, @ptrCast([*]const c_void, buf), nbyte));
+ return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
- const ptr_result = c.mmap(@ptrCast([*]c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
+ const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
const isize_result = @bitCast(isize, @ptrToInt(ptr_result));
return errnoWrap(isize_result);
}
pub fn munmap(address: usize, length: usize) usize {
- return errnoWrap(c.munmap(@intToPtr([*]c_void, address), length));
+ return errnoWrap(c.munmap(@intToPtr(*c_void, address), length));
}
pub fn unlink(path: [*]const u8) usize {
diff --git a/std/os/file.zig b/std/os/file.zig
index 378782507b..d5af55b5e4 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -334,7 +334,7 @@ pub const File = struct {
while (index < buffer.len) {
const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
- if (windows.ReadFile(self.handle, @ptrCast([*]c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) {
+ if (windows.ReadFile(self.handle, @ptrCast(*c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
diff --git a/std/os/index.zig b/std/os/index.zig
index 6023929b04..fe5ecc38ba 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -2362,7 +2362,7 @@ pub const Thread = struct {
},
builtin.Os.windows => struct {
handle: windows.HANDLE,
- alloc_start: [*]c_void,
+ alloc_start: *c_void,
heap_handle: windows.HANDLE,
},
else => @compileError("Unsupported OS"),
@@ -2533,7 +2533,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
// align to page
stack_end -= stack_end % os.page_size;
- assert(c.pthread_attr_setstack(&attr, @intToPtr([*]c_void, stack_addr), stack_end - stack_addr) == 0);
+ assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0);
const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg));
switch (err) {
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index c491ae6538..53e12500e7 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -101,17 +101,17 @@ pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void;
pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
-pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void, dwBytes: SIZE_T) ?[*]c_void;
-pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) SIZE_T;
-pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
+pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
+pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
-pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?[*]c_void;
+pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void;
-pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA(
lpExistingFileName: LPCSTR,
@@ -127,7 +127,7 @@ pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL;
pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE,
- out_lpBuffer: [*]c_void,
+ out_lpBuffer: *c_void,
in_nNumberOfBytesToRead: DWORD,
out_lpNumberOfBytesRead: *DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
@@ -150,7 +150,7 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE,
- in_lpBuffer: [*]const c_void,
+ in_lpBuffer: *const c_void,
in_nNumberOfBytesToWrite: DWORD,
out_lpNumberOfBytesWritten: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 5a40567310..7170346108 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -42,7 +42,7 @@ pub const WriteError = error{
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
- if (windows.WriteFile(handle, @ptrCast([*]const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
+ if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 8d5dc68d45..eec077ef85 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -284,7 +284,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addC("expose function pointer to C land",
\\const c = @cImport(@cInclude("stdlib.h"));
\\
- \\export fn compare_fn(a: ?[*]const c_void, b: ?[*]const c_void) c_int {
+ \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int {
\\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a));
\\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b));
\\ if (a_int.* < b_int.*) {
@@ -299,7 +299,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\export fn main() c_int {
\\ var array = []u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 };
\\
- \\ c.qsort(@ptrCast(?[*]c_void, array[0..].ptr), c_ulong(array.len), @sizeOf(i32), compare_fn);
+ \\ c.qsort(@ptrCast(?*c_void, array[0..].ptr), c_ulong(array.len), @sizeOf(i32), compare_fn);
\\
\\ for (array) |item, i| {
\\ if (item != i) {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 4bd6e9bc24..9cecb859fa 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,13 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "unknown length pointer to opaque",
+ \\export const T = [*]@OpaqueType();
+ ,
+ ".tmp_source.zig:1:18: error: unknown-length pointer to opaque",
+ );
+
cases.add(
"error when evaluating return type",
\\const Foo = struct {
diff --git a/test/translate_c.zig b/test/translate_c.zig
index ac0a98e6cc..3489f9da21 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -99,7 +99,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("restrict -> noalias",
\\void foo(void *restrict bar, void *restrict);
,
- \\pub extern fn foo(noalias bar: ?[*]c_void, noalias arg1: ?[*]c_void) void;
+ \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void;
);
cases.add("simple struct",
@@ -172,7 +172,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
,
\\pub const struct_Foo = @OpaqueType();
,
- \\pub extern fn some_func(foo: ?[*]struct_Foo, x: c_int) ?[*]struct_Foo;
+ \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo;
,
\\pub const Foo = struct_Foo;
);
@@ -233,7 +233,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
,
\\pub const Foo = c_void;
,
- \\pub extern fn fun(a: ?[*]Foo) Foo;
+ \\pub extern fn fun(a: ?*Foo) Foo;
);
cases.add("generate inline func for #define global extern fn",
@@ -505,7 +505,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return 6;
\\}
,
- \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ if ((a != 0) and (b != 0)) return 0;
\\ if ((b != 0) and (c != null)) return 1;
\\ if ((a != 0) and (c != null)) return 2;
@@ -653,8 +653,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return x;
\\}
,
- \\pub export fn foo(x: ?[*]c_ushort) ?[*]c_void {
- \\ return @ptrCast(?[*]c_void, x);
+ \\pub export fn foo(x: ?[*]c_ushort) ?*c_void {
+ \\ return @ptrCast(?*c_void, x);
\\}
);
@@ -1173,7 +1173,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return !c;
\\}
,
- \\pub fn foo(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub fn foo(a: c_int, b: f32, c: ?*c_void) c_int {
\\ return !(a == 0);
\\ return !(a != 0);
\\ return !(b != 0);
@@ -1231,7 +1231,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ B,
\\ C,
\\};
- \\pub fn if_none_bool(a: c_int, b: f32, c: ?[*]c_void, d: enum_SomeEnum) c_int {
+ \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int {
\\ if (a != 0) return 0;
\\ if (b != 0) return 1;
\\ if (c != null) return 2;
@@ -1248,7 +1248,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn while_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
@@ -1264,7 +1264,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn for_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
From bbb565a21e40f305b9fa10c385124455fafe647f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 5 Jun 2018 21:56:19 -0400
Subject: [PATCH 02/49] README: update support table
macosx does not run on some of these architectures
---
README.md | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/README.md b/README.md
index b5bf13f095..a5868bf44e 100644
--- a/README.md
+++ b/README.md
@@ -55,18 +55,18 @@ that counts as "freestanding" for the purposes of this table.
|i386 | OK | planned | OK | planned | planned |
|x86_64 | OK | OK | OK | OK | planned |
|arm | OK | planned | planned | N/A | planned |
-|aarch64 | OK | planned | planned | planned | planned |
-|bpf | OK | planned | planned | N/A | planned |
-|hexagon | OK | planned | planned | N/A | planned |
-|mips | OK | planned | planned | N/A | planned |
-|powerpc | OK | planned | planned | N/A | planned |
-|r600 | OK | planned | planned | N/A | planned |
-|amdgcn | OK | planned | planned | N/A | planned |
-|sparc | OK | planned | planned | N/A | planned |
-|s390x | OK | planned | planned | N/A | planned |
-|thumb | OK | planned | planned | N/A | planned |
-|spir | OK | planned | planned | N/A | planned |
-|lanai | OK | planned | planned | N/A | planned |
+|aarch64 | OK | planned | N/A | planned | planned |
+|bpf | OK | planned | N/A | N/A | planned |
+|hexagon | OK | planned | N/A | N/A | planned |
+|mips | OK | planned | N/A | N/A | planned |
+|powerpc | OK | planned | N/A | N/A | planned |
+|r600 | OK | planned | N/A | N/A | planned |
+|amdgcn | OK | planned | N/A | N/A | planned |
+|sparc | OK | planned | N/A | N/A | planned |
+|s390x | OK | planned | N/A | N/A | planned |
+|thumb | OK | planned | N/A | N/A | planned |
+|spir | OK | planned | N/A | N/A | planned |
+|lanai | OK | planned | N/A | N/A | planned |
## Community
From 0ccc18686921dce8e7f2feb95eed83b894ca8df4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 5 Jun 2018 20:24:11 -0400
Subject: [PATCH 03/49] disable field access for unknown length pointers
See #770
---
src/analyze.cpp | 4 ++--
test/compile_errors.zig | 13 +++++++++++++
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 0adb992798..15f08aa3fe 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3753,13 +3753,13 @@ static bool is_container(TypeTableEntry *type_entry) {
}
bool is_container_ref(TypeTableEntry *type_entry) {
- return (type_entry->id == TypeTableEntryIdPointer) ?
+ return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ?
is_container(type_entry->data.pointer.child_type) : is_container(type_entry);
}
TypeTableEntry *container_ref_type(TypeTableEntry *type_entry) {
assert(is_container_ref(type_entry));
- return (type_entry->id == TypeTableEntryIdPointer) ?
+ return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ?
type_entry->data.pointer.child_type : type_entry;
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 9cecb859fa..ab539dd94a 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,19 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "field access of unknown length pointer",
+ \\const Foo = extern struct {
+ \\ a: i32,
+ \\};
+ \\
+ \\export fn entry(foo: [*]Foo) void {
+ \\ foo.a += 1;
+ \\}
+ ,
+ ".tmp_source.zig:6:8: error: type '[*]Foo' does not support field access",
+ );
+
cases.add(
"unknown length pointer to opaque",
\\export const T = [*]@OpaqueType();
From bd13e757e7e36994d2a1fd4595c617d14e22b7c6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 5 Jun 2018 22:23:23 -0400
Subject: [PATCH 04/49] disable deref syntax for unknown length pointers
See #770
---
src/ir.cpp | 12 ++++++++++++
std/special/bootstrap.zig | 2 +-
test/compile_errors.zig | 9 +++++++++
3 files changed, 22 insertions(+), 1 deletion(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 5c44e7c0ff..a6686aae76 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -11132,7 +11132,13 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
IrInstruction *op1 = bin_op_instruction->op1->other;
+ if (type_is_invalid(op1->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *op2 = bin_op_instruction->op2->other;
+ if (type_is_invalid(op2->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrBinOp op_id = bin_op_instruction->op_id;
// look for pointer math
@@ -12851,6 +12857,12 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp
if (type_is_invalid(ptr_type)) {
return ira->codegen->builtin_types.entry_invalid;
} else if (ptr_type->id == TypeTableEntryIdPointer) {
+ if (ptr_type->data.pointer.ptr_len == PtrLenUnknown) {
+ ir_add_error_node(ira, un_op_instruction->base.source_node,
+ buf_sprintf("index syntax required for unknown-length pointer type '%s'",
+ buf_ptr(&ptr_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
child_type = ptr_type->data.pointer.child_type;
} else {
ir_add_error_node(ira, un_op_instruction->base.source_node,
diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig
index 64eae79ce4..8aefe4751f 100644
--- a/std/special/bootstrap.zig
+++ b/std/special/bootstrap.zig
@@ -51,7 +51,7 @@ extern fn WinMainCRTStartup() noreturn {
// TODO https://github.com/ziglang/zig/issues/265
fn posixCallMainAndExit() noreturn {
- const argc = argc_ptr.*;
+ const argc = argc_ptr[0];
const argv = @ptrCast([*][*]u8, argc_ptr + 1);
const envp_nullable = @ptrCast([*]?[*]u8, argv + argc + 1);
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index ab539dd94a..412b2d5fc9 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,15 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "dereference unknown length pointer",
+ \\export fn entry(x: [*]i32) i32 {
+ \\ return x.*;
+ \\}
+ ,
+ ".tmp_source.zig:2:13: error: index syntax required for unknown-length pointer type '[*]i32'",
+ );
+
cases.add(
"field access of unknown length pointer",
\\const Foo = extern struct {
From 76c8efd56c84c189a52d3dc559fff109d5d34ce4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 5 Jun 2018 23:54:14 -0400
Subject: [PATCH 05/49] add test for not allowing implicit cast from T to
[*]const T
See #770
---
test/compile_errors.zig | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 412b2d5fc9..c995cd679e 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,15 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "attempted implicit cast from T to [*]const T",
+ \\export fn entry() void {
+ \\ const x: [*]const bool = true;
+ \\}
+ ,
+ ".tmp_source.zig:2:30: error: expected type '[*]const bool', found 'bool'",
+ );
+
cases.add(
"dereference unknown length pointer",
\\export fn entry(x: [*]i32) i32 {
From d3693dca73dfc726aed32908691437abe614e5cf Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 6 Jun 2018 00:39:39 -0400
Subject: [PATCH 06/49] Pointer Reform: update @typeInfo
* add assertion for trying to do @typeInfo on global error set
* remove TypeInfo.Slice
* add TypeInfo.Pointer.Size with possible values
- One
- Many
- Slice
See #770
---
src/analyze.cpp | 2 +-
src/codegen.cpp | 11 ++++--
src/ir.cpp | 80 +++++++++++++++++++++++++---------------
std/fmt/index.zig | 31 ++++++++++------
test/cases/type_info.zig | 33 +++++++++++++----
5 files changed, 102 insertions(+), 55 deletions(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 15f08aa3fe..93373f6ec2 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -5981,7 +5981,7 @@ size_t type_id_index(TypeTableEntry *entry) {
return 7;
case TypeTableEntryIdStruct:
if (entry->data.structure.is_slice)
- return 25;
+ return 6;
return 8;
case TypeTableEntryIdComptimeFloat:
return 9;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index a977c34daf..7f95f335d1 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -6481,7 +6481,6 @@ static void define_builtin_compile_vars(CodeGen *g) {
const TypeTableEntryId id = type_id_at_index(i);
buf_appendf(contents, " %s,\n", type_id_name(id));
}
- buf_appendf(contents, " Slice,\n");
buf_appendf(contents, "};\n\n");
}
{
@@ -6494,7 +6493,6 @@ static void define_builtin_compile_vars(CodeGen *g) {
" Int: Int,\n"
" Float: Float,\n"
" Pointer: Pointer,\n"
- " Slice: Slice,\n"
" Array: Array,\n"
" Struct: Struct,\n"
" ComptimeFloat: void,\n"
@@ -6524,13 +6522,18 @@ static void define_builtin_compile_vars(CodeGen *g) {
" };\n"
"\n"
" pub const Pointer = struct {\n"
+ " size: Size,\n"
" is_const: bool,\n"
" is_volatile: bool,\n"
" alignment: u32,\n"
" child: type,\n"
- " };\n"
"\n"
- " pub const Slice = Pointer;\n"
+ " pub const Size = enum {\n"
+ " One,\n"
+ " Many,\n"
+ " Slice,\n"
+ " };\n"
+ " };\n"
"\n"
" pub const Array = struct {\n"
" len: usize,\n"
diff --git a/src/ir.cpp b/src/ir.cpp
index a6686aae76..3486e8c047 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -16222,8 +16222,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
return true;
}
-static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry)
-{
+static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry) {
assert(type_entry != nullptr);
assert(!type_is_invalid(type_entry));
@@ -16248,38 +16247,67 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
enum_field_val->data.x_struct.fields = inner_fields;
};
- const auto create_ptr_like_type_info = [ira](const char *name, TypeTableEntry *ptr_type_entry) {
+ const auto create_ptr_like_type_info = [ira](TypeTableEntry *ptr_type_entry) {
+ TypeTableEntry *attrs_type;
+ uint32_t size_enum_index;
+ if (is_slice(ptr_type_entry)) {
+ attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index].type_entry;
+ size_enum_index = 2;
+ } else if (ptr_type_entry->id == TypeTableEntryIdPointer) {
+ attrs_type = ptr_type_entry;
+ size_enum_index = (ptr_type_entry->data.pointer.ptr_len == PtrLenSingle) ? 0 : 1;
+ } else {
+ zig_unreachable();
+ }
+
+ TypeTableEntry *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer");
+ ensure_complete_type(ira->codegen, type_info_pointer_type);
+ assert(!type_is_invalid(type_info_pointer_type));
+
ConstExprValue *result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, name);
+ result->type = type_info_pointer_type;
- ConstExprValue *fields = create_const_vals(4);
+ ConstExprValue *fields = create_const_vals(5);
result->data.x_struct.fields = fields;
- // is_const: bool
- ensure_field_index(result->type, "is_const", 0);
+ // size: Size
+ ensure_field_index(result->type, "size", 0);
+ TypeTableEntry *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type);
+ ensure_complete_type(ira->codegen, type_info_pointer_size_type);
+ assert(!type_is_invalid(type_info_pointer_size_type));
fields[0].special = ConstValSpecialStatic;
- fields[0].type = ira->codegen->builtin_types.entry_bool;
- fields[0].data.x_bool = ptr_type_entry->data.pointer.is_const;
- // is_volatile: bool
- ensure_field_index(result->type, "is_volatile", 1);
+ fields[0].type = type_info_pointer_size_type;
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, size_enum_index);
+
+ // is_const: bool
+ ensure_field_index(result->type, "is_const", 1);
fields[1].special = ConstValSpecialStatic;
fields[1].type = ira->codegen->builtin_types.entry_bool;
- fields[1].data.x_bool = ptr_type_entry->data.pointer.is_volatile;
- // alignment: u32
- ensure_field_index(result->type, "alignment", 2);
+ fields[1].data.x_bool = attrs_type->data.pointer.is_const;
+ // is_volatile: bool
+ ensure_field_index(result->type, "is_volatile", 2);
fields[2].special = ConstValSpecialStatic;
- fields[2].type = ira->codegen->builtin_types.entry_u32;
- bigint_init_unsigned(&fields[2].data.x_bigint, ptr_type_entry->data.pointer.alignment);
- // child: type
- ensure_field_index(result->type, "child", 3);
+ fields[2].type = ira->codegen->builtin_types.entry_bool;
+ fields[2].data.x_bool = attrs_type->data.pointer.is_volatile;
+ // alignment: u32
+ ensure_field_index(result->type, "alignment", 3);
fields[3].special = ConstValSpecialStatic;
- fields[3].type = ira->codegen->builtin_types.entry_type;
- fields[3].data.x_type = ptr_type_entry->data.pointer.child_type;
+ fields[3].type = ira->codegen->builtin_types.entry_u32;
+ bigint_init_unsigned(&fields[3].data.x_bigint, attrs_type->data.pointer.alignment);
+ // child: type
+ ensure_field_index(result->type, "child", 4);
+ fields[4].special = ConstValSpecialStatic;
+ fields[4].type = ira->codegen->builtin_types.entry_type;
+ fields[4].data.x_type = attrs_type->data.pointer.child_type;
return result;
};
+ if (type_entry == ira->codegen->builtin_types.entry_global_error_set) {
+ zig_panic("TODO implement @typeInfo for global error set");
+ }
+
ConstExprValue *result = nullptr;
switch (type_entry->id)
{
@@ -16348,7 +16376,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
case TypeTableEntryIdPointer:
{
- result = create_ptr_like_type_info("Pointer", type_entry);
+ result = create_ptr_like_type_info(type_entry);
break;
}
case TypeTableEntryIdArray:
@@ -16621,15 +16649,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
case TypeTableEntryIdStruct:
{
if (type_entry->data.structure.is_slice) {
- Buf ptr_field_name = BUF_INIT;
- buf_init_from_str(&ptr_field_name, "ptr");
- TypeTableEntry *ptr_type = type_entry->data.structure.fields_by_name.get(&ptr_field_name)->type_entry;
- ensure_complete_type(ira->codegen, ptr_type);
- if (type_is_invalid(ptr_type))
- return nullptr;
- buf_deinit(&ptr_field_name);
-
- result = create_ptr_like_type_info("Slice", ptr_type);
+ result = create_ptr_like_type_info(type_entry);
break;
}
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index 047a154bb8..bbf48df0cf 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -97,7 +97,11 @@ pub fn formatType(
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
const T = @typeOf(value);
- switch (@typeId(T)) {
+ if (T == error) {
+ try output(context, "error.");
+ return output(context, @errorName(value));
+ }
+ switch (@typeInfo(T)) {
builtin.TypeId.Int, builtin.TypeId.Float => {
return formatValue(value, fmt, context, Errors, output);
},
@@ -125,12 +129,13 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
- builtin.TypeId.Pointer => {
- switch (@typeId(T.Child)) {
- builtin.TypeId.Array => {
- if (T.Child.Child == u8) {
+ builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
+ builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
+ builtin.TypeId.Array => |info| {
+ if (info.child == u8) {
return formatText(value, fmt, context, Errors, output);
}
+ return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
},
builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
const has_cust_fmt = comptime cf: {
@@ -154,14 +159,16 @@ pub fn formatType(
return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
},
else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)),
- }
- },
- else => if (@canImplicitCast([]const u8, value)) {
- const casted_value = ([]const u8)(value);
- return output(context, casted_value);
- } else {
- @compileError("Unable to format type '" ++ @typeName(T) ++ "'");
+ },
+ builtin.TypeInfo.Pointer.Size.Many => {
+ return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
+ },
+ builtin.TypeInfo.Pointer.Size.Slice => {
+ const casted_value = ([]const u8)(value);
+ return output(context, casted_value);
+ },
},
+ else => @compileError("Unable to format type '" ++ @typeName(T) ++ "'"),
}
}
diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig
index 921ff785a7..b452c8e9f6 100644
--- a/test/cases/type_info.zig
+++ b/test/cases/type_info.zig
@@ -39,12 +39,28 @@ test "type info: pointer type info" {
fn testPointer() void {
const u32_ptr_info = @typeInfo(*u32);
assert(TypeId(u32_ptr_info) == TypeId.Pointer);
+ assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.One);
assert(u32_ptr_info.Pointer.is_const == false);
assert(u32_ptr_info.Pointer.is_volatile == false);
- assert(u32_ptr_info.Pointer.alignment == 4);
+ assert(u32_ptr_info.Pointer.alignment == @alignOf(u32));
assert(u32_ptr_info.Pointer.child == u32);
}
+test "type info: unknown length pointer type info" {
+ testUnknownLenPtr();
+ comptime testUnknownLenPtr();
+}
+
+fn testUnknownLenPtr() void {
+ const u32_ptr_info = @typeInfo([*]const volatile f64);
+ assert(TypeId(u32_ptr_info) == TypeId.Pointer);
+ assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.Many);
+ assert(u32_ptr_info.Pointer.is_const == true);
+ assert(u32_ptr_info.Pointer.is_volatile == true);
+ assert(u32_ptr_info.Pointer.alignment == @alignOf(f64));
+ assert(u32_ptr_info.Pointer.child == f64);
+}
+
test "type info: slice type info" {
testSlice();
comptime testSlice();
@@ -52,11 +68,12 @@ test "type info: slice type info" {
fn testSlice() void {
const u32_slice_info = @typeInfo([]u32);
- assert(TypeId(u32_slice_info) == TypeId.Slice);
- assert(u32_slice_info.Slice.is_const == false);
- assert(u32_slice_info.Slice.is_volatile == false);
- assert(u32_slice_info.Slice.alignment == 4);
- assert(u32_slice_info.Slice.child == u32);
+ assert(TypeId(u32_slice_info) == TypeId.Pointer);
+ assert(u32_slice_info.Pointer.size == TypeInfo.Pointer.Size.Slice);
+ assert(u32_slice_info.Pointer.is_const == false);
+ assert(u32_slice_info.Pointer.is_volatile == false);
+ assert(u32_slice_info.Pointer.alignment == 4);
+ assert(u32_slice_info.Pointer.child == u32);
}
test "type info: array type info" {
@@ -149,11 +166,11 @@ fn testUnion() void {
assert(TypeId(typeinfo_info) == TypeId.Union);
assert(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
assert(typeinfo_info.Union.tag_type == TypeId);
- assert(typeinfo_info.Union.fields.len == 26);
+ assert(typeinfo_info.Union.fields.len == 25);
assert(typeinfo_info.Union.fields[4].enum_field != null);
assert((??typeinfo_info.Union.fields[4].enum_field).value == 4);
assert(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
- assert(typeinfo_info.Union.defs.len == 21);
+ assert(typeinfo_info.Union.defs.len == 20);
const TestNoTagUnion = union {
Foo: void,
From 212449bc231047571ab27af0d1ae112ed0ffea47 Mon Sep 17 00:00:00 2001
From: Marc Tiehuis
Date: Wed, 6 Jun 2018 22:41:55 +1200
Subject: [PATCH 07/49] Fix Log2Int type construction
The following case for example, would previously fail:
const a = u24(1) << Log2Int(u24)(22);
---
std/math/index.zig | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/std/math/index.zig b/std/math/index.zig
index 33bc1082f7..a118f3ed47 100644
--- a/std/math/index.zig
+++ b/std/math/index.zig
@@ -306,7 +306,14 @@ test "math.rotl" {
}
pub fn Log2Int(comptime T: type) type {
- return @IntType(false, log2(T.bit_count));
+ // comptime ceil log2
+ comptime var count: usize = 0;
+ comptime var s = T.bit_count - 1;
+ inline while (s != 0) : (s >>= 1) {
+ count += 1;
+ }
+
+ return @IntType(false, count);
}
test "math overflow functions" {
From f389e5373580a5a4ac48ccdb8da9dc951c01dee5 Mon Sep 17 00:00:00 2001
From: Braedon
Date: Thu, 7 Jun 2018 00:45:19 +1000
Subject: [PATCH 08/49] Add newline to zig fmt error (#1064)
---
src-self-hosted/main.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 7a62f4985b..a264b5484a 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -734,7 +734,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer file.close();
const source_code = io.readFileAlloc(allocator, file_path) catch |err| {
- try stderr.print("unable to open '{}': {}", file_path, err);
+ try stderr.print("unable to open '{}': {}\n", file_path, err);
fmt_errors = true;
continue;
};
From e7f141b3762b9b6c07e17cfa68f9d4c3fd02aba2 Mon Sep 17 00:00:00 2001
From: Marc Tiehuis
Date: Thu, 7 Jun 2018 03:24:36 +1200
Subject: [PATCH 09/49] Add json.TokenStream (#1062)
This hides some of the low-level parsing details from the
StreamingParser. These don't need to be known when parsing a complete
slice at once (which is we can usually do).
Also, remove `Json` from Parser names. The namespace `json` is sufficient.
---
std/json.zig | 237 ++++++++++++++++++++++++++++++++++-----------------
1 file changed, 159 insertions(+), 78 deletions(-)
diff --git a/std/json.zig b/std/json.zig
index 71673ad20f..6cf83eef1a 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -3,6 +3,7 @@
// https://tools.ietf.org/html/rfc8259
const std = @import("index.zig");
+const debug = std.debug;
const mem = std.mem;
const u1 = @IntType(false, 1);
@@ -86,7 +87,9 @@ pub const Token = struct {
// parsing state requires ~40-50 bytes of stack space.
//
// Conforms strictly to RFC8529.
-pub const StreamingJsonParser = struct {
+//
+// For a non-byte based wrapper, consider using TokenStream instead.
+pub const StreamingParser = struct {
// Current state
state: State,
// How many bytes we have counted for the current token
@@ -109,13 +112,13 @@ pub const StreamingJsonParser = struct {
const array_bit = 1;
const max_stack_size = @maxValue(u8);
- pub fn init() StreamingJsonParser {
- var p: StreamingJsonParser = undefined;
+ pub fn init() StreamingParser {
+ var p: StreamingParser = undefined;
p.reset();
return p;
}
- pub fn reset(p: *StreamingJsonParser) void {
+ pub fn reset(p: *StreamingParser) void {
p.state = State.TopLevelBegin;
p.count = 0;
// Set before ever read in main transition function
@@ -175,7 +178,7 @@ pub const StreamingJsonParser = struct {
// Only call this function to generate array/object final state.
pub fn fromInt(x: var) State {
- std.debug.assert(x == 0 or x == 1);
+ debug.assert(x == 0 or x == 1);
const T = @TagType(State);
return State(T(x));
}
@@ -205,7 +208,7 @@ pub const StreamingJsonParser = struct {
// tokens. token2 is always null if token1 is null.
//
// There is currently no error recovery on a bad stream.
- pub fn feed(p: *StreamingJsonParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
+ pub fn feed(p: *StreamingParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
token1.* = null;
token2.* = null;
p.count += 1;
@@ -217,7 +220,7 @@ pub const StreamingJsonParser = struct {
}
// Perform a single transition on the state machine and return any possible token.
- fn transition(p: *StreamingJsonParser, c: u8, token: *?Token) Error!bool {
+ fn transition(p: *StreamingParser, c: u8, token: *?Token) Error!bool {
switch (p.state) {
State.TopLevelBegin => switch (c) {
'{' => {
@@ -852,10 +855,116 @@ pub const StreamingJsonParser = struct {
}
};
+// A small wrapper over a StreamingParser for full slices. Returns a stream of json Tokens.
+pub const TokenStream = struct {
+ i: usize,
+ slice: []const u8,
+ parser: StreamingParser,
+ token: ?Token,
+
+ pub fn init(slice: []const u8) TokenStream {
+ return TokenStream{
+ .i = 0,
+ .slice = slice,
+ .parser = StreamingParser.init(),
+ .token = null,
+ };
+ }
+
+ pub fn next(self: *TokenStream) !?Token {
+ if (self.token) |token| {
+ self.token = null;
+ return token;
+ }
+
+ var t1: ?Token = undefined;
+ var t2: ?Token = undefined;
+
+ while (self.i < self.slice.len) {
+ try self.parser.feed(self.slice[self.i], &t1, &t2);
+ self.i += 1;
+
+ if (t1) |token| {
+ self.token = t2;
+ return token;
+ }
+ }
+
+ if (self.i > self.slice.len) {
+ try self.parser.feed(' ', &t1, &t2);
+ self.i += 1;
+
+ if (t1) |token| {
+ return token;
+ }
+ }
+
+ return null;
+ }
+};
+
+fn checkNext(p: *TokenStream, id: Token.Id) void {
+ const token = ??(p.next() catch unreachable);
+ debug.assert(token.id == id);
+}
+
+test "token" {
+ const s =
+ \\{
+ \\ "Image": {
+ \\ "Width": 800,
+ \\ "Height": 600,
+ \\ "Title": "View from 15th Floor",
+ \\ "Thumbnail": {
+ \\ "Url": "http://www.example.com/image/481989943",
+ \\ "Height": 125,
+ \\ "Width": 100
+ \\ },
+ \\ "Animated" : false,
+ \\ "IDs": [116, 943, 234, 38793]
+ \\ }
+ \\}
+ ;
+
+ var p = TokenStream.init(s);
+
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Image
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Width
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Height
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Title
+ checkNext(&p, Token.Id.String);
+ checkNext(&p, Token.Id.String); // Thumbnail
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Url
+ checkNext(&p, Token.Id.String);
+ checkNext(&p, Token.Id.String); // Height
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Width
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.ObjectEnd);
+ checkNext(&p, Token.Id.String); // Animated
+ checkNext(&p, Token.Id.False);
+ checkNext(&p, Token.Id.String); // IDs
+ checkNext(&p, Token.Id.ArrayBegin);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.ArrayEnd);
+ checkNext(&p, Token.Id.ObjectEnd);
+ checkNext(&p, Token.Id.ObjectEnd);
+
+ debug.assert((try p.next()) == null);
+}
+
// Validate a JSON string. This does not limit number precision so a decoder may not necessarily
// be able to decode the string even if this returns true.
pub fn validate(s: []const u8) bool {
- var p = StreamingJsonParser.init();
+ var p = StreamingParser.init();
for (s) |c, i| {
var token1: ?Token = undefined;
@@ -897,46 +1006,46 @@ pub const Value = union(enum) {
pub fn dump(self: *const Value) void {
switch (self.*) {
Value.Null => {
- std.debug.warn("null");
+ debug.warn("null");
},
Value.Bool => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Integer => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Float => |inner| {
- std.debug.warn("{.5}", inner);
+ debug.warn("{.5}", inner);
},
Value.String => |inner| {
- std.debug.warn("\"{}\"", inner);
+ debug.warn("\"{}\"", inner);
},
Value.Array => |inner| {
var not_first = false;
- std.debug.warn("[");
+ debug.warn("[");
for (inner.toSliceConst()) |value| {
if (not_first) {
- std.debug.warn(",");
+ debug.warn(",");
}
not_first = true;
value.dump();
}
- std.debug.warn("]");
+ debug.warn("]");
},
Value.Object => |inner| {
var not_first = false;
- std.debug.warn("{{");
+ debug.warn("{{");
var it = inner.iterator();
while (it.next()) |entry| {
if (not_first) {
- std.debug.warn(",");
+ debug.warn(",");
}
not_first = true;
- std.debug.warn("\"{}\":", entry.key);
+ debug.warn("\"{}\":", entry.key);
entry.value.dump();
}
- std.debug.warn("}}");
+ debug.warn("}}");
},
}
}
@@ -952,53 +1061,53 @@ pub const Value = union(enum) {
fn dumpIndentLevel(self: *const Value, indent: usize, level: usize) void {
switch (self.*) {
Value.Null => {
- std.debug.warn("null");
+ debug.warn("null");
},
Value.Bool => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Integer => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Float => |inner| {
- std.debug.warn("{.5}", inner);
+ debug.warn("{.5}", inner);
},
Value.String => |inner| {
- std.debug.warn("\"{}\"", inner);
+ debug.warn("\"{}\"", inner);
},
Value.Array => |inner| {
var not_first = false;
- std.debug.warn("[\n");
+ debug.warn("[\n");
for (inner.toSliceConst()) |value| {
if (not_first) {
- std.debug.warn(",\n");
+ debug.warn(",\n");
}
not_first = true;
padSpace(level + indent);
value.dumpIndentLevel(indent, level + indent);
}
- std.debug.warn("\n");
+ debug.warn("\n");
padSpace(level);
- std.debug.warn("]");
+ debug.warn("]");
},
Value.Object => |inner| {
var not_first = false;
- std.debug.warn("{{\n");
+ debug.warn("{{\n");
var it = inner.iterator();
while (it.next()) |entry| {
if (not_first) {
- std.debug.warn(",\n");
+ debug.warn(",\n");
}
not_first = true;
padSpace(level + indent);
- std.debug.warn("\"{}\": ", entry.key);
+ debug.warn("\"{}\": ", entry.key);
entry.value.dumpIndentLevel(indent, level + indent);
}
- std.debug.warn("\n");
+ debug.warn("\n");
padSpace(level);
- std.debug.warn("}}");
+ debug.warn("}}");
},
}
}
@@ -1006,13 +1115,13 @@ pub const Value = union(enum) {
fn padSpace(indent: usize) void {
var i: usize = 0;
while (i < indent) : (i += 1) {
- std.debug.warn(" ");
+ debug.warn(" ");
}
}
};
// A non-stream JSON parser which constructs a tree of Value's.
-pub const JsonParser = struct {
+pub const Parser = struct {
allocator: *Allocator,
state: State,
copy_strings: bool,
@@ -1026,8 +1135,8 @@ pub const JsonParser = struct {
Simple,
};
- pub fn init(allocator: *Allocator, copy_strings: bool) JsonParser {
- return JsonParser{
+ pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
+ return Parser{
.allocator = allocator,
.state = State.Simple,
.copy_strings = copy_strings,
@@ -1035,52 +1144,26 @@ pub const JsonParser = struct {
};
}
- pub fn deinit(p: *JsonParser) void {
+ pub fn deinit(p: *Parser) void {
p.stack.deinit();
}
- pub fn reset(p: *JsonParser) void {
+ pub fn reset(p: *Parser) void {
p.state = State.Simple;
p.stack.shrink(0);
}
- pub fn parse(p: *JsonParser, input: []const u8) !ValueTree {
- var mp = StreamingJsonParser.init();
+ pub fn parse(p: *Parser, input: []const u8) !ValueTree {
+ var s = TokenStream.init(input);
var arena = ArenaAllocator.init(p.allocator);
errdefer arena.deinit();
- for (input) |c, i| {
- var mt1: ?Token = undefined;
- var mt2: ?Token = undefined;
-
- try mp.feed(c, &mt1, &mt2);
- if (mt1) |t1| {
- try p.transition(&arena.allocator, input, i, t1);
-
- if (mt2) |t2| {
- try p.transition(&arena.allocator, input, i, t2);
- }
- }
+ while (try s.next()) |token| {
+ try p.transition(&arena.allocator, input, s.i - 1, token);
}
- // Handle top-level lonely number values.
- {
- const i = input.len;
- var mt1: ?Token = undefined;
- var mt2: ?Token = undefined;
-
- try mp.feed(' ', &mt1, &mt2);
- if (mt1) |t1| {
- try p.transition(&arena.allocator, input, i, t1);
- }
- }
-
- if (!mp.complete) {
- return error.IncompleteJsonInput;
- }
-
- std.debug.assert(p.stack.len == 1);
+ debug.assert(p.stack.len == 1);
return ValueTree{
.arena = arena,
@@ -1090,7 +1173,7 @@ pub const JsonParser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
- fn transition(p: *JsonParser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
+ fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
switch (p.state) {
State.ObjectKey => switch (token.id) {
Token.Id.ObjectEnd => {
@@ -1223,7 +1306,7 @@ pub const JsonParser = struct {
}
}
- fn pushToParent(p: *JsonParser, value: *const Value) !void {
+ fn pushToParent(p: *Parser, value: *const Value) !void {
switch (p.stack.at(p.stack.len - 1)) {
// Object Parent -> [ ..., object, , value ]
Value.String => |key| {
@@ -1244,14 +1327,14 @@ pub const JsonParser = struct {
}
}
- fn parseString(p: *JsonParser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
+ fn parseString(p: *Parser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
// TODO: We don't strictly have to copy values which do not contain any escape
// characters if flagged with the option.
const slice = token.slice(input, i);
return Value{ .String = try mem.dupe(p.allocator, u8, slice) };
}
- fn parseNumber(p: *JsonParser, token: *const Token, input: []const u8, i: usize) !Value {
+ fn parseNumber(p: *Parser, token: *const Token, input: []const u8, i: usize) !Value {
return if (token.number_is_integer)
Value{ .Integer = try std.fmt.parseInt(i64, token.slice(input, i), 10) }
else
@@ -1259,10 +1342,8 @@ pub const JsonParser = struct {
}
};
-const debug = std.debug;
-
test "json parser dynamic" {
- var p = JsonParser.init(std.debug.global_allocator, false);
+ var p = Parser.init(debug.global_allocator, false);
defer p.deinit();
const s =
From 4fc601895b9f89bf0d3d3c1de1b0bbc959444298 Mon Sep 17 00:00:00 2001
From: isaachier
Date: Wed, 6 Jun 2018 14:09:47 -0400
Subject: [PATCH 10/49] Fix const-ness of buffer in replaceContents method
(#1065)
---
std/buffer.zig | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/std/buffer.zig b/std/buffer.zig
index 3b2936d223..469f81709b 100644
--- a/std/buffer.zig
+++ b/std/buffer.zig
@@ -28,7 +28,6 @@ pub const Buffer = struct {
/// Must deinitialize with deinit.
/// None of the other operations are valid until you do one of these:
/// * ::replaceContents
- /// * ::replaceContentsBuffer
/// * ::resize
pub fn initNull(allocator: *Allocator) Buffer {
return Buffer{ .list = ArrayList(u8).init(allocator) };
@@ -116,7 +115,7 @@ pub const Buffer = struct {
return mem.eql(u8, self.list.items[start..l], m);
}
- pub fn replaceContents(self: *const Buffer, m: []const u8) !void {
+ pub fn replaceContents(self: *Buffer, m: []const u8) !void {
try self.resize(m.len);
mem.copy(u8, self.list.toSlice(), m);
}
From b11c5d8f8256fc19e08eacfc50be209878f00e73 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 6 Jun 2018 15:36:47 -0400
Subject: [PATCH 11/49] fix std.os.windows.PathFileExists specified in the
wrong DLL (#1066)
closes #1054
---
std/os/file.zig | 3 ++-
std/os/windows/index.zig | 4 ++--
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/std/os/file.zig b/std/os/file.zig
index d5af55b5e4..f15fa77688 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -123,7 +123,8 @@ pub const File = struct {
}
return true;
} else if (is_windows) {
- if (os.windows.PathFileExists(path_with_null.ptr) == os.windows.TRUE) {
+ // TODO do not depend on shlwapi.dll
+ if (os.windows.PathFileExistsA(path_with_null.ptr) == os.windows.TRUE) {
return true;
}
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index 53e12500e7..0934c3fd90 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -123,8 +123,6 @@ pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *
pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
-pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL;
-
pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE,
out_lpBuffer: *c_void,
@@ -163,6 +161,8 @@ pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL;
pub extern "user32" stdcallcc fn MessageBoxA(hWnd: ?HANDLE, lpText: ?LPCTSTR, lpCaption: ?LPCTSTR, uType: UINT) c_int;
+pub extern "shlwapi" stdcallcc fn PathFileExistsA(pszPath: ?LPCTSTR) BOOL;
+
pub const PROV_RSA_FULL = 1;
pub const BOOL = c_int;
From 31aefa6a2179dfae752020195fb193c6333bae7e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 7 Jun 2018 17:26:41 -0400
Subject: [PATCH 12/49] fix structs that contain types which require comptime
Now, if a struct has any fields which require comptime,
such as `type`, then the struct is marked as requiring
comptime as well. Same goes for unions.
This means that a function will implicitly be called
at comptime if the return type is a struct which contains
a field of type `type`.
closes #586
---
src/all_types.hpp | 8 +++
src/analyze.cpp | 23 ++++++++-
src/ir.cpp | 112 ++++++++++++----------------------------
test/cases/eval.zig | 13 +++++
test/compile_errors.zig | 2 +-
5 files changed, 77 insertions(+), 81 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 3b2ea02b71..b193fe8ae8 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1037,6 +1037,10 @@ struct TypeTableEntryStruct {
// whether we've finished resolving it
bool complete;
+ // whether any of the fields require comptime
+ // the value is not valid until zero_bits_known == true
+ bool requires_comptime;
+
bool zero_bits_loop_flag;
bool zero_bits_known;
uint32_t abi_alignment; // also figured out with zero_bits pass
@@ -1105,6 +1109,10 @@ struct TypeTableEntryUnion {
// whether we've finished resolving it
bool complete;
+ // whether any of the fields require comptime
+ // the value is not valid until zero_bits_known == true
+ bool requires_comptime;
+
bool zero_bits_loop_flag;
bool zero_bits_known;
uint32_t abi_alignment; // also figured out with zero_bits pass
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 93373f6ec2..e05fb23237 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -2533,6 +2533,10 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
continue;
}
+ if (type_requires_comptime(field_type)) {
+ struct_type->data.structure.requires_comptime = true;
+ }
+
if (!type_has_bits(field_type))
continue;
@@ -2724,6 +2728,11 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
}
union_field->type_entry = field_type;
+ if (type_requires_comptime(field_type)) {
+ union_type->data.unionation.requires_comptime = true;
+ }
+
+
if (field_node->data.struct_field.value != nullptr && !decl_node->data.container_decl.auto_enum) {
ErrorMsg *msg = add_node_error(g, field_node->data.struct_field.value,
buf_sprintf("non-enum union field assignment"));
@@ -4944,17 +4953,29 @@ bool type_requires_comptime(TypeTableEntry *type_entry) {
case TypeTableEntryIdArgTuple:
return true;
case TypeTableEntryIdArray:
+ return type_requires_comptime(type_entry->data.array.child_type);
case TypeTableEntryIdStruct:
+ assert(type_has_zero_bits_known(type_entry));
+ return type_entry->data.structure.requires_comptime;
case TypeTableEntryIdUnion:
+ assert(type_has_zero_bits_known(type_entry));
+ return type_entry->data.unionation.requires_comptime;
case TypeTableEntryIdMaybe:
+ return type_requires_comptime(type_entry->data.maybe.child_type);
case TypeTableEntryIdErrorUnion:
+ return type_requires_comptime(type_entry->data.error_union.payload_type);
+ case TypeTableEntryIdPointer:
+ if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) {
+ return false;
+ } else {
+ return type_requires_comptime(type_entry->data.pointer.child_type);
+ }
case TypeTableEntryIdEnum:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
case TypeTableEntryIdBool:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
- case TypeTableEntryIdPointer:
case TypeTableEntryIdVoid:
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdPromise:
diff --git a/src/ir.cpp b/src/ir.cpp
index 3486e8c047..304127b099 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -11624,61 +11624,6 @@ static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructi
zig_unreachable();
}
-enum VarClassRequired {
- VarClassRequiredAny,
- VarClassRequiredConst,
- VarClassRequiredIllegal,
-};
-
-static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) {
- switch (type_entry->id) {
- case TypeTableEntryIdInvalid:
- zig_unreachable();
- case TypeTableEntryIdUnreachable:
- return VarClassRequiredIllegal;
- case TypeTableEntryIdBool:
- case TypeTableEntryIdInt:
- case TypeTableEntryIdFloat:
- case TypeTableEntryIdVoid:
- case TypeTableEntryIdErrorSet:
- case TypeTableEntryIdFn:
- case TypeTableEntryIdPromise:
- return VarClassRequiredAny;
- case TypeTableEntryIdComptimeFloat:
- case TypeTableEntryIdComptimeInt:
- case TypeTableEntryIdUndefined:
- case TypeTableEntryIdBlock:
- case TypeTableEntryIdNull:
- case TypeTableEntryIdOpaque:
- case TypeTableEntryIdMetaType:
- case TypeTableEntryIdNamespace:
- case TypeTableEntryIdBoundFn:
- case TypeTableEntryIdArgTuple:
- return VarClassRequiredConst;
-
- case TypeTableEntryIdPointer:
- if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) {
- return VarClassRequiredAny;
- } else {
- return get_var_class_required(type_entry->data.pointer.child_type);
- }
- case TypeTableEntryIdArray:
- return get_var_class_required(type_entry->data.array.child_type);
- case TypeTableEntryIdMaybe:
- return get_var_class_required(type_entry->data.maybe.child_type);
- case TypeTableEntryIdErrorUnion:
- return get_var_class_required(type_entry->data.error_union.payload_type);
-
- case TypeTableEntryIdStruct:
- case TypeTableEntryIdEnum:
- case TypeTableEntryIdUnion:
- // TODO check the fields of these things and make sure that they don't recursively
- // contain any of the other variable classes
- return VarClassRequiredAny;
- }
- zig_unreachable();
-}
-
static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstructionDeclVar *decl_var_instruction) {
VariableTableEntry *var = decl_var_instruction->var;
@@ -11713,36 +11658,41 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc
if (type_is_invalid(result_type)) {
result_type = ira->codegen->builtin_types.entry_invalid;
} else {
- switch (get_var_class_required(result_type)) {
- case VarClassRequiredIllegal:
+ type_ensure_zero_bits_known(ira->codegen, result_type);
+ if (type_is_invalid(result_type)) {
+ result_type = ira->codegen->builtin_types.entry_invalid;
+ }
+ }
+
+ if (!type_is_invalid(result_type)) {
+ if (result_type->id == TypeTableEntryIdUnreachable ||
+ result_type->id == TypeTableEntryIdOpaque)
+ {
+ ir_add_error_node(ira, source_node,
+ buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name)));
+ result_type = ira->codegen->builtin_types.entry_invalid;
+ } else if (type_requires_comptime(result_type)) {
+ var_class_requires_const = true;
+ if (!var->src_is_const && !is_comptime_var) {
ir_add_error_node(ira, source_node,
- buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name)));
+ buf_sprintf("variable of type '%s' must be const or comptime",
+ buf_ptr(&result_type->name)));
result_type = ira->codegen->builtin_types.entry_invalid;
- break;
- case VarClassRequiredConst:
+ }
+ } else {
+ if (casted_init_value->value.special == ConstValSpecialStatic &&
+ casted_init_value->value.type->id == TypeTableEntryIdFn &&
+ casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
+ {
var_class_requires_const = true;
if (!var->src_is_const && !is_comptime_var) {
- ir_add_error_node(ira, source_node,
- buf_sprintf("variable of type '%s' must be const or comptime",
- buf_ptr(&result_type->name)));
+ ErrorMsg *msg = ir_add_error_node(ira, source_node,
+ buf_sprintf("functions marked inline must be stored in const or comptime var"));
+ AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node;
+ add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
result_type = ira->codegen->builtin_types.entry_invalid;
}
- break;
- case VarClassRequiredAny:
- if (casted_init_value->value.special == ConstValSpecialStatic &&
- casted_init_value->value.type->id == TypeTableEntryIdFn &&
- casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
- {
- var_class_requires_const = true;
- if (!var->src_is_const && !is_comptime_var) {
- ErrorMsg *msg = ir_add_error_node(ira, source_node,
- buf_sprintf("functions marked inline must be stored in const or comptime var"));
- AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node;
- add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
- result_type = ira->codegen->builtin_types.entry_invalid;
- }
- }
- break;
+ }
}
}
@@ -12623,6 +12573,10 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
inst_fn_type_id.return_type = specified_return_type;
}
+ type_ensure_zero_bits_known(ira->codegen, specified_return_type);
+ if (type_is_invalid(specified_return_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
if (type_requires_comptime(specified_return_type)) {
// Throw out our work and call the function as if it were comptime.
return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, FnInlineAuto);
diff --git a/test/cases/eval.zig b/test/cases/eval.zig
index 461408afea..9612466a86 100644
--- a/test/cases/eval.zig
+++ b/test/cases/eval.zig
@@ -610,3 +610,16 @@ test "slice of type" {
}
}
}
+
+const Wrapper = struct {
+ T: type,
+};
+
+fn wrap(comptime T: type) Wrapper {
+ return Wrapper{ .T = T };
+}
+
+test "function which returns struct with type field causes implicit comptime" {
+ const ty = wrap(i32).T;
+ assert(ty == i32);
+}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index c995cd679e..102c4e428d 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -3329,7 +3329,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:9:4: error: variable of type 'comptime_float' must be const or comptime",
".tmp_source.zig:10:4: error: variable of type '(block)' must be const or comptime",
".tmp_source.zig:11:4: error: variable of type '(null)' must be const or comptime",
- ".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime",
+ ".tmp_source.zig:12:4: error: variable of type 'Opaque' not allowed",
".tmp_source.zig:13:4: error: variable of type 'type' must be const or comptime",
".tmp_source.zig:14:4: error: variable of type '(namespace)' must be const or comptime",
".tmp_source.zig:15:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime",
From 688ff2830d82ea36a9f022ecb7cf4c2bf2e4c586 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 7 Jun 2018 19:10:45 -0400
Subject: [PATCH 13/49] langref: automatic update of builtin.zig
now the docs can't get out of date for this
See #367
---
doc/docgen.zig | 18 ++
doc/langref.html.in | 423 +-------------------------------------------
src/codegen.cpp | 21 ++-
src/codegen.hpp | 2 +
src/main.cpp | 15 ++
5 files changed, 51 insertions(+), 428 deletions(-)
diff --git a/doc/docgen.zig b/doc/docgen.zig
index fed4bb8eba..ed0e1be273 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -300,6 +300,7 @@ const Link = struct {
const Node = union(enum) {
Content: []const u8,
Nav,
+ Builtin,
HeaderOpen: HeaderOpen,
SeeAlso: []const SeeAlsoItem,
Code: Code,
@@ -356,6 +357,9 @@ fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
_ = try eatToken(tokenizer, Token.Id.BracketClose);
try nodes.append(Node.Nav);
+ } else if (mem.eql(u8, tag_name, "builtin")) {
+ _ = try eatToken(tokenizer, Token.Id.BracketClose);
+ try nodes.append(Node.Builtin);
} else if (mem.eql(u8, tag_name, "header_open")) {
_ = try eatToken(tokenizer, Token.Id.Separator);
const content_token = try eatToken(tokenizer, Token.Id.TagContent);
@@ -690,6 +694,9 @@ fn termColor(allocator: *mem.Allocator, input: []const u8) ![]u8 {
fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var, zig_exe: []const u8) !void {
var code_progress_index: usize = 0;
+
+ const builtin_code = try escapeHtml(allocator, try getBuiltinCode(allocator, zig_exe));
+
for (toc.nodes) |node| {
switch (node) {
Node.Content => |data| {
@@ -704,6 +711,9 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var
Node.Nav => {
try out.write(toc.toc);
},
+ Node.Builtin => {
+ try out.print("{}\n", builtin_code);
+ },
Node.HeaderOpen => |info| {
try out.print("{}\n", info.n, info.url, info.name, info.n);
},
@@ -1060,3 +1070,11 @@ fn exec(allocator: *mem.Allocator, args: []const []const u8) !os.ChildProcess.Ex
}
return result;
}
+
+fn getBuiltinCode(allocator: *mem.Allocator, zig_exe: []const u8) ![]const u8 {
+ const result = try exec(allocator, []const []const u8{
+ zig_exe,
+ "builtin",
+ });
+ return result.stdout;
+}
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 4359cadb58..adb5470d98 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -5474,425 +5474,7 @@ const separator = if (builtin.os == builtin.Os.windows) '\\' else '/';
Example of what is imported with @import("builtin"):
- {#code_begin|syntax#}
-pub const StackTrace = struct {
- index: usize,
- instruction_addresses: []usize,
-};
-
-pub const Os = enum {
- freestanding,
- ananas,
- cloudabi,
- dragonfly,
- freebsd,
- fuchsia,
- ios,
- kfreebsd,
- linux,
- lv2,
- macosx,
- netbsd,
- openbsd,
- solaris,
- windows,
- haiku,
- minix,
- rtems,
- nacl,
- cnk,
- aix,
- cuda,
- nvcl,
- amdhsa,
- ps4,
- elfiamcu,
- tvos,
- watchos,
- mesa3d,
- contiki,
- amdpal,
- zen,
-};
-
-pub const Arch = enum {
- armv8_3a,
- armv8_2a,
- armv8_1a,
- armv8,
- armv8r,
- armv8m_baseline,
- armv8m_mainline,
- armv7,
- armv7em,
- armv7m,
- armv7s,
- armv7k,
- armv7ve,
- armv6,
- armv6m,
- armv6k,
- armv6t2,
- armv5,
- armv5te,
- armv4t,
- armebv8_3a,
- armebv8_2a,
- armebv8_1a,
- armebv8,
- armebv8r,
- armebv8m_baseline,
- armebv8m_mainline,
- armebv7,
- armebv7em,
- armebv7m,
- armebv7s,
- armebv7k,
- armebv7ve,
- armebv6,
- armebv6m,
- armebv6k,
- armebv6t2,
- armebv5,
- armebv5te,
- armebv4t,
- aarch64,
- aarch64_be,
- arc,
- avr,
- bpfel,
- bpfeb,
- hexagon,
- mips,
- mipsel,
- mips64,
- mips64el,
- msp430,
- nios2,
- powerpc,
- powerpc64,
- powerpc64le,
- r600,
- amdgcn,
- riscv32,
- riscv64,
- sparc,
- sparcv9,
- sparcel,
- s390x,
- tce,
- tcele,
- thumb,
- thumbeb,
- i386,
- x86_64,
- xcore,
- nvptx,
- nvptx64,
- le32,
- le64,
- amdil,
- amdil64,
- hsail,
- hsail64,
- spir,
- spir64,
- kalimbav3,
- kalimbav4,
- kalimbav5,
- shave,
- lanai,
- wasm32,
- wasm64,
- renderscript32,
- renderscript64,
-};
-
-pub const Environ = enum {
- unknown,
- gnu,
- gnuabin32,
- gnuabi64,
- gnueabi,
- gnueabihf,
- gnux32,
- code16,
- eabi,
- eabihf,
- android,
- musl,
- musleabi,
- musleabihf,
- msvc,
- itanium,
- cygnus,
- amdopencl,
- coreclr,
- opencl,
- simulator,
-};
-
-pub const ObjectFormat = enum {
- unknown,
- coff,
- elf,
- macho,
- wasm,
-};
-
-pub const GlobalLinkage = enum {
- Internal,
- Strong,
- Weak,
- LinkOnce,
-};
-
-pub const AtomicOrder = enum {
- Unordered,
- Monotonic,
- Acquire,
- Release,
- AcqRel,
- SeqCst,
-};
-
-pub const AtomicRmwOp = enum {
- Xchg,
- Add,
- Sub,
- And,
- Nand,
- Or,
- Xor,
- Max,
- Min,
-};
-
-pub const Mode = enum {
- Debug,
- ReleaseSafe,
- ReleaseFast,
- ReleaseSmall,
-};
-
-pub const TypeId = enum {
- Type,
- Void,
- Bool,
- NoReturn,
- Int,
- Float,
- Pointer,
- Array,
- Struct,
- ComptimeFloat,
- ComptimeInt,
- Undefined,
- Null,
- Nullable,
- ErrorUnion,
- ErrorSet,
- Enum,
- Union,
- Fn,
- Namespace,
- Block,
- BoundFn,
- ArgTuple,
- Opaque,
- Promise,
-};
-
-pub const TypeInfo = union(TypeId) {
- Type: void,
- Void: void,
- Bool: void,
- NoReturn: void,
- Int: Int,
- Float: Float,
- Pointer: Pointer,
- Array: Array,
- Struct: Struct,
- ComptimeFloat: void,
- ComptimeInt: void,
- Undefined: void,
- Null: void,
- Nullable: Nullable,
- ErrorUnion: ErrorUnion,
- ErrorSet: ErrorSet,
- Enum: Enum,
- Union: Union,
- Fn: Fn,
- Namespace: void,
- Block: void,
- BoundFn: Fn,
- ArgTuple: void,
- Opaque: void,
- Promise: Promise,
-
-
- pub const Int = struct {
- is_signed: bool,
- bits: u8,
- };
-
- pub const Float = struct {
- bits: u8,
- };
-
- pub const Pointer = struct {
- is_const: bool,
- is_volatile: bool,
- alignment: u32,
- child: type,
- };
-
- pub const Array = struct {
- len: usize,
- child: type,
- };
-
- pub const ContainerLayout = enum {
- Auto,
- Extern,
- Packed,
- };
-
- pub const StructField = struct {
- name: []const u8,
- offset: ?usize,
- field_type: type,
- };
-
- pub const Struct = struct {
- layout: ContainerLayout,
- fields: []StructField,
- defs: []Definition,
- };
-
- pub const Nullable = struct {
- child: type,
- };
-
- pub const ErrorUnion = struct {
- error_set: type,
- payload: type,
- };
-
- pub const Error = struct {
- name: []const u8,
- value: usize,
- };
-
- pub const ErrorSet = struct {
- errors: []Error,
- };
-
- pub const EnumField = struct {
- name: []const u8,
- value: usize,
- };
-
- pub const Enum = struct {
- layout: ContainerLayout,
- tag_type: type,
- fields: []EnumField,
- defs: []Definition,
- };
-
- pub const UnionField = struct {
- name: []const u8,
- enum_field: ?EnumField,
- field_type: type,
- };
-
- pub const Union = struct {
- layout: ContainerLayout,
- tag_type: type,
- fields: []UnionField,
- defs: []Definition,
- };
-
- pub const CallingConvention = enum {
- Unspecified,
- C,
- Cold,
- Naked,
- Stdcall,
- Async,
- };
-
- pub const FnArg = struct {
- is_generic: bool,
- is_noalias: bool,
- arg_type: type,
- };
-
- pub const Fn = struct {
- calling_convention: CallingConvention,
- is_generic: bool,
- is_var_args: bool,
- return_type: type,
- async_allocator_type: type,
- args: []FnArg,
- };
-
- pub const Promise = struct {
- child: type,
- };
-
- pub const Definition = struct {
- name: []const u8,
- is_pub: bool,
- data: Data,
-
- pub const Data = union(enum) {
- Type: type,
- Var: type,
- Fn: FnDef,
-
- pub const FnDef = struct {
- fn_type: type,
- inline_type: Inline,
- calling_convention: CallingConvention,
- is_var_args: bool,
- is_extern: bool,
- is_export: bool,
- lib_name: ?[]const u8,
- return_type: type,
- arg_names: [][] const u8,
-
- pub const Inline = enum {
- Auto,
- Always,
- Never,
- };
- };
- };
- };
-};
-
-pub const FloatMode = enum {
- Optimized,
- Strict,
-};
-
-pub const Endian = enum {
- Big,
- Little,
-};
-
-pub const endian = Endian.Little;
-pub const is_test = true;
-pub const os = Os.linux;
-pub const arch = Arch.x86_64;
-pub const environ = Environ.gnu;
-pub const object_format = ObjectFormat.elf;
-pub const mode = Mode.Debug;
-pub const link_libc = false;
-pub const have_error_return_tracing = true;
-pub const __zig_test_fn_slice = {}; // overwritten later
- {#code_end#}
+ {#builtin#}
{#see_also|Build Mode#}
{#header_close#}
{#header_open|Root Source File#}
@@ -6053,8 +5635,7 @@ pub fn build(b: *Builder) void {
b.default_step.dependOn(&exe.step);
}
{#code_end#}
- {#header_close#}
- {#header_open|Terminal#}
+ terminal
$ zig build
$ ./test
all your base are belong to us
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 7f95f335d1..fb59ca7569 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -6335,13 +6335,7 @@ static const char *build_mode_to_str(BuildMode build_mode) {
zig_unreachable();
}
-static void define_builtin_compile_vars(CodeGen *g) {
- if (g->std_package == nullptr)
- return;
-
- const char *builtin_zig_basename = "builtin.zig";
- Buf *builtin_zig_path = buf_alloc();
- os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path);
+Buf *codegen_generate_builtin_source(CodeGen *g) {
Buf *contents = buf_alloc();
// Modifications to this struct must be coordinated with code that does anything with
@@ -6707,6 +6701,19 @@ static void define_builtin_compile_vars(CodeGen *g) {
buf_appendf(contents, "pub const __zig_test_fn_slice = {}; // overwritten later\n");
+
+ return contents;
+}
+
+static void define_builtin_compile_vars(CodeGen *g) {
+ if (g->std_package == nullptr)
+ return;
+
+ const char *builtin_zig_basename = "builtin.zig";
+ Buf *builtin_zig_path = buf_alloc();
+ os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path);
+
+ Buf *contents = codegen_generate_builtin_source(g);
ensure_cache_dir(g);
os_write_file(builtin_zig_path, contents);
diff --git a/src/codegen.hpp b/src/codegen.hpp
index a7a4b748c4..b5f3374ec4 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -59,5 +59,7 @@ void codegen_add_object(CodeGen *g, Buf *object_path);
void codegen_translate_c(CodeGen *g, Buf *path);
+Buf *codegen_generate_builtin_source(CodeGen *g);
+
#endif
diff --git a/src/main.cpp b/src/main.cpp
index 9c36f9b091..c63a143bff 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -23,6 +23,7 @@ static int usage(const char *arg0) {
" build-exe [source] create executable from source or object files\n"
" build-lib [source] create library from source or object files\n"
" build-obj [source] create object from source or assembly\n"
+ " builtin show the source code of that @import(\"builtin\")\n"
" run [source] create executable and run immediately\n"
" translate-c [source] convert c code to zig code\n"
" targets list available compilation targets\n"
@@ -214,6 +215,7 @@ static Buf *resolve_zig_lib_dir(void) {
enum Cmd {
CmdInvalid,
CmdBuild,
+ CmdBuiltin,
CmdRun,
CmdTest,
CmdVersion,
@@ -664,6 +666,8 @@ int main(int argc, char **argv) {
out_type = OutTypeExe;
} else if (strcmp(arg, "targets") == 0) {
cmd = CmdTargets;
+ } else if (strcmp(arg, "builtin") == 0) {
+ cmd = CmdBuiltin;
} else {
fprintf(stderr, "Unrecognized command: %s\n", arg);
return usage(arg0);
@@ -681,6 +685,7 @@ int main(int argc, char **argv) {
return usage(arg0);
}
break;
+ case CmdBuiltin:
case CmdVersion:
case CmdZen:
case CmdTargets:
@@ -727,6 +732,16 @@ int main(int argc, char **argv) {
}
switch (cmd) {
+ case CmdBuiltin: {
+ Buf *zig_lib_dir_buf = resolve_zig_lib_dir();
+ CodeGen *g = codegen_create(nullptr, target, out_type, build_mode, zig_lib_dir_buf);
+ Buf *builtin_source = codegen_generate_builtin_source(g);
+ if (fwrite(buf_ptr(builtin_source), 1, buf_len(builtin_source), stdout) != buf_len(builtin_source)) {
+ fprintf(stderr, "unable to write to stdout: %s\n", strerror(ferror(stdout)));
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
+ }
case CmdRun:
case CmdBuild:
case CmdTranslateC:
From b65203f5736199bdc8d98d27728be5e92a17d565 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 7 Jun 2018 19:50:25 -0400
Subject: [PATCH 14/49] remove @canImplicitCast builtin
nobody will miss it
---
doc/langref.html.in | 11 +--------
src/all_types.hpp | 9 -------
src/codegen.cpp | 2 --
src/ir.cpp | 60 ---------------------------------------------
src/ir_print.cpp | 11 ---------
test/cases/misc.zig | 8 ------
6 files changed, 1 insertion(+), 100 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index adb5470d98..6a1f1c3102 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -3845,9 +3845,6 @@ pub fn printValue(self: *OutStream, value: var) !void {
return self.printInt(T, value);
} else if (@isFloat(T)) {
return self.printFloat(T, value);
- } else if (@canImplicitCast([]const u8, value)) {
- const casted_value = ([]const u8)(value);
- return self.write(casted_value);
} else {
@compileError("Unable to print type '" ++ @typeName(T) ++ "'");
}
@@ -4102,12 +4099,6 @@ comptime {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#}
{#header_close#}
- {#header_open|@canImplicitCast#}
- @canImplicitCast(comptime T: type, value) bool
-
- Returns whether a value can be implicitly casted to a given type.
-
- {#header_close#}
{#header_open|@clz#}
@clz(x: T) U
@@ -6136,7 +6127,7 @@ hljs.registerLanguage("zig", function(t) {
a = t.IR + "\\s*\\(",
c = {
keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong",
- built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall",
+ built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall",
literal: "true false null undefined"
},
n = [e, t.CLCM, t.CBCM, s, r];
diff --git a/src/all_types.hpp b/src/all_types.hpp
index b193fe8ae8..9d41b86fa0 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1354,7 +1354,6 @@ enum BuiltinFnId {
BuiltinFnIdSetRuntimeSafety,
BuiltinFnIdSetFloatMode,
BuiltinFnIdTypeName,
- BuiltinFnIdCanImplicitCast,
BuiltinFnIdPanic,
BuiltinFnIdPtrCast,
BuiltinFnIdBitCast,
@@ -2065,7 +2064,6 @@ enum IrInstructionId {
IrInstructionIdCheckSwitchProngs,
IrInstructionIdCheckStatementIsVoid,
IrInstructionIdTypeName,
- IrInstructionIdCanImplicitCast,
IrInstructionIdDeclRef,
IrInstructionIdPanic,
IrInstructionIdTagName,
@@ -2858,13 +2856,6 @@ struct IrInstructionTypeName {
IrInstruction *type_value;
};
-struct IrInstructionCanImplicitCast {
- IrInstruction base;
-
- IrInstruction *type_value;
- IrInstruction *target_value;
-};
-
struct IrInstructionDeclRef {
IrInstruction base;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index fb59ca7569..d156a8a178 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -4625,7 +4625,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdCheckSwitchProngs:
case IrInstructionIdCheckStatementIsVoid:
case IrInstructionIdTypeName:
- case IrInstructionIdCanImplicitCast:
case IrInstructionIdDeclRef:
case IrInstructionIdSwitchVar:
case IrInstructionIdOffsetOf:
@@ -6277,7 +6276,6 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdCImport, "cImport", 1);
create_builtin_fn(g, BuiltinFnIdErrName, "errorName", 1);
create_builtin_fn(g, BuiltinFnIdTypeName, "typeName", 1);
- create_builtin_fn(g, BuiltinFnIdCanImplicitCast, "canImplicitCast", 2);
create_builtin_fn(g, BuiltinFnIdEmbedFile, "embedFile", 1);
create_builtin_fn(g, BuiltinFnIdCmpxchgWeak, "cmpxchgWeak", 6);
create_builtin_fn(g, BuiltinFnIdCmpxchgStrong, "cmpxchgStrong", 6);
diff --git a/src/ir.cpp b/src/ir.cpp
index 304127b099..3c9adab796 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -585,10 +585,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeName *) {
return IrInstructionIdTypeName;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCanImplicitCast *) {
- return IrInstructionIdCanImplicitCast;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionDeclRef *) {
return IrInstructionIdDeclRef;
}
@@ -2348,20 +2344,6 @@ static IrInstruction *ir_build_type_name(IrBuilder *irb, Scope *scope, AstNode *
return &instruction->base;
}
-static IrInstruction *ir_build_can_implicit_cast(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *type_value, IrInstruction *target_value)
-{
- IrInstructionCanImplicitCast *instruction = ir_build_instruction(
- irb, scope, source_node);
- instruction->type_value = type_value;
- instruction->target_value = target_value;
-
- ir_ref_instruction(type_value, irb->current_basic_block);
- ir_ref_instruction(target_value, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_decl_ref(IrBuilder *irb, Scope *scope, AstNode *source_node,
Tld *tld, LVal lval)
{
@@ -4132,21 +4114,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *type_name = ir_build_type_name(irb, scope, node, arg0_value);
return ir_lval_wrap(irb, scope, type_name, lval);
}
- case BuiltinFnIdCanImplicitCast:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
- if (arg0_value == irb->codegen->invalid_instruction)
- return arg0_value;
-
- AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
- IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
- if (arg1_value == irb->codegen->invalid_instruction)
- return arg1_value;
-
- IrInstruction *can_implicit_cast = ir_build_can_implicit_cast(irb, scope, node, arg0_value, arg1_value);
- return ir_lval_wrap(irb, scope, can_implicit_cast, lval);
- }
case BuiltinFnIdPanic:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -18405,30 +18372,6 @@ static TypeTableEntry *ir_analyze_instruction_check_statement_is_void(IrAnalyze
return ira->codegen->builtin_types.entry_void;
}
-static TypeTableEntry *ir_analyze_instruction_can_implicit_cast(IrAnalyze *ira,
- IrInstructionCanImplicitCast *instruction)
-{
- IrInstruction *type_value = instruction->type_value->other;
- TypeTableEntry *type_entry = ir_resolve_type(ira, type_value);
- if (type_is_invalid(type_entry))
- return ira->codegen->builtin_types.entry_invalid;
-
- IrInstruction *target_value = instruction->target_value->other;
- if (type_is_invalid(target_value->value.type))
- return ira->codegen->builtin_types.entry_invalid;
-
- ImplicitCastMatchResult result = ir_types_match_with_implicit_cast(ira, type_entry, target_value->value.type,
- target_value);
-
- if (result == ImplicitCastMatchResultReportedError) {
- zig_panic("TODO refactor implicit cast tester to return bool without reporting errors");
- }
-
- ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = (result == ImplicitCastMatchResultYes);
- return ira->codegen->builtin_types.entry_bool;
-}
-
static TypeTableEntry *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstructionPanic *instruction) {
IrInstruction *msg = instruction->msg->other;
if (type_is_invalid(msg->value.type))
@@ -19762,8 +19705,6 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_check_switch_prongs(ira, (IrInstructionCheckSwitchProngs *)instruction);
case IrInstructionIdCheckStatementIsVoid:
return ir_analyze_instruction_check_statement_is_void(ira, (IrInstructionCheckStatementIsVoid *)instruction);
- case IrInstructionIdCanImplicitCast:
- return ir_analyze_instruction_can_implicit_cast(ira, (IrInstructionCanImplicitCast *)instruction);
case IrInstructionIdDeclRef:
return ir_analyze_instruction_decl_ref(ira, (IrInstructionDeclRef *)instruction);
case IrInstructionIdPanic:
@@ -20043,7 +19984,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdIntToEnum:
case IrInstructionIdIntToErr:
case IrInstructionIdErrToInt:
- case IrInstructionIdCanImplicitCast:
case IrInstructionIdDeclRef:
case IrInstructionIdErrName:
case IrInstructionIdTypeName:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 3c177a8bbf..776ef64566 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -913,14 +913,6 @@ static void ir_print_tag_name(IrPrint *irp, IrInstructionTagName *instruction) {
ir_print_other_instruction(irp, instruction->target);
}
-static void ir_print_can_implicit_cast(IrPrint *irp, IrInstructionCanImplicitCast *instruction) {
- fprintf(irp->f, "@canImplicitCast(");
- ir_print_other_instruction(irp, instruction->type_value);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->target_value);
- fprintf(irp->f, ")");
-}
-
static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) {
fprintf(irp->f, "&");
if (instruction->align_value != nullptr) {
@@ -1524,9 +1516,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTagName:
ir_print_tag_name(irp, (IrInstructionTagName *)instruction);
break;
- case IrInstructionIdCanImplicitCast:
- ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction);
- break;
case IrInstructionIdPtrType:
ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction);
break;
diff --git a/test/cases/misc.zig b/test/cases/misc.zig
index 9450cf5e6e..369d8e5cf3 100644
--- a/test/cases/misc.zig
+++ b/test/cases/misc.zig
@@ -523,14 +523,6 @@ test "@typeId" {
}
}
-test "@canImplicitCast" {
- comptime {
- assert(@canImplicitCast(i64, i32(3)));
- assert(!@canImplicitCast(i32, f32(1.234)));
- assert(@canImplicitCast([]const u8, "aoeu"));
- }
-}
-
test "@typeName" {
const Struct = struct {};
const Union = union {
From f0b6dac1f2d37ea9eff0116bec34e9b2be9f3ce7 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 7 Jun 2018 22:19:00 -0400
Subject: [PATCH 15/49] add implicit casts from `*[N]T`
* to `[]T`
* to `[*]T`
See #770
---
src/all_types.hpp | 1 +
src/codegen.cpp | 26 +++++++++-
src/ir.cpp | 122 ++++++++++++++++++++++++++++++++++++++++++++
test/cases/cast.zig | 16 ++++++
4 files changed, 163 insertions(+), 2 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 9d41b86fa0..c671682363 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -583,6 +583,7 @@ enum CastOp {
CastOpNumLitToConcrete,
CastOpErrSet,
CastOpBitCast,
+ CastOpPtrOfArrayToSlice,
};
struct AstNodeFnCallExpr {
diff --git a/src/codegen.cpp b/src/codegen.cpp
index d156a8a178..fab2ad659e 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -2530,7 +2530,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
assert(wanted_type->data.structure.is_slice);
assert(actual_type->id == TypeTableEntryIdArray);
- TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[0].type_entry;
+ TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
TypeTableEntry *wanted_child_type = wanted_pointer_type->data.pointer.child_type;
@@ -2576,6 +2576,29 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
return expr_val;
case CastOpBitCast:
return LLVMBuildBitCast(g->builder, expr_val, wanted_type->type_ref, "");
+ case CastOpPtrOfArrayToSlice: {
+ assert(cast_instruction->tmp_ptr);
+ assert(actual_type->id == TypeTableEntryIdPointer);
+ TypeTableEntry *array_type = actual_type->data.pointer.child_type;
+ assert(array_type->id == TypeTableEntryIdArray);
+
+ LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr,
+ slice_ptr_index, "");
+ LLVMValueRef indices[] = {
+ LLVMConstNull(g->builtin_types.entry_usize->type_ref),
+ LLVMConstInt(g->builtin_types.entry_usize->type_ref, 0, false),
+ };
+ LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, "");
+ gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
+
+ LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr,
+ slice_len_index, "");
+ LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
+ array_type->data.array.len, false);
+ gen_store_untyped(g, len_value, len_field_ptr, 0, false);
+
+ return cast_instruction->tmp_ptr;
+ }
}
zig_unreachable();
}
@@ -3815,7 +3838,6 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
} else {
end_val = LLVMConstInt(g->builtin_types.entry_usize->type_ref, array_type->data.array.len, false);
}
-
if (want_runtime_safety) {
add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val);
if (instruction->end) {
diff --git a/src/ir.cpp b/src/ir.cpp
index 3c9adab796..cc4ffb44a9 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -108,6 +108,7 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op);
static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval);
static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align);
+static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align);
ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) {
assert(const_val->type->id == TypeTableEntryIdPointer);
@@ -8024,6 +8025,33 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
}
+ // implicit *[N]T to [*]T
+ if (expected_type->id == TypeTableEntryIdPointer &&
+ expected_type->data.pointer.ptr_len == PtrLenUnknown &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ types_match_const_cast_only(ira, expected_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ {
+ return ImplicitCastMatchResultYes;
+ }
+
+ // implicit *[N]T to []T
+ if (is_slice(expected_type) &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ {
+ TypeTableEntry *slice_ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
+ assert(slice_ptr_type->id == TypeTableEntryIdPointer);
+ if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ {
+ return ImplicitCastMatchResultYes;
+ }
+ }
+
// implicit [N]T to ?[]const T
if (expected_type->id == TypeTableEntryIdMaybe &&
is_slice(expected_type->data.maybe.child_type) &&
@@ -8699,6 +8727,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
zig_unreachable();
case CastOpErrSet:
case CastOpBitCast:
+ case CastOpPtrOfArrayToSlice:
zig_panic("TODO");
case CastOpNoop:
{
@@ -8786,6 +8815,63 @@ static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+static IrInstruction *ir_resolve_ptr_of_array_to_unknown_len_ptr(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, TypeTableEntry *wanted_type)
+{
+ assert(value->value.type->id == TypeTableEntryIdPointer);
+ wanted_type = adjust_ptr_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment);
+
+ if (instr_is_comptime(value)) {
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ if (pointee->special != ConstValSpecialRuntime) {
+ IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, wanted_type);
+ result->value.type = wanted_type;
+ result->value.data.x_ptr.special = ConstPtrSpecialBaseArray;
+ result->value.data.x_ptr.mut = value->value.data.x_ptr.mut;
+ result->value.data.x_ptr.data.base_array.array_val = pointee;
+ result->value.data.x_ptr.data.base_array.elem_index = 0;
+ result->value.data.x_ptr.data.base_array.is_cstr = false;
+ return result;
+ }
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
+static IrInstruction *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, TypeTableEntry *wanted_type)
+{
+ wanted_type = adjust_slice_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment);
+
+ if (instr_is_comptime(value)) {
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ if (pointee->special != ConstValSpecialRuntime) {
+ assert(value->value.type->id == TypeTableEntryIdPointer);
+ TypeTableEntry *array_type = value->value.type->data.pointer.child_type;
+ assert(is_slice(wanted_type));
+ bool is_const = wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const;
+
+ IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, wanted_type);
+ init_const_slice(ira->codegen, &result->value, pointee, 0, array_type->data.array.len, is_const);
+ result->value.data.x_struct.fields[slice_ptr_index].data.x_ptr.mut =
+ value->value.data.x_ptr.mut;
+ result->value.type = wanted_type;
+ return result;
+ }
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpPtrOfArrayToSlice);
+ result->value.type = wanted_type;
+ ir_add_alloca(ira, result, wanted_type);
+ return result;
+}
+
static bool is_container(TypeTableEntry *type) {
return type->id == TypeTableEntryIdStruct ||
type->id == TypeTableEntryIdEnum ||
@@ -9937,6 +10023,35 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+ // explicit *[N]T to [*]T
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ {
+ return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
+ }
+
+ // explicit *[N]T to []T
+ if (is_slice(wanted_type) &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ {
+ TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ assert(slice_ptr_type->id == TypeTableEntryIdPointer);
+ if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ {
+ return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type);
+ }
+ }
+
+
// explicit cast from child type of maybe type to maybe type
if (wanted_type->id == TypeTableEntryIdMaybe) {
TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
@@ -13150,6 +13265,13 @@ static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, ui
ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count);
}
+static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align) {
+ assert(is_slice(slice_type));
+ TypeTableEntry *ptr_type = adjust_ptr_align(g, slice_type->data.structure.fields[slice_ptr_index].type_entry,
+ new_align);
+ return get_slice_type(g, ptr_type);
+}
+
static TypeTableEntry *adjust_ptr_len(CodeGen *g, TypeTableEntry *ptr_type, PtrLen ptr_len) {
assert(ptr_type->id == TypeTableEntryIdPointer);
return get_pointer_to_type_extra(g,
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index 7358a4ffd8..c3ef24cd78 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -384,3 +384,19 @@ test "const slice widen cast" {
assert(@bitCast(u32, bytes) == 0x12121212);
}
+
+test "single-item pointer of array to slice and to unknown length pointer" {
+ testCastPtrOfArrayToSliceAndPtr();
+ comptime testCastPtrOfArrayToSliceAndPtr();
+}
+
+fn testCastPtrOfArrayToSliceAndPtr() void {
+ var array = "ao" ++ "eu"; // TODO https://github.com/ziglang/zig/issues/1076
+ const x: [*]u8 = &array;
+ x[0] += 1;
+ assert(mem.eql(u8, array[0..], "boeu"));
+ const y: []u8 = &array;
+ y[0] += 1;
+ assert(mem.eql(u8, array[0..], "coeu"));
+}
+
From ffb089a9f5fa95fd559a7c88081310d0be73f206 Mon Sep 17 00:00:00 2001
From: Marc Tiehuis
Date: Fri, 8 Jun 2018 17:43:13 +1200
Subject: [PATCH 16/49] Fix JSON parser handling of a comma after an empty object or array
---
std/json.zig | 18 +++++++++++++-----
std/json_test.zig | 10 ++++++++++
2 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/std/json.zig b/std/json.zig
index 6cf83eef1a..03b19a7fa4 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -324,7 +324,9 @@ pub const StreamingParser = struct {
p.complete = true;
p.state = State.TopLevelEnd;
},
- else => {},
+ else => {
+ p.state = State.ValueEnd;
+ },
}
token.* = Token.initMarker(Token.Id.ObjectEnd);
@@ -348,7 +350,9 @@ pub const StreamingParser = struct {
p.complete = true;
p.state = State.TopLevelEnd;
},
- else => {},
+ else => {
+ p.state = State.ValueEnd;
+ },
}
token.* = Token.initMarker(Token.Id.ArrayEnd);
@@ -970,7 +974,7 @@ pub fn validate(s: []const u8) bool {
var token1: ?Token = undefined;
var token2: ?Token = undefined;
- p.feed(c, *token1, *token2) catch |err| {
+ p.feed(c, &token1, &token2) catch |err| {
return false;
};
}
@@ -978,6 +982,10 @@ pub fn validate(s: []const u8) bool {
return p.complete;
}
+test "json validate" {
+ debug.assert(validate("{}"));
+}
+
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayList = std.ArrayList;
@@ -1230,7 +1238,7 @@ pub const Parser = struct {
_ = p.stack.pop();
p.state = State.ObjectKey;
},
- else => {
+ Token.Id.ObjectEnd, Token.Id.ArrayEnd => {
unreachable;
},
}
@@ -1270,7 +1278,7 @@ pub const Parser = struct {
Token.Id.Null => {
try array.append(Value.Null);
},
- else => {
+ Token.Id.ObjectEnd => {
unreachable;
},
}
diff --git a/std/json_test.zig b/std/json_test.zig
index cb054d8e4e..8c8862441a 100644
--- a/std/json_test.zig
+++ b/std/json_test.zig
@@ -17,6 +17,16 @@ fn any(comptime s: []const u8) void {
std.debug.assert(true);
}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Additional tests not part of test JSONTestSuite.
+
+test "y_trailing_comma_after_empty" {
+ ok(
+ \\{"1":[],"2":{},"3":"4"}
+ );
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
test "y_array_arraysWithSpaces" {
From bf3d1c1aab336c4a650bb67dcaca132d4a0f6164 Mon Sep 17 00:00:00 2001
From: Jimmi HC
Date: Fri, 8 Jun 2018 09:21:31 +0200
Subject: [PATCH 17/49] Allow access of array.len through a pointer
---
src/analyze.cpp | 14 ++++++++++++--
src/analyze.hpp | 2 ++
src/ir.cpp | 8 ++++++--
test/cases/array.zig | 10 +++++++++-
4 files changed, 29 insertions(+), 5 deletions(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index e05fb23237..84f1473ea1 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3761,14 +3761,24 @@ static bool is_container(TypeTableEntry *type_entry) {
zig_unreachable();
}
+bool is_ref(TypeTableEntry *type_entry) {
+ return type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle;
+}
+
+bool is_array_ref(TypeTableEntry *type_entry) {
+ TypeTableEntry *array = is_ref(type_entry) ?
+ type_entry->data.pointer.child_type : type_entry;
+ return array->id == TypeTableEntryIdArray;
+}
+
bool is_container_ref(TypeTableEntry *type_entry) {
- return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ?
+ return is_ref(type_entry) ?
is_container(type_entry->data.pointer.child_type) : is_container(type_entry);
}
TypeTableEntry *container_ref_type(TypeTableEntry *type_entry) {
assert(is_container_ref(type_entry));
- return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ?
+ return is_ref(type_entry) ?
type_entry->data.pointer.child_type : type_entry;
}
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 25bda198d6..88e06b2390 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -70,6 +70,8 @@ TypeUnionField *find_union_type_field(TypeTableEntry *type_entry, Buf *name);
TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag);
TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt *tag);
+bool is_ref(TypeTableEntry *type_entry);
+bool is_array_ref(TypeTableEntry *type_entry);
bool is_container_ref(TypeTableEntry *type_entry);
void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node);
void scan_import(CodeGen *g, ImportTableEntry *import);
diff --git a/src/ir.cpp b/src/ir.cpp
index cc4ffb44a9..4766bff5e7 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -13846,10 +13846,14 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
ir_link_new_instruction(result, &field_ptr_instruction->base);
return result->value.type;
}
- } else if (container_type->id == TypeTableEntryIdArray) {
+ } else if (is_array_ref(container_type)) {
if (buf_eql_str(field_name, "len")) {
ConstExprValue *len_val = create_const_vals(1);
- init_const_usize(ira->codegen, len_val, container_type->data.array.len);
+ if (container_type->id == TypeTableEntryIdPointer) {
+ init_const_usize(ira->codegen, len_val, container_type->data.pointer.child_type->data.array.len);
+ } else {
+ init_const_usize(ira->codegen, len_val, container_type->data.array.len);
+ }
TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize;
bool ptr_is_const = true;
diff --git a/test/cases/array.zig b/test/cases/array.zig
index ef919b27bd..b481261b4f 100644
--- a/test/cases/array.zig
+++ b/test/cases/array.zig
@@ -116,6 +116,15 @@ test "array len property" {
assert(@typeOf(x).len == 5);
}
+test "array len field" {
+ var arr = [4]u8{ 0, 0, 0, 0 };
+ var ptr = &arr;
+ assert(arr.len == 4);
+ comptime assert(arr.len == 4);
+ assert(ptr.len == 4);
+ comptime assert(ptr.len == 4);
+}
+
test "single-item pointer to array indexing and slicing" {
testSingleItemPtrArrayIndexSlice();
comptime testSingleItemPtrArrayIndexSlice();
@@ -143,4 +152,3 @@ fn testImplicitCastSingleItemPtr() void {
slice[0] += 1;
assert(byte == 101);
}
-
From 39fa313ad881d242c4fbb6789bab26fed72449a2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Fri, 8 Jun 2018 14:57:16 -0400
Subject: [PATCH 18/49] disable some implicit casts for unknown length pointers
closes #770
---
src/ir.cpp | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 4766bff5e7..e62ec71875 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -7994,6 +7994,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicit &const [N]T to []const T
if (is_slice(expected_type) &&
actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
actual_type->data.pointer.is_const &&
actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
{
@@ -8012,6 +8013,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicit [N]T to &const []const T
if (expected_type->id == TypeTableEntryIdPointer &&
expected_type->data.pointer.is_const &&
+ expected_type->data.pointer.ptr_len == PtrLenSingle &&
is_slice(expected_type->data.pointer.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
@@ -8074,6 +8076,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
actual_type->id == TypeTableEntryIdComptimeInt)
{
if (expected_type->id == TypeTableEntryIdPointer &&
+ expected_type->data.pointer.ptr_len == PtrLenSingle &&
expected_type->data.pointer.is_const)
{
if (ir_num_lit_fits_in_other_type(ira, value, expected_type->data.pointer.child_type, false)) {
@@ -8121,7 +8124,10 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit enum to &const union which has the enum as the tag type
- if (actual_type->id == TypeTableEntryIdEnum && expected_type->id == TypeTableEntryIdPointer) {
+ if (actual_type->id == TypeTableEntryIdEnum &&
+ expected_type->id == TypeTableEntryIdPointer &&
+ expected_type->data.pointer.ptr_len == PtrLenSingle)
+ {
TypeTableEntry *union_type = expected_type->data.pointer.child_type;
if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
@@ -8141,7 +8147,11 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicitly take a const pointer to something
if (!type_requires_comptime(actual_type)) {
TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (types_match_const_cast_only(ira, expected_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) {
+ if (expected_type->id == TypeTableEntryIdPointer &&
+ expected_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, expected_type, const_ptr_actual,
+ source_node).id == ConstCastResultIdOk)
+ {
return ImplicitCastMatchResultYes;
}
}
From 1a9d2f3aae780873eefedaf3fdf095b3cd87b55f Mon Sep 17 00:00:00 2001
From: isaachier
Date: Fri, 8 Jun 2018 19:24:48 -0400
Subject: [PATCH 19/49] Fix error handling in Buffer::fromOwnedSlice (#1082)
---
std/buffer.zig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/std/buffer.zig b/std/buffer.zig
index 469f81709b..0d82918580 100644
--- a/std/buffer.zig
+++ b/std/buffer.zig
@@ -41,9 +41,9 @@ pub const Buffer = struct {
/// Buffer takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Must deinitialize with deinit.
- pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) Buffer {
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) !Buffer {
var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) };
- self.list.append(0);
+ try self.list.append(0);
return self;
}
From 6edd81109d16178f1dc688dacee4b38964b617c4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 9 Jun 2018 00:15:23 -0400
Subject: [PATCH 20/49] nullable pointers follow const-casting rules
Any *T -> ?*T cast is allowed implicitly, even
when it occurs deep inside the type, and the cast
is a no-op at runtime.
In order to add this, I had to make the comptime value
representation of nullable pointers the same as the
comptime value representation of normal pointers,
so that we don't have to do any recursive transformation
of values when doing this kind of cast.
---
src/all_types.hpp | 5 +-
src/analyze.cpp | 280 ++++++++++++++++++++++++--------------------
src/codegen.cpp | 158 ++++++++++++-------------
src/ir.cpp | 121 +++++++++++++------
test/cases/cast.zig | 10 +-
5 files changed, 322 insertions(+), 252 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index c671682363..14a44ea768 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -144,6 +144,9 @@ enum ConstPtrSpecial {
// understand the value of pointee at compile time. However, we will still
// emit a binary with a compile time known address.
// In this case index is the numeric address value.
+ // We also use this for null pointer. We need the data layout for ConstCastOnly == true
+ // types to be the same, so all nullables of pointer types use x_ptr
+ // instead of x_nullable
ConstPtrSpecialHardCodedAddr,
// This means that the pointer represents memory of assigning to _.
// That is, storing discards the data, and loading is invalid.
@@ -251,7 +254,7 @@ struct ConstExprValue {
bool x_bool;
ConstBoundFnValue x_bound_fn;
TypeTableEntry *x_type;
- ConstExprValue *x_maybe;
+ ConstExprValue *x_nullable;
ConstErrValue x_err_union;
ErrorTableEntry *x_err_set;
BigInt x_enum_tag;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 84f1473ea1..16b2cb0590 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -4578,6 +4578,52 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
return true;
}
+static uint32_t hash_const_val_ptr(ConstExprValue *const_val) {
+ uint32_t hash_val = 0;
+ switch (const_val->data.x_ptr.mut) {
+ case ConstPtrMutRuntimeVar:
+ hash_val += (uint32_t)3500721036;
+ break;
+ case ConstPtrMutComptimeConst:
+ hash_val += (uint32_t)4214318515;
+ break;
+ case ConstPtrMutComptimeVar:
+ hash_val += (uint32_t)1103195694;
+ break;
+ }
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ hash_val += (uint32_t)2478261866;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee);
+ return hash_val;
+ case ConstPtrSpecialBaseArray:
+ hash_val += (uint32_t)1764906839;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
+ hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
+ hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 1297263887 : 200363492;
+ return hash_val;
+ case ConstPtrSpecialBaseStruct:
+ hash_val += (uint32_t)3518317043;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);
+ hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index);
+ return hash_val;
+ case ConstPtrSpecialHardCodedAddr:
+ hash_val += (uint32_t)4048518294;
+ hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr);
+ return hash_val;
+ case ConstPtrSpecialDiscard:
+ hash_val += 2010123162;
+ return hash_val;
+ case ConstPtrSpecialFunction:
+ hash_val += (uint32_t)2590901619;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
+ return hash_val;
+ }
+ zig_unreachable();
+}
+
static uint32_t hash_const_val(ConstExprValue *const_val) {
assert(const_val->special == ConstValSpecialStatic);
switch (const_val->type->id) {
@@ -4646,51 +4692,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
assert(const_val->data.x_ptr.special == ConstPtrSpecialFunction);
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
case TypeTableEntryIdPointer:
- {
- uint32_t hash_val = 0;
- switch (const_val->data.x_ptr.mut) {
- case ConstPtrMutRuntimeVar:
- hash_val += (uint32_t)3500721036;
- break;
- case ConstPtrMutComptimeConst:
- hash_val += (uint32_t)4214318515;
- break;
- case ConstPtrMutComptimeVar:
- hash_val += (uint32_t)1103195694;
- break;
- }
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- hash_val += (uint32_t)2478261866;
- hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee);
- return hash_val;
- case ConstPtrSpecialBaseArray:
- hash_val += (uint32_t)1764906839;
- hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
- hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
- hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 1297263887 : 200363492;
- return hash_val;
- case ConstPtrSpecialBaseStruct:
- hash_val += (uint32_t)3518317043;
- hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);
- hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index);
- return hash_val;
- case ConstPtrSpecialHardCodedAddr:
- hash_val += (uint32_t)4048518294;
- hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr);
- return hash_val;
- case ConstPtrSpecialDiscard:
- hash_val += 2010123162;
- return hash_val;
- case ConstPtrSpecialFunction:
- hash_val += (uint32_t)2590901619;
- hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
- return hash_val;
- }
- zig_unreachable();
- }
+ return hash_const_val_ptr(const_val);
case TypeTableEntryIdPromise:
// TODO better hashing algorithm
return 223048345;
@@ -4708,10 +4710,14 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
// TODO better hashing algorithm
return 2709806591;
case TypeTableEntryIdMaybe:
- if (const_val->data.x_maybe) {
- return hash_const_val(const_val->data.x_maybe) * 1992916303;
+ if (get_codegen_ptr_type(const_val->type) != nullptr) {
+ return hash_const_val(const_val) * 1992916303;
} else {
- return 4016830364;
+ if (const_val->data.x_nullable) {
+ return hash_const_val(const_val->data.x_nullable) * 1992916303;
+ } else {
+ return 4016830364;
+ }
}
case TypeTableEntryIdErrorUnion:
// TODO better hashing algorithm
@@ -4812,9 +4818,11 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
return false;
case TypeTableEntryIdMaybe:
- if (value->data.x_maybe == nullptr)
+ if (get_codegen_ptr_type(value->type) != nullptr)
+ return value->data.x_ptr.mut == ConstPtrMutComptimeVar;
+ if (value->data.x_nullable == nullptr)
return false;
- return can_mutate_comptime_var_state(value->data.x_maybe);
+ return can_mutate_comptime_var_state(value->data.x_nullable);
case TypeTableEntryIdErrorUnion:
if (value->data.x_err_union.err != nullptr)
@@ -5340,6 +5348,52 @@ bool ir_get_var_is_comptime(VariableTableEntry *var) {
return var->is_comptime->value.data.x_bool;
}
+bool const_values_equal_ptr(ConstExprValue *a, ConstExprValue *b) {
+ if (a->data.x_ptr.special != b->data.x_ptr.special)
+ return false;
+ if (a->data.x_ptr.mut != b->data.x_ptr.mut)
+ return false;
+ switch (a->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee)
+ return false;
+ return true;
+ case ConstPtrSpecialBaseArray:
+ if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val &&
+ a->data.x_ptr.data.base_array.array_val->global_refs !=
+ b->data.x_ptr.data.base_array.array_val->global_refs)
+ {
+ return false;
+ }
+ if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index)
+ return false;
+ if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr)
+ return false;
+ return true;
+ case ConstPtrSpecialBaseStruct:
+ if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val &&
+ a->data.x_ptr.data.base_struct.struct_val->global_refs !=
+ b->data.x_ptr.data.base_struct.struct_val->global_refs)
+ {
+ return false;
+ }
+ if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index)
+ return false;
+ return true;
+ case ConstPtrSpecialHardCodedAddr:
+ if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr)
+ return false;
+ return true;
+ case ConstPtrSpecialDiscard:
+ return true;
+ case ConstPtrSpecialFunction:
+ return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry;
+ }
+ zig_unreachable();
+}
+
bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
assert(a->type->id == b->type->id);
assert(a->special == ConstValSpecialStatic);
@@ -5391,49 +5445,7 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
return bigint_cmp(&a->data.x_bigint, &b->data.x_bigint) == CmpEQ;
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
- if (a->data.x_ptr.special != b->data.x_ptr.special)
- return false;
- if (a->data.x_ptr.mut != b->data.x_ptr.mut)
- return false;
- switch (a->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee)
- return false;
- return true;
- case ConstPtrSpecialBaseArray:
- if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val &&
- a->data.x_ptr.data.base_array.array_val->global_refs !=
- b->data.x_ptr.data.base_array.array_val->global_refs)
- {
- return false;
- }
- if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index)
- return false;
- if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr)
- return false;
- return true;
- case ConstPtrSpecialBaseStruct:
- if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val &&
- a->data.x_ptr.data.base_struct.struct_val->global_refs !=
- b->data.x_ptr.data.base_struct.struct_val->global_refs)
- {
- return false;
- }
- if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index)
- return false;
- return true;
- case ConstPtrSpecialHardCodedAddr:
- if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr)
- return false;
- return true;
- case ConstPtrSpecialDiscard:
- return true;
- case ConstPtrSpecialFunction:
- return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry;
- }
- zig_unreachable();
+ return const_values_equal_ptr(a, b);
case TypeTableEntryIdArray:
zig_panic("TODO");
case TypeTableEntryIdStruct:
@@ -5449,10 +5461,12 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
case TypeTableEntryIdNull:
zig_panic("TODO");
case TypeTableEntryIdMaybe:
- if (a->data.x_maybe == nullptr || b->data.x_maybe == nullptr) {
- return (a->data.x_maybe == nullptr && b->data.x_maybe == nullptr);
+ if (get_codegen_ptr_type(a->type) != nullptr)
+ return const_values_equal_ptr(a, b);
+ if (a->data.x_nullable == nullptr || b->data.x_nullable == nullptr) {
+ return (a->data.x_nullable == nullptr && b->data.x_nullable == nullptr);
} else {
- return const_values_equal(a->data.x_maybe, b->data.x_maybe);
+ return const_values_equal(a->data.x_nullable, b->data.x_nullable);
}
case TypeTableEntryIdErrorUnion:
zig_panic("TODO");
@@ -5525,6 +5539,41 @@ void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue *
}
}
+void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeTableEntry *type_entry) {
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ case ConstPtrSpecialBaseStruct:
+ buf_appendf(buf, "*");
+ render_const_value(g, buf, const_ptr_pointee(g, const_val));
+ return;
+ case ConstPtrSpecialBaseArray:
+ if (const_val->data.x_ptr.data.base_array.is_cstr) {
+ buf_appendf(buf, "*(c str lit)");
+ return;
+ } else {
+ buf_appendf(buf, "*");
+ render_const_value(g, buf, const_ptr_pointee(g, const_val));
+ return;
+ }
+ case ConstPtrSpecialHardCodedAddr:
+ buf_appendf(buf, "(*%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name),
+ const_val->data.x_ptr.data.hard_coded_addr.addr);
+ return;
+ case ConstPtrSpecialDiscard:
+ buf_append_str(buf, "*_");
+ return;
+ case ConstPtrSpecialFunction:
+ {
+ FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry;
+ buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name));
+ return;
+ }
+ }
+ zig_unreachable();
+}
+
void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
switch (const_val->special) {
case ConstValSpecialRuntime:
@@ -5601,38 +5650,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
return;
}
case TypeTableEntryIdPointer:
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- case ConstPtrSpecialBaseStruct:
- buf_appendf(buf, "&");
- render_const_value(g, buf, const_ptr_pointee(g, const_val));
- return;
- case ConstPtrSpecialBaseArray:
- if (const_val->data.x_ptr.data.base_array.is_cstr) {
- buf_appendf(buf, "&(c str lit)");
- return;
- } else {
- buf_appendf(buf, "&");
- render_const_value(g, buf, const_ptr_pointee(g, const_val));
- return;
- }
- case ConstPtrSpecialHardCodedAddr:
- buf_appendf(buf, "(&%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name),
- const_val->data.x_ptr.data.hard_coded_addr.addr);
- return;
- case ConstPtrSpecialDiscard:
- buf_append_str(buf, "&_");
- return;
- case ConstPtrSpecialFunction:
- {
- FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry;
- buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name));
- return;
- }
- }
- zig_unreachable();
+ return render_const_val_ptr(g, buf, const_val, type_entry);
case TypeTableEntryIdBlock:
{
AstNode *node = const_val->data.x_block->source_node;
@@ -5692,8 +5710,10 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
}
case TypeTableEntryIdMaybe:
{
- if (const_val->data.x_maybe) {
- render_const_value(g, buf, const_val->data.x_maybe);
+ if (get_codegen_ptr_type(const_val->type) != nullptr)
+ return render_const_val_ptr(g, buf, const_val, type_entry->data.maybe.child_type);
+ if (const_val->data.x_nullable) {
+ render_const_value(g, buf, const_val->data.x_nullable);
} else {
buf_appendf(buf, "null");
}
diff --git a/src/codegen.cpp b/src/codegen.cpp
index fab2ad659e..65b465a519 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5020,6 +5020,79 @@ static bool is_llvm_value_unnamed_type(TypeTableEntry *type_entry, LLVMValueRef
return LLVMTypeOf(val) != type_entry->type_ref;
}
+static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) {
+ render_const_val_global(g, const_val, name);
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ case ConstPtrSpecialDiscard:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ {
+ ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
+ render_const_val(g, pointee, "");
+ render_const_val_global(g, pointee, "");
+ ConstExprValue *other_val = pointee;
+ const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ case ConstPtrSpecialBaseArray:
+ {
+ ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
+ size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
+ assert(array_const_val->type->id == TypeTableEntryIdArray);
+ if (array_const_val->type->zero_bits) {
+ // make this a null pointer
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val,
+ elem_index);
+ LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
+ const_val->global_refs->llvm_value = ptr_val;
+ render_const_val_global(g, const_val, "");
+ return ptr_val;
+ }
+ case ConstPtrSpecialBaseStruct:
+ {
+ ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
+ assert(struct_const_val->type->id == TypeTableEntryIdStruct);
+ if (struct_const_val->type->zero_bits) {
+ // make this a null pointer
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index;
+ size_t gen_field_index =
+ struct_const_val->type->data.structure.fields[src_field_index].gen_index;
+ LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val,
+ gen_field_index);
+ LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
+ const_val->global_refs->llvm_value = ptr_val;
+ render_const_val_global(g, const_val, "");
+ return ptr_val;
+ }
+ case ConstPtrSpecialHardCodedAddr:
+ {
+ uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ case ConstPtrSpecialFunction:
+ return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref);
+ }
+ zig_unreachable();
+}
+
static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const char *name) {
TypeTableEntry *type_entry = const_val->type;
assert(!type_entry->zero_bits);
@@ -5068,19 +5141,15 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
if (child_type->zero_bits) {
- return LLVMConstInt(LLVMInt1Type(), const_val->data.x_maybe ? 1 : 0, false);
+ return LLVMConstInt(LLVMInt1Type(), const_val->data.x_nullable ? 1 : 0, false);
} else if (type_is_codegen_pointer(child_type)) {
- if (const_val->data.x_maybe) {
- return gen_const_val(g, const_val->data.x_maybe, "");
- } else {
- return LLVMConstNull(child_type->type_ref);
- }
+ return gen_const_val_ptr(g, const_val, name);
} else {
LLVMValueRef child_val;
LLVMValueRef maybe_val;
bool make_unnamed_struct;
- if (const_val->data.x_maybe) {
- child_val = gen_const_val(g, const_val->data.x_maybe, "");
+ if (const_val->data.x_nullable) {
+ child_val = gen_const_val(g, const_val->data.x_nullable, "");
maybe_val = LLVMConstAllOnes(LLVMInt1Type());
make_unnamed_struct = is_llvm_value_unnamed_type(const_val->type, child_val);
@@ -5270,78 +5339,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
assert(const_val->data.x_ptr.mut == ConstPtrMutComptimeConst);
return fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry);
case TypeTableEntryIdPointer:
- {
- render_const_val_global(g, const_val, name);
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- case ConstPtrSpecialDiscard:
- zig_unreachable();
- case ConstPtrSpecialRef:
- {
- ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
- render_const_val(g, pointee, "");
- render_const_val_global(g, pointee, "");
- ConstExprValue *other_val = pointee;
- const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- case ConstPtrSpecialBaseArray:
- {
- ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
- size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
- assert(array_const_val->type->id == TypeTableEntryIdArray);
- if (array_const_val->type->zero_bits) {
- // make this a null pointer
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val,
- elem_index);
- LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
- const_val->global_refs->llvm_value = ptr_val;
- render_const_val_global(g, const_val, "");
- return ptr_val;
- }
- case ConstPtrSpecialBaseStruct:
- {
- ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
- assert(struct_const_val->type->id == TypeTableEntryIdStruct);
- if (struct_const_val->type->zero_bits) {
- // make this a null pointer
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index;
- size_t gen_field_index =
- struct_const_val->type->data.structure.fields[src_field_index].gen_index;
- LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val,
- gen_field_index);
- LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
- const_val->global_refs->llvm_value = ptr_val;
- render_const_val_global(g, const_val, "");
- return ptr_val;
- }
- case ConstPtrSpecialHardCodedAddr:
- {
- uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- case ConstPtrSpecialFunction:
- return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref);
- }
- }
- zig_unreachable();
+ return gen_const_val_ptr(g, const_val, name);
case TypeTableEntryIdErrorUnion:
{
TypeTableEntry *payload_type = type_entry->data.error_union.payload_type;
diff --git a/src/ir.cpp b/src/ir.cpp
index e62ec71875..13ecfd4233 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -62,6 +62,7 @@ enum ConstCastResultId {
ConstCastResultIdType,
ConstCastResultIdUnresolvedInferredErrSet,
ConstCastResultIdAsyncAllocatorType,
+ ConstCastResultIdNullWrapPtr,
};
struct ConstCastErrSetMismatch {
@@ -90,6 +91,7 @@ struct ConstCastOnly {
ConstCastOnly *error_union_error_set;
ConstCastOnly *return_type;
ConstCastOnly *async_allocator_type;
+ ConstCastOnly *null_wrap_ptr_child;
ConstCastArg fn_arg;
ConstCastArgNoAlias arg_no_alias;
} data;
@@ -7660,6 +7662,21 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
if (expected_type == actual_type)
return result;
+ // * and [*] can do a const-cast-only to ?* and ?[*], respectively
+ if (expected_type->id == TypeTableEntryIdMaybe &&
+ expected_type->data.maybe.child_type->id == TypeTableEntryIdPointer &&
+ actual_type->id == TypeTableEntryIdPointer)
+ {
+ ConstCastOnly child = types_match_const_cast_only(ira,
+ expected_type->data.maybe.child_type, actual_type, source_node);
+ if (child.id != ConstCastResultIdOk) {
+ result.id = ConstCastResultIdNullWrapPtr;
+ result.data.null_wrap_ptr_child = allocate_nonzero(1);
+ *result.data.null_wrap_ptr_child = child;
+ }
+ return result;
+ }
+
// pointer const
if (expected_type->id == TypeTableEntryIdPointer &&
actual_type->id == TypeTableEntryIdPointer &&
@@ -8741,7 +8758,8 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
zig_panic("TODO");
case CastOpNoop:
{
- copy_const_val(const_val, other_val, other_val->special == ConstValSpecialStatic);
+ bool same_global_refs = other_val->special == ConstValSpecialStatic;
+ copy_const_val(const_val, other_val, same_global_refs);
const_val->type = new_type;
break;
}
@@ -9189,9 +9207,13 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc
IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb,
source_instr->scope, source_instr->source_node);
- const_instruction->base.value.type = wanted_type;
const_instruction->base.value.special = ConstValSpecialStatic;
- const_instruction->base.value.data.x_maybe = val;
+ if (get_codegen_ptr_type(wanted_type) != nullptr) {
+ copy_const_val(&const_instruction->base.value, val, val->data.x_ptr.mut == ConstPtrMutComptimeConst);
+ } else {
+ const_instruction->base.value.data.x_nullable = val;
+ }
+ const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
}
@@ -9346,9 +9368,14 @@ static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *so
assert(val);
IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb, source_instr->scope, source_instr->source_node);
- const_instruction->base.value.type = wanted_type;
const_instruction->base.value.special = ConstValSpecialStatic;
- const_instruction->base.value.data.x_maybe = nullptr;
+ if (get_codegen_ptr_type(wanted_type) != nullptr) {
+ const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
+ const_instruction->base.value.data.x_ptr.data.hard_coded_addr.addr = 0;
+ } else {
+ const_instruction->base.value.data.x_nullable = nullptr;
+ }
+ const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
}
@@ -10062,7 +10089,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
- // explicit cast from child type of maybe type to maybe type
+ // explicit cast from T to ?T
+ // note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
if (wanted_type->id == TypeTableEntryIdMaybe) {
TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) {
@@ -10113,7 +10141,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from [N]T to %[]const T
+ // explicit cast from [N]T to E![]const T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
is_slice(wanted_type->data.error_union.payload_type) &&
actual_type->id == TypeTableEntryIdArray)
@@ -10143,7 +10171,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type);
}
- // explicit cast from T to %?T
+ // explicit cast from T to E!?T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
wanted_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe &&
actual_type->id != TypeTableEntryIdMaybe)
@@ -10167,7 +10195,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit cast from number literal to another type
- // explicit cast from number literal to &const integer
+ // explicit cast from number literal to *const integer
if (actual_type->id == TypeTableEntryIdComptimeFloat ||
actual_type->id == TypeTableEntryIdComptimeInt)
{
@@ -10391,6 +10419,7 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope,
source_instruction->source_node, child_type);
copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
+ result->value.type = child_type;
return result;
}
}
@@ -10708,6 +10737,16 @@ static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) {
}
}
+static bool nullable_value_is_null(ConstExprValue *val) {
+ assert(val->special == ConstValSpecialStatic);
+ if (get_codegen_ptr_type(val->type) != nullptr) {
+ return val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
+ val->data.x_ptr.data.hard_coded_addr.addr == 0;
+ } else {
+ return val->data.x_nullable == nullptr;
+ }
+}
+
static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
IrInstruction *op1 = bin_op_instruction->op1->other;
IrInstruction *op2 = bin_op_instruction->op2->other;
@@ -10737,7 +10776,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
ConstExprValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad);
if (!maybe_val)
return ira->codegen->builtin_types.entry_invalid;
- bool is_null = (maybe_val->data.x_maybe == nullptr);
+ bool is_null = nullable_value_is_null(maybe_val);
ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base);
out_val->data.x_bool = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
return ira->codegen->builtin_types.entry_bool;
@@ -12015,7 +12054,9 @@ static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_maybe = nullptr;
+ assert(get_codegen_ptr_type(nullable_type) != nullptr);
+ out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
+ out_val->data.x_ptr.data.hard_coded_addr.addr = 0;
return nullable_type;
}
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
@@ -14207,6 +14248,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
static TypeTableEntry *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstructionLoadPtr *load_ptr_instruction) {
IrInstruction *ptr = load_ptr_instruction->ptr->other;
+ if (type_is_invalid(ptr->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *result = ir_get_deref(ira, &load_ptr_instruction->base, ptr);
ir_link_new_instruction(result, &load_ptr_instruction->base);
assert(result->value.type);
@@ -14773,7 +14817,7 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = (maybe_val->data.x_maybe != nullptr);
+ out_val->data.x_bool = !nullable_value_is_null(maybe_val);
return ira->codegen->builtin_types.entry_bool;
}
@@ -14837,13 +14881,18 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
ConstExprValue *maybe_val = const_ptr_pointee(ira->codegen, val);
if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
- if (!maybe_val->data.x_maybe) {
+ if (nullable_value_is_null(maybe_val)) {
ir_add_error(ira, &unwrap_maybe_instruction->base, buf_sprintf("unable to unwrap null"));
return ira->codegen->builtin_types.entry_invalid;
}
ConstExprValue *out_val = ir_build_const_from(ira, &unwrap_maybe_instruction->base);
out_val->data.x_ptr.special = ConstPtrSpecialRef;
- out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_maybe;
+ out_val->data.x_ptr.mut = val->data.x_ptr.mut;
+ if (type_is_codegen_pointer(child_type)) {
+ out_val->data.x_ptr.data.ref.pointee = maybe_val;
+ } else {
+ out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_nullable;
+ }
return result_type;
}
}
@@ -16206,12 +16255,12 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
0, 0);
fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr));
if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) {
- fn_def_fields[6].data.x_maybe = create_const_vals(1);
+ fn_def_fields[6].data.x_nullable = create_const_vals(1);
ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name);
- init_const_slice(ira->codegen, fn_def_fields[6].data.x_maybe, lib_name, 0, buf_len(fn_node->lib_name), true);
+ init_const_slice(ira->codegen, fn_def_fields[6].data.x_nullable, lib_name, 0, buf_len(fn_node->lib_name), true);
+ } else {
+ fn_def_fields[6].data.x_nullable = nullptr;
}
- else
- fn_def_fields[6].data.x_maybe = nullptr;
// return_type: type
ensure_field_index(fn_def_val->type, "return_type", 7);
fn_def_fields[7].special = ConstValSpecialStatic;
@@ -16664,8 +16713,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField");
- for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++)
- {
+ for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) {
TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index];
ConstExprValue *union_field_val = &union_field_array->data.x_array.s_none.elements[union_field_index];
@@ -16676,12 +16724,11 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
inner_fields[1].special = ConstValSpecialStatic;
inner_fields[1].type = get_maybe_type(ira->codegen, type_info_enum_field_type);
- if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef)
- inner_fields[1].data.x_maybe = nullptr;
- else
- {
- inner_fields[1].data.x_maybe = create_const_vals(1);
- make_enum_field_val(inner_fields[1].data.x_maybe, union_field->enum_field, type_info_enum_field_type);
+ if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef) {
+ inner_fields[1].data.x_nullable = nullptr;
+ } else {
+ inner_fields[1].data.x_nullable = create_const_vals(1);
+ make_enum_field_val(inner_fields[1].data.x_nullable, union_field->enum_field, type_info_enum_field_type);
}
inner_fields[2].special = ConstValSpecialStatic;
@@ -16737,8 +16784,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
init_const_slice(ira->codegen, &fields[1], struct_field_array, 0, struct_field_count, false);
- for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++)
- {
+ for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
TypeStructField *struct_field = &type_entry->data.structure.fields[struct_field_index];
ConstExprValue *struct_field_val = &struct_field_array->data.x_array.s_none.elements[struct_field_index];
@@ -16749,15 +16795,14 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
inner_fields[1].special = ConstValSpecialStatic;
inner_fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_usize);
- if (!type_has_bits(struct_field->type_entry))
- inner_fields[1].data.x_maybe = nullptr;
- else
- {
+ if (!type_has_bits(struct_field->type_entry)) {
+ inner_fields[1].data.x_nullable = nullptr;
+ } else {
size_t byte_offset = LLVMOffsetOfElement(ira->codegen->target_data_ref, type_entry->type_ref, struct_field->gen_index);
- inner_fields[1].data.x_maybe = create_const_vals(1);
- inner_fields[1].data.x_maybe->special = ConstValSpecialStatic;
- inner_fields[1].data.x_maybe->type = ira->codegen->builtin_types.entry_usize;
- bigint_init_unsigned(&inner_fields[1].data.x_maybe->data.x_bigint, byte_offset);
+ inner_fields[1].data.x_nullable = create_const_vals(1);
+ inner_fields[1].data.x_nullable->special = ConstValSpecialStatic;
+ inner_fields[1].data.x_nullable->type = ira->codegen->builtin_types.entry_usize;
+ bigint_init_unsigned(&inner_fields[1].data.x_nullable->data.x_bigint, byte_offset);
}
inner_fields[2].special = ConstValSpecialStatic;
@@ -19008,9 +19053,6 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
ConstExprValue *val = ir_resolve_const(ira, target, UndefBad);
if (!val)
return ira->codegen->builtin_types.entry_invalid;
- if (target->value.type->id == TypeTableEntryIdMaybe) {
- val = val->data.x_maybe;
- }
if (val->type->id == TypeTableEntryIdPointer && val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
IrInstruction *result = ir_create_const(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, usize);
@@ -19936,6 +19978,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
static TypeTableEntry *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *instruction) {
TypeTableEntry *instruction_type = ir_analyze_instruction_nocast(ira, instruction);
instruction->value.type = instruction_type;
+
if (instruction->other) {
instruction->other->value.type = instruction_type;
} else {
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index c3ef24cd78..da3cba7d80 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -1,5 +1,6 @@
-const assert = @import("std").debug.assert;
-const mem = @import("std").mem;
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
test "int to ptr cast" {
const x = usize(13);
@@ -400,3 +401,8 @@ fn testCastPtrOfArrayToSliceAndPtr() void {
assert(mem.eql(u8, array[0..], "coeu"));
}
+test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
+ const window_name = [1][*]const u8{c"window name"};
+ const x: [*]const ?[*]const u8 = &window_name;
+ assert(mem.eql(u8, std.cstr.toSliceConst(??x[0]), "window name"));
+}
From 9046b5eac01540a783740451a6593ef0207c181e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 9 Jun 2018 11:41:59 -0400
Subject: [PATCH 21/49] fix assertion failure when debug printing comptime
values
---
src/ir.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 13ecfd4233..10098f3c32 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -113,7 +113,7 @@ static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, ui
static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align);
ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) {
- assert(const_val->type->id == TypeTableEntryIdPointer);
+ assert(get_codegen_ptr_type(const_val->type) != nullptr);
assert(const_val->special == ConstValSpecialStatic);
switch (const_val->data.x_ptr.special) {
case ConstPtrSpecialInvalid:
From e0092ee4a573502a4110a6d4aeb7e7d3cdc8987b Mon Sep 17 00:00:00 2001
From: Arthur Elliott
Date: Thu, 7 Jun 2018 10:00:27 -0400
Subject: [PATCH 22/49] add set function to arraylist
so you can set a value without growing the underlying buffer,
with range safety checks
---
std/array_list.zig | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/std/array_list.zig b/std/array_list.zig
index 07a1db6451..7fc97474e6 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -44,6 +44,11 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return l.toSliceConst()[n];
}
+ pub fn set(self: *const Self, n: usize, item: *const T) !void {
+ if (n >= self.len) return error.OutOfBounds;
+ self.items[n] = item.*;
+ }
+
pub fn count(self: *const Self) usize {
return self.len;
}
@@ -162,6 +167,11 @@ test "basic ArrayList test" {
var list = ArrayList(i32).init(debug.global_allocator);
defer list.deinit();
+ // setting on empty list is out of bounds
+ list.set(0, 1) catch |err| {
+ assert(err == error.OutOfBounds);
+ };
+
{
var i: usize = 0;
while (i < 10) : (i += 1) {
@@ -200,6 +210,20 @@ test "basic ArrayList test" {
list.appendSlice([]const i32{}) catch unreachable;
assert(list.len == 9);
+
+ // can only set on indices < self.len
+ list.set(7, 33) catch unreachable;
+ list.set(8, 42) catch unreachable;
+
+ list.set(9, 99) catch |err| {
+ assert(err == error.OutOfBounds);
+ };
+ list.set(10, 123) catch |err| {
+ assert(err == error.OutOfBounds);
+ };
+
+ assert(list.pop() == 42);
+ assert(list.pop() == 33);
}
test "iterator ArrayList test" {
From fc6446702ed8261a1d02b7fbb8410a303cb5daaa Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 9 Jun 2018 12:03:11 -0400
Subject: [PATCH 23/49] clean up std.ArrayList
* add `std.debug.assertError`
* `std.ArrayList` update everything to follow `self` convention
* rename `std.ArrayList.set` to `std.ArrayList.setOrError`
* add `std.ArrayList.set` which asserts
Before 1.0.0 we might remove some of this API, because you can use
`toSlice()` for everything, but it's ok to add these functions as
an experiment before then.
---
std/array_list.zig | 118 +++++++++++++++++++++++---------------------
std/debug/index.zig | 10 ++++
2 files changed, 72 insertions(+), 56 deletions(-)
diff --git a/std/array_list.zig b/std/array_list.zig
index 7fc97474e6..30715f4d6f 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -1,6 +1,7 @@
const std = @import("index.zig");
const debug = std.debug;
const assert = debug.assert;
+const assertError = debug.assertError;
const mem = std.mem;
const Allocator = mem.Allocator;
@@ -28,25 +29,33 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
};
}
- pub fn deinit(l: *const Self) void {
- l.allocator.free(l.items);
+ pub fn deinit(self: *const Self) void {
+ self.allocator.free(self.items);
}
- pub fn toSlice(l: *const Self) []align(A) T {
- return l.items[0..l.len];
+ pub fn toSlice(self: *const Self) []align(A) T {
+ return self.items[0..self.len];
}
- pub fn toSliceConst(l: *const Self) []align(A) const T {
- return l.items[0..l.len];
+ pub fn toSliceConst(self: *const Self) []align(A) const T {
+ return self.items[0..self.len];
}
- pub fn at(l: *const Self, n: usize) T {
- return l.toSliceConst()[n];
+ pub fn at(self: *const Self, n: usize) T {
+ return self.toSliceConst()[n];
}
- pub fn set(self: *const Self, n: usize, item: *const T) !void {
- if (n >= self.len) return error.OutOfBounds;
- self.items[n] = item.*;
+ /// Sets the value at index `i`, or returns `error.OutOfBounds` if
+ /// the index is not in range.
+ pub fn setOrError(self: *const Self, i: usize, item: *const T) !void {
+ if (i >= self.len) return error.OutOfBounds;
+ self.items[i] = item.*;
+ }
+
+ /// Sets the value at index `i`, asserting that the value is in range.
+ pub fn set(self: *const Self, i: usize, item: *const T) void {
+ assert(i < self.len);
+ self.items[i] = item.*;
}
pub fn count(self: *const Self) usize {
@@ -72,58 +81,58 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return result;
}
- pub fn insert(l: *Self, n: usize, item: *const T) !void {
- try l.ensureCapacity(l.len + 1);
- l.len += 1;
+ pub fn insert(self: *Self, n: usize, item: *const T) !void {
+ try self.ensureCapacity(self.len + 1);
+ self.len += 1;
- mem.copy(T, l.items[n + 1 .. l.len], l.items[n .. l.len - 1]);
- l.items[n] = item.*;
+ mem.copy(T, self.items[n + 1 .. self.len], self.items[n .. self.len - 1]);
+ self.items[n] = item.*;
}
- pub fn insertSlice(l: *Self, n: usize, items: []align(A) const T) !void {
- try l.ensureCapacity(l.len + items.len);
- l.len += items.len;
+ pub fn insertSlice(self: *Self, n: usize, items: []align(A) const T) !void {
+ try self.ensureCapacity(self.len + items.len);
+ self.len += items.len;
- mem.copy(T, l.items[n + items.len .. l.len], l.items[n .. l.len - items.len]);
- mem.copy(T, l.items[n .. n + items.len], items);
+ mem.copy(T, self.items[n + items.len .. self.len], self.items[n .. self.len - items.len]);
+ mem.copy(T, self.items[n .. n + items.len], items);
}
- pub fn append(l: *Self, item: *const T) !void {
- const new_item_ptr = try l.addOne();
+ pub fn append(self: *Self, item: *const T) !void {
+ const new_item_ptr = try self.addOne();
new_item_ptr.* = item.*;
}
- pub fn appendSlice(l: *Self, items: []align(A) const T) !void {
- try l.ensureCapacity(l.len + items.len);
- mem.copy(T, l.items[l.len..], items);
- l.len += items.len;
+ pub fn appendSlice(self: *Self, items: []align(A) const T) !void {
+ try self.ensureCapacity(self.len + items.len);
+ mem.copy(T, self.items[self.len..], items);
+ self.len += items.len;
}
- pub fn resize(l: *Self, new_len: usize) !void {
- try l.ensureCapacity(new_len);
- l.len = new_len;
+ pub fn resize(self: *Self, new_len: usize) !void {
+ try self.ensureCapacity(new_len);
+ self.len = new_len;
}
- pub fn shrink(l: *Self, new_len: usize) void {
- assert(new_len <= l.len);
- l.len = new_len;
+ pub fn shrink(self: *Self, new_len: usize) void {
+ assert(new_len <= self.len);
+ self.len = new_len;
}
- pub fn ensureCapacity(l: *Self, new_capacity: usize) !void {
- var better_capacity = l.items.len;
+ pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
+ var better_capacity = self.items.len;
if (better_capacity >= new_capacity) return;
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
- l.items = try l.allocator.alignedRealloc(T, A, l.items, better_capacity);
+ self.items = try self.allocator.alignedRealloc(T, A, self.items, better_capacity);
}
- pub fn addOne(l: *Self) !*T {
- const new_length = l.len + 1;
- try l.ensureCapacity(new_length);
- const result = &l.items[l.len];
- l.len = new_length;
+ pub fn addOne(self: *Self) !*T {
+ const new_length = self.len + 1;
+ try self.ensureCapacity(new_length);
+ const result = &self.items[self.len];
+ self.len = new_length;
return result;
}
@@ -164,13 +173,14 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
}
test "basic ArrayList test" {
- var list = ArrayList(i32).init(debug.global_allocator);
+ var bytes: [1024]u8 = undefined;
+ const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+
+ var list = ArrayList(i32).init(allocator);
defer list.deinit();
- // setting on empty list is out of bounds
- list.set(0, 1) catch |err| {
- assert(err == error.OutOfBounds);
- };
+ // setting on empty list is out of bounds
+ assertError(list.setOrError(0, 1), error.OutOfBounds);
{
var i: usize = 0;
@@ -210,17 +220,13 @@ test "basic ArrayList test" {
list.appendSlice([]const i32{}) catch unreachable;
assert(list.len == 9);
-
+
// can only set on indices < self.len
- list.set(7, 33) catch unreachable;
- list.set(8, 42) catch unreachable;
-
- list.set(9, 99) catch |err| {
- assert(err == error.OutOfBounds);
- };
- list.set(10, 123) catch |err| {
- assert(err == error.OutOfBounds);
- };
+ list.set(7, 33);
+ list.set(8, 42);
+
+ assertError(list.setOrError(9, 99), error.OutOfBounds);
+ assertError(list.setOrError(10, 123), error.OutOfBounds);
assert(list.pop() == 42);
assert(list.pop() == 33);
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 00d9bef121..be47ab76bc 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -88,6 +88,16 @@ pub fn assert(ok: bool) void {
}
}
+/// TODO: add `==` operator for `error_union == error_set`, and then
+/// remove this function
+pub fn assertError(value: var, expected_error: error) void {
+ if (value) {
+ @panic("expected error");
+ } else |actual_error| {
+ assert(actual_error == expected_error);
+ }
+}
+
/// Call this function when you want to panic if the condition is not true.
/// If `ok` is `false`, this function will panic in every release mode.
pub fn assertOrPanic(ok: bool) void {
From 7a9635555b5ddc681134ebe0e0e9f4f373ac5025 Mon Sep 17 00:00:00 2001
From: marleck55 <40122305+marleck55@users.noreply.github.com>
Date: Sat, 9 Jun 2018 18:05:58 +0200
Subject: [PATCH 24/49] std/fmt: Use lowercase k for kilo in base 1000 (#1090)
---
std/fmt/index.zig | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index bbf48df0cf..3844fbb10a 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -559,14 +559,19 @@ pub fn formatBytes(
return output(context, "0B");
}
- const mags = " KMGTPEZY";
+ const mags_si = " kMGTPEZY";
+ const mags_iec = " KMGTPEZY";
const magnitude = switch (radix) {
- 1000 => math.min(math.log2(value) / comptime math.log2(1000), mags.len - 1),
- 1024 => math.min(math.log2(value) / 10, mags.len - 1),
+ 1000 => math.min(math.log2(value) / comptime math.log2(1000), mags_si.len - 1),
+ 1024 => math.min(math.log2(value) / 10, mags_iec.len - 1),
else => unreachable,
};
const new_value = f64(value) / math.pow(f64, f64(radix), f64(magnitude));
- const suffix = mags[magnitude];
+ const suffix = switch (radix) {
+ 1000 => mags_si[magnitude],
+ 1024 => mags_iec[magnitude],
+ else => unreachable,
+ };
try formatFloatDecimal(new_value, width, context, Errors, output);
From d464b2532200de3778ac7362e701791a11150d55 Mon Sep 17 00:00:00 2001
From: Ben Noordhuis
Date: Sun, 10 Jun 2018 04:39:22 +0200
Subject: [PATCH 25/49] support `--target-arch wasm32` (#1094)
Add wasm32 support to the build-obj, build-exe and build-lib commands
of the stage 1 compiler. Wasm64 should work transparently once it's
supported in upstream LLVM.
To export a function:
// lib.zig - for exposition, not necessary for this example
pub use @import("add.zig");
// add.zig
export fn add(a: i32, b: i32) i32 {
return a + b;
}
To import a function:
// cube.zig
extern fn square(x: i32) i32;
export fn cube(x: i32) i32 {
return x * square(x);
}
---
build.zig | 1 +
src/link.cpp | 15 ++++++++++++++-
src/target.cpp | 7 +++++--
src/zig_llvm.cpp | 2 +-
4 files changed, 21 insertions(+), 4 deletions(-)
diff --git a/build.zig b/build.zig
index 109a799ac9..08a47570ef 100644
--- a/build.zig
+++ b/build.zig
@@ -63,6 +63,7 @@ pub fn build(b: *Builder) !void {
exe.addObjectFile(lib);
}
} else {
+ addCppLib(b, exe, cmake_binary_dir, "embedded_lld_wasm");
addCppLib(b, exe, cmake_binary_dir, "embedded_lld_elf");
addCppLib(b, exe, cmake_binary_dir, "embedded_lld_coff");
addCppLib(b, exe, cmake_binary_dir, "embedded_lld_lib");
diff --git a/src/link.cpp b/src/link.cpp
index d454d77aae..d2925cb5a8 100644
--- a/src/link.cpp
+++ b/src/link.cpp
@@ -391,6 +391,19 @@ static void construct_linker_job_elf(LinkJob *lj) {
}
}
+static void construct_linker_job_wasm(LinkJob *lj) {
+ CodeGen *g = lj->codegen;
+
+ lj->args.append("--relocatable"); // So lld doesn't look for _start.
+ lj->args.append("-o");
+ lj->args.append(buf_ptr(&lj->out_file));
+
+ // .o files
+ for (size_t i = 0; i < g->link_objects.length; i += 1) {
+ lj->args.append((const char *)buf_ptr(g->link_objects.at(i)));
+ }
+}
+
//static bool is_target_cyg_mingw(const ZigTarget *target) {
// return (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_Cygnus) ||
// (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_GNU);
@@ -924,7 +937,7 @@ static void construct_linker_job(LinkJob *lj) {
case ZigLLVM_MachO:
return construct_linker_job_macho(lj);
case ZigLLVM_Wasm:
- zig_panic("TODO link wasm");
+ return construct_linker_job_wasm(lj);
}
}
diff --git a/src/target.cpp b/src/target.cpp
index c53ed74d14..bd4aa4d4c2 100644
--- a/src/target.cpp
+++ b/src/target.cpp
@@ -597,12 +597,15 @@ void resolve_target_object_format(ZigTarget *target) {
case ZigLLVM_tce:
case ZigLLVM_tcele:
case ZigLLVM_thumbeb:
- case ZigLLVM_wasm32:
- case ZigLLVM_wasm64:
case ZigLLVM_xcore:
target->oformat= ZigLLVM_ELF;
return;
+ case ZigLLVM_wasm32:
+ case ZigLLVM_wasm64:
+ target->oformat = ZigLLVM_Wasm;
+ return;
+
case ZigLLVM_ppc:
case ZigLLVM_ppc64:
if (is_os_darwin(target)) {
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index 5905fa8167..24f2a8a343 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -838,7 +838,7 @@ bool ZigLLDLink(ZigLLVM_ObjectFormatType oformat, const char **args, size_t arg_
return lld::mach_o::link(array_ref_args, diag);
case ZigLLVM_Wasm:
- assert(false); // TODO ZigLLDLink for Wasm
+ return lld::wasm::link(array_ref_args, false, diag);
}
assert(false); // unreachable
abort();
From ec1b6f66737f8c3cbc0420715c2c502c7e710081 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 9 Jun 2018 23:42:14 -0400
Subject: [PATCH 26/49] breaking syntax change: ??x to x.? (#1095)
See #1023
This also renames Nullable/Maybe to Optional
---
build.zig | 2 +-
doc/codegen.md | 8 +-
doc/langref.html.in | 122 ++++++++++++-----------
example/cat/main.zig | 2 +-
src-self-hosted/arg.zig | 10 +-
src-self-hosted/llvm.zig | 2 +-
src-self-hosted/main.zig | 8 +-
src/all_types.hpp | 44 ++++-----
src/analyze.cpp | 70 +++++++-------
src/ast_render.cpp | 6 +-
src/codegen.cpp | 60 ++++++------
src/ir.cpp | 198 +++++++++++++++++++-------------------
src/ir_print.cpp | 16 +--
src/parser.cpp | 21 ++--
src/tokenizer.cpp | 10 +-
src/tokenizer.hpp | 3 +-
src/translate_c.cpp | 14 +--
std/array_list.zig | 2 +-
std/buf_map.zig | 6 +-
std/event.zig | 4 +-
std/fmt/index.zig | 6 +-
std/hash_map.zig | 8 +-
std/heap.zig | 4 +-
std/json.zig | 12 +--
std/linked_list.zig | 8 +-
std/macho.zig | 2 +-
std/mem.zig | 22 ++---
std/os/child_process.zig | 18 ++--
std/os/index.zig | 4 +-
std/os/linux/vdso.zig | 2 +-
std/os/path.zig | 8 +-
std/segmented_list.zig | 8 +-
std/special/bootstrap.zig | 6 +-
std/special/builtin.zig | 8 +-
std/unicode.zig | 24 ++---
std/zig/ast.zig | 12 +--
std/zig/parse.zig | 35 ++++---
std/zig/parser_test.zig | 5 +-
std/zig/render.zig | 25 +++--
test/cases/bugs/656.zig | 2 +-
test/cases/cast.zig | 50 +++++-----
test/cases/error.zig | 2 +-
test/cases/eval.zig | 2 +-
test/cases/generics.zig | 2 +-
test/cases/misc.zig | 2 +-
test/cases/null.zig | 30 +++---
test/cases/reflection.zig | 2 +-
test/cases/type_info.zig | 14 +--
test/cases/while.zig | 12 +--
test/compile_errors.zig | 16 +--
test/tests.zig | 12 +--
51 files changed, 489 insertions(+), 482 deletions(-)
diff --git a/build.zig b/build.zig
index 08a47570ef..eada37816c 100644
--- a/build.zig
+++ b/build.zig
@@ -75,7 +75,7 @@ pub fn build(b: *Builder) !void {
cxx_compiler,
"-print-file-name=libstdc++.a",
});
- const libstdcxx_path = ??mem.split(libstdcxx_path_padded, "\r\n").next();
+ const libstdcxx_path = mem.split(libstdcxx_path_padded, "\r\n").next().?;
if (mem.eql(u8, libstdcxx_path, "libstdc++.a")) {
warn(
\\Unable to determine path to libstdc++.a
diff --git a/doc/codegen.md b/doc/codegen.md
index 02406fae82..65f12f4875 100644
--- a/doc/codegen.md
+++ b/doc/codegen.md
@@ -6,7 +6,7 @@ Every type has a "handle". If a type is a simple primitive type such as i32 or
f64, the handle is "by value", meaning that we pass around the value itself when
we refer to a value of that type.
-If a type is a container, error union, maybe type, slice, or array, then its
+If a type is a container, error union, optional type, slice, or array, then its
handle is a pointer, and everywhere we refer to a value of this type we refer to
a pointer.
@@ -19,7 +19,7 @@ Error union types are represented as:
payload: T,
}
-Maybe types are represented as:
+Optional types are represented as:
struct {
payload: T,
@@ -28,6 +28,6 @@ Maybe types are represented as:
## Data Optimizations
-Maybe pointer types are special: the 0x0 pointer value is used to represent a
-null pointer. Thus, instead of the struct above, maybe pointer types are
+Optional pointer types are special: the 0x0 pointer value is used to represent a
+null pointer. Thus, instead of the struct above, optional pointer types are
represented as a `usize` in codegen and the handle is by value.
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 6a1f1c3102..4c4a637095 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -156,18 +156,18 @@ pub fn main() void {
true or false,
!true);
- // nullable
- var nullable_value: ?[]const u8 = null;
- assert(nullable_value == null);
+ // optional
+ var optional_value: ?[]const u8 = null;
+ assert(optional_value == null);
- warn("\nnullable 1\ntype: {}\nvalue: {}\n",
- @typeName(@typeOf(nullable_value)), nullable_value);
+ warn("\noptional 1\ntype: {}\nvalue: {}\n",
+ @typeName(@typeOf(optional_value)), optional_value);
- nullable_value = "hi";
- assert(nullable_value != null);
+ optional_value = "hi";
+ assert(optional_value != null);
- warn("\nnullable 2\ntype: {}\nvalue: {}\n",
- @typeName(@typeOf(nullable_value)), nullable_value);
+ warn("\noptional 2\ntype: {}\nvalue: {}\n",
+ @typeName(@typeOf(optional_value)), optional_value);
// error union
var number_or_error: error!i32 = error.ArgNotFound;
@@ -428,7 +428,7 @@ pub fn main() void {
null |
- used to set a nullable type to null |
+ used to set an optional type to null |
undefined |
@@ -440,7 +440,7 @@ pub fn main() void {
- {#see_also|Nullables|this#}
+ {#see_also|Optionals|this#}
{#header_close#}
{#header_open|String Literals#}
{#code_begin|test#}
@@ -988,7 +988,7 @@ a ^= b
a ?? b
|
- - {#link|Nullables#}
+ - {#link|Optionals#}
|
If a is null,
@@ -1003,10 +1003,10 @@ unwrapped == 1234
|
- ??a
|
+ a.?
|
- - {#link|Nullables#}
+ - {#link|Optionals#}
|
@@ -1015,7 +1015,7 @@ unwrapped == 1234
|
const value: ?u32 = 5678;
-??value == 5678
+value.? == 5678
|
@@ -1103,7 +1103,7 @@ unwrapped == 1234
a == null
|
- - {#link|Nullables#}
+ - {#link|Optionals#}
|
@@ -1267,8 +1267,8 @@ x.* == 1234
{#header_open|Precedence#}
x() x[] x.y
a!b
-!x -x -%x ~x &x ?x ??x
-x{} x.*
+!x -x -%x ~x &x ?x
+x{} x.* x.?
! * / % ** *%
+ - ++ +% -%
<< >>
@@ -1483,17 +1483,17 @@ test "volatile" {
assert(@typeOf(mmio_ptr) == *volatile u8);
}
-test "nullable pointers" {
- // Pointers cannot be null. If you want a null pointer, use the nullable
- // prefix `?` to make the pointer type nullable.
+test "optional pointers" {
+ // Pointers cannot be null. If you want a null pointer, use the optional
+ // prefix `?` to make the pointer type optional.
var ptr: ?*i32 = null;
var x: i32 = 1;
ptr = &x;
- assert((??ptr).* == 1);
+ assert(ptr.?.* == 1);
- // Nullable pointers are the same size as normal pointers, because pointer
+ // Optional pointers are the same size as normal pointers, because pointer
// value 0 is used as the null value.
assert(@sizeOf(?*i32) == @sizeOf(*i32));
}
@@ -1832,7 +1832,7 @@ test "linked list" {
.last = &node,
.len = 1,
};
- assert((??list2.first).data == 1234);
+ assert(list2.first.?.data == 1234);
}
{#code_end#}
{#see_also|comptime|@fieldParentPtr#}
@@ -2270,7 +2270,7 @@ fn rangeHasNumber(begin: usize, end: usize, number: usize) bool {
}
test "while null capture" {
- // Just like if expressions, while loops can take a nullable as the
+ // Just like if expressions, while loops can take an optional as the
// condition and capture the payload. When null is encountered the loop
// exits.
var sum1: u32 = 0;
@@ -2280,7 +2280,7 @@ test "while null capture" {
}
assert(sum1 == 3);
- // The else branch is allowed on nullable iteration. In this case, it will
+ // The else branch is allowed on optional iteration. In this case, it will
// be executed on the first null value encountered.
var sum2: u32 = 0;
numbers_left = 3;
@@ -2340,7 +2340,7 @@ fn typeNameLength(comptime T: type) usize {
return @typeName(T).len;
}
{#code_end#}
- {#see_also|if|Nullables|Errors|comptime|unreachable#}
+ {#see_also|if|Optionals|Errors|comptime|unreachable#}
{#header_close#}
{#header_open|for#}
{#code_begin|test|for#}
@@ -2400,7 +2400,7 @@ test "for else" {
if (value == null) {
break 9;
} else {
- sum += ??value;
+ sum += value.?;
}
} else blk: {
assert(sum == 7);
@@ -2461,7 +2461,7 @@ test "if boolean" {
assert(result == 47);
}
-test "if nullable" {
+test "if optional" {
// If expressions test for null.
const a: ?u32 = 0;
@@ -2544,7 +2544,7 @@ test "if error union" {
}
}
{#code_end#}
- {#see_also|Nullables|Errors#}
+ {#see_also|Optionals|Errors#}
{#header_close#}
{#header_open|defer#}
{#code_begin|test|defer#}
@@ -3167,24 +3167,24 @@ test "inferred error set" {
TODO
{#header_close#}
{#header_close#}
- {#header_open|Nullables#}
+ {#header_open|Optionals#}
One area that Zig provides safety without compromising efficiency or
- readability is with the nullable type.
+ readability is with the optional type.
- The question mark symbolizes the nullable type. You can convert a type to a nullable
+ The question mark symbolizes the optional type. You can convert a type to an optional
type by putting a question mark in front of it, like this:
{#code_begin|syntax#}
// normal integer
const normal_int: i32 = 1234;
-// nullable integer
-const nullable_int: ?i32 = 5678;
+// optional integer
+const optional_int: ?i32 = 5678;
{#code_end#}
- Now the variable nullable_int could be an i32, or null.
+ Now the variable optional_int could be an i32, or null.
Instead of integers, let's talk about pointers. Null references are the source of many runtime
@@ -3193,8 +3193,8 @@ const nullable_int: ?i32 = 5678;
Zig does not have them.
- Instead, you can use a nullable pointer. This secretly compiles down to a normal pointer,
- since we know we can use 0 as the null value for the nullable type. But the compiler
+ Instead, you can use an optional pointer. This secretly compiles down to a normal pointer,
+ since we know we can use 0 as the null value for the optional type. But the compiler
can check your work and make sure you don't assign null to something that can't be null.
@@ -3226,7 +3226,7 @@ fn doAThing() ?*Foo {
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
is *u8 not ?*u8. The ?? operator
- unwrapped the nullable type and therefore ptr is guaranteed to be non-null everywhere
+ unwrapped the optional type and therefore ptr is guaranteed to be non-null everywhere
it is used in the function.
@@ -3245,10 +3245,10 @@ fn doAThing() ?*Foo {
In Zig you can accomplish the same thing:
{#code_begin|syntax#}
-fn doAThing(nullable_foo: ?*Foo) void {
+fn doAThing(optional_foo: ?*Foo) void {
// do some stuff
- if (nullable_foo) |foo| {
+ if (optional_foo) |foo| {
doSomethingWithFoo(foo);
}
@@ -3257,7 +3257,7 @@ fn doAThing(nullable_foo: ?*Foo) void {
{#code_end#}
Once again, the notable thing here is that inside the if block,
- foo is no longer a nullable pointer, it is a pointer, which
+ foo is no longer an optional pointer, it is a pointer, which
cannot be null.
@@ -3267,20 +3267,20 @@ fn doAThing(nullable_foo: ?*Foo) void {
The optimizer can sometimes make better decisions knowing that pointer arguments
cannot be null.
- {#header_open|Nullable Type#}
- A nullable is created by putting ? in front of a type. You can use compile-time
- reflection to access the child type of a nullable:
+ {#header_open|Optional Type#}
+ An optional is created by putting ? in front of a type. You can use compile-time
+ reflection to access the child type of an optional:
{#code_begin|test#}
const assert = @import("std").debug.assert;
-test "nullable type" {
- // Declare a nullable and implicitly cast from null:
+test "optional type" {
+ // Declare an optional and implicitly cast from null:
var foo: ?i32 = null;
- // Implicitly cast from child type of a nullable
+ // Implicitly cast from child type of an optional
foo = 1234;
- // Use compile-time reflection to access the child type of the nullable:
+ // Use compile-time reflection to access the child type of the optional:
comptime assert(@typeOf(foo).Child == i32);
}
{#code_end#}
@@ -4888,7 +4888,7 @@ pub const TypeId = enum {
ComptimeInt,
Undefined,
Null,
- Nullable,
+ Optional,
ErrorUnion,
Error,
Enum,
@@ -4922,7 +4922,7 @@ pub const TypeInfo = union(TypeId) {
ComptimeInt: void,
Undefined: void,
Null: void,
- Nullable: Nullable,
+ Optional: Optional,
ErrorUnion: ErrorUnion,
ErrorSet: ErrorSet,
Enum: Enum,
@@ -4975,7 +4975,7 @@ pub const TypeInfo = union(TypeId) {
defs: []Definition,
};
- pub const Nullable = struct {
+ pub const Optional = struct {
child: type,
};
@@ -5366,8 +5366,8 @@ comptime {
At compile-time:
{#code_begin|test_err|unable to unwrap null#}
comptime {
- const nullable_number: ?i32 = null;
- const number = ??nullable_number;
+ const optional_number: ?i32 = null;
+ const number = optional_number.?;
}
{#code_end#}
At runtime crashes with the message attempt to unwrap null and a stack trace.
@@ -5376,9 +5376,9 @@ comptime {
{#code_begin|exe|test#}
const warn = @import("std").debug.warn;
pub fn main() void {
- const nullable_number: ?i32 = null;
+ const optional_number: ?i32 = null;
- if (nullable_number) |number| {
+ if (optional_number) |number| {
warn("got number: {}\n", number);
} else {
warn("it's null\n");
@@ -5939,9 +5939,9 @@ AsmInputItem = "[" Symbol "]" String "(" Expression ")"
AsmClobbers= ":" list(String, ",")
-UnwrapExpression = BoolOrExpression (UnwrapNullable | UnwrapError) | BoolOrExpression
+UnwrapExpression = BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
-UnwrapNullable = "??" Expression
+UnwrapOptional = "??" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
@@ -6015,12 +6015,10 @@ MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%"
PrefixOpExpression = PrefixOp TypeExpr | SuffixOpExpression
-SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | PtrDerefExpression)
+SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?")
FieldAccessExpression = "." Symbol
-PtrDerefExpression = ".*"
-
FnCallExpression = "(" list(Expression, ",") ")"
ArrayAccessExpression = "[" Expression "]"
@@ -6033,7 +6031,7 @@ ContainerInitBody = list(StructLiteralField, ",") | list(Expression, ",")
StructLiteralField = "." Symbol "=" Expression
-PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await"
+PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "-%" | "try" | "await"
PrimaryExpression = Integer | Float | String | CharLiteral | KeywordLiteral | GroupedExpression | BlockExpression(BlockOrExpression) | Symbol | ("@" Symbol FnCallExpression) | ArrayType | FnProto | AsmExpression | ContainerDecl | ("continue" option(":" Symbol)) | ErrorSetDecl | PromiseType
diff --git a/example/cat/main.zig b/example/cat/main.zig
index 1b34cb22eb..27690d2695 100644
--- a/example/cat/main.zig
+++ b/example/cat/main.zig
@@ -7,7 +7,7 @@ const allocator = std.debug.global_allocator;
pub fn main() !void {
var args_it = os.args();
- const exe = try unwrapArg(??args_it.next(allocator));
+ const exe = try unwrapArg(args_it.next(allocator).?);
var catted_anything = false;
var stdout_file = try io.getStdOut();
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index df2c04ef1f..dc89483213 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -99,7 +99,7 @@ pub const Args = struct {
error.ArgumentNotInAllowedSet => {
std.debug.warn("argument '{}' is invalid for flag '{}'\n", args[i], arg);
std.debug.warn("allowed options are ");
- for (??flag.allowed_set) |possible| {
+ for (flag.allowed_set.?) |possible| {
std.debug.warn("'{}' ", possible);
}
std.debug.warn("\n");
@@ -276,14 +276,14 @@ test "parse arguments" {
debug.assert(!args.present("help2"));
debug.assert(!args.present("init"));
- debug.assert(mem.eql(u8, ??args.single("build-file"), "build.zig"));
- debug.assert(mem.eql(u8, ??args.single("color"), "on"));
+ debug.assert(mem.eql(u8, args.single("build-file").?, "build.zig"));
+ debug.assert(mem.eql(u8, args.single("color").?, "on"));
- const objects = ??args.many("object");
+ const objects = args.many("object").?;
debug.assert(mem.eql(u8, objects[0], "obj1"));
debug.assert(mem.eql(u8, objects[1], "obj2"));
- debug.assert(mem.eql(u8, ??args.single("library"), "lib2"));
+ debug.assert(mem.eql(u8, args.single("library").?, "lib2"));
const pos = args.positionals.toSliceConst();
debug.assert(mem.eql(u8, pos[0], "build"));
diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig
index 16c359adcf..391a92cd63 100644
--- a/src-self-hosted/llvm.zig
+++ b/src-self-hosted/llvm.zig
@@ -8,6 +8,6 @@ pub const ContextRef = removeNullability(c.LLVMContextRef);
pub const BuilderRef = removeNullability(c.LLVMBuilderRef);
fn removeNullability(comptime T: type) type {
- comptime assert(@typeId(T) == builtin.TypeId.Nullable);
+ comptime assert(@typeId(T) == builtin.TypeId.Optional);
return T.Child;
}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index a264b5484a..64734f077a 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -490,7 +490,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
try stderr.print("encountered --pkg-end with no matching --pkg-begin\n");
os.exit(1);
}
- cur_pkg = ??cur_pkg.parent;
+ cur_pkg = cur_pkg.parent.?;
}
}
@@ -514,7 +514,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
},
}
- const basename = os.path.basename(??in_file);
+ const basename = os.path.basename(in_file.?);
var it = mem.split(basename, ".");
const root_name = it.next() ?? {
try stderr.write("file name cannot be empty\n");
@@ -523,12 +523,12 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const asm_a = flags.many("assembly");
const obj_a = flags.many("object");
- if (in_file == null and (obj_a == null or (??obj_a).len == 0) and (asm_a == null or (??asm_a).len == 0)) {
+ if (in_file == null and (obj_a == null or obj_a.?.len == 0) and (asm_a == null or asm_a.?.len == 0)) {
try stderr.write("Expected source file argument or at least one --object or --assembly argument\n");
os.exit(1);
}
- if (out_type == Module.Kind.Obj and (obj_a != null and (??obj_a).len != 0)) {
+ if (out_type == Module.Kind.Obj and (obj_a != null and obj_a.?.len != 0)) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 14a44ea768..2a5a0ad740 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -145,8 +145,8 @@ enum ConstPtrSpecial {
// emit a binary with a compile time known address.
// In this case index is the numeric address value.
// We also use this for null pointer. We need the data layout for ConstCastOnly == true
- // types to be the same, so all nullables of pointer types use x_ptr
- // instead of x_nullable
+ // types to be the same, so all optionals of pointer types use x_ptr
+ // instead of x_optional
ConstPtrSpecialHardCodedAddr,
// This means that the pointer represents memory of assigning to _.
// That is, storing discards the data, and loading is invalid.
@@ -222,10 +222,10 @@ enum RuntimeHintErrorUnion {
RuntimeHintErrorUnionNonError,
};
-enum RuntimeHintMaybe {
- RuntimeHintMaybeUnknown,
- RuntimeHintMaybeNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known.
- RuntimeHintMaybeNonNull,
+enum RuntimeHintOptional {
+ RuntimeHintOptionalUnknown,
+ RuntimeHintOptionalNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known.
+ RuntimeHintOptionalNonNull,
};
enum RuntimeHintPtr {
@@ -254,7 +254,7 @@ struct ConstExprValue {
bool x_bool;
ConstBoundFnValue x_bound_fn;
TypeTableEntry *x_type;
- ConstExprValue *x_nullable;
+ ConstExprValue *x_optional;
ConstErrValue x_err_union;
ErrorTableEntry *x_err_set;
BigInt x_enum_tag;
@@ -268,7 +268,7 @@ struct ConstExprValue {
// populated if special == ConstValSpecialRuntime
RuntimeHintErrorUnion rh_error_union;
- RuntimeHintMaybe rh_maybe;
+ RuntimeHintOptional rh_maybe;
RuntimeHintPtr rh_ptr;
} data;
};
@@ -556,7 +556,7 @@ enum BinOpType {
BinOpTypeMultWrap,
BinOpTypeDiv,
BinOpTypeMod,
- BinOpTypeUnwrapMaybe,
+ BinOpTypeUnwrapOptional,
BinOpTypeArrayCat,
BinOpTypeArrayMult,
BinOpTypeErrorUnion,
@@ -623,8 +623,8 @@ enum PrefixOp {
PrefixOpBinNot,
PrefixOpNegation,
PrefixOpNegationWrap,
- PrefixOpMaybe,
- PrefixOpUnwrapMaybe,
+ PrefixOpOptional,
+ PrefixOpUnwrapOptional,
PrefixOpAddrOf,
};
@@ -1052,7 +1052,7 @@ struct TypeTableEntryStruct {
HashMap fields_by_name;
};
-struct TypeTableEntryMaybe {
+struct TypeTableEntryOptional {
TypeTableEntry *child_type;
};
@@ -1175,7 +1175,7 @@ enum TypeTableEntryId {
TypeTableEntryIdComptimeInt,
TypeTableEntryIdUndefined,
TypeTableEntryIdNull,
- TypeTableEntryIdMaybe,
+ TypeTableEntryIdOptional,
TypeTableEntryIdErrorUnion,
TypeTableEntryIdErrorSet,
TypeTableEntryIdEnum,
@@ -1206,7 +1206,7 @@ struct TypeTableEntry {
TypeTableEntryFloat floating;
TypeTableEntryArray array;
TypeTableEntryStruct structure;
- TypeTableEntryMaybe maybe;
+ TypeTableEntryOptional maybe;
TypeTableEntryErrorUnion error_union;
TypeTableEntryErrorSet error_set;
TypeTableEntryEnum enumeration;
@@ -1402,7 +1402,7 @@ enum PanicMsgId {
PanicMsgIdRemainderDivisionByZero,
PanicMsgIdExactDivisionRemainder,
PanicMsgIdSliceWidenRemainder,
- PanicMsgIdUnwrapMaybeFail,
+ PanicMsgIdUnwrapOptionalFail,
PanicMsgIdInvalidErrorCode,
PanicMsgIdIncorrectAlignment,
PanicMsgIdBadUnionField,
@@ -2016,8 +2016,8 @@ enum IrInstructionId {
IrInstructionIdAsm,
IrInstructionIdSizeOf,
IrInstructionIdTestNonNull,
- IrInstructionIdUnwrapMaybe,
- IrInstructionIdMaybeWrap,
+ IrInstructionIdUnwrapOptional,
+ IrInstructionIdOptionalWrap,
IrInstructionIdUnionTag,
IrInstructionIdClz,
IrInstructionIdCtz,
@@ -2184,7 +2184,7 @@ enum IrUnOp {
IrUnOpNegation,
IrUnOpNegationWrap,
IrUnOpDereference,
- IrUnOpMaybe,
+ IrUnOpOptional,
};
struct IrInstructionUnOp {
@@ -2487,7 +2487,7 @@ struct IrInstructionTestNonNull {
IrInstruction *value;
};
-struct IrInstructionUnwrapMaybe {
+struct IrInstructionUnwrapOptional {
IrInstruction base;
IrInstruction *value;
@@ -2745,7 +2745,7 @@ struct IrInstructionUnwrapErrPayload {
bool safety_check_on;
};
-struct IrInstructionMaybeWrap {
+struct IrInstructionOptionalWrap {
IrInstruction base;
IrInstruction *value;
@@ -2954,10 +2954,10 @@ struct IrInstructionExport {
struct IrInstructionErrorReturnTrace {
IrInstruction base;
- enum Nullable {
+ enum Optional {
Null,
NonNull,
- } nullable;
+ } optional;
};
struct IrInstructionErrorUnion {
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 16b2cb0590..ed261148ea 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -236,7 +236,7 @@ bool type_is_complete(TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -272,7 +272,7 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -520,7 +520,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
} else {
ensure_complete_type(g, child_type);
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdMaybe);
+ TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional);
assert(child_type->type_ref || child_type->zero_bits);
assert(child_type->di_type);
entry->is_copyable = type_is_copyable(g, child_type);
@@ -1361,7 +1361,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) {
return type_entry->data.structure.layout == ContainerLayoutPacked;
case TypeTableEntryIdUnion:
return type_entry->data.unionation.layout == ContainerLayoutPacked;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
return type_is_codegen_pointer(child_type);
@@ -1415,7 +1415,7 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) {
return type_allowed_in_extern(g, type_entry->data.pointer.child_type);
case TypeTableEntryIdStruct:
return type_entry->data.structure.layout == ContainerLayoutExtern || type_entry->data.structure.layout == ContainerLayoutPacked;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn;
@@ -1538,7 +1538,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -1632,7 +1632,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -2985,8 +2985,8 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
- TypeTableEntry *nullable_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g));
- if (fn_type_id->param_info[1].type != nullable_ptr_to_stack_trace_type) {
+ TypeTableEntry *optional_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g));
+ if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
@@ -3368,7 +3368,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -3746,7 +3746,7 @@ static bool is_container(TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -3805,7 +3805,7 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -3824,7 +3824,7 @@ TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type) {
if (type->id == TypeTableEntryIdPointer) return type;
if (type->id == TypeTableEntryIdFn) return type;
if (type->id == TypeTableEntryIdPromise) return type;
- if (type->id == TypeTableEntryIdMaybe) {
+ if (type->id == TypeTableEntryIdOptional) {
if (type->data.maybe.child_type->id == TypeTableEntryIdPointer) return type->data.maybe.child_type;
if (type->data.maybe.child_type->id == TypeTableEntryIdFn) return type->data.maybe.child_type;
if (type->data.maybe.child_type->id == TypeTableEntryIdPromise) return type->data.maybe.child_type;
@@ -4331,7 +4331,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) {
return type_has_bits(type_entry);
case TypeTableEntryIdErrorUnion:
return type_has_bits(type_entry->data.error_union.payload_type);
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return type_has_bits(type_entry->data.maybe.child_type) &&
!type_is_codegen_pointer(type_entry->data.maybe.child_type);
case TypeTableEntryIdUnion:
@@ -4709,12 +4709,12 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case TypeTableEntryIdUnion:
// TODO better hashing algorithm
return 2709806591;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
if (get_codegen_ptr_type(const_val->type) != nullptr) {
return hash_const_val(const_val) * 1992916303;
} else {
- if (const_val->data.x_nullable) {
- return hash_const_val(const_val->data.x_nullable) * 1992916303;
+ if (const_val->data.x_optional) {
+ return hash_const_val(const_val->data.x_optional) * 1992916303;
} else {
return 4016830364;
}
@@ -4817,12 +4817,12 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
}
return false;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
if (get_codegen_ptr_type(value->type) != nullptr)
return value->data.x_ptr.mut == ConstPtrMutComptimeVar;
- if (value->data.x_nullable == nullptr)
+ if (value->data.x_optional == nullptr)
return false;
- return can_mutate_comptime_var_state(value->data.x_nullable);
+ return can_mutate_comptime_var_state(value->data.x_optional);
case TypeTableEntryIdErrorUnion:
if (value->data.x_err_union.err != nullptr)
@@ -4869,7 +4869,7 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) {
case TypeTableEntryIdUnion:
return false;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return return_type_is_cacheable(return_type->data.maybe.child_type);
case TypeTableEntryIdErrorUnion:
@@ -4978,7 +4978,7 @@ bool type_requires_comptime(TypeTableEntry *type_entry) {
case TypeTableEntryIdUnion:
assert(type_has_zero_bits_known(type_entry));
return type_entry->data.unionation.requires_comptime;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return type_requires_comptime(type_entry->data.maybe.child_type);
case TypeTableEntryIdErrorUnion:
return type_requires_comptime(type_entry->data.error_union.payload_type);
@@ -5460,13 +5460,13 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
zig_panic("TODO");
case TypeTableEntryIdNull:
zig_panic("TODO");
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
if (get_codegen_ptr_type(a->type) != nullptr)
return const_values_equal_ptr(a, b);
- if (a->data.x_nullable == nullptr || b->data.x_nullable == nullptr) {
- return (a->data.x_nullable == nullptr && b->data.x_nullable == nullptr);
+ if (a->data.x_optional == nullptr || b->data.x_optional == nullptr) {
+ return (a->data.x_optional == nullptr && b->data.x_optional == nullptr);
} else {
- return const_values_equal(a->data.x_nullable, b->data.x_nullable);
+ return const_values_equal(a->data.x_optional, b->data.x_optional);
}
case TypeTableEntryIdErrorUnion:
zig_panic("TODO");
@@ -5708,12 +5708,12 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "undefined");
return;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
if (get_codegen_ptr_type(const_val->type) != nullptr)
return render_const_val_ptr(g, buf, const_val, type_entry->data.maybe.child_type);
- if (const_val->data.x_nullable) {
- render_const_value(g, buf, const_val->data.x_nullable);
+ if (const_val->data.x_optional) {
+ render_const_value(g, buf, const_val->data.x_optional);
} else {
buf_appendf(buf, "null");
}
@@ -5819,7 +5819,7 @@ uint32_t type_id_hash(TypeId x) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
case TypeTableEntryIdUnion:
@@ -5865,7 +5865,7 @@ bool type_id_eql(TypeId a, TypeId b) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdPromise:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -5987,7 +5987,7 @@ static const TypeTableEntryId all_type_ids[] = {
TypeTableEntryIdComptimeInt,
TypeTableEntryIdUndefined,
TypeTableEntryIdNull,
- TypeTableEntryIdMaybe,
+ TypeTableEntryIdOptional,
TypeTableEntryIdErrorUnion,
TypeTableEntryIdErrorSet,
TypeTableEntryIdEnum,
@@ -6042,7 +6042,7 @@ size_t type_id_index(TypeTableEntry *entry) {
return 11;
case TypeTableEntryIdNull:
return 12;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return 13;
case TypeTableEntryIdErrorUnion:
return 14;
@@ -6100,8 +6100,8 @@ const char *type_id_name(TypeTableEntryId id) {
return "Undefined";
case TypeTableEntryIdNull:
return "Null";
- case TypeTableEntryIdMaybe:
- return "Nullable";
+ case TypeTableEntryIdOptional:
+ return "Optional";
case TypeTableEntryIdErrorUnion:
return "ErrorUnion";
case TypeTableEntryIdErrorSet:
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 3785cb6ca1..2c8c03b226 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) {
case BinOpTypeAssignBitXor: return "^=";
case BinOpTypeAssignBitOr: return "|=";
case BinOpTypeAssignMergeErrorSets: return "||=";
- case BinOpTypeUnwrapMaybe: return "??";
+ case BinOpTypeUnwrapOptional: return "??";
case BinOpTypeArrayCat: return "++";
case BinOpTypeArrayMult: return "**";
case BinOpTypeErrorUnion: return "!";
@@ -66,8 +66,8 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpNegationWrap: return "-%";
case PrefixOpBoolNot: return "!";
case PrefixOpBinNot: return "~";
- case PrefixOpMaybe: return "?";
- case PrefixOpUnwrapMaybe: return "??";
+ case PrefixOpOptional: return "?";
+ case PrefixOpUnwrapOptional: return "??";
case PrefixOpAddrOf: return "&";
}
zig_unreachable();
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 65b465a519..da08ecfc9e 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -865,7 +865,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("exact division produced remainder");
case PanicMsgIdSliceWidenRemainder:
return buf_create_from_str("slice widening size mismatch");
- case PanicMsgIdUnwrapMaybeFail:
+ case PanicMsgIdUnwrapOptionalFail:
return buf_create_from_str("attempt to unwrap null");
case PanicMsgIdUnreachable:
return buf_create_from_str("reached unreachable code");
@@ -2734,7 +2734,7 @@ static LLVMValueRef ir_render_un_op(CodeGen *g, IrExecutable *executable, IrInst
switch (op_id) {
case IrUnOpInvalid:
- case IrUnOpMaybe:
+ case IrUnOpOptional:
case IrUnOpDereference:
zig_unreachable();
case IrUnOpNegation:
@@ -3333,7 +3333,7 @@ static LLVMValueRef ir_render_asm(CodeGen *g, IrExecutable *executable, IrInstru
}
static LLVMValueRef gen_non_null_bit(CodeGen *g, TypeTableEntry *maybe_type, LLVMValueRef maybe_handle) {
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
if (child_type->zero_bits) {
return maybe_handle;
@@ -3355,23 +3355,23 @@ static LLVMValueRef ir_render_test_non_null(CodeGen *g, IrExecutable *executable
}
static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable,
- IrInstructionUnwrapMaybe *instruction)
+ IrInstructionUnwrapOptional *instruction)
{
TypeTableEntry *ptr_type = instruction->value->value.type;
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *maybe_type = ptr_type->data.pointer.child_type;
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
LLVMValueRef maybe_ptr = ir_llvm_value(g, instruction->value);
LLVMValueRef maybe_handle = get_handle_value(g, maybe_ptr, maybe_type, ptr_type);
if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on) {
LLVMValueRef non_null_bit = gen_non_null_bit(g, maybe_type, maybe_handle);
- LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeOk");
- LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeFail");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalOk");
+ LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalFail");
LLVMBuildCondBr(g->builder, non_null_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
- gen_safety_crash(g, PanicMsgIdUnwrapMaybeFail);
+ gen_safety_crash(g, PanicMsgIdUnwrapOptionalFail);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@@ -3593,17 +3593,17 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
} else if (target_type->id == TypeTableEntryIdFn) {
align_bytes = target_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPointer)
{
align_bytes = target_type->data.maybe.child_type->data.pointer.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdFn)
{
align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPromise)
{
zig_panic("TODO audit this function");
@@ -3705,7 +3705,7 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrIn
success_order, failure_order, instruction->is_weak);
TypeTableEntry *maybe_type = instruction->base.value.type;
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
if (type_is_codegen_pointer(child_type)) {
@@ -4115,10 +4115,10 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu
}
}
-static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionMaybeWrap *instruction) {
+static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionOptionalWrap *instruction) {
TypeTableEntry *wanted_type = instruction->base.value.type;
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = wanted_type->data.maybe.child_type;
@@ -4699,8 +4699,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_asm(g, executable, (IrInstructionAsm *)instruction);
case IrInstructionIdTestNonNull:
return ir_render_test_non_null(g, executable, (IrInstructionTestNonNull *)instruction);
- case IrInstructionIdUnwrapMaybe:
- return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapOptional *)instruction);
case IrInstructionIdClz:
return ir_render_clz(g, executable, (IrInstructionClz *)instruction);
case IrInstructionIdCtz:
@@ -4741,8 +4741,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_unwrap_err_code(g, executable, (IrInstructionUnwrapErrCode *)instruction);
case IrInstructionIdUnwrapErrPayload:
return ir_render_unwrap_err_payload(g, executable, (IrInstructionUnwrapErrPayload *)instruction);
- case IrInstructionIdMaybeWrap:
- return ir_render_maybe_wrap(g, executable, (IrInstructionMaybeWrap *)instruction);
+ case IrInstructionIdOptionalWrap:
+ return ir_render_maybe_wrap(g, executable, (IrInstructionOptionalWrap *)instruction);
case IrInstructionIdErrWrapCode:
return ir_render_err_wrap_code(g, executable, (IrInstructionErrWrapCode *)instruction);
case IrInstructionIdErrWrapPayload:
@@ -4972,7 +4972,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
}
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdPromise:
{
LLVMValueRef ptr_val = gen_const_val(g, const_val, "");
@@ -5137,19 +5137,19 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
} else {
return LLVMConstNull(LLVMInt1Type());
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
if (child_type->zero_bits) {
- return LLVMConstInt(LLVMInt1Type(), const_val->data.x_nullable ? 1 : 0, false);
+ return LLVMConstInt(LLVMInt1Type(), const_val->data.x_optional ? 1 : 0, false);
} else if (type_is_codegen_pointer(child_type)) {
return gen_const_val_ptr(g, const_val, name);
} else {
LLVMValueRef child_val;
LLVMValueRef maybe_val;
bool make_unnamed_struct;
- if (const_val->data.x_nullable) {
- child_val = gen_const_val(g, const_val->data.x_nullable, "");
+ if (const_val->data.x_optional) {
+ child_val = gen_const_val(g, const_val->data.x_optional, "");
maybe_val = LLVMConstAllOnes(LLVMInt1Type());
make_unnamed_struct = is_llvm_value_unnamed_type(const_val->type, child_val);
@@ -5755,8 +5755,8 @@ static void do_code_gen(CodeGen *g) {
} else if (instruction->id == IrInstructionIdSlice) {
IrInstructionSlice *slice_instruction = (IrInstructionSlice *)instruction;
slot = &slice_instruction->tmp_ptr;
- } else if (instruction->id == IrInstructionIdMaybeWrap) {
- IrInstructionMaybeWrap *maybe_wrap_instruction = (IrInstructionMaybeWrap *)instruction;
+ } else if (instruction->id == IrInstructionIdOptionalWrap) {
+ IrInstructionOptionalWrap *maybe_wrap_instruction = (IrInstructionOptionalWrap *)instruction;
slot = &maybe_wrap_instruction->tmp_ptr;
} else if (instruction->id == IrInstructionIdErrWrapPayload) {
IrInstructionErrWrapPayload *err_wrap_payload_instruction = (IrInstructionErrWrapPayload *)instruction;
@@ -6511,7 +6511,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" ComptimeInt: void,\n"
" Undefined: void,\n"
" Null: void,\n"
- " Nullable: Nullable,\n"
+ " Optional: Optional,\n"
" ErrorUnion: ErrorUnion,\n"
" ErrorSet: ErrorSet,\n"
" Enum: Enum,\n"
@@ -6570,7 +6570,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) {
" defs: []Definition,\n"
" };\n"
"\n"
- " pub const Nullable = struct {\n"
+ " pub const Optional = struct {\n"
" child: type,\n"
" };\n"
"\n"
@@ -7145,7 +7145,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry
case TypeTableEntryIdArray:
prepend_c_type_to_decl_list(g, gen_h, type_entry->data.array.child_type);
return;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
prepend_c_type_to_decl_list(g, gen_h, type_entry->data.maybe.child_type);
return;
case TypeTableEntryIdFn:
@@ -7234,7 +7234,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf
buf_appendf(out_buf, "%s%s *", const_str, buf_ptr(&child_buf));
break;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
if (child_type->zero_bits) {
@@ -7448,7 +7448,7 @@ static void gen_h_file(CodeGen *g) {
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdFn:
case TypeTableEntryIdPromise:
zig_unreachable();
diff --git a/src/ir.cpp b/src/ir.cpp
index 10098f3c32..02606fc4aa 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -47,7 +47,7 @@ enum ConstCastResultId {
ConstCastResultIdErrSetGlobal,
ConstCastResultIdPointerChild,
ConstCastResultIdSliceChild,
- ConstCastResultIdNullableChild,
+ ConstCastResultIdOptionalChild,
ConstCastResultIdErrorUnionPayload,
ConstCastResultIdErrorUnionErrorSet,
ConstCastResultIdFnAlign,
@@ -86,7 +86,7 @@ struct ConstCastOnly {
ConstCastErrSetMismatch error_set;
ConstCastOnly *pointer_child;
ConstCastOnly *slice_child;
- ConstCastOnly *nullable_child;
+ ConstCastOnly *optional_child;
ConstCastOnly *error_union_payload;
ConstCastOnly *error_union_error_set;
ConstCastOnly *return_type;
@@ -372,8 +372,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestNonNull *) {
return IrInstructionIdTestNonNull;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapMaybe *) {
- return IrInstructionIdUnwrapMaybe;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapOptional *) {
+ return IrInstructionIdUnwrapOptional;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionClz *) {
@@ -524,8 +524,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapErrPayload
return IrInstructionIdUnwrapErrPayload;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMaybeWrap *) {
- return IrInstructionIdMaybeWrap;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionOptionalWrap *) {
+ return IrInstructionIdOptionalWrap;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionErrWrapPayload *) {
@@ -1571,7 +1571,7 @@ static IrInstruction *ir_build_test_nonnull_from(IrBuilder *irb, IrInstruction *
static IrInstruction *ir_build_unwrap_maybe(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value,
bool safety_check_on)
{
- IrInstructionUnwrapMaybe *instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionUnwrapOptional *instruction = ir_build_instruction(irb, scope, source_node);
instruction->value = value;
instruction->safety_check_on = safety_check_on;
@@ -1590,7 +1590,7 @@ static IrInstruction *ir_build_unwrap_maybe_from(IrBuilder *irb, IrInstruction *
}
static IrInstruction *ir_build_maybe_wrap(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) {
- IrInstructionMaybeWrap *instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionOptionalWrap *instruction = ir_build_instruction(irb, scope, source_node);
instruction->value = value;
ir_ref_instruction(value, irb->current_basic_block);
@@ -2496,9 +2496,9 @@ static IrInstruction *ir_build_arg_type(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
-static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Nullable nullable) {
+static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Optional optional) {
IrInstructionErrorReturnTrace *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->nullable = nullable;
+ instruction->optional = optional;
return &instruction->base;
}
@@ -3295,9 +3295,9 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As
is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_non_null);
}
- IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "MaybeNonNull");
- IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "MaybeNull");
- IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "MaybeEnd");
+ IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull");
+ IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull");
+ IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd");
ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, null_block);
@@ -3426,7 +3426,7 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult);
case BinOpTypeMergeErrorSets:
return ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets);
- case BinOpTypeUnwrapMaybe:
+ case BinOpTypeUnwrapOptional:
return ir_gen_maybe_ok_or(irb, scope, node);
case BinOpTypeErrorUnion:
return ir_gen_error_union(irb, scope, node);
@@ -4703,9 +4703,9 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval);
case PrefixOpNegationWrap:
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval);
- case PrefixOpMaybe:
- return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval);
- case PrefixOpUnwrapMaybe:
+ case PrefixOpOptional:
+ return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval);
+ case PrefixOpUnwrapOptional:
return ir_gen_maybe_assert_ok(irb, scope, node, lval);
case PrefixOpAddrOf: {
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
@@ -5370,9 +5370,9 @@ static IrInstruction *ir_gen_test_expr(IrBuilder *irb, Scope *scope, AstNode *no
IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node, maybe_val_ptr);
IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_val);
- IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "MaybeThen");
- IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "MaybeElse");
- IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "MaybeEndIf");
+ IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "OptionalThen");
+ IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "OptionalElse");
+ IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "OptionalEndIf");
IrInstruction *is_comptime;
if (ir_should_inline(irb->exec, scope)) {
@@ -7519,7 +7519,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
}
} else if (const_val_fits_in_num_lit(const_val, other_type)) {
return true;
- } else if (other_type->id == TypeTableEntryIdMaybe) {
+ } else if (other_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *child_type = other_type->data.maybe.child_type;
if (const_val_fits_in_num_lit(const_val, child_type)) {
return true;
@@ -7663,7 +7663,7 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
// * and [*] can do a const-cast-only to ?* and ?[*], respectively
- if (expected_type->id == TypeTableEntryIdMaybe &&
+ if (expected_type->id == TypeTableEntryIdOptional &&
expected_type->data.maybe.child_type->id == TypeTableEntryIdPointer &&
actual_type->id == TypeTableEntryIdPointer)
{
@@ -7718,12 +7718,12 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// maybe
- if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) {
+ if (expected_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.maybe.child_type, actual_type->data.maybe.child_type, source_node);
if (child.id != ConstCastResultIdOk) {
- result.id = ConstCastResultIdNullableChild;
- result.data.nullable_child = allocate_nonzero(1);
- *result.data.nullable_child = child;
+ result.id = ConstCastResultIdOptionalChild;
+ result.data.optional_child = allocate_nonzero(1);
+ *result.data.optional_child = child;
}
return result;
}
@@ -7925,7 +7925,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit conversion from ?T to ?U
- if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) {
+ if (expected_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type,
actual_type->data.maybe.child_type, value);
if (res != ImplicitCastMatchResultNo)
@@ -7933,7 +7933,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit conversion from non maybe type to maybe type
- if (expected_type->id == TypeTableEntryIdMaybe) {
+ if (expected_type->id == TypeTableEntryIdOptional) {
ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type,
actual_type, value);
if (res != ImplicitCastMatchResultNo)
@@ -7941,7 +7941,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit conversion from null literal to maybe type
- if (expected_type->id == TypeTableEntryIdMaybe &&
+ if (expected_type->id == TypeTableEntryIdOptional &&
actual_type->id == TypeTableEntryIdNull)
{
return ImplicitCastMatchResultYes;
@@ -7963,7 +7963,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicit conversion from T to U!?T
if (expected_type->id == TypeTableEntryIdErrorUnion &&
- expected_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe &&
+ expected_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
ir_types_match_with_implicit_cast(ira,
expected_type->data.error_union.payload_type->data.maybe.child_type,
actual_type, value))
@@ -8072,7 +8072,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit [N]T to ?[]const T
- if (expected_type->id == TypeTableEntryIdMaybe &&
+ if (expected_type->id == TypeTableEntryIdOptional &&
is_slice(expected_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
@@ -8552,13 +8552,13 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
continue;
}
- if (prev_type->id == TypeTableEntryIdMaybe &&
+ if (prev_type->id == TypeTableEntryIdOptional &&
types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type, source_node).id == ConstCastResultIdOk)
{
continue;
}
- if (cur_type->id == TypeTableEntryIdMaybe &&
+ if (cur_type->id == TypeTableEntryIdOptional &&
types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type, source_node).id == ConstCastResultIdOk)
{
prev_inst = cur_inst;
@@ -8711,7 +8711,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
ir_add_error_node(ira, source_node,
buf_sprintf("unable to make maybe out of number literal"));
return ira->codegen->builtin_types.entry_invalid;
- } else if (prev_inst->value.type->id == TypeTableEntryIdMaybe) {
+ } else if (prev_inst->value.type->id == TypeTableEntryIdOptional) {
return prev_inst->value.type;
} else {
return get_maybe_type(ira->codegen, prev_inst->value.type);
@@ -9193,7 +9193,7 @@ static FnTableEntry *ir_resolve_fn(IrAnalyze *ira, IrInstruction *fn_value) {
}
static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) {
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
if (instr_is_comptime(value)) {
TypeTableEntry *payload_type = wanted_type->data.maybe.child_type;
@@ -9211,7 +9211,7 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc
if (get_codegen_ptr_type(wanted_type) != nullptr) {
copy_const_val(&const_instruction->base.value, val, val->data.x_ptr.mut == ConstPtrMutComptimeConst);
} else {
- const_instruction->base.value.data.x_nullable = val;
+ const_instruction->base.value.data.x_optional = val;
}
const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
@@ -9219,7 +9219,7 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc
IrInstruction *result = ir_build_maybe_wrap(&ira->new_irb, source_instr->scope, source_instr->source_node, value);
result->value.type = wanted_type;
- result->value.data.rh_maybe = RuntimeHintMaybeNonNull;
+ result->value.data.rh_maybe = RuntimeHintOptionalNonNull;
ir_add_alloca(ira, result, wanted_type);
return result;
}
@@ -9361,7 +9361,7 @@ static IrInstruction *ir_analyze_cast_ref(IrAnalyze *ira, IrInstruction *source_
}
static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) {
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
assert(instr_is_comptime(value));
ConstExprValue *val = ir_resolve_const(ira, value, UndefBad);
@@ -9373,7 +9373,7 @@ static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *so
const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
const_instruction->base.value.data.x_ptr.data.hard_coded_addr.addr = 0;
} else {
- const_instruction->base.value.data.x_nullable = nullptr;
+ const_instruction->base.value.data.x_optional = nullptr;
}
const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
@@ -9992,7 +9992,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit cast from [N]T to ?[]const N
- if (wanted_type->id == TypeTableEntryIdMaybe &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
is_slice(wanted_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
@@ -10091,7 +10091,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// explicit cast from T to ?T
// note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
- if (wanted_type->id == TypeTableEntryIdMaybe) {
+ if (wanted_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) {
return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
@@ -10120,7 +10120,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit cast from null literal to maybe type
- if (wanted_type->id == TypeTableEntryIdMaybe &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
actual_type->id == TypeTableEntryIdNull)
{
return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
@@ -10173,8 +10173,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// explicit cast from T to E!?T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
- wanted_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe &&
- actual_type->id != TypeTableEntryIdMaybe)
+ wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
+ actual_type->id != TypeTableEntryIdOptional)
{
TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk ||
@@ -10737,13 +10737,13 @@ static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) {
}
}
-static bool nullable_value_is_null(ConstExprValue *val) {
+static bool optional_value_is_null(ConstExprValue *val) {
assert(val->special == ConstValSpecialStatic);
if (get_codegen_ptr_type(val->type) != nullptr) {
return val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
val->data.x_ptr.data.hard_coded_addr.addr == 0;
} else {
- return val->data.x_nullable == nullptr;
+ return val->data.x_optional == nullptr;
}
}
@@ -10755,8 +10755,8 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
IrBinOp op_id = bin_op_instruction->op_id;
bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);
if (is_equality_cmp &&
- ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdMaybe) ||
- (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdMaybe) ||
+ ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdOptional) ||
+ (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdOptional) ||
(op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull)))
{
if (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull) {
@@ -10776,7 +10776,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
ConstExprValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad);
if (!maybe_val)
return ira->codegen->builtin_types.entry_invalid;
- bool is_null = nullable_value_is_null(maybe_val);
+ bool is_null = optional_value_is_null(maybe_val);
ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base);
out_val->data.x_bool = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
return ira->codegen->builtin_types.entry_bool;
@@ -10925,7 +10925,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
case TypeTableEntryIdStruct:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdUnion:
ir_add_error_node(ira, source_node,
@@ -11998,7 +11998,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdNamespace:
@@ -12022,7 +12022,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
zig_panic("TODO export const value of type %s", buf_ptr(&target->value.type->name));
@@ -12049,24 +12049,24 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
IrInstructionErrorReturnTrace *instruction)
{
- if (instruction->nullable == IrInstructionErrorReturnTrace::Null) {
+ if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
- TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type);
+ TypeTableEntry *optional_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- assert(get_codegen_ptr_type(nullable_type) != nullptr);
+ assert(get_codegen_ptr_type(optional_type) != nullptr);
out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
out_val->data.x_ptr.data.hard_coded_addr.addr = 0;
- return nullable_type;
+ return optional_type;
}
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, instruction->nullable);
+ instruction->base.source_node, instruction->optional);
ir_link_new_instruction(new_instruction, &instruction->base);
- return nullable_type;
+ return optional_type;
} else {
assert(ira->codegen->have_err_ret_tracing);
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, instruction->nullable);
+ instruction->base.source_node, instruction->optional);
ir_link_new_instruction(new_instruction, &instruction->base);
return get_ptr_to_stack_trace_type(ira->codegen);
}
@@ -12998,7 +12998,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -13017,7 +13017,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdOpaque:
ir_add_error_node(ira, un_op_instruction->base.source_node,
- buf_sprintf("type '%s' not nullable", buf_ptr(&type_entry->name)));
+ buf_sprintf("type '%s' not optional", buf_ptr(&type_entry->name)));
return ira->codegen->builtin_types.entry_invalid;
}
zig_unreachable();
@@ -13109,7 +13109,7 @@ static TypeTableEntry *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstructio
return ir_analyze_negation(ira, un_op_instruction);
case IrUnOpDereference:
return ir_analyze_dereference(ira, un_op_instruction);
- case IrUnOpMaybe:
+ case IrUnOpOptional:
return ir_analyze_maybe(ira, un_op_instruction);
}
zig_unreachable();
@@ -14155,7 +14155,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->builtin_types.entry_invalid;
}
- } else if (child_type->id == TypeTableEntryIdMaybe) {
+ } else if (child_type->id == TypeTableEntryIdOptional) {
if (buf_eql_str(field_name, "Child")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
@@ -14339,7 +14339,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14607,7 +14607,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case TypeTableEntryIdStruct:
case TypeTableEntryIdComptimeFloat:
case TypeTableEntryIdComptimeInt:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14715,7 +14715,7 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira,
case TypeTableEntryIdStruct:
case TypeTableEntryIdComptimeFloat:
case TypeTableEntryIdComptimeInt:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14786,7 +14786,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira,
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14810,14 +14810,14 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn
TypeTableEntry *type_entry = value->value.type;
- if (type_entry->id == TypeTableEntryIdMaybe) {
+ if (type_entry->id == TypeTableEntryIdOptional) {
if (instr_is_comptime(value)) {
ConstExprValue *maybe_val = ir_resolve_const(ira, value, UndefBad);
if (!maybe_val)
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = !nullable_value_is_null(maybe_val);
+ out_val->data.x_bool = !optional_value_is_null(maybe_val);
return ira->codegen->builtin_types.entry_bool;
}
@@ -14835,7 +14835,7 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn
}
static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
- IrInstructionUnwrapMaybe *unwrap_maybe_instruction)
+ IrInstructionUnwrapOptional *unwrap_maybe_instruction)
{
IrInstruction *value = unwrap_maybe_instruction->value->other;
if (type_is_invalid(value->value.type))
@@ -14863,9 +14863,9 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile);
ir_link_new_instruction(result_instr, &unwrap_maybe_instruction->base);
return result_instr->value.type;
- } else if (type_entry->id != TypeTableEntryIdMaybe) {
+ } else if (type_entry->id != TypeTableEntryIdOptional) {
ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node,
- buf_sprintf("expected nullable type, found '%s'", buf_ptr(&type_entry->name)));
+ buf_sprintf("expected optional type, found '%s'", buf_ptr(&type_entry->name)));
return ira->codegen->builtin_types.entry_invalid;
}
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
@@ -14881,7 +14881,7 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
ConstExprValue *maybe_val = const_ptr_pointee(ira->codegen, val);
if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
- if (nullable_value_is_null(maybe_val)) {
+ if (optional_value_is_null(maybe_val)) {
ir_add_error(ira, &unwrap_maybe_instruction->base, buf_sprintf("unable to unwrap null"));
return ira->codegen->builtin_types.entry_invalid;
}
@@ -14891,7 +14891,7 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
if (type_is_codegen_pointer(child_type)) {
out_val->data.x_ptr.data.ref.pointee = maybe_val;
} else {
- out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_nullable;
+ out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_optional;
}
return result_type;
}
@@ -15216,7 +15216,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case TypeTableEntryIdStruct:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
@@ -15737,7 +15737,7 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdUnion:
@@ -16255,11 +16255,11 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
0, 0);
fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr));
if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) {
- fn_def_fields[6].data.x_nullable = create_const_vals(1);
+ fn_def_fields[6].data.x_optional = create_const_vals(1);
ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name);
- init_const_slice(ira->codegen, fn_def_fields[6].data.x_nullable, lib_name, 0, buf_len(fn_node->lib_name), true);
+ init_const_slice(ira->codegen, fn_def_fields[6].data.x_optional, lib_name, 0, buf_len(fn_node->lib_name), true);
} else {
- fn_def_fields[6].data.x_nullable = nullptr;
+ fn_def_fields[6].data.x_optional = nullptr;
}
// return_type: type
ensure_field_index(fn_def_val->type, "return_type", 7);
@@ -16507,11 +16507,11 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
break;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Nullable");
+ result->type = ir_type_info_get_type(ira, "Optional");
ConstExprValue *fields = create_const_vals(1);
result->data.x_struct.fields = fields;
@@ -16725,10 +16725,10 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
inner_fields[1].type = get_maybe_type(ira->codegen, type_info_enum_field_type);
if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef) {
- inner_fields[1].data.x_nullable = nullptr;
+ inner_fields[1].data.x_optional = nullptr;
} else {
- inner_fields[1].data.x_nullable = create_const_vals(1);
- make_enum_field_val(inner_fields[1].data.x_nullable, union_field->enum_field, type_info_enum_field_type);
+ inner_fields[1].data.x_optional = create_const_vals(1);
+ make_enum_field_val(inner_fields[1].data.x_optional, union_field->enum_field, type_info_enum_field_type);
}
inner_fields[2].special = ConstValSpecialStatic;
@@ -16796,13 +16796,13 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
inner_fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_usize);
if (!type_has_bits(struct_field->type_entry)) {
- inner_fields[1].data.x_nullable = nullptr;
+ inner_fields[1].data.x_optional = nullptr;
} else {
size_t byte_offset = LLVMOffsetOfElement(ira->codegen->target_data_ref, type_entry->type_ref, struct_field->gen_index);
- inner_fields[1].data.x_nullable = create_const_vals(1);
- inner_fields[1].data.x_nullable->special = ConstValSpecialStatic;
- inner_fields[1].data.x_nullable->type = ira->codegen->builtin_types.entry_usize;
- bigint_init_unsigned(&inner_fields[1].data.x_nullable->data.x_bigint, byte_offset);
+ inner_fields[1].data.x_optional = create_const_vals(1);
+ inner_fields[1].data.x_optional->special = ConstValSpecialStatic;
+ inner_fields[1].data.x_optional->type = ira->codegen->builtin_types.entry_usize;
+ bigint_init_unsigned(&inner_fields[1].data.x_optional->data.x_bigint, byte_offset);
}
inner_fields[2].special = ConstValSpecialStatic;
@@ -18027,7 +18027,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc
case TypeTableEntryIdPromise:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -18591,7 +18591,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
old_align_bytes = fn_type_id.alignment;
fn_type_id.alignment = align_bytes;
result_type = get_fn_type(ira->codegen, &fn_type_id);
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPointer)
{
TypeTableEntry *ptr_type = target_type->data.maybe.child_type;
@@ -18599,7 +18599,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
TypeTableEntry *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes);
result_type = get_maybe_type(ira->codegen, better_ptr_type);
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdFn)
{
FnTypeId fn_type_id = target_type->data.maybe.child_type->data.fn.fn_type_id;
@@ -18757,7 +18757,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
return;
case TypeTableEntryIdStruct:
zig_panic("TODO buf_write_value_bytes struct type");
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
zig_panic("TODO buf_write_value_bytes maybe type");
case TypeTableEntryIdErrorUnion:
zig_panic("TODO buf_write_value_bytes error union");
@@ -18815,7 +18815,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
zig_panic("TODO buf_read_value_bytes array type");
case TypeTableEntryIdStruct:
zig_panic("TODO buf_read_value_bytes struct type");
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
zig_panic("TODO buf_read_value_bytes maybe type");
case TypeTableEntryIdErrorUnion:
zig_panic("TODO buf_read_value_bytes error union");
@@ -19731,7 +19731,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
case IrInstructionIdUnionInit:
case IrInstructionIdStructFieldPtr:
case IrInstructionIdUnionFieldPtr:
- case IrInstructionIdMaybeWrap:
+ case IrInstructionIdOptionalWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
case IrInstructionIdCast:
@@ -19791,8 +19791,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction);
case IrInstructionIdTestNonNull:
return ir_analyze_instruction_test_non_null(ira, (IrInstructionTestNonNull *)instruction);
- case IrInstructionIdUnwrapMaybe:
- return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapOptional *)instruction);
case IrInstructionIdClz:
return ir_analyze_instruction_clz(ira, (IrInstructionClz *)instruction);
case IrInstructionIdCtz:
@@ -20128,7 +20128,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
- case IrInstructionIdUnwrapMaybe:
+ case IrInstructionIdUnwrapOptional:
case IrInstructionIdClz:
case IrInstructionIdCtz:
case IrInstructionIdSwitchVar:
@@ -20150,7 +20150,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdFrameAddress:
case IrInstructionIdTestErr:
case IrInstructionIdUnwrapErrCode:
- case IrInstructionIdMaybeWrap:
+ case IrInstructionIdOptionalWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
case IrInstructionIdFnProto:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 776ef64566..43907fa9d4 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -148,7 +148,7 @@ static const char *ir_un_op_id_str(IrUnOp op_id) {
return "-%";
case IrUnOpDereference:
return "*";
- case IrUnOpMaybe:
+ case IrUnOpOptional:
return "?";
}
zig_unreachable();
@@ -481,7 +481,7 @@ static void ir_print_test_null(IrPrint *irp, IrInstructionTestNonNull *instructi
fprintf(irp->f, " != null");
}
-static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapMaybe *instruction) {
+static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapOptional *instruction) {
fprintf(irp->f, "&??*");
ir_print_other_instruction(irp, instruction->value);
if (!instruction->safety_check_on) {
@@ -777,7 +777,7 @@ static void ir_print_unwrap_err_payload(IrPrint *irp, IrInstructionUnwrapErrPayl
}
}
-static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionMaybeWrap *instruction) {
+static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionOptionalWrap *instruction) {
fprintf(irp->f, "@maybeWrap(");
ir_print_other_instruction(irp, instruction->value);
fprintf(irp->f, ")");
@@ -1032,7 +1032,7 @@ static void ir_print_export(IrPrint *irp, IrInstructionExport *instruction) {
static void ir_print_error_return_trace(IrPrint *irp, IrInstructionErrorReturnTrace *instruction) {
fprintf(irp->f, "@errorReturnTrace(");
- switch (instruction->nullable) {
+ switch (instruction->optional) {
case IrInstructionErrorReturnTrace::Null:
fprintf(irp->f, "Null");
break;
@@ -1348,8 +1348,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTestNonNull:
ir_print_test_null(irp, (IrInstructionTestNonNull *)instruction);
break;
- case IrInstructionIdUnwrapMaybe:
- ir_print_unwrap_maybe(irp, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ ir_print_unwrap_maybe(irp, (IrInstructionUnwrapOptional *)instruction);
break;
case IrInstructionIdCtz:
ir_print_ctz(irp, (IrInstructionCtz *)instruction);
@@ -1465,8 +1465,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdUnwrapErrPayload:
ir_print_unwrap_err_payload(irp, (IrInstructionUnwrapErrPayload *)instruction);
break;
- case IrInstructionIdMaybeWrap:
- ir_print_maybe_wrap(irp, (IrInstructionMaybeWrap *)instruction);
+ case IrInstructionIdOptionalWrap:
+ ir_print_maybe_wrap(irp, (IrInstructionOptionalWrap *)instruction);
break;
case IrInstructionIdErrWrapCode:
ir_print_err_wrap_code(irp, (IrInstructionErrWrapCode *)instruction);
diff --git a/src/parser.cpp b/src/parser.cpp
index 3ad2de906b..2ee69f81ab 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1046,12 +1046,11 @@ static AstNode *ast_parse_fn_proto_partial(ParseContext *pc, size_t *token_index
}
/*
-SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | PtrDerefExpression | SliceExpression)
+SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?")
FnCallExpression : token(LParen) list(Expression, token(Comma)) token(RParen)
ArrayAccessExpression : token(LBracket) Expression token(RBracket)
SliceExpression = "[" Expression ".." option(Expression) "]"
FieldAccessExpression : token(Dot) token(Symbol)
-PtrDerefExpression = ".*"
StructLiteralField : token(Dot) token(Symbol) token(Eq) Expression
*/
static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
@@ -1148,6 +1147,14 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index,
AstNode *node = ast_create_node(pc, NodeTypePtrDeref, first_token);
node->data.ptr_deref_expr.target = primary_expr;
+ primary_expr = node;
+ } else if (token->id == TokenIdQuestion) {
+ *token_index += 1;
+
+ AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, first_token);
+ node->data.prefix_op_expr.prefix_op = PrefixOpUnwrapOptional;
+ node->data.prefix_op_expr.primary_expr = primary_expr;
+
primary_expr = node;
} else {
ast_invalid_token_error(pc, token);
@@ -1165,8 +1172,8 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdDash: return PrefixOpNegation;
case TokenIdMinusPercent: return PrefixOpNegationWrap;
case TokenIdTilde: return PrefixOpBinNot;
- case TokenIdMaybe: return PrefixOpMaybe;
- case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe;
+ case TokenIdQuestion: return PrefixOpOptional;
+ case TokenIdDoubleQuestion: return PrefixOpUnwrapOptional;
case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
@@ -2304,8 +2311,8 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma
}
/*
-UnwrapExpression : BoolOrExpression (UnwrapMaybe | UnwrapError) | BoolOrExpression
-UnwrapMaybe : "??" BoolOrExpression
+UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
+UnwrapOptional : "??" BoolOrExpression
UnwrapError = "catch" option("|" Symbol "|") Expression
*/
static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
@@ -2322,7 +2329,7 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo
AstNode *node = ast_create_node(pc, NodeTypeBinOpExpr, token);
node->data.bin_op_expr.op1 = lhs;
- node->data.bin_op_expr.bin_op = BinOpTypeUnwrapMaybe;
+ node->data.bin_op_expr.bin_op = BinOpTypeUnwrapOptional;
node->data.bin_op_expr.op2 = rhs;
return node;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index badbd695ec..cfabdf11ad 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -625,7 +625,7 @@ void tokenize(Buf *buf, Tokenization *out) {
t.state = TokenizeStateSawDot;
break;
case '?':
- begin_token(&t, TokenIdMaybe);
+ begin_token(&t, TokenIdQuestion);
t.state = TokenizeStateSawQuestionMark;
break;
default:
@@ -639,11 +639,6 @@ void tokenize(Buf *buf, Tokenization *out) {
end_token(&t);
t.state = TokenizeStateStart;
break;
- case '=':
- set_token_id(&t, t.cur_tok, TokenIdMaybeAssign);
- end_token(&t);
- t.state = TokenizeStateStart;
- break;
default:
t.pos -= 1;
end_token(&t);
@@ -1609,8 +1604,7 @@ const char * token_name(TokenId id) {
case TokenIdLBrace: return "{";
case TokenIdLBracket: return "[";
case TokenIdLParen: return "(";
- case TokenIdMaybe: return "?";
- case TokenIdMaybeAssign: return "?=";
+ case TokenIdQuestion: return "?";
case TokenIdMinusEq: return "-=";
case TokenIdMinusPercent: return "-%";
case TokenIdMinusPercentEq: return "-%=";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index d0089909cd..7c617f85c6 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -100,8 +100,7 @@ enum TokenId {
TokenIdLBrace,
TokenIdLBracket,
TokenIdLParen,
- TokenIdMaybe,
- TokenIdMaybeAssign,
+ TokenIdQuestion,
TokenIdMinusEq,
TokenIdMinusPercent,
TokenIdMinusPercentEq,
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index d78bd1fa70..aaaf5a1edb 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -382,7 +382,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
fn_def->data.fn_def.fn_proto = fn_proto;
fn_proto->data.fn_proto.fn_def_node = fn_def;
- AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, ref_node);
+ AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, ref_node);
AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr);
fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node;
@@ -410,7 +410,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
}
static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) {
- return trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, child);
+ return trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, child);
}
static AstNode *get_global(Context *c, Buf *name) {
@@ -879,14 +879,14 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
}
if (qual_type_child_is_fn_proto(child_qt)) {
- return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node);
+ return trans_create_node_prefix_op(c, PrefixOpOptional, child_node);
}
PtrLen ptr_len = type_is_opaque(c, child_qt.getTypePtr(), source_loc) ? PtrLenSingle : PtrLenUnknown;
AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
child_qt.isVolatileQualified(), child_node, ptr_len);
- return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node);
+ return trans_create_node_prefix_op(c, PrefixOpOptional, pointer_node);
}
case Type::Typedef:
{
@@ -1963,7 +1963,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType());
if (is_fn_ptr)
return value_node;
- AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, value_node);
+ AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, value_node);
return trans_create_node_ptr_deref(c, unwrapped);
}
case UO_Plus:
@@ -2587,7 +2587,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope *
}
}
if (callee_node == nullptr) {
- callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, callee_raw_node);
+ callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, callee_raw_node);
}
} else {
callee_node = callee_raw_node;
@@ -4301,7 +4301,7 @@ static AstNode *trans_lookup_ast_maybe_fn(Context *c, AstNode *ref_node) {
return nullptr;
if (prefix_node->type != NodeTypePrefixOpExpr)
return nullptr;
- if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpMaybe)
+ if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpOptional)
return nullptr;
AstNode *fn_proto_node = prefix_node->data.prefix_op_expr.primary_expr;
diff --git a/std/array_list.zig b/std/array_list.zig
index 30715f4d6f..1a235d28a3 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -258,7 +258,7 @@ test "iterator ArrayList test" {
}
it.reset();
- assert(??it.next() == 1);
+ assert(it.next().? == 1);
}
test "insert ArrayList test" {
diff --git a/std/buf_map.zig b/std/buf_map.zig
index 22d821ae7b..0d4f3a6d5e 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -72,15 +72,15 @@ test "BufMap" {
defer bufmap.deinit();
try bufmap.set("x", "1");
- assert(mem.eql(u8, ??bufmap.get("x"), "1"));
+ assert(mem.eql(u8, bufmap.get("x").?, "1"));
assert(1 == bufmap.count());
try bufmap.set("x", "2");
- assert(mem.eql(u8, ??bufmap.get("x"), "2"));
+ assert(mem.eql(u8, bufmap.get("x").?, "2"));
assert(1 == bufmap.count());
try bufmap.set("x", "3");
- assert(mem.eql(u8, ??bufmap.get("x"), "3"));
+ assert(mem.eql(u8, bufmap.get("x").?, "3"));
assert(1 == bufmap.count());
bufmap.delete("x");
diff --git a/std/event.zig b/std/event.zig
index 89ab816bb6..0821c789b7 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -40,9 +40,9 @@ pub const TcpServer = struct {
self.listen_address = std.net.Address.initPosix(try std.os.posixGetSockName(self.sockfd));
self.accept_coro = try async TcpServer.handler(self);
- errdefer cancel ??self.accept_coro;
+ errdefer cancel self.accept_coro.?;
- try self.loop.addFd(self.sockfd, ??self.accept_coro);
+ try self.loop.addFd(self.sockfd, self.accept_coro.?);
errdefer self.loop.removeFd(self.sockfd);
}
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index 3844fbb10a..b52625e26e 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -111,7 +111,7 @@ pub fn formatType(
builtin.TypeId.Bool => {
return output(context, if (value) "true" else "false");
},
- builtin.TypeId.Nullable => {
+ builtin.TypeId.Optional => {
if (value) |payload| {
return formatType(payload, fmt, context, Errors, output);
} else {
@@ -819,11 +819,11 @@ test "parse unsigned comptime" {
test "fmt.format" {
{
const value: ?i32 = 1234;
- try testFmt("nullable: 1234\n", "nullable: {}\n", value);
+ try testFmt("optional: 1234\n", "optional: {}\n", value);
}
{
const value: ?i32 = null;
- try testFmt("nullable: null\n", "nullable: {}\n", value);
+ try testFmt("optional: null\n", "optional: {}\n", value);
}
{
const value: error!i32 = 1234;
diff --git a/std/hash_map.zig b/std/hash_map.zig
index a323cdc197..3bd03d4f28 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -265,11 +265,11 @@ test "basic hash map usage" {
assert((map.put(4, 44) catch unreachable) == null);
assert((map.put(5, 55) catch unreachable) == null);
- assert(??(map.put(5, 66) catch unreachable) == 55);
- assert(??(map.put(5, 55) catch unreachable) == 66);
+ assert((map.put(5, 66) catch unreachable).? == 55);
+ assert((map.put(5, 55) catch unreachable).? == 66);
assert(map.contains(2));
- assert((??map.get(2)).value == 22);
+ assert(map.get(2).?.value == 22);
_ = map.remove(2);
assert(map.remove(2) == null);
assert(map.get(2) == null);
@@ -317,7 +317,7 @@ test "iterator hash map" {
}
it.reset();
- var entry = ??it.next();
+ var entry = it.next().?;
assert(entry.key == keys[0]);
assert(entry.value == values[0]);
}
diff --git a/std/heap.zig b/std/heap.zig
index 5d430bc761..d1fbf9ca0a 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -142,7 +142,7 @@ pub const DirectAllocator = struct {
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
const old_ptr = @intToPtr(*c_void, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
- const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
+ const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) ?? blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
@intToPtr(*align(1) usize, new_record_addr).* = root_addr;
@@ -171,7 +171,7 @@ pub const DirectAllocator = struct {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
const root_addr = @intToPtr(*align(1) usize, record_addr).*;
const ptr = @intToPtr(*c_void, root_addr);
- _ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
+ _ = os.windows.HeapFree(self.heap_handle.?, 0, ptr);
},
else => @compileError("Unsupported OS"),
}
diff --git a/std/json.zig b/std/json.zig
index 03b19a7fa4..75ea2eee1c 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -908,7 +908,7 @@ pub const TokenStream = struct {
};
fn checkNext(p: *TokenStream, id: Token.Id) void {
- const token = ??(p.next() catch unreachable);
+ const token = (p.next() catch unreachable).?;
debug.assert(token.id == id);
}
@@ -1376,17 +1376,17 @@ test "json parser dynamic" {
var root = tree.root;
- var image = (??root.Object.get("Image")).value;
+ var image = root.Object.get("Image").?.value;
- const width = (??image.Object.get("Width")).value;
+ const width = image.Object.get("Width").?.value;
debug.assert(width.Integer == 800);
- const height = (??image.Object.get("Height")).value;
+ const height = image.Object.get("Height").?.value;
debug.assert(height.Integer == 600);
- const title = (??image.Object.get("Title")).value;
+ const title = image.Object.get("Title").?.value;
debug.assert(mem.eql(u8, title.String, "View from 15th Floor"));
- const animated = (??image.Object.get("Animated")).value;
+ const animated = image.Object.get("Animated").?.value;
debug.assert(animated.Bool == false);
}
diff --git a/std/linked_list.zig b/std/linked_list.zig
index fbc0a0c42a..536c6d24d0 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -270,8 +270,8 @@ test "basic linked list test" {
var last = list.pop(); // {2, 3, 4}
list.remove(three); // {2, 4}
- assert((??list.first).data == 2);
- assert((??list.last).data == 4);
+ assert(list.first.?.data == 2);
+ assert(list.last.?.data == 4);
assert(list.len == 2);
}
@@ -336,7 +336,7 @@ test "basic intrusive linked list test" {
var last = list.pop(); // {2, 3, 4}
list.remove(&three.link); // {2, 4}
- assert((??list.first).toData().value == 2);
- assert((??list.last).toData().value == 4);
+ assert(list.first.?.toData().value == 2);
+ assert(list.last.?.toData().value == 4);
assert(list.len == 2);
}
diff --git a/std/macho.zig b/std/macho.zig
index d6eef9a325..64f78ae4a3 100644
--- a/std/macho.zig
+++ b/std/macho.zig
@@ -130,7 +130,7 @@ pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable
for (syms) |sym| {
if (!isSymbol(sym)) continue;
const start = sym.n_strx;
- const end = ??mem.indexOfScalarPos(u8, strings, start, 0);
+ const end = mem.indexOfScalarPos(u8, strings, start, 0).?;
const name = strings[start..end];
const address = sym.n_value;
symbols[nsym] = Symbol{ .name = name, .address = address };
diff --git a/std/mem.zig b/std/mem.zig
index 423460e73b..f961c7862b 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -304,20 +304,20 @@ pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, nee
}
test "mem.indexOf" {
- assert(??indexOf(u8, "one two three four", "four") == 14);
- assert(??lastIndexOf(u8, "one two three two four", "two") == 14);
+ assert(indexOf(u8, "one two three four", "four").? == 14);
+ assert(lastIndexOf(u8, "one two three two four", "two").? == 14);
assert(indexOf(u8, "one two three four", "gour") == null);
assert(lastIndexOf(u8, "one two three four", "gour") == null);
- assert(??indexOf(u8, "foo", "foo") == 0);
- assert(??lastIndexOf(u8, "foo", "foo") == 0);
+ assert(indexOf(u8, "foo", "foo").? == 0);
+ assert(lastIndexOf(u8, "foo", "foo").? == 0);
assert(indexOf(u8, "foo", "fool") == null);
assert(lastIndexOf(u8, "foo", "lfoo") == null);
assert(lastIndexOf(u8, "foo", "fool") == null);
- assert(??indexOf(u8, "foo foo", "foo") == 0);
- assert(??lastIndexOf(u8, "foo foo", "foo") == 4);
- assert(??lastIndexOfAny(u8, "boo, cat", "abo") == 6);
- assert(??lastIndexOfScalar(u8, "boo", 'o') == 2);
+ assert(indexOf(u8, "foo foo", "foo").? == 0);
+ assert(lastIndexOf(u8, "foo foo", "foo").? == 4);
+ assert(lastIndexOfAny(u8, "boo, cat", "abo").? == 6);
+ assert(lastIndexOfScalar(u8, "boo", 'o').? == 2);
}
/// Reads an integer from memory with size equal to bytes.len.
@@ -432,9 +432,9 @@ pub fn split(buffer: []const u8, split_bytes: []const u8) SplitIterator {
test "mem.split" {
var it = split(" abc def ghi ", " ");
- assert(eql(u8, ??it.next(), "abc"));
- assert(eql(u8, ??it.next(), "def"));
- assert(eql(u8, ??it.next(), "ghi"));
+ assert(eql(u8, it.next().?, "abc"));
+ assert(eql(u8, it.next().?, "def"));
+ assert(eql(u8, it.next().?, "ghi"));
assert(it.next() == null);
}
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 822ade2eb8..1e3a732498 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -156,7 +156,7 @@ pub const ChildProcess = struct {
};
}
try self.waitUnwrappedWindows();
- return ??self.term;
+ return self.term.?;
}
pub fn killPosix(self: *ChildProcess) !Term {
@@ -175,7 +175,7 @@ pub const ChildProcess = struct {
};
}
self.waitUnwrapped();
- return ??self.term;
+ return self.term.?;
}
/// Blocks until child process terminates and then cleans up all resources.
@@ -212,8 +212,8 @@ pub const ChildProcess = struct {
defer Buffer.deinit(&stdout);
defer Buffer.deinit(&stderr);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size);
try stderr_file_in_stream.stream.readAllBuffer(&stderr, max_output_size);
@@ -232,7 +232,7 @@ pub const ChildProcess = struct {
}
try self.waitUnwrappedWindows();
- return ??self.term;
+ return self.term.?;
}
fn waitPosix(self: *ChildProcess) !Term {
@@ -242,7 +242,7 @@ pub const ChildProcess = struct {
}
self.waitUnwrapped();
- return ??self.term;
+ return self.term.?;
}
pub fn deinit(self: *ChildProcess) void {
@@ -619,13 +619,13 @@ pub const ChildProcess = struct {
self.term = null;
if (self.stdin_behavior == StdIo.Pipe) {
- os.close(??g_hChildStd_IN_Rd);
+ os.close(g_hChildStd_IN_Rd.?);
}
if (self.stderr_behavior == StdIo.Pipe) {
- os.close(??g_hChildStd_ERR_Wr);
+ os.close(g_hChildStd_ERR_Wr.?);
}
if (self.stdout_behavior == StdIo.Pipe) {
- os.close(??g_hChildStd_OUT_Wr);
+ os.close(g_hChildStd_OUT_Wr.?);
}
}
diff --git a/std/os/index.zig b/std/os/index.zig
index fe5ecc38ba..807b2c398b 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -422,7 +422,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator:
const exe_path = argv[0];
if (mem.indexOfScalar(u8, exe_path, '/') != null) {
- return posixExecveErrnoToErr(posix.getErrno(posix.execve(??argv_buf[0], argv_buf.ptr, envp_buf.ptr)));
+ return posixExecveErrnoToErr(posix.getErrno(posix.execve(argv_buf[0].?, argv_buf.ptr, envp_buf.ptr)));
}
const PATH = getEnvPosix("PATH") ?? "/usr/local/bin:/bin/:/usr/bin";
@@ -1729,7 +1729,7 @@ test "windows arg parsing" {
fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []const u8) void {
var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line);
for (expected_args) |expected_arg| {
- const arg = ??it.next(debug.global_allocator) catch unreachable;
+ const arg = it.next(debug.global_allocator).? catch unreachable;
assert(mem.eql(u8, arg, expected_arg));
}
assert(it.next(debug.global_allocator) == null);
diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig
index 2ab4d0cbc1..1414b8185b 100644
--- a/std/os/linux/vdso.zig
+++ b/std/os/linux/vdso.zig
@@ -67,7 +67,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
if (0 == syms[i].st_shndx) continue;
if (!mem.eql(u8, name, cstr.toSliceConst(strings + syms[i].st_name))) continue;
if (maybe_versym) |versym| {
- if (!checkver(??maybe_verdef, versym[i], vername, strings))
+ if (!checkver(maybe_verdef.?, versym[i], vername, strings))
continue;
}
return base + syms[i].st_value;
diff --git a/std/os/path.zig b/std/os/path.zig
index 4df6179bf5..430dda2934 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -265,7 +265,7 @@ fn networkShareServersEql(ns1: []const u8, ns2: []const u8) bool {
var it2 = mem.split(ns2, []u8{sep2});
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
- return asciiEqlIgnoreCase(??it1.next(), ??it2.next());
+ return asciiEqlIgnoreCase(it1.next().?, it2.next().?);
}
fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8) bool {
@@ -286,7 +286,7 @@ fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8
var it2 = mem.split(p2, []u8{sep2});
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
- return asciiEqlIgnoreCase(??it1.next(), ??it2.next()) and asciiEqlIgnoreCase(??it1.next(), ??it2.next());
+ return asciiEqlIgnoreCase(it1.next().?, it2.next().?) and asciiEqlIgnoreCase(it1.next().?, it2.next().?);
},
}
}
@@ -414,8 +414,8 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
WindowsPath.Kind.NetworkShare => {
result = try allocator.alloc(u8, max_size);
var it = mem.split(paths[first_index], "/\\");
- const server_name = ??it.next();
- const other_name = ??it.next();
+ const server_name = it.next().?;
+ const other_name = it.next().?;
result[result_index] = '\\';
result_index += 1;
diff --git a/std/segmented_list.zig b/std/segmented_list.zig
index a2f3607ad8..9f10f4d44a 100644
--- a/std/segmented_list.zig
+++ b/std/segmented_list.zig
@@ -364,7 +364,7 @@ fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
assert(x == 0);
}
- assert(??list.pop() == 100);
+ assert(list.pop().? == 100);
assert(list.len == 99);
try list.pushMany([]i32{
@@ -373,9 +373,9 @@ fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
3,
});
assert(list.len == 102);
- assert(??list.pop() == 3);
- assert(??list.pop() == 2);
- assert(??list.pop() == 1);
+ assert(list.pop().? == 3);
+ assert(list.pop().? == 2);
+ assert(list.pop().? == 1);
assert(list.len == 99);
try list.pushMany([]const i32{});
diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig
index 8aefe4751f..dd37f1edb6 100644
--- a/std/special/bootstrap.zig
+++ b/std/special/bootstrap.zig
@@ -54,10 +54,10 @@ fn posixCallMainAndExit() noreturn {
const argc = argc_ptr[0];
const argv = @ptrCast([*][*]u8, argc_ptr + 1);
- const envp_nullable = @ptrCast([*]?[*]u8, argv + argc + 1);
+ const envp_optional = @ptrCast([*]?[*]u8, argv + argc + 1);
var envp_count: usize = 0;
- while (envp_nullable[envp_count]) |_| : (envp_count += 1) {}
- const envp = @ptrCast([*][*]u8, envp_nullable)[0..envp_count];
+ while (envp_optional[envp_count]) |_| : (envp_count += 1) {}
+ const envp = @ptrCast([*][*]u8, envp_optional)[0..envp_count];
if (builtin.os == builtin.Os.linux) {
const auxv = @ptrCast([*]usize, envp.ptr + envp_count + 1);
var i: usize = 0;
diff --git a/std/special/builtin.zig b/std/special/builtin.zig
index e537078924..e97b0a89e4 100644
--- a/std/special/builtin.zig
+++ b/std/special/builtin.zig
@@ -19,7 +19,7 @@ export fn memset(dest: ?[*]u8, c: u8, n: usize) ?[*]u8 {
var index: usize = 0;
while (index != n) : (index += 1)
- (??dest)[index] = c;
+ dest.?[index] = c;
return dest;
}
@@ -29,7 +29,7 @@ export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]
var index: usize = 0;
while (index != n) : (index += 1)
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
return dest;
}
@@ -40,13 +40,13 @@ export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8 {
if (@ptrToInt(dest) < @ptrToInt(src)) {
var index: usize = 0;
while (index != n) : (index += 1) {
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
}
} else {
var index = n;
while (index != 0) {
index -= 1;
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
}
}
diff --git a/std/unicode.zig b/std/unicode.zig
index 3d1bebdb55..21ae12f59c 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -286,15 +286,15 @@ fn testUtf8IteratorOnAscii() void {
const s = Utf8View.initComptime("abc");
var it1 = s.iterator();
- debug.assert(std.mem.eql(u8, "a", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "b", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "c", ??it1.nextCodepointSlice()));
+ debug.assert(std.mem.eql(u8, "a", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "b", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "c", it1.nextCodepointSlice().?));
debug.assert(it1.nextCodepointSlice() == null);
var it2 = s.iterator();
- debug.assert(??it2.nextCodepoint() == 'a');
- debug.assert(??it2.nextCodepoint() == 'b');
- debug.assert(??it2.nextCodepoint() == 'c');
+ debug.assert(it2.nextCodepoint().? == 'a');
+ debug.assert(it2.nextCodepoint().? == 'b');
+ debug.assert(it2.nextCodepoint().? == 'c');
debug.assert(it2.nextCodepoint() == null);
}
@@ -321,15 +321,15 @@ fn testUtf8ViewOk() void {
const s = Utf8View.initComptime("東京市");
var it1 = s.iterator();
- debug.assert(std.mem.eql(u8, "東", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "京", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "市", ??it1.nextCodepointSlice()));
+ debug.assert(std.mem.eql(u8, "東", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "京", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "市", it1.nextCodepointSlice().?));
debug.assert(it1.nextCodepointSlice() == null);
var it2 = s.iterator();
- debug.assert(??it2.nextCodepoint() == 0x6771);
- debug.assert(??it2.nextCodepoint() == 0x4eac);
- debug.assert(??it2.nextCodepoint() == 0x5e02);
+ debug.assert(it2.nextCodepoint().? == 0x6771);
+ debug.assert(it2.nextCodepoint().? == 0x4eac);
+ debug.assert(it2.nextCodepoint().? == 0x5e02);
debug.assert(it2.nextCodepoint() == null);
}
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index a4b64d5db2..defaded78a 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -1417,7 +1417,7 @@ pub const Node = struct {
Range,
Sub,
SubWrap,
- UnwrapMaybe,
+ UnwrapOptional,
};
pub fn iterate(self: *InfixOp, index: usize) ?*Node {
@@ -1475,7 +1475,7 @@ pub const Node = struct {
Op.Range,
Op.Sub,
Op.SubWrap,
- Op.UnwrapMaybe,
+ Op.UnwrapOptional,
=> {},
}
@@ -1507,14 +1507,13 @@ pub const Node = struct {
BitNot,
BoolNot,
Cancel,
- MaybeType,
+ OptionalType,
Negation,
NegationWrap,
Resume,
PtrType: PtrInfo,
SliceType: PtrInfo,
Try,
- UnwrapMaybe,
};
pub const PtrInfo = struct {
@@ -1557,12 +1556,12 @@ pub const Node = struct {
Op.BitNot,
Op.BoolNot,
Op.Cancel,
- Op.MaybeType,
+ Op.OptionalType,
Op.Negation,
Op.NegationWrap,
Op.Try,
Op.Resume,
- Op.UnwrapMaybe,
+ Op.UnwrapOptional,
Op.PointerType,
=> {},
}
@@ -1619,6 +1618,7 @@ pub const Node = struct {
ArrayInitializer: InitList,
StructInitializer: InitList,
Deref,
+ UnwrapOptional,
pub const InitList = SegmentedList(*Node, 2);
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 7faca8e11b..9f8ef3c3d6 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -711,7 +711,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
else => {
// TODO: this is a special case. Remove this when #760 is fixed
if (token_ptr.id == Token.Id.Keyword_error) {
- if ((??tok_it.peek()).id == Token.Id.LBrace) {
+ if (tok_it.peek().?.id == Token.Id.LBrace) {
const error_type_node = try arena.construct(ast.Node.ErrorType{
.base = ast.Node{ .id = ast.Node.Id.ErrorType },
.token = token_index,
@@ -1434,8 +1434,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
try stack.append(State{
.ExpectTokenSave = ExpectTokenSave{
.id = Token.Id.AngleBracketRight,
- .ptr = &??async_node.rangle_bracket,
- },
+ .ptr = &async_node.rangle_bracket.? },
});
try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &async_node.allocator_type } });
continue;
@@ -1567,7 +1566,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
.bit_range = null,
};
// TODO https://github.com/ziglang/zig/issues/1022
- const align_info = &??addr_of_info.align_info;
+ const align_info = &addr_of_info.align_info.?;
try stack.append(State{ .AlignBitRange = align_info });
try stack.append(State{ .Expression = OptionalCtx{ .Required = &align_info.node } });
@@ -1604,7 +1603,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
switch (token.ptr.id) {
Token.Id.Colon => {
align_info.bit_range = ast.Node.PrefixOp.PtrInfo.Align.BitRange(undefined);
- const bit_range = &??align_info.bit_range;
+ const bit_range = &align_info.bit_range.?;
try stack.append(State{ .ExpectToken = Token.Id.RParen });
try stack.append(State{ .Expression = OptionalCtx{ .Required = &bit_range.end } });
@@ -2144,7 +2143,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
State.CurlySuffixExpressionEnd => |opt_ctx| {
const lhs = opt_ctx.get() ?? continue;
- if ((??tok_it.peek()).id == Token.Id.Period) {
+ if (tok_it.peek().?.id == Token.Id.Period) {
const node = try arena.construct(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
@@ -2326,6 +2325,17 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
continue;
}
+ if (eatToken(&tok_it, &tree, Token.Id.QuestionMark)) |question_token| {
+ const node = try arena.construct(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op.UnwrapOptional,
+ .rtoken = question_token,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ continue;
+ }
const node = try arena.construct(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
@@ -2403,7 +2413,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
.arrow_token = next_token_index,
.return_type = undefined,
};
- const return_type_ptr = &((??node.result).return_type);
+ const return_type_ptr = &node.result.?.return_type;
try stack.append(State{ .Expression = OptionalCtx{ .Required = return_type_ptr } });
continue;
},
@@ -2875,7 +2885,7 @@ const OptionalCtx = union(enum) {
pub fn get(self: *const OptionalCtx) ?*ast.Node {
switch (self.*) {
OptionalCtx.Optional => |ptr| return ptr.*,
- OptionalCtx.RequiredNull => |ptr| return ??ptr.*,
+ OptionalCtx.RequiredNull => |ptr| return ptr.*.?,
OptionalCtx.Required => |ptr| return ptr.*,
}
}
@@ -3237,7 +3247,7 @@ fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op {
fn tokenIdToUnwrapExpr(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
return switch (id) {
Token.Id.Keyword_catch => ast.Node.InfixOp.Op{ .Catch = null },
- Token.Id.QuestionMarkQuestionMark => ast.Node.InfixOp.Op{ .UnwrapMaybe = void{} },
+ Token.Id.QuestionMarkQuestionMark => ast.Node.InfixOp.Op{ .UnwrapOptional = void{} },
else => null,
};
}
@@ -3299,8 +3309,7 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
.volatile_token = null,
},
},
- Token.Id.QuestionMark => ast.Node.PrefixOp.Op{ .MaybeType = void{} },
- Token.Id.QuestionMarkQuestionMark => ast.Node.PrefixOp.Op{ .UnwrapMaybe = void{} },
+ Token.Id.QuestionMark => ast.Node.PrefixOp.Op{ .OptionalType = void{} },
Token.Id.Keyword_await => ast.Node.PrefixOp.Op{ .Await = void{} },
Token.Id.Keyword_try => ast.Node.PrefixOp.Op{ .Try = void{} },
else => null,
@@ -3322,7 +3331,7 @@ fn createToCtxLiteral(arena: *mem.Allocator, opt_ctx: *const OptionalCtx, compti
}
fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(Token.Id)) ?TokenIndex {
- const token = ??tok_it.peek();
+ const token = tok_it.peek().?;
if (token.id == id) {
return nextToken(tok_it, tree).index;
@@ -3334,7 +3343,7 @@ fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(
fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedToken {
const result = AnnotatedToken{
.index = tok_it.index,
- .ptr = ??tok_it.next(),
+ .ptr = tok_it.next().?,
};
assert(result.ptr.id != Token.Id.LineComment);
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 91a56de827..ea3a4858b0 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -650,9 +650,10 @@ test "zig fmt: statements with empty line between" {
);
}
-test "zig fmt: ptr deref operator" {
+test "zig fmt: ptr deref operator and unwrap optional operator" {
try testCanonical(
\\const a = b.*;
+ \\const a = b.?;
\\
);
}
@@ -1209,7 +1210,7 @@ test "zig fmt: precedence" {
test "zig fmt: prefix operators" {
try testCanonical(
\\test "prefix operators" {
- \\ try return --%~??!*&0;
+ \\ try return --%~!*&0;
\\}
\\
);
diff --git a/std/zig/render.zig b/std/zig/render.zig
index 7c9b53b77a..0b8e4d1453 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -222,7 +222,7 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
}
}
- const value_expr = ??tag.value_expr;
+ const value_expr = tag.value_expr.?;
try renderToken(tree, stream, tree.prevToken(value_expr.firstToken()), indent, start_col, Space.Space); // =
try renderExpression(allocator, stream, tree, indent, start_col, value_expr, Space.Comma); // value,
},
@@ -465,8 +465,7 @@ fn renderExpression(
ast.Node.PrefixOp.Op.BoolNot,
ast.Node.PrefixOp.Op.Negation,
ast.Node.PrefixOp.Op.NegationWrap,
- ast.Node.PrefixOp.Op.UnwrapMaybe,
- ast.Node.PrefixOp.Op.MaybeType,
+ ast.Node.PrefixOp.Op.OptionalType,
ast.Node.PrefixOp.Op.AddressOf,
=> {
try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None);
@@ -513,7 +512,7 @@ fn renderExpression(
var it = call_info.params.iterator(0);
while (true) {
- const param_node = ??it.next();
+ const param_node = it.next().?;
const param_node_new_indent = if (param_node.*.id == ast.Node.Id.MultilineStringLiteral) blk: {
break :blk indent;
@@ -559,10 +558,10 @@ fn renderExpression(
return renderToken(tree, stream, rbracket, indent, start_col, space); // ]
},
- ast.Node.SuffixOp.Op.Deref => {
+ ast.Node.SuffixOp.Op.Deref, ast.Node.SuffixOp.Op.UnwrapOptional => {
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // .
- return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // *
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // * or ?
},
@TagType(ast.Node.SuffixOp.Op).Slice => |range| {
@@ -595,7 +594,7 @@ fn renderExpression(
}
if (field_inits.len == 1) blk: {
- const field_init = ??field_inits.at(0).*.cast(ast.Node.FieldInitializer);
+ const field_init = field_inits.at(0).*.cast(ast.Node.FieldInitializer).?;
if (field_init.expr.cast(ast.Node.SuffixOp)) |nested_suffix_op| {
if (nested_suffix_op.op == ast.Node.SuffixOp.Op.StructInitializer) {
@@ -688,7 +687,7 @@ fn renderExpression(
var count: usize = 1;
var it = exprs.iterator(0);
while (true) {
- const expr = (??it.next()).*;
+ const expr = it.next().?.*;
if (it.peek()) |next_expr| {
const expr_last_token = expr.*.lastToken() + 1;
const loc = tree.tokenLocation(tree.tokens.at(expr_last_token).end, next_expr.*.firstToken());
@@ -806,7 +805,7 @@ fn renderExpression(
},
}
- return renderExpression(allocator, stream, tree, indent, start_col, ??flow_expr.rhs, space);
+ return renderExpression(allocator, stream, tree, indent, start_col, flow_expr.rhs.?, space);
},
ast.Node.Id.Payload => {
@@ -1245,7 +1244,7 @@ fn renderExpression(
} else {
var it = switch_case.items.iterator(0);
while (true) {
- const node = ??it.next();
+ const node = it.next().?;
if (it.peek()) |next_node| {
try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.None);
@@ -1550,7 +1549,7 @@ fn renderExpression(
var it = asm_node.outputs.iterator(0);
while (true) {
- const asm_output = ??it.next();
+ const asm_output = it.next().?;
const node = &(asm_output.*).base;
if (it.peek()) |next_asm_output| {
@@ -1588,7 +1587,7 @@ fn renderExpression(
var it = asm_node.inputs.iterator(0);
while (true) {
- const asm_input = ??it.next();
+ const asm_input = it.next().?;
const node = &(asm_input.*).base;
if (it.peek()) |next_asm_input| {
@@ -1620,7 +1619,7 @@ fn renderExpression(
var it = asm_node.clobbers.iterator(0);
while (true) {
- const clobber_token = ??it.next();
+ const clobber_token = it.next().?;
if (it.peek() == null) {
try renderToken(tree, stream, clobber_token.*, indent_once, start_col, Space.Newline);
diff --git a/test/cases/bugs/656.zig b/test/cases/bugs/656.zig
index a6035d51bb..f93f0ac4d5 100644
--- a/test/cases/bugs/656.zig
+++ b/test/cases/bugs/656.zig
@@ -9,7 +9,7 @@ const Value = struct {
align_expr: ?u32,
};
-test "nullable if after an if in a switch prong of a switch with 2 prongs in an else" {
+test "optional if after an if in a switch prong of a switch with 2 prongs in an else" {
foo(false, true);
}
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index da3cba7d80..a56c470408 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -109,16 +109,16 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" {
const Self = this;
x: u8,
fn constConst(p: *const *const Self) u8 {
- return (p.*).x;
+ return p.*.x;
}
fn maybeConstConst(p: ?*const *const Self) u8 {
- return ((??p).*).x;
+ return p.?.*.x;
}
fn constConstConst(p: *const *const *const Self) u8 {
- return (p.*.*).x;
+ return p.*.*.x;
}
fn maybeConstConstConst(p: ?*const *const *const Self) u8 {
- return ((??p).*.*).x;
+ return p.?.*.*.x;
}
};
const s = S{ .x = 42 };
@@ -177,56 +177,56 @@ test "string literal to &const []const u8" {
}
test "implicitly cast from T to error!?T" {
- castToMaybeTypeError(1);
- comptime castToMaybeTypeError(1);
+ castToOptionalTypeError(1);
+ comptime castToOptionalTypeError(1);
}
const A = struct {
a: i32,
};
-fn castToMaybeTypeError(z: i32) void {
+fn castToOptionalTypeError(z: i32) void {
const x = i32(1);
const y: error!?i32 = x;
- assert(??(try y) == 1);
+ assert((try y).? == 1);
const f = z;
const g: error!?i32 = f;
const a = A{ .a = z };
const b: error!?A = a;
- assert((??(b catch unreachable)).a == 1);
+ assert((b catch unreachable).?.a == 1);
}
test "implicitly cast from int to error!?T" {
- implicitIntLitToMaybe();
- comptime implicitIntLitToMaybe();
+ implicitIntLitToOptional();
+ comptime implicitIntLitToOptional();
}
-fn implicitIntLitToMaybe() void {
+fn implicitIntLitToOptional() void {
const f: ?i32 = 1;
const g: error!?i32 = 1;
}
test "return null from fn() error!?&T" {
- const a = returnNullFromMaybeTypeErrorRef();
- const b = returnNullLitFromMaybeTypeErrorRef();
+ const a = returnNullFromOptionalTypeErrorRef();
+ const b = returnNullLitFromOptionalTypeErrorRef();
assert((try a) == null and (try b) == null);
}
-fn returnNullFromMaybeTypeErrorRef() error!?*A {
+fn returnNullFromOptionalTypeErrorRef() error!?*A {
const a: ?*A = null;
return a;
}
-fn returnNullLitFromMaybeTypeErrorRef() error!?*A {
+fn returnNullLitFromOptionalTypeErrorRef() error!?*A {
return null;
}
test "peer type resolution: ?T and T" {
- assert(??peerTypeTAndMaybeT(true, false) == 0);
- assert(??peerTypeTAndMaybeT(false, false) == 3);
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
comptime {
- assert(??peerTypeTAndMaybeT(true, false) == 0);
- assert(??peerTypeTAndMaybeT(false, false) == 3);
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
}
}
-fn peerTypeTAndMaybeT(c: bool, b: bool) ?usize {
+fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
if (c) {
return if (b) null else usize(0);
}
@@ -251,11 +251,11 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
- assert(mem.eql(u8, ??castToMaybeSlice(), "hi"));
- comptime assert(mem.eql(u8, ??castToMaybeSlice(), "hi"));
+ assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
+ comptime assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
}
-fn castToMaybeSlice() ?[]const u8 {
+fn castToOptionalSlice() ?[]const u8 {
return "hi";
}
@@ -404,5 +404,5 @@ fn testCastPtrOfArrayToSliceAndPtr() void {
test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
const window_name = [1][*]const u8{c"window name"};
const x: [*]const ?[*]const u8 = &window_name;
- assert(mem.eql(u8, std.cstr.toSliceConst(??x[0]), "window name"));
+ assert(mem.eql(u8, std.cstr.toSliceConst(x[0].?), "window name"));
}
diff --git a/test/cases/error.zig b/test/cases/error.zig
index ced49419d5..693631fe2d 100644
--- a/test/cases/error.zig
+++ b/test/cases/error.zig
@@ -140,7 +140,7 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) void {
if (x) |v| assert(v == 1234) else |err| @compileError("bad");
}
-test "syntax: nullable operator in front of error union operator" {
+test "syntax: optional operator in front of error union operator" {
comptime {
assert(?error!i32 == ?(error!i32));
}
diff --git a/test/cases/eval.zig b/test/cases/eval.zig
index 9612466a86..08d3f3a841 100644
--- a/test/cases/eval.zig
+++ b/test/cases/eval.zig
@@ -12,7 +12,7 @@ fn fibonacci(x: i32) i32 {
}
fn unwrapAndAddOne(blah: ?i32) i32 {
- return ??blah + 1;
+ return blah.? + 1;
}
const should_be_1235 = unwrapAndAddOne(1234);
test "static add one" {
diff --git a/test/cases/generics.zig b/test/cases/generics.zig
index a76990e2a1..52aa013989 100644
--- a/test/cases/generics.zig
+++ b/test/cases/generics.zig
@@ -127,7 +127,7 @@ test "generic fn with implicit cast" {
}) == 0);
}
fn getByte(ptr: ?*const u8) u8 {
- return (??ptr).*;
+ return ptr.?.*;
}
fn getFirstByte(comptime T: type, mem: []const T) u8 {
return getByte(@ptrCast(*const u8, &mem[0]));
diff --git a/test/cases/misc.zig b/test/cases/misc.zig
index 369d8e5cf3..beb0d6d456 100644
--- a/test/cases/misc.zig
+++ b/test/cases/misc.zig
@@ -505,7 +505,7 @@ test "@typeId" {
assert(@typeId(@typeOf(1.0)) == Tid.ComptimeFloat);
assert(@typeId(@typeOf(undefined)) == Tid.Undefined);
assert(@typeId(@typeOf(null)) == Tid.Null);
- assert(@typeId(?i32) == Tid.Nullable);
+ assert(@typeId(?i32) == Tid.Optional);
assert(@typeId(error!i32) == Tid.ErrorUnion);
assert(@typeId(error) == Tid.ErrorSet);
assert(@typeId(AnEnum) == Tid.Enum);
diff --git a/test/cases/null.zig b/test/cases/null.zig
index bd78990ff4..62565784ac 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -1,6 +1,6 @@
const assert = @import("std").debug.assert;
-test "nullable type" {
+test "optional type" {
const x: ?bool = true;
if (x) |y| {
@@ -33,7 +33,7 @@ test "test maybe object and get a pointer to the inner value" {
b.* = false;
}
- assert(??maybe_bool == false);
+ assert(maybe_bool.? == false);
}
test "rhs maybe unwrap return" {
@@ -47,9 +47,9 @@ test "maybe return" {
}
fn maybeReturnImpl() void {
- assert(??foo(1235));
+ assert(foo(1235).?);
if (foo(null) != null) unreachable;
- assert(!??foo(1234));
+ assert(!foo(1234).?);
}
fn foo(x: ?i32) ?bool {
@@ -102,12 +102,12 @@ fn testTestNullRuntime(x: ?i32) void {
assert(!(x != null));
}
-test "nullable void" {
- nullableVoidImpl();
- comptime nullableVoidImpl();
+test "optional void" {
+ optionalVoidImpl();
+ comptime optionalVoidImpl();
}
-fn nullableVoidImpl() void {
+fn optionalVoidImpl() void {
assert(bar(null) == null);
assert(bar({}) != null);
}
@@ -120,19 +120,19 @@ fn bar(x: ?void) ?void {
}
}
-const StructWithNullable = struct {
+const StructWithOptional = struct {
field: ?i32,
};
-var struct_with_nullable: StructWithNullable = undefined;
+var struct_with_optional: StructWithOptional = undefined;
-test "unwrap nullable which is field of global var" {
- struct_with_nullable.field = null;
- if (struct_with_nullable.field) |payload| {
+test "unwrap optional which is field of global var" {
+ struct_with_optional.field = null;
+ if (struct_with_optional.field) |payload| {
unreachable;
}
- struct_with_nullable.field = 1234;
- if (struct_with_nullable.field) |payload| {
+ struct_with_optional.field = 1234;
+ if (struct_with_optional.field) |payload| {
assert(payload == 1234);
} else {
unreachable;
diff --git a/test/cases/reflection.zig b/test/cases/reflection.zig
index 48fcc9ef03..3d3af3c889 100644
--- a/test/cases/reflection.zig
+++ b/test/cases/reflection.zig
@@ -2,7 +2,7 @@ const assert = @import("std").debug.assert;
const mem = @import("std").mem;
const reflection = this;
-test "reflection: array, pointer, nullable, error union type child" {
+test "reflection: array, pointer, optional, error union type child" {
comptime {
assert(([10]u8).Child == u8);
assert((*u8).Child == u8);
diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig
index b452c8e9f6..1bc58b14e1 100644
--- a/test/cases/type_info.zig
+++ b/test/cases/type_info.zig
@@ -88,15 +88,15 @@ fn testArray() void {
assert(arr_info.Array.child == bool);
}
-test "type info: nullable type info" {
- testNullable();
- comptime testNullable();
+test "type info: optional type info" {
+ testOptional();
+ comptime testOptional();
}
-fn testNullable() void {
+fn testOptional() void {
const null_info = @typeInfo(?void);
- assert(TypeId(null_info) == TypeId.Nullable);
- assert(null_info.Nullable.child == void);
+ assert(TypeId(null_info) == TypeId.Optional);
+ assert(null_info.Optional.child == void);
}
test "type info: promise info" {
@@ -168,7 +168,7 @@ fn testUnion() void {
assert(typeinfo_info.Union.tag_type == TypeId);
assert(typeinfo_info.Union.fields.len == 25);
assert(typeinfo_info.Union.fields[4].enum_field != null);
- assert((??typeinfo_info.Union.fields[4].enum_field).value == 4);
+ assert(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
assert(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
assert(typeinfo_info.Union.defs.len == 20);
diff --git a/test/cases/while.zig b/test/cases/while.zig
index a95481668d..fe53522ea6 100644
--- a/test/cases/while.zig
+++ b/test/cases/while.zig
@@ -81,7 +81,7 @@ test "while with else" {
assert(got_else == 1);
}
-test "while with nullable as condition" {
+test "while with optional as condition" {
numbers_left = 10;
var sum: i32 = 0;
while (getNumberOrNull()) |value| {
@@ -90,7 +90,7 @@ test "while with nullable as condition" {
assert(sum == 45);
}
-test "while with nullable as condition with else" {
+test "while with optional as condition with else" {
numbers_left = 10;
var sum: i32 = 0;
var got_else: i32 = 0;
@@ -132,7 +132,7 @@ fn getNumberOrNull() ?i32 {
};
}
-test "while on nullable with else result follow else prong" {
+test "while on optional with else result follow else prong" {
const result = while (returnNull()) |value| {
break value;
} else
@@ -140,8 +140,8 @@ test "while on nullable with else result follow else prong" {
assert(result == 2);
}
-test "while on nullable with else result follow break prong" {
- const result = while (returnMaybe(10)) |value| {
+test "while on optional with else result follow break prong" {
+ const result = while (returnOptional(10)) |value| {
break value;
} else
i32(2);
@@ -210,7 +210,7 @@ fn testContinueOuter() void {
fn returnNull() ?i32 {
return null;
}
-fn returnMaybe(x: i32) ?i32 {
+fn returnOptional(x: i32) ?i32 {
return x;
}
fn returnError() error!i32 {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 102c4e428d..1c737a59e7 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1341,7 +1341,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ if (true) |x| { }
\\}
,
- ".tmp_source.zig:2:9: error: expected nullable type, found 'bool'",
+ ".tmp_source.zig:2:9: error: expected optional type, found 'bool'",
);
cases.add(
@@ -1780,7 +1780,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "assign null to non-nullable pointer",
+ "assign null to non-optional pointer",
\\const a: *u8 = null;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
@@ -2817,7 +2817,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "while expected bool, got nullable",
+ "while expected bool, got optional",
\\export fn foo() void {
\\ while (bar()) {}
\\}
@@ -2837,23 +2837,23 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "while expected nullable, got bool",
+ "while expected optional, got bool",
\\export fn foo() void {
\\ while (bar()) |x| {}
\\}
\\fn bar() bool { return true; }
,
- ".tmp_source.zig:2:15: error: expected nullable type, found 'bool'",
+ ".tmp_source.zig:2:15: error: expected optional type, found 'bool'",
);
cases.add(
- "while expected nullable, got error union",
+ "while expected optional, got error union",
\\export fn foo() void {
\\ while (bar()) |x| {}
\\}
\\fn bar() error!i32 { return 1; }
,
- ".tmp_source.zig:2:15: error: expected nullable type, found 'error!i32'",
+ ".tmp_source.zig:2:15: error: expected optional type, found 'error!i32'",
);
cases.add(
@@ -2867,7 +2867,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "while expected error union, got nullable",
+ "while expected error union, got optional",
\\export fn foo() void {
\\ while (bar()) |x| {} else |err| {}
\\}
diff --git a/test/tests.zig b/test/tests.zig
index cc562331fe..b66441f628 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -282,8 +282,8 @@ pub const CompareOutputContext = struct {
var stdout = Buffer.initNull(b.allocator);
var stderr = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr, max_stdout_size) catch unreachable;
@@ -601,8 +601,8 @@ pub const CompileErrorContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
@@ -872,8 +872,8 @@ pub const TranslateCContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
From 77678b2cbc7ac9ba2d5d4725241f6a9f7ac64fa4 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 10 Jun 2018 01:13:51 -0400
Subject: [PATCH 27/49] breaking syntax change: orelse keyword instead of ??
(#1096)
use the `zig-fmt-optional-default` branch to have zig fmt
automatically do the changes.
closes #1023
---
build.zig | 6 ++---
doc/docgen.zig | 6 ++---
doc/langref.html.in | 16 ++++++------
src-self-hosted/main.zig | 14 +++++------
src-self-hosted/module.zig | 8 +++---
src/all_types.hpp | 7 +++++-
src/analyze.cpp | 1 +
src/ast_render.cpp | 12 +++++++--
src/ir.cpp | 31 ++++++++++--------------
src/parser.cpp | 13 +++++-----
src/tokenizer.cpp | 27 +++++----------------
src/tokenizer.hpp | 2 +-
src/translate_c.cpp | 16 ++++++------
std/atomic/queue.zig | 4 +--
std/atomic/stack.zig | 4 +--
std/buf_map.zig | 6 ++---
std/buf_set.zig | 4 +--
std/build.zig | 24 +++++++++---------
std/debug/index.zig | 20 +++++++--------
std/heap.zig | 10 ++++----
std/linked_list.zig | 4 +--
std/os/index.zig | 14 +++++------
std/os/linux/vdso.zig | 8 +++---
std/os/path.zig | 12 ++++-----
std/os/windows/util.zig | 2 +-
std/special/build_runner.zig | 10 ++++----
std/unicode.zig | 2 +-
std/zig/parse.zig | 47 ++++++++++++++++++------------------
std/zig/render.zig | 8 +++---
test/cases/cast.zig | 6 ++---
test/cases/null.zig | 10 ++++----
test/compile_errors.zig | 2 +-
test/translate_c.zig | 20 +++++++--------
33 files changed, 187 insertions(+), 189 deletions(-)
diff --git a/build.zig b/build.zig
index eada37816c..fd154c7504 100644
--- a/build.zig
+++ b/build.zig
@@ -102,11 +102,11 @@ pub fn build(b: *Builder) !void {
b.default_step.dependOn(&exe.step);
- const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") ?? false;
+ const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false;
if (!skip_self_hosted) {
test_step.dependOn(&exe.step);
}
- const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") ?? false;
+ const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") orelse false;
exe.setVerboseLink(verbose_link_exe);
b.installArtifact(exe);
@@ -114,7 +114,7 @@ pub fn build(b: *Builder) !void {
installCHeaders(b, c_header_files);
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");
- const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") ?? false;
+ const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") orelse false;
test_step.dependOn(docs_step);
diff --git a/doc/docgen.zig b/doc/docgen.zig
index ed0e1be273..3283d146b0 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -25,13 +25,13 @@ pub fn main() !void {
if (!args_it.skip()) @panic("expected self arg");
- const zig_exe = try (args_it.next(allocator) ?? @panic("expected zig exe arg"));
+ const zig_exe = try (args_it.next(allocator) orelse @panic("expected zig exe arg"));
defer allocator.free(zig_exe);
- const in_file_name = try (args_it.next(allocator) ?? @panic("expected input arg"));
+ const in_file_name = try (args_it.next(allocator) orelse @panic("expected input arg"));
defer allocator.free(in_file_name);
- const out_file_name = try (args_it.next(allocator) ?? @panic("expected output arg"));
+ const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg"));
defer allocator.free(out_file_name);
var in_file = try os.File.openRead(allocator, in_file_name);
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 4c4a637095..0ada8a5196 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -985,7 +985,7 @@ a ^= b
|
- a ?? b
|
+ a orelse b
|
- {#link|Optionals#}
@@ -998,7 +998,7 @@ a ^= b |
const value: ?u32 = null;
-const unwrapped = value ?? 1234;
+const unwrapped = value orelse 1234;
unwrapped == 1234
|
@@ -1011,7 +1011,7 @@ unwrapped == 1234
Equivalent to:
- a ?? unreachable
+ a orelse unreachable
|
const value: ?u32 = 5678;
@@ -1278,7 +1278,7 @@ x{} x.* x.?
== != < > <= >=
and
or
-?? catch
+orelse catch
= *= /= %= += -= <<= >>= &= ^= |=
{#header_close#}
{#header_close#}
@@ -3062,7 +3062,7 @@ fn createFoo(param: i32) !Foo {
// but we want to return it if the function succeeds.
errdefer deallocateFoo(foo);
- const tmp_buf = allocateTmpBuffer() ?? return error.OutOfMemory;
+ const tmp_buf = allocateTmpBuffer() orelse return error.OutOfMemory;
// tmp_buf is truly a temporary resource, and we for sure want to clean it up
// before this block leaves scope
defer deallocateTmpBuffer(tmp_buf);
@@ -3219,13 +3219,13 @@ struct Foo *do_a_thing(void) {
extern fn malloc(size: size_t) ?*u8;
fn doAThing() ?*Foo {
- const ptr = malloc(1234) ?? return null;
+ const ptr = malloc(1234) orelse return null;
// ...
}
{#code_end#}
Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
- is *u8 not ?*u8. The ?? operator
+ is *u8 not ?*u8. The orelse keyword
unwrapped the optional type and therefore ptr is guaranteed to be non-null everywhere
it is used in the function.
@@ -5941,7 +5941,7 @@ AsmClobbers= ":" list(String, ",")
UnwrapExpression = BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
-UnwrapOptional = "??" Expression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 64734f077a..1c91ab9cbe 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -212,7 +212,7 @@ fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
defer allocator.free(build_runner_path);
- const build_file = flags.single("build-file") ?? "build.zig";
+ const build_file = flags.single("build-file") orelse "build.zig";
const build_file_abs = try os.path.resolve(allocator, ".", build_file);
defer allocator.free(build_file_abs);
@@ -516,7 +516,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const basename = os.path.basename(in_file.?);
var it = mem.split(basename, ".");
- const root_name = it.next() ?? {
+ const root_name = it.next() orelse {
try stderr.write("file name cannot be empty\n");
os.exit(1);
};
@@ -535,7 +535,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
const zig_root_source_file = in_file;
- const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") ?? "zig-cache"[0..]) catch {
+ const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") orelse "zig-cache"[0..]) catch {
os.exit(1);
};
defer allocator.free(full_cache_dir);
@@ -555,9 +555,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
);
defer module.destroy();
- module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") ?? "0", 10);
- module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") ?? "0", 10);
- module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") ?? "0", 10);
+ module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
+ module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
+ module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
module.is_test = false;
@@ -652,7 +652,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
try module.build();
- try module.link(flags.single("out-file") ?? null);
+ try module.link(flags.single("out-file") orelse null);
if (flags.present("print-timing-info")) {
// codegen_print_timing_info(g, stderr);
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index a7ddf3f9e9..575105f25f 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -130,13 +130,13 @@ pub const Module = struct {
var name_buffer = try Buffer.init(allocator, name);
errdefer name_buffer.deinit();
- const context = c.LLVMContextCreate() ?? return error.OutOfMemory;
+ const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
errdefer c.LLVMContextDispose(context);
- const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) ?? return error.OutOfMemory;
+ const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeModule(module);
- const builder = c.LLVMCreateBuilderInContext(context) ?? return error.OutOfMemory;
+ const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
const module_ptr = try allocator.create(Module);
@@ -223,7 +223,7 @@ pub const Module = struct {
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
- const root_src_path = self.root_src_path ?? @panic("TODO handle null root src path");
+ const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 2a5a0ad740..ab219e4e56 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -387,6 +387,7 @@ enum NodeType {
NodeTypeSliceExpr,
NodeTypeFieldAccessExpr,
NodeTypePtrDeref,
+ NodeTypeUnwrapOptional,
NodeTypeUse,
NodeTypeBoolLiteral,
NodeTypeNullLiteral,
@@ -575,6 +576,10 @@ struct AstNodeCatchExpr {
AstNode *op2;
};
+struct AstNodeUnwrapOptional {
+ AstNode *expr;
+};
+
enum CastOp {
CastOpNoCast, // signifies the function call expression is not a cast
CastOpNoop, // fn call expr is a cast, but does nothing
@@ -624,7 +629,6 @@ enum PrefixOp {
PrefixOpNegation,
PrefixOpNegationWrap,
PrefixOpOptional,
- PrefixOpUnwrapOptional,
PrefixOpAddrOf,
};
@@ -909,6 +913,7 @@ struct AstNode {
AstNodeTestDecl test_decl;
AstNodeBinOpExpr bin_op_expr;
AstNodeCatchExpr unwrap_err_expr;
+ AstNodeUnwrapOptional unwrap_optional;
AstNodePrefixOpExpr prefix_op_expr;
AstNodePointerType pointer_type;
AstNodeFnCallExpr fn_call_expr;
diff --git a/src/analyze.cpp b/src/analyze.cpp
index ed261148ea..0aa5ea5dcb 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -3308,6 +3308,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeAsmExpr:
case NodeTypeFieldAccessExpr:
case NodeTypePtrDeref:
+ case NodeTypeUnwrapOptional:
case NodeTypeStructField:
case NodeTypeContainerInitExpr:
case NodeTypeStructValueField:
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 2c8c03b226..2ace00885d 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) {
case BinOpTypeAssignBitXor: return "^=";
case BinOpTypeAssignBitOr: return "|=";
case BinOpTypeAssignMergeErrorSets: return "||=";
- case BinOpTypeUnwrapOptional: return "??";
+ case BinOpTypeUnwrapOptional: return "orelse";
case BinOpTypeArrayCat: return "++";
case BinOpTypeArrayMult: return "**";
case BinOpTypeErrorUnion: return "!";
@@ -67,7 +67,6 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpBoolNot: return "!";
case PrefixOpBinNot: return "~";
case PrefixOpOptional: return "?";
- case PrefixOpUnwrapOptional: return "??";
case PrefixOpAddrOf: return "&";
}
zig_unreachable();
@@ -222,6 +221,8 @@ static const char *node_type_str(NodeType node_type) {
return "FieldAccessExpr";
case NodeTypePtrDeref:
return "PtrDerefExpr";
+ case NodeTypeUnwrapOptional:
+ return "UnwrapOptional";
case NodeTypeContainerDecl:
return "ContainerDecl";
case NodeTypeStructField:
@@ -711,6 +712,13 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, ".*");
break;
}
+ case NodeTypeUnwrapOptional:
+ {
+ AstNode *lhs = node->data.unwrap_optional.expr;
+ render_node_ungrouped(ar, lhs);
+ fprintf(ar->f, ".?");
+ break;
+ }
case NodeTypeUndefinedLiteral:
fprintf(ar->f, "undefined");
break;
diff --git a/src/ir.cpp b/src/ir.cpp
index 02606fc4aa..96eb5f7434 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -4661,21 +4661,6 @@ static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode
return ir_build_load_ptr(irb, scope, source_node, payload_ptr);
}
-static IrInstruction *ir_gen_maybe_assert_ok(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
- assert(node->type == NodeTypePrefixOpExpr);
- AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
-
- IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
- if (maybe_ptr == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
- IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
- if (lval.is_ptr)
- return unwrapped_ptr;
-
- return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
-}
-
static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypePrefixOpExpr);
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
@@ -4705,8 +4690,6 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval);
case PrefixOpOptional:
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval);
- case PrefixOpUnwrapOptional:
- return ir_gen_maybe_assert_ok(irb, scope, node, lval);
case PrefixOpAddrOf: {
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval);
@@ -6541,7 +6524,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
}
case NodeTypePtrDeref: {
- assert(node->type == NodeTypePtrDeref);
AstNode *expr_node = node->data.ptr_deref_expr.target;
IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
if (value == irb->codegen->invalid_instruction)
@@ -6549,6 +6531,19 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_un_op(irb, scope, node, IrUnOpDereference, value);
}
+ case NodeTypeUnwrapOptional: {
+ AstNode *expr_node = node->data.unwrap_optional.expr;
+
+ IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
+ if (maybe_ptr == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
+
+ IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
+ if (lval.is_ptr)
+ return unwrapped_ptr;
+
+ return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
+ }
case NodeTypeThisLiteral:
return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval);
case NodeTypeBoolLiteral:
diff --git a/src/parser.cpp b/src/parser.cpp
index 2ee69f81ab..adb1633f5d 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1151,9 +1151,8 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index,
} else if (token->id == TokenIdQuestion) {
*token_index += 1;
- AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, first_token);
- node->data.prefix_op_expr.prefix_op = PrefixOpUnwrapOptional;
- node->data.prefix_op_expr.primary_expr = primary_expr;
+ AstNode *node = ast_create_node(pc, NodeTypeUnwrapOptional, first_token);
+ node->data.unwrap_optional.expr = primary_expr;
primary_expr = node;
} else {
@@ -1173,7 +1172,6 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdMinusPercent: return PrefixOpNegationWrap;
case TokenIdTilde: return PrefixOpBinNot;
case TokenIdQuestion: return PrefixOpOptional;
- case TokenIdDoubleQuestion: return PrefixOpUnwrapOptional;
case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
@@ -2312,7 +2310,7 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma
/*
UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
-UnwrapOptional : "??" BoolOrExpression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
*/
static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
@@ -2322,7 +2320,7 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo
Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdDoubleQuestion) {
+ if (token->id == TokenIdKeywordOrElse) {
*token_index += 1;
AstNode *rhs = ast_parse_expression(pc, token_index, true);
@@ -3035,6 +3033,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypePtrDeref:
visit_field(&node->data.ptr_deref_expr.target, visit, context);
break;
+ case NodeTypeUnwrapOptional:
+ visit_field(&node->data.unwrap_optional.expr, visit, context);
+ break;
case NodeTypeUse:
visit_field(&node->data.use.expr, visit, context);
break;
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index cfabdf11ad..2950b4eb49 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -134,6 +134,7 @@ static const struct ZigKeyword zig_keywords[] = {
{"noalias", TokenIdKeywordNoAlias},
{"null", TokenIdKeywordNull},
{"or", TokenIdKeywordOr},
+ {"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
{"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
@@ -215,7 +216,6 @@ enum TokenizeState {
TokenizeStateSawGreaterThanGreaterThan,
TokenizeStateSawDot,
TokenizeStateSawDotDot,
- TokenizeStateSawQuestionMark,
TokenizeStateSawAtSign,
TokenizeStateCharCode,
TokenizeStateError,
@@ -532,6 +532,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdComma);
end_token(&t);
break;
+ case '?':
+ begin_token(&t, TokenIdQuestion);
+ end_token(&t);
+ break;
case '{':
begin_token(&t, TokenIdLBrace);
end_token(&t);
@@ -624,28 +628,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdDot);
t.state = TokenizeStateSawDot;
break;
- case '?':
- begin_token(&t, TokenIdQuestion);
- t.state = TokenizeStateSawQuestionMark;
- break;
default:
invalid_char_error(&t, c);
}
break;
- case TokenizeStateSawQuestionMark:
- switch (c) {
- case '?':
- set_token_id(&t, t.cur_tok, TokenIdDoubleQuestion);
- end_token(&t);
- t.state = TokenizeStateStart;
- break;
- default:
- t.pos -= 1;
- end_token(&t);
- t.state = TokenizeStateStart;
- continue;
- }
- break;
case TokenizeStateSawDot:
switch (c) {
case '.':
@@ -1480,7 +1466,6 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawGreaterThan:
case TokenizeStateSawGreaterThanGreaterThan:
case TokenizeStateSawDot:
- case TokenizeStateSawQuestionMark:
case TokenizeStateSawAtSign:
case TokenizeStateSawStarPercent:
case TokenizeStateSawPlusPercent:
@@ -1545,7 +1530,6 @@ const char * token_name(TokenId id) {
case TokenIdDash: return "-";
case TokenIdDivEq: return "/=";
case TokenIdDot: return ".";
- case TokenIdDoubleQuestion: return "??";
case TokenIdEllipsis2: return "..";
case TokenIdEllipsis3: return "...";
case TokenIdEof: return "EOF";
@@ -1582,6 +1566,7 @@ const char * token_name(TokenId id) {
case TokenIdKeywordNoAlias: return "noalias";
case TokenIdKeywordNull: return "null";
case TokenIdKeywordOr: return "or";
+ case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index 7c617f85c6..75c7feb476 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -41,7 +41,6 @@ enum TokenId {
TokenIdDash,
TokenIdDivEq,
TokenIdDot,
- TokenIdDoubleQuestion,
TokenIdEllipsis2,
TokenIdEllipsis3,
TokenIdEof,
@@ -76,6 +75,7 @@ enum TokenId {
TokenIdKeywordNoAlias,
TokenIdKeywordNull,
TokenIdKeywordOr,
+ TokenIdKeywordOrElse,
TokenIdKeywordPacked,
TokenIdKeywordPromise,
TokenIdKeywordPub,
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index aaaf5a1edb..db46d31c5b 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -260,6 +260,12 @@ static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *ch
return node;
}
+static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypeUnwrapOptional);
+ node->data.unwrap_optional.expr = child_node;
+ return node;
+}
+
static AstNode *trans_create_node_bin_op(Context *c, AstNode *lhs_node, BinOpType op, AstNode *rhs_node) {
AstNode *node = trans_create_node(c, NodeTypeBinOpExpr);
node->data.bin_op_expr.op1 = lhs_node;
@@ -382,7 +388,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
fn_def->data.fn_def.fn_proto = fn_proto;
fn_proto->data.fn_proto.fn_def_node = fn_def;
- AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, ref_node);
+ AstNode *unwrap_node = trans_create_node_unwrap_null(c, ref_node);
AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr);
fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node;
@@ -409,10 +415,6 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
return fn_def;
}
-static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) {
- return trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, child);
-}
-
static AstNode *get_global(Context *c, Buf *name) {
{
auto entry = c->global_table.maybe_get(name);
@@ -1963,7 +1965,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType());
if (is_fn_ptr)
return value_node;
- AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, value_node);
+ AstNode *unwrapped = trans_create_node_unwrap_null(c, value_node);
return trans_create_node_ptr_deref(c, unwrapped);
}
case UO_Plus:
@@ -2587,7 +2589,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope *
}
}
if (callee_node == nullptr) {
- callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, callee_raw_node);
+ callee_node = trans_create_node_unwrap_null(c, callee_raw_node);
}
} else {
callee_node = callee_raw_node;
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 142c958173..4f856d9e01 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -33,8 +33,8 @@ pub fn Queue(comptime T: type) type {
pub fn get(self: *Self) ?*Node {
var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
while (true) {
- const node = head.next ?? return null;
- head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
+ const node = head.next orelse return null;
+ head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
}
}
};
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 15611188d2..77fa1a9100 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -28,14 +28,14 @@ pub fn Stack(comptime T: type) type {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
node.next = root;
- root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
+ root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse break;
}
}
pub fn pop(self: *Self) ?*Node {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
- root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
+ root = @cmpxchgWeak(?*Node, &self.root, root, (root orelse return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return root;
}
}
diff --git a/std/buf_map.zig b/std/buf_map.zig
index 0d4f3a6d5e..a82d1b731a 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -19,7 +19,7 @@ pub const BufMap = struct {
pub fn deinit(self: *const BufMap) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
self.free(entry.value);
}
@@ -37,12 +37,12 @@ pub const BufMap = struct {
}
pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 {
- const entry = self.hash_map.get(key) ?? return null;
+ const entry = self.hash_map.get(key) orelse return null;
return entry.value;
}
pub fn delete(self: *BufMap, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
self.free(entry.value);
}
diff --git a/std/buf_set.zig b/std/buf_set.zig
index 03a050ed8b..ab2d8e7c34 100644
--- a/std/buf_set.zig
+++ b/std/buf_set.zig
@@ -17,7 +17,7 @@ pub const BufSet = struct {
pub fn deinit(self: *const BufSet) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
}
@@ -33,7 +33,7 @@ pub const BufSet = struct {
}
pub fn delete(self: *BufSet, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
}
diff --git a/std/build.zig b/std/build.zig
index fed02e0815..5733aec17d 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -136,7 +136,7 @@ pub const Builder = struct {
}
pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void {
- self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default
+ self.prefix = maybe_prefix orelse "/usr/local"; // TODO better default
self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable;
self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable;
}
@@ -312,9 +312,9 @@ pub const Builder = struct {
if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
var it = mem.split(nix_cflags_compile, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-isystem")) {
- const include_path = it.next() ?? {
+ const include_path = it.next() orelse {
warn("Expected argument after -isystem in NIX_CFLAGS_COMPILE\n");
break;
};
@@ -330,9 +330,9 @@ pub const Builder = struct {
if (os.getEnvVarOwned(self.allocator, "NIX_LDFLAGS")) |nix_ldflags| {
var it = mem.split(nix_ldflags, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-rpath")) {
- const rpath = it.next() ?? {
+ const rpath = it.next() orelse {
warn("Expected argument after -rpath in NIX_LDFLAGS\n");
break;
};
@@ -362,7 +362,7 @@ pub const Builder = struct {
}
self.available_options_list.append(available_option) catch unreachable;
- const entry = self.user_input_options.get(name) ?? return null;
+ const entry = self.user_input_options.get(name) orelse return null;
entry.value.used = true;
switch (type_id) {
TypeId.Bool => switch (entry.value.value) {
@@ -416,9 +416,9 @@ pub const Builder = struct {
pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
- const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false;
- const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") ?? false;
- const release_small = self.option(bool, "release-small", "size optimizations on and safety off") ?? false;
+ const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") orelse false;
+ const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") orelse false;
+ const release_small = self.option(bool, "release-small", "size optimizations on and safety off") orelse false;
const mode = if (release_safe and !release_fast and !release_small) builtin.Mode.ReleaseSafe else if (release_fast and !release_safe and !release_small) builtin.Mode.ReleaseFast else if (release_small and !release_fast and !release_safe) builtin.Mode.ReleaseSmall else if (!release_fast and !release_safe and !release_small) builtin.Mode.Debug else x: {
warn("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)");
@@ -518,7 +518,7 @@ pub const Builder = struct {
// make sure all args are used
var it = self.user_input_options.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
if (!entry.value.used) {
warn("Invalid option: -D{}\n\n", entry.key);
self.markInvalidUserInput();
@@ -1246,7 +1246,7 @@ pub const LibExeObjStep = struct {
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
zig_args.append("--library") catch unreachable;
zig_args.append(entry.key) catch unreachable;
}
@@ -1696,7 +1696,7 @@ pub const TestStep = struct {
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
try zig_args.append("--library");
try zig_args.append(entry.key);
}
diff --git a/std/debug/index.zig b/std/debug/index.zig
index be47ab76bc..25f7a58b25 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -208,7 +208,7 @@ fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: us
.name = "???",
.address = address,
};
- const symbol = debug_info.symbol_table.search(address) ?? &unknown;
+ const symbol = debug_info.symbol_table.search(address) orelse &unknown;
try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++ DIM ++ ptr_hex ++ " in ??? (???)" ++ RESET ++ "\n", symbol.name, address);
},
else => {
@@ -268,10 +268,10 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
try st.elf.openFile(allocator, &st.self_exe_file);
errdefer st.elf.close();
- st.debug_info = (try st.elf.findSection(".debug_info")) ?? return error.MissingDebugInfo;
- st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) ?? return error.MissingDebugInfo;
- st.debug_str = (try st.elf.findSection(".debug_str")) ?? return error.MissingDebugInfo;
- st.debug_line = (try st.elf.findSection(".debug_line")) ?? return error.MissingDebugInfo;
+ st.debug_info = (try st.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo;
+ st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo;
+ st.debug_str = (try st.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo;
+ st.debug_line = (try st.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo;
st.debug_ranges = (try st.elf.findSection(".debug_ranges"));
try scanAllCompileUnits(st);
return st;
@@ -443,7 +443,7 @@ const Die = struct {
}
fn getAttrAddr(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Address => |value| value,
else => error.InvalidDebugInfo,
@@ -451,7 +451,7 @@ const Die = struct {
}
fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
FormValue.SecOffset => |value| value,
@@ -460,7 +460,7 @@ const Die = struct {
}
fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
else => error.InvalidDebugInfo,
@@ -468,7 +468,7 @@ const Die = struct {
}
fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.String => |value| value,
FormValue.StrPtr => |offset| getString(st, offset),
@@ -748,7 +748,7 @@ fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
const abbrev_code = try readULeb128(in_stream);
- const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) ?? return error.InvalidDebugInfo;
+ const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo;
var result = Die{
.tag_id = table_entry.tag_id,
diff --git a/std/heap.zig b/std/heap.zig
index d1fbf9ca0a..172bc24118 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -97,12 +97,12 @@ pub const DirectAllocator = struct {
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
- const heap_handle = self.heap_handle ?? blk: {
- const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) ?? return error.OutOfMemory;
+ const heap_handle = self.heap_handle orelse blk: {
+ const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) orelse return error.OutOfMemory;
self.heap_handle = hh;
break :blk hh;
};
- const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) ?? return error.OutOfMemory;
+ const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
const rem = @rem(root_addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
@@ -142,7 +142,7 @@ pub const DirectAllocator = struct {
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
const old_ptr = @intToPtr(*c_void, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
- const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) ?? blk: {
+ const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
@intToPtr(*align(1) usize, new_record_addr).* = root_addr;
@@ -343,7 +343,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
- end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index..new_end_index];
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
}
}
diff --git a/std/linked_list.zig b/std/linked_list.zig
index 536c6d24d0..9e32b7d9da 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -169,7 +169,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the last node in the list.
pub fn pop(list: *Self) ?*Node {
- const last = list.last ?? return null;
+ const last = list.last orelse return null;
list.remove(last);
return last;
}
@@ -179,7 +179,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the first node in the list.
pub fn popFirst(list: *Self) ?*Node {
- const first = list.first ?? return null;
+ const first = list.first orelse return null;
list.remove(first);
return first;
}
diff --git a/std/os/index.zig b/std/os/index.zig
index 807b2c398b..6a13ff94d4 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -425,7 +425,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator:
return posixExecveErrnoToErr(posix.getErrno(posix.execve(argv_buf[0].?, argv_buf.ptr, envp_buf.ptr)));
}
- const PATH = getEnvPosix("PATH") ?? "/usr/local/bin:/bin/:/usr/bin";
+ const PATH = getEnvPosix("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
// PATH.len because it is >= the largest search_path
// +1 for the / to join the search path and exe_path
// +1 for the null terminating byte
@@ -490,7 +490,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
errdefer result.deinit();
if (is_windows) {
- const ptr = windows.GetEnvironmentStringsA() ?? return error.OutOfMemory;
+ const ptr = windows.GetEnvironmentStringsA() orelse return error.OutOfMemory;
defer assert(windows.FreeEnvironmentStringsA(ptr) != 0);
var i: usize = 0;
@@ -573,7 +573,7 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 {
return allocator.shrink(u8, buf, result);
}
} else {
- const result = getEnvPosix(key) ?? return error.EnvironmentVariableNotFound;
+ const result = getEnvPosix(key) orelse return error.EnvironmentVariableNotFound;
return mem.dupe(allocator, u8, result);
}
}
@@ -1641,7 +1641,7 @@ pub const ArgIterator = struct {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
- return mem.dupe(allocator, u8, self.inner.next() ?? return null);
+ return mem.dupe(allocator, u8, self.inner.next() orelse return null);
}
}
@@ -2457,9 +2457,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
}
};
- const heap_handle = windows.GetProcessHeap() ?? return SpawnThreadError.OutOfMemory;
+ const heap_handle = windows.GetProcessHeap() orelse return SpawnThreadError.OutOfMemory;
const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext);
- const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory;
+ const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
@@ -2468,7 +2468,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
outer_context.thread.data.alloc_start = bytes_ptr;
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
- outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? {
+ outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse {
const err = windows.GetLastError();
return switch (err) {
else => os.unexpectedErrorWindows(err),
diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig
index 1414b8185b..cbd0cd1df5 100644
--- a/std/os/linux/vdso.zig
+++ b/std/os/linux/vdso.zig
@@ -28,7 +28,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
}
}
}
- const dynv = maybe_dynv ?? return 0;
+ const dynv = maybe_dynv orelse return 0;
if (base == @maxValue(usize)) return 0;
var maybe_strings: ?[*]u8 = null;
@@ -52,9 +52,9 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
}
}
- const strings = maybe_strings ?? return 0;
- const syms = maybe_syms ?? return 0;
- const hashtab = maybe_hashtab ?? return 0;
+ const strings = maybe_strings orelse return 0;
+ const syms = maybe_syms orelse return 0;
+ const hashtab = maybe_hashtab orelse return 0;
if (maybe_verdef == null) maybe_versym = null;
const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON);
diff --git a/std/os/path.zig b/std/os/path.zig
index 430dda2934..a3ad23b1a9 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -182,8 +182,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
@@ -200,8 +200,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
@@ -923,7 +923,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
var from_it = mem.split(resolved_from, "/\\");
var to_it = mem.split(resolved_to, "/\\");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
@@ -974,7 +974,7 @@ pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![
var from_it = mem.split(resolved_from, "/");
var to_it = mem.split(resolved_to, "/");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
if (mem.eql(u8, from_component, to_component))
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 7170346108..f93a673be0 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -153,7 +153,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE {
const padded_buff = try cstr.addNullByte(allocator, dll_path);
defer allocator.free(padded_buff);
- return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound;
+ return windows.LoadLibraryA(padded_buff.ptr) orelse error.DllNotFound;
}
pub fn windowsUnloadDll(hModule: windows.HMODULE) void {
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index 3471d6ed21..e4f04df6d0 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -27,15 +27,15 @@ pub fn main() !void {
// skip my own exe name
_ = arg_it.skip();
- const zig_exe = try unwrapArg(arg_it.next(allocator) ?? {
+ const zig_exe = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected first argument to be path to zig compiler\n");
return error.InvalidArgs;
});
- const build_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const build_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected second argument to be build root directory path\n");
return error.InvalidArgs;
});
- const cache_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const cache_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected third argument to be cache root directory path\n");
return error.InvalidArgs;
});
@@ -84,12 +84,12 @@ pub fn main() !void {
} else if (mem.eql(u8, arg, "--help")) {
return usage(&builder, false, try stdout_stream);
} else if (mem.eql(u8, arg, "--prefix")) {
- prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
} else if (mem.eql(u8, arg, "--search-prefix")) {
- const search_prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ const search_prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --search-prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
diff --git a/std/unicode.zig b/std/unicode.zig
index 21ae12f59c..ec808ca4fe 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -220,7 +220,7 @@ const Utf8Iterator = struct {
}
pub fn nextCodepoint(it: *Utf8Iterator) ?u32 {
- const slice = it.nextCodepointSlice() ?? return null;
+ const slice = it.nextCodepointSlice() orelse return null;
switch (slice.len) {
1 => return u32(slice[0]),
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 9f8ef3c3d6..5752f69409 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -43,7 +43,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// skip over line comments at the top of the file
while (true) {
- const next_tok = tok_it.peek() ?? break;
+ const next_tok = tok_it.peek() orelse break;
if (next_tok.id != Token.Id.LineComment) break;
_ = tok_it.next();
}
@@ -197,7 +197,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lib_name_token = nextToken(&tok_it, &tree);
const lib_name_token_index = lib_name_token.index;
const lib_name_token_ptr = lib_name_token.ptr;
- break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) ?? {
+ break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) orelse {
prevToken(&tok_it, &tree);
break :blk null;
};
@@ -1434,13 +1434,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
try stack.append(State{
.ExpectTokenSave = ExpectTokenSave{
.id = Token.Id.AngleBracketRight,
- .ptr = &async_node.rangle_bracket.? },
+ .ptr = &async_node.rangle_bracket.?,
+ },
});
try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &async_node.allocator_type } });
continue;
},
State.AsyncEnd => |ctx| {
- const node = ctx.ctx.get() ?? continue;
+ const node = ctx.ctx.get() orelse continue;
switch (node.id) {
ast.Node.Id.FnProto => {
@@ -1813,7 +1814,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
State.RangeExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1835,7 +1836,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.AssignmentExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1865,7 +1866,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.UnwrapExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1900,7 +1901,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BoolOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1924,7 +1925,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BoolAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1948,7 +1949,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.ComparisonExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1978,7 +1979,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2002,7 +2003,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryXorExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2026,7 +2027,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2050,7 +2051,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BitShiftExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2080,7 +2081,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.AdditionExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2110,7 +2111,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.MultiplyExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2141,7 +2142,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.CurlySuffixExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (tok_it.peek().?.id == Token.Id.Period) {
const node = try arena.construct(ast.Node.SuffixOp{
@@ -2189,7 +2190,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.TypeExprEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2269,7 +2270,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.SuffixOpExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2418,7 +2419,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
Token.Id.StringLiteral, Token.Id.MultilineStringLiteralLine => {
- opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) ?? unreachable);
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) orelse unreachable);
continue;
},
Token.Id.LParen => {
@@ -2648,7 +2649,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
const token_ptr = token.ptr;
- opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) ?? {
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) orelse {
prevToken(&tok_it, &tree);
if (opt_ctx != OptionalCtx.Optional) {
((try tree.errors.addOne())).* = Error{ .ExpectedPrimaryExpr = Error.ExpectedPrimaryExpr{ .token = token_index } };
@@ -3348,7 +3349,7 @@ fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedTok
assert(result.ptr.id != Token.Id.LineComment);
while (true) {
- const next_tok = tok_it.peek() ?? return result;
+ const next_tok = tok_it.peek() orelse return result;
if (next_tok.id != Token.Id.LineComment) return result;
_ = tok_it.next();
}
@@ -3356,7 +3357,7 @@ fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedTok
fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void {
while (true) {
- const prev_tok = tok_it.prev() ?? return;
+ const prev_tok = tok_it.prev() orelse return;
if (prev_tok.id == Token.Id.LineComment) continue;
return;
}
diff --git a/std/zig/render.zig b/std/zig/render.zig
index 0b8e4d1453..bc45768fa3 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -83,7 +83,7 @@ fn renderRoot(
var start_col: usize = 0;
var it = tree.root_node.decls.iterator(0);
while (true) {
- var decl = (it.next() ?? return).*;
+ var decl = (it.next() orelse return).*;
// look for zig fmt: off comment
var start_token_index = decl.firstToken();
zig_fmt_loop: while (start_token_index != 0) {
@@ -112,7 +112,7 @@ fn renderRoot(
const start = tree.tokens.at(start_token_index + 1).start;
try stream.print("{}\n", tree.source[start..end_token.end]);
while (tree.tokens.at(decl.firstToken()).start < end_token.end) {
- decl = (it.next() ?? return).*;
+ decl = (it.next() orelse return).*;
}
break :zig_fmt_loop;
}
@@ -1993,7 +1993,7 @@ fn renderDocComments(
indent: usize,
start_col: *usize,
) (@typeOf(stream).Child.Error || Error)!void {
- const comment = node.doc_comments ?? return;
+ const comment = node.doc_comments orelse return;
var it = comment.lines.iterator(0);
const first_token = node.firstToken();
while (it.next()) |line_token_index| {
@@ -2021,7 +2021,7 @@ fn nodeIsBlock(base: *const ast.Node) bool {
}
fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
- const infix_op = base.cast(ast.Node.InfixOp) ?? return false;
+ const infix_op = base.cast(ast.Node.InfixOp) orelse return false;
return switch (infix_op.op) {
ast.Node.InfixOp.Op.Period => false,
else => true,
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index a56c470408..ade1cf78aa 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -73,7 +73,7 @@ fn Struct(comptime T: type) type {
fn maybePointer(self: ?*const Self) Self {
const none = Self{ .x = if (T == void) void{} else 0 };
- return (self ?? &none).*;
+ return (self orelse &none).*;
}
};
}
@@ -87,7 +87,7 @@ const Union = union {
fn maybePointer(self: ?*const Union) Union {
const none = Union{ .x = 0 };
- return (self ?? &none).*;
+ return (self orelse &none).*;
}
};
@@ -100,7 +100,7 @@ const Enum = enum {
}
fn maybePointer(self: ?*const Enum) Enum {
- return (self ?? &Enum.None).*;
+ return (self orelse &Enum.None).*;
}
};
diff --git a/test/cases/null.zig b/test/cases/null.zig
index 62565784ac..cdcfd23efb 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -15,13 +15,13 @@ test "optional type" {
const next_x: ?i32 = null;
- const z = next_x ?? 1234;
+ const z = next_x orelse 1234;
assert(z == 1234);
const final_x: ?i32 = 13;
- const num = final_x ?? unreachable;
+ const num = final_x orelse unreachable;
assert(num == 13);
}
@@ -38,7 +38,7 @@ test "test maybe object and get a pointer to the inner value" {
test "rhs maybe unwrap return" {
const x: ?bool = true;
- const y = x ?? return;
+ const y = x orelse return;
}
test "maybe return" {
@@ -53,7 +53,7 @@ fn maybeReturnImpl() void {
}
fn foo(x: ?i32) ?bool {
- const value = x ?? return null;
+ const value = x orelse return null;
return value > 1234;
}
@@ -140,6 +140,6 @@ test "unwrap optional which is field of global var" {
}
test "null with default unwrap" {
- const x: i32 = null ?? 1;
+ const x: i32 = null orelse 1;
assert(x == 1);
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 1c737a59e7..5ec2759032 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -2296,7 +2296,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\ defer try canFail();
\\
- \\ const a = maybeInt() ?? return;
+ \\ const a = maybeInt() orelse return;
\\}
\\
\\fn canFail() error!void { }
diff --git a/test/translate_c.zig b/test/translate_c.zig
index 3489f9da21..417171d2c2 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -246,13 +246,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub extern var fn_ptr: ?extern fn() void;
,
\\pub inline fn foo() void {
- \\ return (??fn_ptr)();
+ \\ return fn_ptr.?();
\\}
,
\\pub extern var fn_ptr2: ?extern fn(c_int, f32) u8;
,
\\pub inline fn bar(arg0: c_int, arg1: f32) u8 {
- \\ return (??fn_ptr2)(arg0, arg1);
+ \\ return fn_ptr2.?(arg0, arg1);
\\}
);
@@ -608,7 +608,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ field: c_int,
\\};
\\pub export fn read_field(foo: ?[*]struct_Foo) c_int {
- \\ return (??foo).field;
+ \\ return foo.?.field;
\\}
);
@@ -969,11 +969,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub export fn bar() void {
\\ var f: ?extern fn() void = foo;
\\ var b: ?extern fn() c_int = baz;
- \\ (??f)();
- \\ (??f)();
+ \\ f.?();
+ \\ f.?();
\\ foo();
- \\ _ = (??b)();
- \\ _ = (??b)();
+ \\ _ = b.?();
+ \\ _ = b.?();
\\ _ = baz();
\\}
);
@@ -984,7 +984,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
,
\\pub export fn foo(x: ?[*]c_int) void {
- \\ (??x).* = 1;
+ \\ x.?.* = 1;
\\}
);
@@ -1012,7 +1012,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub fn foo() c_int {
\\ var x: c_int = 1234;
\\ var ptr: ?[*]c_int = &x;
- \\ return (??ptr).*;
+ \\ return ptr.?.*;
\\}
);
@@ -1119,7 +1119,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub const glClearPFN = PFNGLCLEARPROC;
,
\\pub inline fn glClearUnion(arg0: GLbitfield) void {
- \\ return (??glProcs.gl.Clear)(arg0);
+ \\ return glProcs.gl.Clear.?(arg0);
\\}
,
\\pub const OpenGLProcs = union_OpenGLProcs;
From 0a95b0f1ffeb8ae5ee317b02626890adebe5ec63 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sun, 10 Jun 2018 01:18:31 -0400
Subject: [PATCH 28/49] std.zig: update syntax for orelse keyword
---
src/ir.cpp | 16 ----------------
std/zig/parse.zig | 2 +-
std/zig/parser_test.zig | 2 +-
std/zig/tokenizer.zig | 27 +++++++--------------------
4 files changed, 9 insertions(+), 38 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 96eb5f7434..38f4dc90e7 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -14842,22 +14842,6 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
TypeTableEntry *type_entry = ptr_type->data.pointer.child_type;
if (type_is_invalid(type_entry)) {
return ira->codegen->builtin_types.entry_invalid;
- } else if (type_entry->id == TypeTableEntryIdMetaType) {
- // surprise! actually this is just ??T not an unwrap maybe instruction
- ConstExprValue *ptr_val = const_ptr_pointee(ira->codegen, &value->value);
- assert(ptr_val->type->id == TypeTableEntryIdMetaType);
- TypeTableEntry *child_type = ptr_val->data.x_type;
-
- type_ensure_zero_bits_known(ira->codegen, child_type);
- TypeTableEntry *layer1 = get_maybe_type(ira->codegen, child_type);
- TypeTableEntry *layer2 = get_maybe_type(ira->codegen, layer1);
-
- IrInstruction *const_instr = ir_build_const_type(&ira->new_irb, unwrap_maybe_instruction->base.scope,
- unwrap_maybe_instruction->base.source_node, layer2);
- IrInstruction *result_instr = ir_get_ref(ira, &unwrap_maybe_instruction->base, const_instr,
- ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile);
- ir_link_new_instruction(result_instr, &unwrap_maybe_instruction->base);
- return result_instr->value.type;
} else if (type_entry->id != TypeTableEntryIdOptional) {
ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node,
buf_sprintf("expected optional type, found '%s'", buf_ptr(&type_entry->name)));
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 5752f69409..877b81c527 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -3248,7 +3248,7 @@ fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op {
fn tokenIdToUnwrapExpr(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
return switch (id) {
Token.Id.Keyword_catch => ast.Node.InfixOp.Op{ .Catch = null },
- Token.Id.QuestionMarkQuestionMark => ast.Node.InfixOp.Op{ .UnwrapOptional = void{} },
+ Token.Id.Keyword_orelse => ast.Node.InfixOp.Op{ .UnwrapOptional = void{} },
else => null,
};
}
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index ea3a4858b0..09ea8aa1a1 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -1151,7 +1151,7 @@ test "zig fmt: infix operators" {
\\ _ = i!i;
\\ _ = i ** i;
\\ _ = i ++ i;
- \\ _ = i ?? i;
+ \\ _ = i orelse i;
\\ _ = i % i;
\\ _ = i / i;
\\ _ = i *% i;
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index b288a3adb7..4534529f36 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -39,6 +39,7 @@ pub const Token = struct {
Keyword{ .bytes = "noalias", .id = Id.Keyword_noalias },
Keyword{ .bytes = "null", .id = Id.Keyword_null },
Keyword{ .bytes = "or", .id = Id.Keyword_or },
+ Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse },
Keyword{ .bytes = "packed", .id = Id.Keyword_packed },
Keyword{ .bytes = "promise", .id = Id.Keyword_promise },
Keyword{ .bytes = "pub", .id = Id.Keyword_pub },
@@ -129,7 +130,6 @@ pub const Token = struct {
Ampersand,
AmpersandEqual,
QuestionMark,
- QuestionMarkQuestionMark,
AngleBracketLeft,
AngleBracketLeftEqual,
AngleBracketAngleBracketLeft,
@@ -171,6 +171,7 @@ pub const Token = struct {
Keyword_noalias,
Keyword_null,
Keyword_or,
+ Keyword_orelse,
Keyword_packed,
Keyword_promise,
Keyword_pub,
@@ -254,7 +255,6 @@ pub const Tokenizer = struct {
Ampersand,
Caret,
Percent,
- QuestionMark,
Plus,
PlusPercent,
AngleBracketLeft,
@@ -345,6 +345,11 @@ pub const Tokenizer = struct {
self.index += 1;
break;
},
+ '?' => {
+ result.id = Token.Id.QuestionMark;
+ self.index += 1;
+ break;
+ },
':' => {
result.id = Token.Id.Colon;
self.index += 1;
@@ -359,9 +364,6 @@ pub const Tokenizer = struct {
'+' => {
state = State.Plus;
},
- '?' => {
- state = State.QuestionMark;
- },
'<' => {
state = State.AngleBracketLeft;
},
@@ -496,18 +498,6 @@ pub const Tokenizer = struct {
},
},
- State.QuestionMark => switch (c) {
- '?' => {
- result.id = Token.Id.QuestionMarkQuestionMark;
- self.index += 1;
- break;
- },
- else => {
- result.id = Token.Id.QuestionMark;
- break;
- },
- },
-
State.Percent => switch (c) {
'=' => {
result.id = Token.Id.PercentEqual;
@@ -1084,9 +1074,6 @@ pub const Tokenizer = struct {
State.Plus => {
result.id = Token.Id.Plus;
},
- State.QuestionMark => {
- result.id = Token.Id.QuestionMark;
- },
State.Percent => {
result.id = Token.Id.Percent;
},
From dc8bda7e0203410132e0689b7561d9e8731176e9 Mon Sep 17 00:00:00 2001
From: Marc Tiehuis
Date: Sat, 9 Jun 2018 10:24:20 +1200
Subject: [PATCH 29/49] Add arbitrary-precision integer to std
A few notes on the implementation:
- Any unsigned power of two integer type less than 64 bits in size is supported
as a Limb type.
- The algorithms used are kept simple for the moment. More complicated
algorithms are generally only more useful as integer sizes increase a
lot and I don't expect our current usage to be used for this purpose
just yet.
- All branches (practically) have been covered by tests.
See https://github.com/tiehuis/zig-bn/tree/986a2b3243d0454b8430a6adf4ad48611850c1b8/bench
for rough performance comparison numbers.
Closes #364.
---
CMakeLists.txt | 1 +
std/math/big/index.zig | 5 +
std/math/big/int.zig | 2023 ++++++++++++++++++++++++++++++++++++++++
std/math/index.zig | 4 +
4 files changed, 2033 insertions(+)
create mode 100644 std/math/big/index.zig
create mode 100644 std/math/big/int.zig
diff --git a/CMakeLists.txt b/CMakeLists.txt
index bda576347e..64abb67a8f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -464,6 +464,7 @@ set(ZIG_STD_FILES
"math/atan.zig"
"math/atan2.zig"
"math/atanh.zig"
+ "math/big/int.zig"
"math/cbrt.zig"
"math/ceil.zig"
"math/complex/abs.zig"
diff --git a/std/math/big/index.zig b/std/math/big/index.zig
new file mode 100644
index 0000000000..26fa538c4f
--- /dev/null
+++ b/std/math/big/index.zig
@@ -0,0 +1,5 @@
+pub use @import("int.zig");
+
+test "math.big" {
+ _ = @import("int.zig");
+}
diff --git a/std/math/big/int.zig b/std/math/big/int.zig
new file mode 100644
index 0000000000..19af10e695
--- /dev/null
+++ b/std/math/big/int.zig
@@ -0,0 +1,2023 @@
+const std = @import("../../index.zig");
+const builtin = @import("builtin");
+const debug = std.debug;
+const math = std.math;
+const mem = std.mem;
+const Allocator = mem.Allocator;
+const ArrayList = std.ArrayList;
+
+const TypeId = builtin.TypeId;
+
+pub const Limb = usize;
+pub const DoubleLimb = @IntType(false, 2 * Limb.bit_count);
+pub const Log2Limb = math.Log2Int(Limb);
+
+comptime {
+ debug.assert(math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count);
+ debug.assert(Limb.bit_count <= 64); // u128 set is unsupported
+ debug.assert(Limb.is_signed == false);
+}
+
+const wrapped_buffer_size = 512;
+
+// Converts primitive integer values onto a stack-based big integer, or passes through existing
+// Int types with no modifications. This can fail at runtime if using a very large dynamic
+// integer but it is very unlikely and is considered a user error.
+fn wrapInt(allocator: *Allocator, bn: var) *const Int {
+ const T = @typeOf(bn);
+ switch (@typeInfo(T)) {
+ TypeId.Pointer => |info| {
+ if (info.child == Int) {
+ return bn;
+ } else {
+ @compileError("cannot set Int using type " ++ @typeName(T));
+ }
+ },
+ else => {
+ var s = allocator.create(Int) catch unreachable;
+ s.* = Int{
+ .allocator = allocator,
+ .positive = false,
+ .limbs = block: {
+ var limbs = allocator.alloc(Limb, Int.default_capacity) catch unreachable;
+ limbs[0] = 0;
+ break :block limbs;
+ },
+ .len = 1,
+ };
+ s.set(bn) catch unreachable;
+ return s;
+ },
+ }
+}
+
+pub const Int = struct {
+ allocator: *Allocator,
+ positive: bool,
+ // - little-endian ordered
+ // - len >= 1 always
+ // - zero value -> len == 1 with limbs[0] == 0
+ limbs: []Limb,
+ len: usize,
+
+ const default_capacity = 4;
+
+ pub fn init(allocator: *Allocator) !Int {
+ return try Int.initCapacity(allocator, default_capacity);
+ }
+
+ pub fn initSet(allocator: *Allocator, value: var) !Int {
+ var s = try Int.init(allocator);
+ try s.set(value);
+ return s;
+ }
+
+ pub fn initCapacity(allocator: *Allocator, capacity: usize) !Int {
+ return Int{
+ .allocator = allocator,
+ .positive = true,
+ .limbs = block: {
+ var limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity));
+ limbs[0] = 0;
+ break :block limbs;
+ },
+ .len = 1,
+ };
+ }
+
+ pub fn ensureCapacity(self: *Int, capacity: usize) !void {
+ if (capacity <= self.limbs.len) {
+ return;
+ }
+
+ self.limbs = try self.allocator.realloc(Limb, self.limbs, capacity);
+ }
+
+ pub fn deinit(self: *const Int) void {
+ self.allocator.free(self.limbs);
+ }
+
+ pub fn clone(other: *const Int) !Int {
+ return Int{
+ .allocator = other.allocator,
+ .positive = other.positive,
+ .limbs = block: {
+ var limbs = try other.allocator.alloc(Limb, other.len);
+ mem.copy(Limb, limbs[0..], other.limbs[0..other.len]);
+ break :block limbs;
+ },
+ .len = other.len,
+ };
+ }
+
+ pub fn copy(self: *Int, other: *const Int) !void {
+ if (self == other) {
+ return;
+ }
+
+ self.positive = other.positive;
+ try self.ensureCapacity(other.len);
+ mem.copy(Limb, self.limbs[0..], other.limbs[0..other.len]);
+ self.len = other.len;
+ }
+
+ pub fn swap(self: *Int, other: *Int) void {
+ mem.swap(Int, self, other);
+ }
+
+ pub fn dump(self: *const Int) void {
+ for (self.limbs) |limb| {
+ debug.warn("{x} ", limb);
+ }
+ debug.warn("\n");
+ }
+
+ pub fn negate(r: *Int) void {
+ r.positive = !r.positive;
+ }
+
+ pub fn abs(r: *Int) void {
+ r.positive = true;
+ }
+
+ pub fn isOdd(r: *const Int) bool {
+ return r.limbs[0] & 1 != 0;
+ }
+
+ pub fn isEven(r: *const Int) bool {
+ return !r.isOdd();
+ }
+
+ fn bitcount(self: *const Int) usize {
+ const u_bit_count = (self.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(self.limbs[self.len - 1]));
+ return usize(!self.positive) + u_bit_count;
+ }
+
+ pub fn sizeInBase(self: *const Int, base: usize) usize {
+ return (self.bitcount() / math.log2(base)) + 1;
+ }
+
+ pub fn set(self: *Int, value: var) Allocator.Error!void {
+ const T = @typeOf(value);
+
+ switch (@typeInfo(T)) {
+ TypeId.Int => |info| {
+ const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
+
+ try self.ensureCapacity(@sizeOf(UT) / @sizeOf(Limb));
+ self.positive = value >= 0;
+ self.len = 0;
+
+ var w_value: UT = if (value < 0) UT(-value) else UT(value);
+
+ if (info.bits <= Limb.bit_count) {
+ self.limbs[0] = Limb(w_value);
+ self.len = 1;
+ } else {
+ var i: usize = 0;
+ while (w_value != 0) : (i += 1) {
+ self.limbs[i] = @truncate(Limb, w_value);
+ self.len += 1;
+
+ // TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
+ w_value >>= Limb.bit_count / 2;
+ w_value >>= Limb.bit_count / 2;
+ }
+ }
+ },
+ TypeId.ComptimeInt => {
+ comptime var w_value = if (value < 0) -value else value;
+
+ const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+ try self.ensureCapacity(req_limbs);
+
+ self.positive = value >= 0;
+ self.len = req_limbs;
+
+ if (w_value <= @maxValue(Limb)) {
+ self.limbs[0] = w_value;
+ } else {
+ const mask = (1 << Limb.bit_count) - 1;
+
+ comptime var i = 0;
+ inline while (w_value != 0) : (i += 1) {
+ self.limbs[i] = w_value & mask;
+
+ w_value >>= Limb.bit_count / 2;
+ w_value >>= Limb.bit_count / 2;
+ }
+ }
+ },
+ else => {
+ @compileError("cannot set Int using type " ++ @typeName(T));
+ },
+ }
+ }
+
+ pub const ConvertError = error{
+ NegativeIntoUnsigned,
+ TargetTooSmall,
+ };
+
+ pub fn to(self: *const Int, comptime T: type) ConvertError!T {
+ switch (@typeId(T)) {
+ TypeId.Int => {
+ const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
+
+ if (self.bitcount() > 8 * @sizeOf(UT)) {
+ return error.TargetTooSmall;
+ }
+
+ var r: UT = 0;
+
+ if (@sizeOf(UT) <= @sizeOf(Limb)) {
+ r = UT(self.limbs[0]);
+ } else {
+ for (self.limbs[0..self.len]) |_, ri| {
+ const limb = self.limbs[self.len - ri - 1];
+ r <<= Limb.bit_count;
+ r |= limb;
+ }
+ }
+
+ if (!T.is_signed) {
+ return if (self.positive) r else error.NegativeIntoUnsigned;
+ } else {
+ return if (self.positive) T(r) else -T(r);
+ }
+ },
+ else => {
+ @compileError("cannot convert Int to type " ++ @typeName(T));
+ },
+ }
+ }
+
+ fn charToDigit(ch: u8, base: u8) !u8 {
+ const d = switch (ch) {
+ '0'...'9' => ch - '0',
+ 'a'...'f' => (ch - 'a') + 0xa,
+ else => return error.InvalidCharForDigit,
+ };
+
+ return if (d < base) d else return error.DigitTooLargeForBase;
+ }
+
+ fn digitToChar(d: u8, base: u8) !u8 {
+ if (d >= base) {
+ return error.DigitTooLargeForBase;
+ }
+
+ return switch (d) {
+ 0...9 => '0' + d,
+ 0xa...0xf => ('a' - 0xa) + d,
+ else => unreachable,
+ };
+ }
+
+ pub fn setString(self: *Int, base: u8, value: []const u8) !void {
+ if (base < 2 or base > 16) {
+ return error.InvalidBase;
+ }
+
+ var i: usize = 0;
+ var positive = true;
+ if (value.len > 0 and value[0] == '-') {
+ positive = false;
+ i += 1;
+ }
+
+ try self.set(0);
+ for (value[i..]) |ch| {
+ const d = try charToDigit(ch, base);
+ try self.mul(self, base);
+ try self.add(self, d);
+ }
+ self.positive = positive;
+ }
+
+ pub fn toString(self: *const Int, allocator: *Allocator, base: u8) ![]const u8 {
+ if (base < 2 or base > 16) {
+ return error.InvalidBase;
+ }
+
+ var digits = ArrayList(u8).init(allocator);
+ try digits.ensureCapacity(self.sizeInBase(base) + 1);
+ defer digits.deinit();
+
+ if (self.eqZero()) {
+ try digits.append('0');
+ return digits.toOwnedSlice();
+ }
+
+ // Power of two: can do a single pass and use masks to extract digits.
+ if (base & (base - 1) == 0) {
+ const base_shift = math.log2_int(Limb, base);
+
+ for (self.limbs[0..self.len]) |limb| {
+ var shift: usize = 0;
+ while (shift < Limb.bit_count) : (shift += base_shift) {
+ const r = u8((limb >> Log2Limb(shift)) & Limb(base - 1));
+ const ch = try digitToChar(r, base);
+ try digits.append(ch);
+ }
+ }
+
+ while (true) {
+ // always will have a non-zero digit somewhere
+ const c = digits.pop();
+ if (c != '0') {
+ digits.append(c) catch unreachable;
+ break;
+ }
+ }
+ } // Non power-of-two: batch divisions per word size.
+ else {
+ const digits_per_limb = math.log(Limb, base, @maxValue(Limb));
+ var limb_base: Limb = 1;
+ var j: usize = 0;
+ while (j < digits_per_limb) : (j += 1) {
+ limb_base *= base;
+ }
+
+ var q = try self.clone();
+ q.positive = true;
+ var r = try Int.init(allocator);
+ var b = try Int.initSet(allocator, limb_base);
+
+ while (q.len >= 2) {
+ try Int.divTrunc(&q, &r, &q, &b);
+
+ var r_word = r.limbs[0];
+ var i: usize = 0;
+ while (i < digits_per_limb) : (i += 1) {
+ const ch = try digitToChar(u8(r_word % base), base);
+ r_word /= base;
+ try digits.append(ch);
+ }
+ }
+
+ {
+ debug.assert(q.len == 1);
+
+ var r_word = q.limbs[0];
+ while (r_word != 0) {
+ const ch = try digitToChar(u8(r_word % base), base);
+ r_word /= base;
+ try digits.append(ch);
+ }
+ }
+ }
+
+ if (!self.positive) {
+ try digits.append('-');
+ }
+
+ var s = digits.toOwnedSlice();
+ mem.reverse(u8, s);
+ return s;
+ }
+
+ // returns -1, 0, 1 if |a| < |b|, |a| == |b| or |a| > |b| respectively.
+ pub fn cmpAbs(a: *const Int, bv: var) i8 {
+ // TODO: Thread-local buffer.
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len < b.len) {
+ return -1;
+ }
+ if (a.len > b.len) {
+ return 1;
+ }
+
+ var i: usize = a.len - 1;
+ while (i != 0) : (i -= 1) {
+ if (a.limbs[i] != b.limbs[i]) {
+ break;
+ }
+ }
+
+ if (a.limbs[i] < b.limbs[i]) {
+ return -1;
+ } else if (a.limbs[i] > b.limbs[i]) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ // returns -1, 0, 1 if a < b, a == b or a > b respectively.
+ pub fn cmp(a: *const Int, bv: var) i8 {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.positive != b.positive) {
+ return if (a.positive) i8(1) else -1;
+ } else {
+ const r = cmpAbs(a, b);
+ return if (a.positive) r else -r;
+ }
+ }
+
+ // if a == 0
+ pub fn eqZero(a: *const Int) bool {
+ return a.len == 1 and a.limbs[0] == 0;
+ }
+
+ // if |a| == |b|
+ pub fn eqAbs(a: *const Int, b: var) bool {
+ return cmpAbs(a, b) == 0;
+ }
+
+ // if a == b
+ pub fn eq(a: *const Int, b: var) bool {
+ return cmp(a, b) == 0;
+ }
+
+ // Normalize for a possible single carry digit.
+ //
+ // [1, 2, 3, 4, 0] -> [1, 2, 3, 4]
+ // [1, 2, 3, 4, 5] -> [1, 2, 3, 4, 5]
+ // [0] -> [0]
+ fn norm1(r: *Int, length: usize) void {
+ debug.assert(length > 0);
+ debug.assert(length <= r.limbs.len);
+
+ if (r.limbs[length - 1] == 0) {
+ r.len = if (length > 1) length - 1 else 1;
+ } else {
+ r.len = length;
+ }
+ }
+
+ // Normalize a possible sequence of leading zeros.
+ //
+ // [1, 2, 3, 4, 0] -> [1, 2, 3, 4]
+ // [1, 2, 0, 0, 0] -> [1, 2]
+ // [0, 0, 0, 0, 0] -> [0]
+ fn normN(r: *Int, length: usize) void {
+ debug.assert(length > 0);
+ debug.assert(length <= r.limbs.len);
+
+ var j = length;
+ while (j > 0) : (j -= 1) {
+ if (r.limbs[j - 1] != 0) {
+ break;
+ }
+ }
+
+ // Handle zero
+ r.len = if (j != 0) j else 1;
+ }
+
+ // r = a + b
+ pub fn add(r: *Int, av: var, bv: var) Allocator.Error!void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.eqZero()) {
+ try r.copy(b);
+ return;
+ } else if (b.eqZero()) {
+ try r.copy(a);
+ return;
+ }
+
+ if (a.positive != b.positive) {
+ if (a.positive) {
+ // (a) + (-b) => a - b
+ const bp = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = b.limbs,
+ .len = b.len,
+ };
+ try r.sub(a, bp);
+ } else {
+ // (-a) + (b) => b - a
+ const ap = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = a.limbs,
+ .len = a.len,
+ };
+ try r.sub(b, ap);
+ }
+ } else {
+ if (a.len >= b.len) {
+ try r.ensureCapacity(a.len + 1);
+ lladd(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.norm1(a.len + 1);
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ lladd(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.norm1(b.len + 1);
+ }
+
+ r.positive = a.positive;
+ }
+ }
+
+ // Knuth 4.3.1, Algorithm A.
+ fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len != 0 and b.len != 0);
+ debug.assert(a.len >= b.len);
+ debug.assert(r.len >= a.len + 1);
+
+ var i: usize = 0;
+ var carry: Limb = 0;
+
+ while (i < b.len) : (i += 1) {
+ var c: Limb = 0;
+ c += Limb(@addWithOverflow(Limb, a[i], b[i], &r[i]));
+ c += Limb(@addWithOverflow(Limb, r[i], carry, &r[i]));
+ carry = c;
+ }
+
+ while (i < a.len) : (i += 1) {
+ carry = Limb(@addWithOverflow(Limb, a[i], carry, &r[i]));
+ }
+
+ r[i] = carry;
+ }
+
+ // r = a - b
+ pub fn sub(r: *Int, av: var, bv: var) !void {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.positive != b.positive) {
+ if (a.positive) {
+ // (a) - (-b) => a + b
+ const bp = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = b.limbs,
+ .len = b.len,
+ };
+ try r.add(a, bp);
+ } else {
+ // (-a) - (b) => -(a + b)
+ const ap = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = a.limbs,
+ .len = a.len,
+ };
+ try r.add(ap, b);
+ r.positive = false;
+ }
+ } else {
+ if (a.positive) {
+ // (a) - (b) => a - b
+ if (a.cmp(b) >= 0) {
+ try r.ensureCapacity(a.len + 1);
+ llsub(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ r.positive = true;
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ llsub(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ r.positive = false;
+ }
+ } else {
+ // (-a) - (-b) => -(a - b)
+ if (a.cmp(b) < 0) {
+ try r.ensureCapacity(a.len + 1);
+ llsub(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ r.positive = false;
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ llsub(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ r.positive = true;
+ }
+ }
+ }
+ }
+
+ // Knuth 4.3.1, Algorithm S.
+ fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len != 0 and b.len != 0);
+ debug.assert(a.len > b.len or (a.len == b.len and a[a.len - 1] >= b[b.len - 1]));
+ debug.assert(r.len >= a.len);
+
+ var i: usize = 0;
+ var borrow: Limb = 0;
+
+ while (i < b.len) : (i += 1) {
+ var c: Limb = 0;
+ c += Limb(@subWithOverflow(Limb, a[i], b[i], &r[i]));
+ c += Limb(@subWithOverflow(Limb, r[i], borrow, &r[i]));
+ borrow = c;
+ }
+
+ while (i < a.len) : (i += 1) {
+ borrow = Limb(@subWithOverflow(Limb, a[i], borrow, &r[i]));
+ }
+
+ debug.assert(borrow == 0);
+ }
+
+ // rma = a * b
+ //
+ // For greatest efficiency, ensure rma does not alias a or b.
+ pub fn mul(rma: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ var r = rma;
+ var aliased = rma == a or rma == b;
+
+ var sr: Int = undefined;
+ if (aliased) {
+ sr = try Int.initCapacity(rma.allocator, a.len + b.len);
+ r = &sr;
+ aliased = true;
+ }
+ defer if (aliased) {
+ rma.swap(r);
+ r.deinit();
+ };
+
+ try r.ensureCapacity(a.len + b.len);
+
+ if (a.len >= b.len) {
+ llmul(r.limbs, a.limbs[0..a.len], b.limbs[0..b.len]);
+ } else {
+ llmul(r.limbs, b.limbs[0..b.len], a.limbs[0..a.len]);
+ }
+
+ r.positive = a.positive == b.positive;
+ r.normN(a.len + b.len);
+ }
+
+ // a + b * c + *carry, sets carry to the overflow bits
+ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
+ var r1: Limb = undefined;
+
+ // r1 = a + *carry
+ const c1 = Limb(@addWithOverflow(Limb, a, carry.*, &r1));
+
+ // r2 = b * c
+ //
+ // We still use a DoubleLimb here since the @mulWithOverflow builtin does not
+ // return the carry and lower bits separately so we would need to perform this
+ // anyway to get the carry bits. The branch on the overflow case costs more than
+ // just computing them unconditionally and splitting.
+ //
+ // This could be a single x86 mul instruction, which stores the carry/lower in rdx:rax.
+ const bc = DoubleLimb(b) * DoubleLimb(c);
+ const r2 = @truncate(Limb, bc);
+ const c2 = @truncate(Limb, bc >> Limb.bit_count);
+
+ // r1 = r1 + r2
+ const c3 = Limb(@addWithOverflow(Limb, r1, r2, &r1));
+
+ // This never overflows, c1, c3 are either 0 or 1 and if both are 1 then
+ // c2 is at least <= @maxValue(Limb) - 2.
+ carry.* = c1 + c2 + c3;
+
+ return r1;
+ }
+
+ // Knuth 4.3.1, Algorithm M.
+ //
+ // r MUST NOT alias any of a or b.
+ fn llmul(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= b.len);
+ debug.assert(r.len >= a.len + b.len);
+
+ mem.set(Limb, r[0 .. a.len + b.len], 0);
+
+ var i: usize = 0;
+ while (i < a.len) : (i += 1) {
+ var carry: Limb = 0;
+ var j: usize = 0;
+ while (j < b.len) : (j += 1) {
+ r[i + j] = @inlineCall(addMulLimbWithCarry, r[i + j], a[i], b[j], &carry);
+ }
+ r[i + j] = carry;
+ }
+ }
+
+ pub fn divFloor(q: *Int, r: *Int, a: var, b: var) !void {
+ try div(q, r, a, b);
+
+ // Trunc -> Floor.
+ if (!q.positive) {
+ try q.sub(q, 1);
+ try r.add(q, 1);
+ }
+ r.positive = b.positive;
+ }
+
+ pub fn divTrunc(q: *Int, r: *Int, a: var, b: var) !void {
+ try div(q, r, a, b);
+ r.positive = a.positive;
+ }
+
+ // Truncates by default.
+ fn div(quo: *Int, rem: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (b.eqZero()) {
+ @panic("division by zero");
+ }
+ if (quo == rem) {
+ @panic("quo and rem cannot be same variable");
+ }
+
+ if (a.cmpAbs(b) < 0) {
+ // quo may alias a so handle rem first
+ try rem.copy(a);
+ rem.positive = a.positive == b.positive;
+
+ quo.positive = true;
+ quo.len = 1;
+ quo.limbs[0] = 0;
+ return;
+ }
+
+ if (b.len == 1) {
+ try quo.ensureCapacity(a.len);
+
+ lldiv1(quo.limbs[0..], &rem.limbs[0], a.limbs[0..a.len], b.limbs[0]);
+ quo.norm1(a.len);
+ quo.positive = a.positive == b.positive;
+
+ rem.len = 1;
+ rem.positive = true;
+ } else {
+ // x and y are modified during division
+ var x = try a.clone();
+ defer x.deinit();
+
+ var y = try b.clone();
+ defer y.deinit();
+
+ // x may grow one limb during normalization
+ try quo.ensureCapacity(a.len + y.len);
+ try divN(quo.allocator, quo, rem, &x, &y);
+
+ quo.positive = a.positive == b.positive;
+ }
+ }
+
+ // Knuth 4.3.1, Exercise 16.
+ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len > 1 or a[0] >= b);
+ debug.assert(quo.len >= a.len);
+
+ rem.* = 0;
+ for (a) |_, ri| {
+ const i = a.len - ri - 1;
+ const pdiv = ((DoubleLimb(rem.*) << Limb.bit_count) | a[i]);
+
+ if (pdiv == 0) {
+ quo[i] = 0;
+ rem.* = 0;
+ } else if (pdiv < b) {
+ quo[i] = 0;
+ rem.* = @truncate(Limb, pdiv);
+ } else if (pdiv == b) {
+ quo[i] = 1;
+ rem.* = 0;
+ } else {
+ quo[i] = @truncate(Limb, @divTrunc(pdiv, b));
+ rem.* = @truncate(Limb, pdiv - (quo[i] *% b));
+ }
+ }
+ }
+
+ // Handbook of Applied Cryptography, 14.20
+ //
+ // x = qy + r where 0 <= r < y
+ fn divN(allocator: *Allocator, q: *Int, r: *Int, x: *Int, y: *Int) !void {
+ debug.assert(y.len >= 2);
+ debug.assert(x.len >= y.len);
+ debug.assert(q.limbs.len >= x.len + y.len - 1);
+ debug.assert(default_capacity >= 3); // see 3.2
+
+ var tmp = try Int.init(allocator);
+ defer tmp.deinit();
+
+ // Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set)
+ const norm_shift = @clz(y.limbs[y.len - 1]);
+ try x.shiftLeft(x, norm_shift);
+ try y.shiftLeft(y, norm_shift);
+
+ const n = x.len - 1;
+ const t = y.len - 1;
+
+ // 1.
+ q.len = n - t + 1;
+ mem.set(Limb, q.limbs[0..q.len], 0);
+
+ // 2.
+ try tmp.shiftLeft(y, Limb.bit_count * (n - t));
+ while (x.cmp(&tmp) >= 0) {
+ q.limbs[n - t] += 1;
+ try x.sub(x, tmp);
+ }
+
+ // 3.
+ var i = n;
+ while (i > t) : (i -= 1) {
+ // 3.1
+ if (x.limbs[i] == y.limbs[t]) {
+ q.limbs[i - t - 1] = @maxValue(Limb);
+ } else {
+ const num = (DoubleLimb(x.limbs[i]) << Limb.bit_count) | DoubleLimb(x.limbs[i - 1]);
+ const z = Limb(num / DoubleLimb(y.limbs[t]));
+ q.limbs[i - t - 1] = if (z > @maxValue(Limb)) @maxValue(Limb) else Limb(z);
+ }
+
+ // 3.2
+ tmp.limbs[0] = if (i >= 2) x.limbs[i - 2] else 0;
+ tmp.limbs[1] = if (i >= 1) x.limbs[i - 1] else 0;
+ tmp.limbs[2] = x.limbs[i];
+ tmp.normN(3);
+
+ while (true) {
+ // 2x1 limb multiplication unrolled against single-limb q[i-t-1]
+ var carry: Limb = 0;
+ r.limbs[0] = addMulLimbWithCarry(0, if (t >= 1) y.limbs[t - 1] else 0, q.limbs[i - t - 1], &carry);
+ r.limbs[1] = addMulLimbWithCarry(0, y.limbs[t], q.limbs[i - t - 1], &carry);
+ r.limbs[2] = carry;
+ r.normN(3);
+
+ if (r.cmpAbs(&tmp) <= 0) {
+ break;
+ }
+
+ q.limbs[i - t - 1] -= 1;
+ }
+
+ // 3.3
+ try tmp.set(q.limbs[i - t - 1]);
+ try tmp.mul(&tmp, y);
+ try tmp.shiftLeft(&tmp, Limb.bit_count * (i - t - 1));
+ try x.sub(x, &tmp);
+
+ if (!x.positive) {
+ try tmp.shiftLeft(y, Limb.bit_count * (i - t - 1));
+ try x.add(x, &tmp);
+ q.limbs[i - t - 1] -= 1;
+ }
+ }
+
+ // Denormalize
+ q.normN(q.len);
+
+ try r.shiftRight(x, norm_shift);
+ r.normN(r.len);
+ }
+
+ // r = a << shift, in other words, r = a * 2^shift
+ pub fn shiftLeft(r: *Int, av: var, shift: usize) !void {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+
+ try r.ensureCapacity(a.len + (shift / Limb.bit_count) + 1);
+ llshl(r.limbs[0..], a.limbs[0..a.len], shift);
+ r.norm1(a.len + (shift / Limb.bit_count) + 1);
+ r.positive = a.positive;
+ }
+
+ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= 1);
+ debug.assert(r.len >= a.len + (shift / Limb.bit_count) + 1);
+
+ const limb_shift = shift / Limb.bit_count + 1;
+ const interior_limb_shift = Log2Limb(shift % Limb.bit_count);
+
+ var carry: Limb = 0;
+ var i: usize = 0;
+ while (i < a.len) : (i += 1) {
+ const src_i = a.len - i - 1;
+ const dst_i = src_i + limb_shift;
+
+ const src_digit = a[src_i];
+ r[dst_i] = carry | @inlineCall(math.shr, Limb, src_digit, Limb.bit_count - Limb(interior_limb_shift));
+ carry = (src_digit << interior_limb_shift);
+ }
+
+ r[limb_shift - 1] = carry;
+ mem.set(Limb, r[0 .. limb_shift - 1], 0);
+ }
+
+ // r = a >> shift
+ pub fn shiftRight(r: *Int, av: var, shift: usize) !void {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+
+ if (a.len <= shift / Limb.bit_count) {
+ r.len = 1;
+ r.limbs[0] = 0;
+ r.positive = true;
+ return;
+ }
+
+ try r.ensureCapacity(a.len - (shift / Limb.bit_count));
+ const r_len = llshr(r.limbs[0..], a.limbs[0..a.len], shift);
+ r.len = a.len - (shift / Limb.bit_count);
+ r.positive = a.positive;
+ }
+
+ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= 1);
+ debug.assert(r.len >= a.len - (shift / Limb.bit_count));
+
+ const limb_shift = shift / Limb.bit_count;
+ const interior_limb_shift = Log2Limb(shift % Limb.bit_count);
+
+ var carry: Limb = 0;
+ var i: usize = 0;
+ while (i < a.len - limb_shift) : (i += 1) {
+ const src_i = a.len - i - 1;
+ const dst_i = src_i - limb_shift;
+
+ const src_digit = a[src_i];
+ r[dst_i] = carry | (src_digit >> interior_limb_shift);
+ carry = @inlineCall(math.shl, Limb, src_digit, Limb.bit_count - Limb(interior_limb_shift));
+ }
+ }
+
+ // r = a | b
+ pub fn bitOr(r: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len > b.len) {
+ try r.ensureCapacity(a.len);
+ llor(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.len = a.len;
+ } else {
+ try r.ensureCapacity(b.len);
+ llor(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.len = b.len;
+ }
+ }
+
+ fn llor(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= a.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] | b[i];
+ }
+ while (i < a.len) : (i += 1) {
+ r[i] = a[i];
+ }
+ }
+
+ // r = a & b
+ pub fn bitAnd(r: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len > b.len) {
+ try r.ensureCapacity(b.len);
+ lland(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(b.len);
+ } else {
+ try r.ensureCapacity(a.len);
+ lland(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(a.len);
+ }
+ }
+
+ fn lland(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= b.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] & b[i];
+ }
+ }
+
+ // r = a ^ b
+ pub fn bitXor(r: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len > b.len) {
+ try r.ensureCapacity(a.len);
+ llxor(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ } else {
+ try r.ensureCapacity(b.len);
+ llxor(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ }
+ }
+
+ fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= a.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] ^ b[i];
+ }
+ while (i < a.len) : (i += 1) {
+ r[i] = a[i];
+ }
+ }
+};
+
+// NOTE: All the following tests assume the max machine-word will be 64-bit.
+//
+// They will still run on larger than this and should pass, but the multi-limb code-paths
+// may be untested in some cases.
+
+const u256 = @IntType(false, 256);
+var al = debug.global_allocator;
+
+test "big.int comptime_int set" {
+ comptime var s = 0xefffffff00000001eeeeeeefaaaaaaab;
+ var a = try Int.initSet(al, s);
+
+ const s_limb_count = 128 / Limb.bit_count;
+
+ comptime var i: usize = 0;
+ inline while (i < s_limb_count) : (i += 1) {
+ const result = Limb(s & @maxValue(Limb));
+ s >>= Limb.bit_count / 2;
+ s >>= Limb.bit_count / 2;
+ debug.assert(a.limbs[i] == result);
+ }
+}
+
+test "big.int comptime_int set negative" {
+ var a = try Int.initSet(al, -10);
+
+ debug.assert(a.limbs[0] == 10);
+ debug.assert(a.positive == false);
+}
+
+test "big.int int set unaligned small" {
+ var a = try Int.initSet(al, u7(45));
+
+ debug.assert(a.limbs[0] == 45);
+ debug.assert(a.positive == true);
+}
+
+test "big.int comptime_int to" {
+ const a = try Int.initSet(al, 0xefffffff00000001eeeeeeefaaaaaaab);
+
+ debug.assert((try a.to(u128)) == 0xefffffff00000001eeeeeeefaaaaaaab);
+}
+
+test "big.int sub-limb to" {
+ const a = try Int.initSet(al, 10);
+
+ debug.assert((try a.to(u8)) == 10);
+}
+
+test "big.int to target too small error" {
+ const a = try Int.initSet(al, 0xffffffff);
+
+ if (a.to(u8)) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == error.TargetTooSmall);
+ }
+}
+
+test "big.int norm1" {
+ var a = try Int.init(al);
+ try a.ensureCapacity(8);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.limbs[3] = 0;
+ a.norm1(4);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.norm1(3);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 0;
+ a.limbs[1] = 0;
+ a.norm1(2);
+ debug.assert(a.len == 1);
+
+ a.limbs[0] = 0;
+ a.norm1(1);
+ debug.assert(a.len == 1);
+}
+
+test "big.int normN" {
+ var a = try Int.init(al);
+ try a.ensureCapacity(8);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 0;
+ a.limbs[3] = 0;
+ a.normN(4);
+ debug.assert(a.len == 2);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.normN(3);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 0;
+ a.limbs[1] = 0;
+ a.limbs[2] = 0;
+ a.limbs[3] = 0;
+ a.normN(4);
+ debug.assert(a.len == 1);
+
+ a.limbs[0] = 0;
+ a.normN(1);
+ debug.assert(a.len == 1);
+}
+
+test "big.int parity" {
+ var a = try Int.init(al);
+ try a.set(0);
+ debug.assert(a.isEven());
+ debug.assert(!a.isOdd());
+
+ try a.set(7);
+ debug.assert(!a.isEven());
+ debug.assert(a.isOdd());
+}
+
+test "big.int bitcount + sizeInBase" {
+ var a = try Int.init(al);
+
+ try a.set(0b100);
+ debug.assert(a.bitcount() == 3);
+ debug.assert(a.sizeInBase(2) >= 3);
+ debug.assert(a.sizeInBase(10) >= 1);
+
+ try a.set(0xffffffff);
+ debug.assert(a.bitcount() == 32);
+ debug.assert(a.sizeInBase(2) >= 32);
+ debug.assert(a.sizeInBase(10) >= 10);
+
+ try a.shiftLeft(&a, 5000);
+ debug.assert(a.bitcount() == 5032);
+ debug.assert(a.sizeInBase(2) >= 5032);
+ a.positive = false;
+
+ debug.assert(a.bitcount() == 5033);
+ debug.assert(a.sizeInBase(2) >= 5033);
+}
+
+test "big.int string set" {
+ var a = try Int.init(al);
+ try a.setString(10, "120317241209124781241290847124");
+
+ debug.assert((try a.to(u128)) == 120317241209124781241290847124);
+}
+
+test "big.int string negative" {
+ var a = try Int.init(al);
+ try a.setString(10, "-1023");
+ debug.assert((try a.to(i32)) == -1023);
+}
+
+test "big.int string set bad char error" {
+ var a = try Int.init(al);
+ a.setString(10, "x") catch |err| debug.assert(err == error.InvalidCharForDigit);
+}
+
+test "big.int string set bad base error" {
+ var a = try Int.init(al);
+ a.setString(45, "10") catch |err| debug.assert(err == error.InvalidBase);
+}
+
+test "big.int string to" {
+ const a = try Int.initSet(al, 120317241209124781241290847124);
+
+ const as = try a.toString(al, 10);
+ const es = "120317241209124781241290847124";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int string to base base error" {
+ const a = try Int.initSet(al, 0xffffffff);
+
+ if (a.toString(al, 45)) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == error.InvalidBase);
+ }
+}
+
+test "big.int string to base 2" {
+ const a = try Int.initSet(al, -0b1011);
+
+ const as = try a.toString(al, 2);
+ const es = "-1011";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int string to base 16" {
+ const a = try Int.initSet(al, 0xefffffff00000001eeeeeeefaaaaaaab);
+
+ const as = try a.toString(al, 16);
+ const es = "efffffff00000001eeeeeeefaaaaaaab";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int neg string to" {
+ const a = try Int.initSet(al, -123907434);
+
+ const as = try a.toString(al, 10);
+ const es = "-123907434";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int zero string to" {
+ const a = try Int.initSet(al, 0);
+
+ const as = try a.toString(al, 10);
+ const es = "0";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int clone" {
+ var a = try Int.initSet(al, 1234);
+ const b = try a.clone();
+
+ debug.assert((try a.to(u32)) == 1234);
+ debug.assert((try b.to(u32)) == 1234);
+
+ try a.set(77);
+ debug.assert((try a.to(u32)) == 77);
+ debug.assert((try b.to(u32)) == 1234);
+}
+
+test "big.int swap" {
+ var a = try Int.initSet(al, 1234);
+ var b = try Int.initSet(al, 5678);
+
+ debug.assert((try a.to(u32)) == 1234);
+ debug.assert((try b.to(u32)) == 5678);
+
+ a.swap(&b);
+
+ debug.assert((try a.to(u32)) == 5678);
+ debug.assert((try b.to(u32)) == 1234);
+}
+
+test "big.int to negative" {
+ var a = try Int.initSet(al, -10);
+
+ debug.assert((try a.to(i32)) == -10);
+}
+
+test "big.int compare" {
+ var a = try Int.initSet(al, -11);
+ var b = try Int.initSet(al, 10);
+
+ debug.assert(a.cmpAbs(&b) == 1);
+ debug.assert(a.cmp(&b) == -1);
+}
+
+test "big.int compare similar" {
+ var a = try Int.initSet(al, 0xffffffffeeeeeeeeffffffffeeeeeeee);
+ var b = try Int.initSet(al, 0xffffffffeeeeeeeeffffffffeeeeeeef);
+
+ debug.assert(a.cmpAbs(&b) == -1);
+ debug.assert(b.cmpAbs(&a) == 1);
+}
+
+test "big.int compare different limb size" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ debug.assert(a.cmpAbs(&b) == 1);
+ debug.assert(b.cmpAbs(&a) == -1);
+}
+
+test "big.int compare multi-limb" {
+ var a = try Int.initSet(al, -0x7777777799999999ffffeeeeffffeeeeffffeeeef);
+ var b = try Int.initSet(al, 0x7777777799999999ffffeeeeffffeeeeffffeeeee);
+
+ debug.assert(a.cmpAbs(&b) == 1);
+ debug.assert(a.cmp(&b) == -1);
+}
+
+test "big.int equality" {
+ var a = try Int.initSet(al, 0xffffffff1);
+ var b = try Int.initSet(al, -0xffffffff1);
+
+ debug.assert(a.eqAbs(&b));
+ debug.assert(!a.eq(&b));
+}
+
+test "big.int abs" {
+ var a = try Int.initSet(al, -5);
+
+ a.abs();
+ debug.assert((try a.to(u32)) == 5);
+
+ a.abs();
+ debug.assert((try a.to(u32)) == 5);
+}
+
+test "big.int negate" {
+ var a = try Int.initSet(al, 5);
+
+ a.negate();
+ debug.assert((try a.to(i32)) == -5);
+
+ a.negate();
+ debug.assert((try a.to(i32)) == 5);
+}
+
+test "big.int add single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.add(&a, &b);
+
+ debug.assert((try c.to(u32)) == 55);
+}
+
+test "big.int add multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ var c = try Int.init(al);
+
+ try c.add(&a, &b);
+ debug.assert((try c.to(DoubleLimb)) == @maxValue(Limb) + 2);
+
+ try c.add(&b, &a);
+ debug.assert((try c.to(DoubleLimb)) == @maxValue(Limb) + 2);
+}
+
+test "big.int add multi-multi" {
+ const op1 = 0xefefefef7f7f7f7f;
+ const op2 = 0xfefefefe9f9f9f9f;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.add(&a, &b);
+
+ debug.assert((try c.to(u128)) == op1 + op2);
+}
+
+test "big.int add zero-zero" {
+ var a = try Int.initSet(al, 0);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.add(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int add alias multi-limb nonzero-zero" {
+ const op1 = 0xffffffff777777771;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, 0);
+
+ try a.add(&a, &b);
+
+ debug.assert((try a.to(u128)) == op1);
+}
+
+test "big.int add sign" {
+ var a = try Int.init(al);
+
+ try a.add(1, 2);
+ debug.assert((try a.to(i32)) == 3);
+
+ try a.add(-1, 2);
+ debug.assert((try a.to(i32)) == 1);
+
+ try a.add(1, -2);
+ debug.assert((try a.to(i32)) == -1);
+
+ try a.add(-1, -2);
+ debug.assert((try a.to(i32)) == -3);
+}
+
+test "big.int sub single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(u32)) == 45);
+}
+
+test "big.int sub multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(Limb)) == @maxValue(Limb));
+}
+
+test "big.int sub multi-multi" {
+ const op1 = 0xefefefefefefefefefefefef;
+ const op2 = 0xabababababababababababab;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(u128)) == op1 - op2);
+}
+
+test "big.int sub equal" {
+ var a = try Int.initSet(al, 0x11efefefefefefefefefefefef);
+ var b = try Int.initSet(al, 0x11efefefefefefefefefefefef);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int sub sign" {
+ var a = try Int.init(al);
+
+ try a.sub(1, 2);
+ debug.assert((try a.to(i32)) == -1);
+
+ try a.sub(-1, 2);
+ debug.assert((try a.to(i32)) == -3);
+
+ try a.sub(1, -2);
+ debug.assert((try a.to(i32)) == 3);
+
+ try a.sub(-1, -2);
+ debug.assert((try a.to(i32)) == 1);
+
+ try a.sub(-2, -1);
+ debug.assert((try a.to(i32)) == -1);
+}
+
+test "big.int mul single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u64)) == 250);
+}
+
+test "big.int mul multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul multi-multi" {
+ const op1 = 0x998888efefefefefefefef;
+ const op2 = 0x333000abababababababab;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u256)) == op1 * op2);
+}
+
+test "big.int mul alias r with a" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ try a.mul(&a, &b);
+
+ debug.assert((try a.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul alias r with b" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ try a.mul(&b, &a);
+
+ debug.assert((try a.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul alias r with a and b" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+
+ try a.mul(&a, &a);
+
+ debug.assert((try a.to(DoubleLimb)) == @maxValue(Limb) * @maxValue(Limb));
+}
+
+test "big.int mul a*0" {
+ var a = try Int.initSet(al, 0xefefefefefefefef);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int mul 0*0" {
+ var a = try Int.initSet(al, 0);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int div single-single no rem" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u32)) == 10);
+ debug.assert((try r.to(u32)) == 0);
+}
+
+test "big.int div single-single with rem" {
+ var a = try Int.initSet(al, 49);
+ var b = try Int.initSet(al, 5);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u32)) == 9);
+ debug.assert((try r.to(u32)) == 4);
+}
+
+test "big.int div multi-single no rem" {
+ const op1 = 0xffffeeeeddddcccc;
+ const op2 = 34;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == op1 / op2);
+ debug.assert((try r.to(u64)) == 0);
+}
+
+test "big.int div multi-single with rem" {
+ const op1 = 0xffffeeeeddddcccf;
+ const op2 = 34;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == op1 / op2);
+ debug.assert((try r.to(u64)) == 3);
+}
+
+test "big.int div multi>2-single" {
+ const op1 = 0xfefefefefefefefefefefefefefefefe;
+ const op2 = 0xefab8;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == op1 / op2);
+ debug.assert((try r.to(u32)) == 0x3e4e);
+}
+
+test "big.int div single-single q < r" {
+ var a = try Int.initSet(al, 0x0078f432);
+ var b = try Int.initSet(al, 0x01000000);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == 0);
+ debug.assert((try r.to(u64)) == 0x0078f432);
+}
+
+test "big.int div single-single q == r" {
+ var a = try Int.initSet(al, 10);
+ var b = try Int.initSet(al, 10);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == 1);
+ debug.assert((try r.to(u64)) == 0);
+}
+
+test "big.int div q=0 alias" {
+ var a = try Int.initSet(al, 3);
+ var b = try Int.initSet(al, 10);
+
+ try Int.divTrunc(&a, &b, &a, &b);
+
+ debug.assert((try a.to(u64)) == 0);
+ debug.assert((try b.to(u64)) == 3);
+}
+
+test "big.int div multi-multi q < r" {
+ const op1 = 0x1ffffffff0078f432;
+ const op2 = 0x1ffffffff01000000;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0);
+ debug.assert((try r.to(u128)) == op1);
+}
+
+test "big.int div trunc single-single +/+" {
+ const u: i32 = 5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = 1 * 3 + 2
+ const eq = @divTrunc(u, v);
+ const er = @mod(u, v);
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single -/+" {
+ const u: i32 = -5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = 1 * -3 - 2
+ const eq = -1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single +/-" {
+ const u: i32 = 5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = -1 * -3 + 2
+ const eq = -1;
+ const er = 2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single -/-" {
+ const u: i32 = -5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = 1 * -3 - 2
+ const eq = 1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single +/+" {
+ const u: i32 = 5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = 1 * 3 + 2
+ const eq = 1;
+ const er = 2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single -/+" {
+ const u: i32 = -5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = -2 * 3 + 1
+ const eq = -2;
+ const er = 1;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single +/-" {
+ const u: i32 = 5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = -2 * -3 - 1
+ const eq = -2;
+ const er = -1;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single -/-" {
+ const u: i32 = -5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = 2 * -3 + 1
+ const eq = 1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div multi-multi with rem" {
+ var a = try Int.initSet(al, 0x8888999911110000ffffeeeeddddccccbbbbaaaa9999);
+ var b = try Int.initSet(al, 0x99990000111122223333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b);
+ debug.assert((try r.to(u128)) == 0x28de0acacd806823638);
+}
+
+test "big.int div multi-multi no rem" {
+ var a = try Int.initSet(al, 0x8888999911110000ffffeeeedb4fec200ee3a4286361);
+ var b = try Int.initSet(al, 0x99990000111122223333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b);
+ debug.assert((try r.to(u128)) == 0);
+}
+
+test "big.int div multi-multi (2 branch)" {
+ var a = try Int.initSet(al, 0x866666665555555588888887777777761111111111111111);
+ var b = try Int.initSet(al, 0x86666666555555554444444433333333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0x10000000000000000);
+ debug.assert((try r.to(u128)) == 0x44444443444444431111111111111111);
+}
+
+test "big.int div multi-multi (3.1/3.3 branch)" {
+ var a = try Int.initSet(al, 0x11111111111111111111111111111111111111111111111111111111111111);
+ var b = try Int.initSet(al, 0x1111111111111111111111111111111111111111171);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0xfffffffffffffffffff);
+ debug.assert((try r.to(u256)) == 0x1111111111111111111110b12222222222222222282);
+}
+
+test "big.int shift-right single" {
+ var a = try Int.initSet(al, 0xffff0000);
+ try a.shiftRight(a, 16);
+
+ debug.assert((try a.to(u32)) == 0xffff);
+}
+
+test "big.int shift-right multi" {
+ var a = try Int.initSet(al, 0xffff0000eeee1111dddd2222cccc3333);
+ try a.shiftRight(a, 67);
+
+ debug.assert((try a.to(u64)) == 0x1fffe0001dddc222);
+}
+
+test "big.int shift-left single" {
+ var a = try Int.initSet(al, 0xffff);
+ try a.shiftLeft(a, 16);
+
+ debug.assert((try a.to(u64)) == 0xffff0000);
+}
+
+test "big.int shift-left multi" {
+ var a = try Int.initSet(al, 0x1fffe0001dddc222);
+ try a.shiftLeft(a, 67);
+
+ debug.assert((try a.to(u128)) == 0xffff0000eeee11100000000000000000);
+}
+
+test "big.int shift-right negative" {
+ var a = try Int.init(al);
+
+ try a.shiftRight(-20, 2);
+ debug.assert((try a.to(i32)) == -20 >> 2);
+
+ try a.shiftRight(-5, 10);
+ debug.assert((try a.to(i32)) == -5 >> 10);
+}
+
+test "big.int shift-left negative" {
+ var a = try Int.init(al);
+
+ try a.shiftRight(-10, 1232);
+ debug.assert((try a.to(i32)) == -10 >> 1232);
+}
+
+test "big.int bitwise and simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitAnd(&a, &b);
+
+ debug.assert((try a.to(u64)) == 0xeeeeeeee00000000);
+}
+
+test "big.int bitwise and multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitAnd(&a, &b);
+
+ debug.assert((try a.to(u128)) == 0);
+}
+
+test "big.int bitwise xor simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitXor(&a, &b);
+
+ debug.assert((try a.to(u64)) == 0x1111111133333333);
+}
+
+test "big.int bitwise xor multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitXor(&a, &b);
+
+ debug.assert((try a.to(DoubleLimb)) == (@maxValue(Limb) + 1) ^ @maxValue(Limb));
+}
+
+test "big.int bitwise or simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitOr(&a, &b);
+
+ debug.assert((try a.to(u64)) == 0xffffffff33333333);
+}
+
+test "big.int bitwise or multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitOr(&a, &b);
+
+ // TODO: big.int.cpp or is wrong on multi-limb.
+ debug.assert((try a.to(DoubleLimb)) == (@maxValue(Limb) + 1) + @maxValue(Limb));
+}
+
+test "big.int var args" {
+ var a = try Int.initSet(al, 5);
+
+ try a.add(&a, 6);
+ debug.assert((try a.to(u64)) == 11);
+
+ debug.assert(a.cmp(11) == 0);
+ debug.assert(a.cmp(14) <= 0);
+}
diff --git a/std/math/index.zig b/std/math/index.zig
index a118f3ed47..cc1b833a37 100644
--- a/std/math/index.zig
+++ b/std/math/index.zig
@@ -132,6 +132,8 @@ pub const tan = @import("tan.zig").tan;
pub const complex = @import("complex/index.zig");
pub const Complex = complex.Complex;
+pub const big = @import("big/index.zig");
+
test "math" {
_ = @import("nan.zig");
_ = @import("isnan.zig");
@@ -177,6 +179,8 @@ test "math" {
_ = @import("tan.zig");
_ = @import("complex/index.zig");
+
+ _ = @import("big/index.zig");
}
pub fn min(x: var, y: var) @typeOf(x + y) {
From 854f90aa3064e8bf7ab62a113de3ff3aafa3e6f6 Mon Sep 17 00:00:00 2001
From: tgschultz
Date: Sun, 10 Jun 2018 11:57:21 -0500
Subject: [PATCH 30/49] Added C string support to fmt by using "{s}". The
 format string requirement is for safety. (#1092)
---
std/fmt/index.zig | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index b52625e26e..cfc0948d2c 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -161,6 +161,14 @@ pub fn formatType(
else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)),
},
builtin.TypeInfo.Pointer.Size.Many => {
+ if (ptr_info.child == u8) {
+ //This is a bit of a hack, but it made more sense to
+ // do this check here than have formatText do it
+ if (fmt[0] == 's') {
+ const len = std.cstr.len(value);
+ return formatText(value[0..len], fmt, context, Errors, output);
+ }
+ }
return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
},
builtin.TypeInfo.Pointer.Size.Slice => {
@@ -300,7 +308,7 @@ pub fn formatBuf(
var leftover_padding = if (width > buf.len) (width - buf.len) else return;
const pad_byte: u8 = ' ';
while (leftover_padding > 0) : (leftover_padding -= 1) {
- try output(context, (&pad_byte)[0..1]);
+ try output(context, (*[1]u8)(&pad_byte)[0..1]);
}
}
@@ -841,6 +849,10 @@ test "fmt.format" {
const value: u8 = 'a';
try testFmt("u8: a\n", "u8: {c}\n", value);
}
+ try testFmt("buf: Test \n", "buf: {s5}\n", "Test");
+ try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", "Test");
+ try testFmt("cstr: Test C\n", "cstr: {s}\n", c"Test C");
+ try testFmt("cstr: Test C \n", "cstr: {s10}\n", c"Test C");
try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024));
try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024));
{
From 03c16c6c548a8f8246c3dcff540482b7612aab80 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 11 Jun 2018 14:58:42 -0400
Subject: [PATCH 31/49] implement @tagName as a switch instead of table lookup
closes #976
closes #1080
---
src/all_types.hpp | 6 +-
src/codegen.cpp | 172 ++++++++++++++++++++++++++------------------
src/ir.cpp | 5 --
test/cases/enum.zig | 9 +++
4 files changed, 112 insertions(+), 80 deletions(-)
diff --git a/src/all_types.hpp b/src/all_types.hpp
index ab219e4e56..0d364915f9 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1091,8 +1091,7 @@ struct TypeTableEntryEnum {
bool zero_bits_loop_flag;
bool zero_bits_known;
- bool generate_name_table;
- LLVMValueRef name_table;
+ LLVMValueRef name_function;
HashMap fields_by_name;
};
@@ -1411,6 +1410,7 @@ enum PanicMsgId {
PanicMsgIdInvalidErrorCode,
PanicMsgIdIncorrectAlignment,
PanicMsgIdBadUnionField,
+ PanicMsgIdBadEnumValue,
PanicMsgIdCount,
};
@@ -1730,8 +1730,6 @@ struct CodeGen {
ZigList link_objects;
ZigList assembly_files;
- ZigList name_table_enums;
-
Buf *test_filter;
Buf *test_name_prefix;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index da08ecfc9e..d05bcba2ce 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -875,6 +875,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("incorrect alignment");
case PanicMsgIdBadUnionField:
return buf_create_from_str("access of inactive union field");
+ case PanicMsgIdBadEnumValue:
+ return buf_create_from_str("invalid enum value");
}
zig_unreachable();
}
@@ -3516,34 +3518,112 @@ static LLVMValueRef ir_render_err_name(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildInBoundsGEP(g->builder, g->err_name_table, indices, 2, "");
}
+static LLVMValueRef get_enum_tag_name_function(CodeGen *g, TypeTableEntry *enum_type) {
+ assert(enum_type->id == TypeTableEntryIdEnum);
+ if (enum_type->data.enumeration.name_function)
+ return enum_type->data.enumeration.name_function;
+
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
+ TypeTableEntry *u8_slice_type = get_slice_type(g, u8_ptr_type);
+ TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
+
+ LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(u8_slice_type->type_ref, 0),
+ &tag_int_type->type_ref, 1, false);
+
+ Buf *fn_name = get_mangled_name(g, buf_sprintf("__zig_tag_name_%s", buf_ptr(&enum_type->name)), false);
+ LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
+ LLVMSetLinkage(fn_val, LLVMInternalLinkage);
+ LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
+ addLLVMFnAttr(fn_val, "nounwind");
+ add_uwtable_attr(g, fn_val);
+ if (g->build_mode == BuildModeDebug) {
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
+ }
+
+ LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
+ LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
+ FnTableEntry *prev_cur_fn = g->cur_fn;
+ LLVMValueRef prev_cur_fn_val = g->cur_fn_val;
+
+ LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
+ LLVMPositionBuilderAtEnd(g->builder, entry_block);
+ ZigLLVMClearCurrentDebugLocation(g->builder);
+ g->cur_fn = nullptr;
+ g->cur_fn_val = fn_val;
+
+ size_t field_count = enum_type->data.enumeration.src_field_count;
+ LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue");
+ LLVMValueRef tag_int_value = LLVMGetParam(fn_val, 0);
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count);
+
+
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ LLVMValueRef array_ptr_indices[] = {
+ LLVMConstNull(usize->type_ref),
+ LLVMConstNull(usize->type_ref),
+ };
+
+ for (size_t field_i = 0; field_i < field_count; field_i += 1) {
+ Buf *name = enum_type->data.enumeration.fields[field_i].name;
+ LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true);
+ LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), "");
+ LLVMSetInitializer(str_global, str_init);
+ LLVMSetLinkage(str_global, LLVMPrivateLinkage);
+ LLVMSetGlobalConstant(str_global, true);
+ LLVMSetUnnamedAddr(str_global, true);
+ LLVMSetAlignment(str_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init)));
+
+ LLVMValueRef fields[] = {
+ LLVMConstGEP(str_global, array_ptr_indices, 2),
+ LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false),
+ };
+ LLVMValueRef slice_init_value = LLVMConstNamedStruct(u8_slice_type->type_ref, fields, 2);
+
+ LLVMValueRef slice_global = LLVMAddGlobal(g->module, LLVMTypeOf(slice_init_value), "");
+ LLVMSetInitializer(slice_global, slice_init_value);
+ LLVMSetLinkage(slice_global, LLVMPrivateLinkage);
+ LLVMSetGlobalConstant(slice_global, true);
+ LLVMSetUnnamedAddr(slice_global, true);
+ LLVMSetAlignment(slice_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(slice_init_value)));
+
+ LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(g->cur_fn_val, "Name");
+ LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref,
+ &enum_type->data.enumeration.fields[field_i].value);
+ LLVMAddCase(switch_instr, this_tag_int_value, return_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, return_block);
+ LLVMBuildRet(g->builder, slice_global);
+ }
+
+ LLVMPositionBuilderAtEnd(g->builder, bad_value_block);
+ if (g->build_mode == BuildModeDebug || g->build_mode == BuildModeSafeRelease) {
+ gen_safety_crash(g, PanicMsgIdBadEnumValue);
+ } else {
+ LLVMBuildUnreachable(g->builder);
+ }
+
+ g->cur_fn = prev_cur_fn;
+ g->cur_fn_val = prev_cur_fn_val;
+ LLVMPositionBuilderAtEnd(g->builder, prev_block);
+ LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
+
+ enum_type->data.enumeration.name_function = fn_val;
+ return fn_val;
+}
+
static LLVMValueRef ir_render_enum_tag_name(CodeGen *g, IrExecutable *executable,
IrInstructionTagName *instruction)
{
TypeTableEntry *enum_type = instruction->target->value.type;
assert(enum_type->id == TypeTableEntryIdEnum);
- assert(enum_type->data.enumeration.generate_name_table);
- TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
+ LLVMValueRef enum_name_function = get_enum_tag_name_function(g, enum_type);
+
LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target);
- if (ir_want_runtime_safety(g, &instruction->base)) {
- size_t field_count = enum_type->data.enumeration.src_field_count;
-
- // if the field_count can't fit in the bits of the enum_type, then it can't possibly
- // be the wrong value
- BigInt field_bi;
- bigint_init_unsigned(&field_bi, field_count);
- if (bigint_fits_in_bits(&field_bi, tag_int_type->data.integral.bit_count, false)) {
- LLVMValueRef end_val = LLVMConstInt(LLVMTypeOf(enum_tag_value), field_count, false);
- add_bounds_check(g, enum_tag_value, LLVMIntEQ, nullptr, LLVMIntULT, end_val);
- }
- }
-
- LLVMValueRef indices[] = {
- LLVMConstNull(g->builtin_types.entry_usize->type_ref),
- gen_widen_or_shorten(g, false, tag_int_type,
- g->builtin_types.entry_usize, enum_tag_value),
- };
- return LLVMBuildInBoundsGEP(g->builder, enum_type->data.enumeration.name_table, indices, 2, "");
+ return ZigLLVMBuildCall(g->builder, enum_name_function, &enum_tag_value, 1,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
}
static LLVMValueRef ir_render_field_parent_ptr(CodeGen *g, IrExecutable *executable,
@@ -5471,55 +5551,6 @@ static void generate_error_name_table(CodeGen *g) {
LLVMSetAlignment(g->err_name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(err_name_table_init)));
}
-static void generate_enum_name_tables(CodeGen *g) {
- TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
- PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
- TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type);
-
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- LLVMValueRef array_ptr_indices[] = {
- LLVMConstNull(usize->type_ref),
- LLVMConstNull(usize->type_ref),
- };
-
-
- for (size_t enum_i = 0; enum_i < g->name_table_enums.length; enum_i += 1) {
- TypeTableEntry *enum_type = g->name_table_enums.at(enum_i);
- assert(enum_type->id == TypeTableEntryIdEnum);
-
- size_t field_count = enum_type->data.enumeration.src_field_count;
- LLVMValueRef *values = allocate(field_count);
- for (size_t field_i = 0; field_i < field_count; field_i += 1) {
- Buf *name = enum_type->data.enumeration.fields[field_i].name;
-
- LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true);
- LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), "");
- LLVMSetInitializer(str_global, str_init);
- LLVMSetLinkage(str_global, LLVMPrivateLinkage);
- LLVMSetGlobalConstant(str_global, true);
- LLVMSetUnnamedAddr(str_global, true);
- LLVMSetAlignment(str_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init)));
-
- LLVMValueRef fields[] = {
- LLVMConstGEP(str_global, array_ptr_indices, 2),
- LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false),
- };
- values[field_i] = LLVMConstNamedStruct(str_type->type_ref, fields, 2);
- }
-
- LLVMValueRef name_table_init = LLVMConstArray(str_type->type_ref, values, (unsigned)field_count);
-
- Buf *table_name = get_mangled_name(g, buf_sprintf("%s_name_table", buf_ptr(&enum_type->name)), false);
- LLVMValueRef name_table = LLVMAddGlobal(g->module, LLVMTypeOf(name_table_init), buf_ptr(table_name));
- LLVMSetInitializer(name_table, name_table_init);
- LLVMSetLinkage(name_table, LLVMPrivateLinkage);
- LLVMSetGlobalConstant(name_table, true);
- LLVMSetUnnamedAddr(name_table, true);
- LLVMSetAlignment(name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(name_table_init)));
- enum_type->data.enumeration.name_table = name_table;
- }
-}
-
static void build_all_basic_blocks(CodeGen *g, FnTableEntry *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
@@ -5616,7 +5647,6 @@ static void do_code_gen(CodeGen *g) {
}
generate_error_name_table(g);
- generate_enum_name_tables(g);
// Generate module level variables
for (size_t i = 0; i < g->global_vars.length; i += 1) {
diff --git a/src/ir.cpp b/src/ir.cpp
index 38f4dc90e7..4b6d5fdcf1 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -15837,11 +15837,6 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn
return out_val->type;
}
- if (!target->value.type->data.enumeration.generate_name_table) {
- target->value.type->data.enumeration.generate_name_table = true;
- ira->codegen->name_table_enums.append(target->value.type);
- }
-
IrInstruction *result = ir_build_tag_name(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, target);
ir_link_new_instruction(result, &instruction->base);
diff --git a/test/cases/enum.zig b/test/cases/enum.zig
index ae9f04869b..5c78d73092 100644
--- a/test/cases/enum.zig
+++ b/test/cases/enum.zig
@@ -883,3 +883,12 @@ test "empty extern enum with members" {
};
assert(@sizeOf(E) == @sizeOf(c_int));
}
+
+test "aoeu" {
+ const LocalFoo = enum {
+ A = 1,
+ B = 0,
+ };
+ var b = LocalFoo.B;
+ assert(mem.eql(u8, @tagName(b), "B"));
+}
From 5252566137c621c19225e692c82264096ff9f5fe Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 11 Jun 2018 17:34:45 -0400
Subject: [PATCH 32/49] langref: add coroutines documentation
See #367
---
doc/langref.html.in | 273 +++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 272 insertions(+), 1 deletion(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 0ada8a5196..d5ea8d75a4 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -3908,6 +3908,277 @@ pub fn main() void {
TODO: @fence()
TODO: @atomic rmw
TODO: builtin atomic memory ordering enum
+ {#header_close#}
+ {#header_open|Coroutines#}
+
+ A coroutine is a generalization of a function.
+
+
+ When you call a function, it creates a stack frame,
+ and then the function runs until it reaches a return
+ statement, and then the stack frame is destroyed.
+ At the callsite, the next line of code does not run
+ until the function returns.
+
+
+ A coroutine is like a function, but it can be suspended
+ and resumed any number of times, and then it must be
+ explicitly destroyed. When a coroutine suspends, it
+ returns to the resumer.
+
+ {#header_open|Minimal Coroutine Example#}
+
+ Declare a coroutine with the async keyword.
+ The expression in angle brackets must evaluate to a struct
+ which has these fields:
+
+
+ allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8 - where Error can be any error set.
+ freeFn: fn (self: *Allocator, old_mem: []u8) void
+
+
+ You may notice that this corresponds to the std.mem.Allocator interface.
+ This makes it convenient to integrate with existing allocators. Note, however,
+ that the language feature does not depend on the standard library, and any struct which
+ has these fields is allowed.
+
+
+ Omitting the angle bracket expression when defining an async function makes
+ the function generic. Zig will infer the allocator type when the async function is called.
+
+
+ Call a coroutine with the async keyword. Here, the expression in angle brackets
+ is a pointer to the allocator struct that the coroutine expects.
+
+
+ The result of an async function call is a promise->T type, where T
+ is the return type of the async function. Once a promise has been created, it must be
+ consumed, either with cancel or await:
+
+
+ Async functions start executing when created, so in the following example, the entire
+ async function completes before it is canceled:
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+var x: i32 = 1;
+
+test "create a coroutine and cancel it" {
+ const p = try async simpleAsyncFn();
+ comptime assert(@typeOf(p) == promise->void);
+ cancel p;
+ assert(x == 2);
+}
+async<*std.mem.Allocator> fn simpleAsyncFn() void {
+ x += 1;
+}
+ {#code_end#}
+ {#header_close#}
+ {#header_open|Suspend and Resume#}
+
+ At any point, an async function may suspend itself. This causes control flow to
+ return to the caller or resumer. The following code demonstrates where control flow
+ goes:
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "coroutine suspend, resume, cancel" {
+ seq('a');
+ const p = try async testAsyncSeq();
+ seq('c');
+ resume p;
+ seq('f');
+ cancel p;
+ seq('g');
+
+ assert(std.mem.eql(u8, points, "abcdefg"));
+}
+async fn testAsyncSeq() void {
+ defer seq('e');
+
+ seq('b');
+ suspend;
+ seq('d');
+}
+var points = []u8{0} ** "abcdefg".len;
+var index: usize = 0;
+
+fn seq(c: u8) void {
+ points[index] = c;
+ index += 1;
+}
+ {#code_end#}
+
+ When an async function suspends itself, it must be sure that it will be
+ resumed or canceled somehow, for example by registering its promise handle
+ in an event loop. Use a suspend capture block to gain access to the
+ promise:
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "coroutine suspend with block" {
+ const p = try async testSuspendBlock();
+ std.debug.assert(!result);
+ resume a_promise;
+ std.debug.assert(result);
+ cancel p;
+}
+
+var a_promise: promise = undefined;
+var result = false;
+async fn testSuspendBlock() void {
+ suspend |p| {
+ comptime assert(@typeOf(p) == promise->void);
+ a_promise = p;
+ }
+ result = true;
+}
+ {#code_end#}
+
+ Every suspend point in an async function represents a point at which the coroutine
+ could be destroyed. If that happens, defer expressions that are in
+ scope are run, as well as errdefer expressions.
+
+
+ {#link|Await#} counts as a suspend point.
+
+ {#header_open|Breaking from Suspend Blocks#}
+
+ Suspend blocks support labeled break, just like {#link|while#} and {#link|for#}.
+
+
+ Upon entering a suspend block, the coroutine is already considered
+ suspended, and can be resumed. For example, if you started another kernel thread,
+ and had that thread call resume on the promise handle provided by the
+ suspend block, the new thread would begin executing after the suspend
+ block, while the old thread continued executing the suspend block.
+
+
+ However, if you use labeled break on the suspend block, the coroutine
+ never returns to its resumer and continues executing.
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+test "break from suspend" {
+ var buf: [500]u8 = undefined;
+ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator;
+ var my_result: i32 = 1;
+ const p = try async testBreakFromSuspend(&my_result);
+ cancel p;
+ std.debug.assert(my_result == 2);
+}
+async fn testBreakFromSuspend(my_result: *i32) void {
+ s: suspend |p| {
+ break :s;
+ }
+ my_result.* += 1;
+ suspend;
+ my_result.* += 1;
+}
+ {#code_end#}
+ {#header_close#}
+ {#header_close#}
+ {#header_open|Await#}
+
+ The await keyword is used to coordinate with an async function's
+ return statement.
+
+
+ await is valid only in an async function, and it takes
+ as an operand a promise handle.
+ If the async function associated with the promise handle has already returned,
+ then await destroys the target async function, and gives the return value.
+ Otherwise, await suspends the current async function, registering its
+ promise handle with the target coroutine. It becomes the target coroutine's responsibility
+ to have ensured that it will be resumed or destroyed. When the target coroutine reaches
+ its return statement, it gives the return value to the awaiter, destroys itself, and then
+ resumes the awaiter.
+
+
+ A promise handle must be consumed exactly once after it is created, either by cancel or await.
+
+
+ await counts as a suspend point, and therefore at every await,
+ a coroutine can be potentially destroyed, which would run defer and errdefer expressions.
+
+ {#code_begin|test#}
+const std = @import("std");
+const assert = std.debug.assert;
+
+var a_promise: promise = undefined;
+var final_result: i32 = 0;
+
+test "coroutine await" {
+ seq('a');
+ const p = async amain() catch unreachable;
+ seq('f');
+ resume a_promise;
+ seq('i');
+ assert(final_result == 1234);
+ assert(std.mem.eql(u8, seq_points, "abcdefghi"));
+}
+async fn amain() void {
+ seq('b');
+ const p = async another() catch unreachable;
+ seq('e');
+ final_result = await p;
+ seq('h');
+}
+async fn another() i32 {
+ seq('c');
+ suspend |p| {
+ seq('d');
+ a_promise = p;
+ }
+ seq('g');
+ return 1234;
+}
+
+var seq_points = []u8{0} ** "abcdefghi".len;
+var seq_index: usize = 0;
+
+fn seq(c: u8) void {
+ seq_points[seq_index] = c;
+ seq_index += 1;
+}
+ {#code_end#}
+
+ In general, suspend is lower level than await. Most application
+ code will use only async and await, but event loop
+ implementations will make use of suspend internally.
+
+ {#header_close#}
+ {#header_open|Open Issues#}
+
+ There are a few issues with coroutines that are considered unresolved. Best be aware of them,
+ as the situation is likely to change before 1.0.0:
+
+
+ - Async functions have optimizations disabled - even in release modes - due to an
+ LLVM bug.
+
+ -
+ There are some situations where we can know statically that there will not be
+ memory allocation failure, but Zig still forces us to handle it.
+ TODO file an issue for this and link it here.
+
+ -
+ Zig does not take advantage of LLVM's allocation elision optimization for
+ coroutines. It crashed LLVM when I tried to do it the first time. This is
+ related to the other 2 bullet points here. See
+ #802.
+
+
+ {#header_close#}
+
{#header_close#}
{#header_open|Builtin Functions#}
@@ -6124,7 +6395,7 @@ hljs.registerLanguage("zig", function(t) {
},
a = t.IR + "\\s*\\(",
c = {
- keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong",
+ keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async",
built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall",
literal: "true false null undefined"
},
From 0a18d53c3dc9816677071c20ab846e3866787b39 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Mon, 11 Jun 2018 17:38:24 -0400
Subject: [PATCH 33/49] langref: add orelse keyword to syntax highlighting
---
doc/langref.html.in | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index d5ea8d75a4..3a7dbd1e90 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6395,7 +6395,7 @@ hljs.registerLanguage("zig", function(t) {
},
a = t.IR + "\\s*\\(",
c = {
- keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async",
+ keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse",
built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall",
literal: "true false null undefined"
},
From 3dd9af9948db696362aa5f41481dc4cb034bc6c2 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 12 Jun 2018 01:55:08 -0400
Subject: [PATCH 34/49] implement std.os.Dir for windows
improve std.os.File.access so that it does not depend on shlwapi.dll
closes #1084
---
doc/docgen.zig | 10 +-
std/os/file.zig | 18 ++-
std/os/index.zig | 266 ++++++++++++++++++++++++++++-----------
std/os/test.zig | 9 --
std/os/time.zig | 6 +-
std/os/windows/index.zig | 48 ++++++-
std/os/windows/util.zig | 38 ++++++
7 files changed, 293 insertions(+), 102 deletions(-)
diff --git a/doc/docgen.zig b/doc/docgen.zig
index 3283d146b0..d74c5a4615 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -51,14 +51,8 @@ pub fn main() !void {
var toc = try genToc(allocator, &tokenizer);
try os.makePath(allocator, tmp_dir_name);
- defer {
- // TODO issue #709
- // disabled to pass CI tests, but obviously we want to implement this
- // and then remove this workaround
- if (builtin.os != builtin.Os.windows) {
- os.deleteTree(allocator, tmp_dir_name) catch {};
- }
- }
+ defer os.deleteTree(allocator, tmp_dir_name) catch {};
+
try genHtml(allocator, &tokenizer, &toc, &buffered_out_stream.stream, zig_exe);
try buffered_out_stream.flush();
}
diff --git a/std/os/file.zig b/std/os/file.zig
index f15fa77688..e09c03c08b 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -96,7 +96,20 @@ pub const File = struct {
return File{ .handle = handle };
}
- pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
+ pub const AccessError = error {
+ PermissionDenied,
+ NotFound,
+ NameTooLong,
+ BadMode,
+ BadPathName,
+ Io,
+ SystemResources,
+ OutOfMemory,
+
+ Unexpected,
+ };
+
+ pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) AccessError!bool {
const path_with_null = try std.cstr.addNullByte(allocator, path);
defer allocator.free(path_with_null);
@@ -123,8 +136,7 @@ pub const File = struct {
}
return true;
} else if (is_windows) {
- // TODO do not depend on shlwapi.dll
- if (os.windows.PathFileExistsA(path_with_null.ptr) == os.windows.TRUE) {
+ if (os.windows.GetFileAttributesA(path_with_null.ptr) != os.windows.INVALID_FILE_ATTRIBUTES) {
return true;
}
diff --git a/std/os/index.zig b/std/os/index.zig
index 6a13ff94d4..b8907776c5 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -734,7 +734,23 @@ pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path:
}
}
-pub fn deleteFile(allocator: *Allocator, file_path: []const u8) !void {
+pub const DeleteFileError = error {
+ FileNotFound,
+ AccessDenied,
+ FileBusy,
+ FileSystem,
+ IsDir,
+ SymLinkLoop,
+ NameTooLong,
+ NotDir,
+ SystemResources,
+ ReadOnlyFileSystem,
+ OutOfMemory,
+
+ Unexpected,
+};
+
+pub fn deleteFile(allocator: *Allocator, file_path: []const u8) DeleteFileError!void {
if (builtin.os == Os.windows) {
return deleteFileWindows(allocator, file_path);
} else {
@@ -1019,37 +1035,67 @@ pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
}
}
+pub const DeleteDirError = error {
+ AccessDenied,
+ FileBusy,
+ SymLinkLoop,
+ NameTooLong,
+ FileNotFound,
+ SystemResources,
+ NotDir,
+ DirNotEmpty,
+ ReadOnlyFileSystem,
+ OutOfMemory,
+
+ Unexpected,
+};
+
/// Returns ::error.DirNotEmpty if the directory is not empty.
/// To delete a directory recursively, see ::deleteTree
-pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) !void {
+pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) DeleteDirError!void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
mem.copy(u8, path_buf, dir_path);
path_buf[dir_path.len] = 0;
- const err = posix.getErrno(posix.rmdir(path_buf.ptr));
- if (err > 0) {
- return switch (err) {
- posix.EACCES, posix.EPERM => error.AccessDenied,
- posix.EBUSY => error.FileBusy,
- posix.EFAULT, posix.EINVAL => unreachable,
- posix.ELOOP => error.SymLinkLoop,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ENOENT => error.FileNotFound,
- posix.ENOMEM => error.SystemResources,
- posix.ENOTDIR => error.NotDir,
- posix.EEXIST, posix.ENOTEMPTY => error.DirNotEmpty,
- posix.EROFS => error.ReadOnlyFileSystem,
- else => unexpectedErrorPosix(err),
- };
+ switch (builtin.os) {
+ Os.windows => {
+ if (windows.RemoveDirectoryA(path_buf.ptr) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
+ windows.ERROR.DIR_NOT_EMPTY => error.DirNotEmpty,
+ else => unexpectedErrorWindows(err),
+ };
+ }
+ },
+ Os.linux, Os.macosx, Os.ios => {
+ const err = posix.getErrno(posix.rmdir(path_buf.ptr));
+ if (err > 0) {
+ return switch (err) {
+ posix.EACCES, posix.EPERM => error.AccessDenied,
+ posix.EBUSY => error.FileBusy,
+ posix.EFAULT, posix.EINVAL => unreachable,
+ posix.ELOOP => error.SymLinkLoop,
+ posix.ENAMETOOLONG => error.NameTooLong,
+ posix.ENOENT => error.FileNotFound,
+ posix.ENOMEM => error.SystemResources,
+ posix.ENOTDIR => error.NotDir,
+ posix.EEXIST, posix.ENOTEMPTY => error.DirNotEmpty,
+ posix.EROFS => error.ReadOnlyFileSystem,
+ else => unexpectedErrorPosix(err),
+ };
+ }
+ },
+ else => @compileError("unimplemented"),
}
+
}
/// Whether ::full_path describes a symlink, file, or directory, this function
/// removes it. If it cannot be removed because it is a non-empty directory,
/// this function recursively removes its entries and then tries again.
-/// TODO non-recursive implementation
const DeleteTreeError = error{
OutOfMemory,
AccessDenied,
@@ -1128,7 +1174,7 @@ pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!
try full_entry_buf.resize(full_path.len + entry.name.len + 1);
const full_entry_path = full_entry_buf.toSlice();
mem.copy(u8, full_entry_path, full_path);
- full_entry_path[full_path.len] = '/';
+ full_entry_path[full_path.len] = path.sep;
mem.copy(u8, full_entry_path[full_path.len + 1 ..], entry.name);
try deleteTree(allocator, full_entry_path);
@@ -1139,16 +1185,29 @@ pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!
}
pub const Dir = struct {
- fd: i32,
- darwin_seek: darwin_seek_t,
+ handle: Handle,
allocator: *Allocator,
- buf: []u8,
- index: usize,
- end_index: usize,
- const darwin_seek_t = switch (builtin.os) {
- Os.macosx, Os.ios => i64,
- else => void,
+ pub const Handle = switch (builtin.os) {
+ Os.macosx, Os.ios => struct {
+ fd: i32,
+ seek: i64,
+ buf: []u8,
+ index: usize,
+ end_index: usize,
+ },
+ Os.linux => struct {
+ fd: i32,
+ buf: []u8,
+ index: usize,
+ end_index: usize,
+ },
+ Os.windows => struct {
+ handle: windows.HANDLE,
+ find_file_data: windows.WIN32_FIND_DATAA,
+ first: bool,
+ },
+ else => @compileError("unimplemented"),
};
pub const Entry = struct {
@@ -1168,81 +1227,117 @@ pub const Dir = struct {
};
};
- pub fn open(allocator: *Allocator, dir_path: []const u8) !Dir {
- const fd = switch (builtin.os) {
- Os.windows => @compileError("TODO support Dir.open for windows"),
- Os.linux => try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0),
- Os.macosx, Os.ios => try posixOpen(
- allocator,
- dir_path,
- posix.O_RDONLY | posix.O_NONBLOCK | posix.O_DIRECTORY | posix.O_CLOEXEC,
- 0,
- ),
- else => @compileError("Dir.open is not supported for this platform"),
- };
- const darwin_seek_init = switch (builtin.os) {
- Os.macosx, Os.ios => 0,
- else => {},
- };
+ pub const OpenError = error {
+ PathNotFound,
+ NotDir,
+ AccessDenied,
+ FileTooBig,
+ IsDir,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ SystemResources,
+ NoSpaceLeft,
+ PathAlreadyExists,
+ OutOfMemory,
+
+ Unexpected,
+ };
+
+ pub fn open(allocator: *Allocator, dir_path: []const u8) OpenError!Dir {
return Dir{
.allocator = allocator,
- .fd = fd,
- .darwin_seek = darwin_seek_init,
- .index = 0,
- .end_index = 0,
- .buf = []u8{},
+ .handle = switch (builtin.os) {
+ Os.windows => blk: {
+ var find_file_data: windows.WIN32_FIND_DATAA = undefined;
+ const handle = try windows_util.windowsFindFirstFile(allocator, dir_path, &find_file_data);
+ break :blk Handle {
+ .handle = handle,
+ .find_file_data = find_file_data, // TODO guaranteed copy elision
+ .first = true,
+ };
+ },
+ Os.macosx, Os.ios => Handle {
+ .fd = try posixOpen(
+ allocator,
+ dir_path,
+ posix.O_RDONLY | posix.O_NONBLOCK | posix.O_DIRECTORY | posix.O_CLOEXEC,
+ 0,
+ ),
+ .seek = 0,
+ .index = 0,
+ .end_index = 0,
+ .buf = []u8{},
+ },
+ Os.linux => Handle {
+ .fd = try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0,),
+ .index = 0,
+ .end_index = 0,
+ .buf = []u8{},
+ },
+ else => @compileError("unimplemented"),
+ },
};
}
pub fn close(self: *Dir) void {
- self.allocator.free(self.buf);
- os.close(self.fd);
+ switch (builtin.os) {
+ Os.windows => {
+ _ = windows.FindClose(self.handle.handle);
+ },
+ Os.macosx, Os.ios, Os.linux => {
+ self.allocator.free(self.handle.buf);
+ os.close(self.handle.fd);
+ },
+ else => @compileError("unimplemented"),
+ }
}
/// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to next, as well as when this ::Dir is deinitialized.
+ /// with subsequent calls to next, as well as when this `Dir` is deinitialized.
pub fn next(self: *Dir) !?Entry {
switch (builtin.os) {
Os.linux => return self.nextLinux(),
Os.macosx, Os.ios => return self.nextDarwin(),
Os.windows => return self.nextWindows(),
- else => @compileError("Dir.next not supported on " ++ @tagName(builtin.os)),
+ else => @compileError("unimplemented"),
}
}
fn nextDarwin(self: *Dir) !?Entry {
start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.buf.len == 0) {
- self.buf = try self.allocator.alloc(u8, page_size);
+ if (self.handle.index >= self.handle.end_index) {
+ if (self.handle.buf.len == 0) {
+ self.handle.buf = try self.allocator.alloc(u8, page_size);
}
while (true) {
- const result = posix.getdirentries64(self.fd, self.buf.ptr, self.buf.len, &self.darwin_seek);
+ const result = posix.getdirentries64(self.handle.fd, self.handle.buf.ptr, self.handle.buf.len, &self.handle.seek);
const err = posix.getErrno(result);
if (err > 0) {
switch (err) {
posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
posix.EINVAL => {
- self.buf = try self.allocator.realloc(u8, self.buf, self.buf.len * 2);
+ self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => return unexpectedErrorPosix(err),
}
}
if (result == 0) return null;
- self.index = 0;
- self.end_index = result;
+ self.handle.index = 0;
+ self.handle.end_index = result;
break;
}
}
- const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
- const next_index = self.index + darwin_entry.d_reclen;
- self.index = next_index;
+ const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.handle.buf[self.handle.index]);
+ const next_index = self.handle.index + darwin_entry.d_reclen;
+ self.handle.index = next_index;
const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen];
- // skip . and .. entries
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
continue :start_over;
}
@@ -1266,38 +1361,59 @@ pub const Dir = struct {
}
fn nextWindows(self: *Dir) !?Entry {
- @compileError("TODO support Dir.next for windows");
+ while (true) {
+ if (self.handle.first) {
+ self.handle.first = false;
+ } else {
+ if (!try windows_util.windowsFindNextFile(self.handle.handle, &self.handle.find_file_data))
+ return null;
+ }
+ const name = std.cstr.toSlice(self.handle.find_file_data.cFileName[0..].ptr);
+ if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
+ continue;
+ const kind = blk: {
+ const attrs = self.handle.find_file_data.dwFileAttributes;
+ if (attrs & windows.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk Entry.Kind.Directory;
+ if (attrs & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk Entry.Kind.SymLink;
+ if (attrs & windows.FILE_ATTRIBUTE_NORMAL != 0) break :blk Entry.Kind.File;
+ break :blk Entry.Kind.Unknown;
+ };
+ return Entry {
+ .name = name,
+ .kind = kind,
+ };
+ }
}
fn nextLinux(self: *Dir) !?Entry {
start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.buf.len == 0) {
- self.buf = try self.allocator.alloc(u8, page_size);
+ if (self.handle.index >= self.handle.end_index) {
+ if (self.handle.buf.len == 0) {
+ self.handle.buf = try self.allocator.alloc(u8, page_size);
}
while (true) {
- const result = posix.getdents(self.fd, self.buf.ptr, self.buf.len);
+ const result = posix.getdents(self.handle.fd, self.handle.buf.ptr, self.handle.buf.len);
const err = posix.getErrno(result);
if (err > 0) {
switch (err) {
posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
posix.EINVAL => {
- self.buf = try self.allocator.realloc(u8, self.buf, self.buf.len * 2);
+ self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => return unexpectedErrorPosix(err),
}
}
if (result == 0) return null;
- self.index = 0;
- self.end_index = result;
+ self.handle.index = 0;
+ self.handle.end_index = result;
break;
}
}
- const linux_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
- const next_index = self.index + linux_entry.d_reclen;
- self.index = next_index;
+ const linux_entry = @ptrCast(*align(1) posix.dirent, &self.handle.buf[self.handle.index]);
+ const next_index = self.handle.index + linux_entry.d_reclen;
+ self.handle.index = next_index;
const name = cstr.toSlice(@ptrCast([*]u8, &linux_entry.d_name));
@@ -1306,7 +1422,7 @@ pub const Dir = struct {
continue :start_over;
}
- const type_char = self.buf[next_index - 1];
+ const type_char = self.handle.buf[next_index - 1];
const entry_kind = switch (type_char) {
posix.DT_BLK => Entry.Kind.BlockDevice,
posix.DT_CHR => Entry.Kind.CharacterDevice,
diff --git a/std/os/test.zig b/std/os/test.zig
index 4aa3535829..5a977a569a 100644
--- a/std/os/test.zig
+++ b/std/os/test.zig
@@ -10,11 +10,6 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
test "makePath, put some files in it, deleteTree" {
- if (builtin.os == builtin.Os.windows) {
- // TODO implement os.Dir for windows
- // https://github.com/ziglang/zig/issues/709
- return;
- }
try os.makePath(a, "os_test_tmp/b/c");
try io.writeFile(a, "os_test_tmp/b/c/file.txt", "nonsense");
try io.writeFile(a, "os_test_tmp/b/file2.txt", "blah");
@@ -27,10 +22,6 @@ test "makePath, put some files in it, deleteTree" {
}
test "access file" {
- if (builtin.os == builtin.Os.windows) {
- return;
- }
-
try os.makePath(a, "os_test_tmp");
if (os.File.access(a, "os_test_tmp/file.txt", os.default_file_mode)) |ok| {
unreachable;
diff --git a/std/os/time.zig b/std/os/time.zig
index dd64df2156..43a584d936 100644
--- a/std/os/time.zig
+++ b/std/os/time.zig
@@ -68,11 +68,13 @@ pub const milliTimestamp = switch (builtin.os) {
fn milliTimestampWindows() u64 {
//FileTime has a granularity of 100 nanoseconds
// and uses the NTFS/Windows epoch
- var ft: i64 = undefined;
+ var ft: windows.FILETIME = undefined;
windows.GetSystemTimeAsFileTime(&ft);
const hns_per_ms = (ns_per_s / 100) / ms_per_s;
const epoch_adj = epoch.windows * ms_per_s;
- return u64(@divFloor(ft, hns_per_ms) + epoch_adj);
+
+ const ft64 = (u64(ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+ return @divFloor(ft64, hns_per_ms) - -epoch_adj;
}
fn milliTimestampDarwin() u64 {
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index 0934c3fd90..d631c6adbf 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -1,3 +1,7 @@
+test "import" {
+ _ = @import("util.zig");
+}
+
pub const ERROR = @import("error.zig");
pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
@@ -61,6 +65,10 @@ pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL;
pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn;
+pub extern "kernel32" stdcallcc fn FindFirstFileA(lpFileName: LPCSTR, lpFindFileData: *WIN32_FIND_DATAA) HANDLE;
+pub extern "kernel32" stdcallcc fn FindClose(hFindFile: HANDLE) BOOL;
+pub extern "kernel32" stdcallcc fn FindNextFileA(hFindFile: HANDLE, lpFindFileData: *WIN32_FIND_DATAA) BOOL;
+
pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: [*]u8) BOOL;
pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
@@ -77,6 +85,8 @@ pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCo
pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: LPCSTR) DWORD;
+
pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
@@ -97,7 +107,7 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
-pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void;
+pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(*FILETIME) void;
pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
@@ -131,6 +141,8 @@ pub extern "kernel32" stdcallcc fn ReadFile(
in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
+pub extern "kernel32" stdcallcc fn RemoveDirectoryA(lpPathName: LPCSTR) BOOL;
+
pub extern "kernel32" stdcallcc fn SetFilePointerEx(
in_fFile: HANDLE,
in_liDistanceToMove: LARGE_INTEGER,
@@ -196,7 +208,6 @@ pub const UNICODE = false;
pub const WCHAR = u16;
pub const WORD = u16;
pub const LARGE_INTEGER = i64;
-pub const FILETIME = i64;
pub const TRUE = 1;
pub const FALSE = 0;
@@ -212,6 +223,8 @@ pub const STD_ERROR_HANDLE = @maxValue(DWORD) - 12 + 1;
pub const INVALID_HANDLE_VALUE = @intToPtr(HANDLE, @maxValue(usize));
+pub const INVALID_FILE_ATTRIBUTES = DWORD(@maxValue(DWORD));
+
pub const OVERLAPPED = extern struct {
Internal: ULONG_PTR,
InternalHigh: ULONG_PTR,
@@ -293,13 +306,24 @@ pub const OPEN_EXISTING = 3;
pub const TRUNCATE_EXISTING = 5;
pub const FILE_ATTRIBUTE_ARCHIVE = 0x20;
+pub const FILE_ATTRIBUTE_COMPRESSED = 0x800;
+pub const FILE_ATTRIBUTE_DEVICE = 0x40;
+pub const FILE_ATTRIBUTE_DIRECTORY = 0x10;
pub const FILE_ATTRIBUTE_ENCRYPTED = 0x4000;
pub const FILE_ATTRIBUTE_HIDDEN = 0x2;
+pub const FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x8000;
pub const FILE_ATTRIBUTE_NORMAL = 0x80;
+pub const FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000;
+pub const FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x20000;
pub const FILE_ATTRIBUTE_OFFLINE = 0x1000;
pub const FILE_ATTRIBUTE_READONLY = 0x1;
+pub const FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x400000;
+pub const FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x40000;
+pub const FILE_ATTRIBUTE_REPARSE_POINT = 0x400;
+pub const FILE_ATTRIBUTE_SPARSE_FILE = 0x200;
pub const FILE_ATTRIBUTE_SYSTEM = 0x4;
pub const FILE_ATTRIBUTE_TEMPORARY = 0x100;
+pub const FILE_ATTRIBUTE_VIRTUAL = 0x10000;
pub const PROCESS_INFORMATION = extern struct {
hProcess: HANDLE,
@@ -372,6 +396,20 @@ pub const HEAP_NO_SERIALIZE = 0x00000001;
pub const PTHREAD_START_ROUTINE = extern fn (LPVOID) DWORD;
pub const LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE;
-test "import" {
- _ = @import("util.zig");
-}
+pub const WIN32_FIND_DATAA = extern struct {
+ dwFileAttributes: DWORD,
+ ftCreationTime: FILETIME,
+ ftLastAccessTime: FILETIME,
+ ftLastWriteTime: FILETIME,
+ nFileSizeHigh: DWORD,
+ nFileSizeLow: DWORD,
+ dwReserved0: DWORD,
+ dwReserved1: DWORD,
+ cFileName: [260]CHAR,
+ cAlternateFileName: [14]CHAR,
+};
+
+pub const FILETIME = extern struct {
+ dwLowDateTime: DWORD,
+ dwHighDateTime: DWORD,
+};
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index f93a673be0..0f0a190ed1 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -170,3 +170,41 @@ test "InvalidDll" {
return;
};
}
+
+
+pub fn windowsFindFirstFile(allocator: *mem.Allocator, dir_path: []const u8,
+ find_file_data: *windows.WIN32_FIND_DATAA) !windows.HANDLE
+{
+ const wild_and_null = []u8{'\\', '*', 0};
+ const path_with_wild_and_null = try allocator.alloc(u8, dir_path.len + wild_and_null.len);
+ defer allocator.free(path_with_wild_and_null);
+
+ mem.copy(u8, path_with_wild_and_null, dir_path);
+ mem.copy(u8, path_with_wild_and_null[dir_path.len..], wild_and_null);
+
+ const handle = windows.FindFirstFileA(path_with_wild_and_null.ptr, find_file_data);
+
+ if (handle == windows.INVALID_HANDLE_VALUE) {
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND,
+ windows.ERROR.PATH_NOT_FOUND,
+ => return error.PathNotFound,
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+
+ return handle;
+}
+
+/// Returns `true` if there was another file, `false` otherwise.
+pub fn windowsFindNextFile(handle: windows.HANDLE, find_file_data: *windows.WIN32_FIND_DATAA) !bool {
+ if (windows.FindNextFileA(handle, find_file_data) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.NO_MORE_FILES => false,
+ else => os.unexpectedErrorWindows(err),
+ };
+ }
+ return true;
+}
From 7580e39b388525237a84aabfb41c462376eac28e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 12 Jun 2018 02:18:11 -0400
Subject: [PATCH 35/49] zig fmt
---
std/os/file.zig | 2 +-
std/os/index.zig | 22 +++++++++++++---------
std/os/time.zig | 2 +-
std/os/windows/util.zig | 15 ++++++++-------
4 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/std/os/file.zig b/std/os/file.zig
index e09c03c08b..56da4f73a6 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -96,7 +96,7 @@ pub const File = struct {
return File{ .handle = handle };
}
- pub const AccessError = error {
+ pub const AccessError = error{
PermissionDenied,
NotFound,
NameTooLong,
diff --git a/std/os/index.zig b/std/os/index.zig
index b8907776c5..612301d25d 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -734,7 +734,7 @@ pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path:
}
}
-pub const DeleteFileError = error {
+pub const DeleteFileError = error{
FileNotFound,
AccessDenied,
FileBusy,
@@ -1035,7 +1035,7 @@ pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
}
}
-pub const DeleteDirError = error {
+pub const DeleteDirError = error{
AccessDenied,
FileBusy,
SymLinkLoop,
@@ -1090,7 +1090,6 @@ pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) DeleteDirError!voi
},
else => @compileError("unimplemented"),
}
-
}
/// Whether ::full_path describes a symlink, file, or directory, this function
@@ -1227,7 +1226,7 @@ pub const Dir = struct {
};
};
- pub const OpenError = error {
+ pub const OpenError = error{
PathNotFound,
NotDir,
AccessDenied,
@@ -1253,13 +1252,13 @@ pub const Dir = struct {
Os.windows => blk: {
var find_file_data: windows.WIN32_FIND_DATAA = undefined;
const handle = try windows_util.windowsFindFirstFile(allocator, dir_path, &find_file_data);
- break :blk Handle {
+ break :blk Handle{
.handle = handle,
.find_file_data = find_file_data, // TODO guaranteed copy elision
.first = true,
};
},
- Os.macosx, Os.ios => Handle {
+ Os.macosx, Os.ios => Handle{
.fd = try posixOpen(
allocator,
dir_path,
@@ -1271,8 +1270,13 @@ pub const Dir = struct {
.end_index = 0,
.buf = []u8{},
},
- Os.linux => Handle {
- .fd = try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0,),
+ Os.linux => Handle{
+ .fd = try posixOpen(
+ allocator,
+ dir_path,
+ posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC,
+ 0,
+ ),
.index = 0,
.end_index = 0,
.buf = []u8{},
@@ -1378,7 +1382,7 @@ pub const Dir = struct {
if (attrs & windows.FILE_ATTRIBUTE_NORMAL != 0) break :blk Entry.Kind.File;
break :blk Entry.Kind.Unknown;
};
- return Entry {
+ return Entry{
.name = name,
.kind = kind,
};
diff --git a/std/os/time.zig b/std/os/time.zig
index 43a584d936..ffb506cd7d 100644
--- a/std/os/time.zig
+++ b/std/os/time.zig
@@ -74,7 +74,7 @@ fn milliTimestampWindows() u64 {
const epoch_adj = epoch.windows * ms_per_s;
const ft64 = (u64(ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
- return @divFloor(ft64, hns_per_ms) - - epoch_adj;
+ return @divFloor(ft64, hns_per_ms) - -epoch_adj;
}
fn milliTimestampDarwin() u64 {
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 0f0a190ed1..88a9e7952e 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -171,11 +171,12 @@ test "InvalidDll" {
};
}
-
-pub fn windowsFindFirstFile(allocator: *mem.Allocator, dir_path: []const u8,
- find_file_data: *windows.WIN32_FIND_DATAA) !windows.HANDLE
-{
- const wild_and_null = []u8{'\\', '*', 0};
+pub fn windowsFindFirstFile(
+ allocator: *mem.Allocator,
+ dir_path: []const u8,
+ find_file_data: *windows.WIN32_FIND_DATAA,
+) !windows.HANDLE {
+ const wild_and_null = []u8{ '\\', '*', 0 };
const path_with_wild_and_null = try allocator.alloc(u8, dir_path.len + wild_and_null.len);
defer allocator.free(path_with_wild_and_null);
@@ -195,7 +196,7 @@ pub fn windowsFindFirstFile(allocator: *mem.Allocator, dir_path: []const u8,
}
return handle;
-}
+}
/// Returns `true` if there was another file, `false` otherwise.
pub fn windowsFindNextFile(handle: windows.HANDLE, find_file_data: *windows.WIN32_FIND_DATAA) !bool {
@@ -207,4 +208,4 @@ pub fn windowsFindNextFile(handle: windows.HANDLE, find_file_data: *windows.WIN3
};
}
return true;
-}
+}
From 259413251df478545948fdbc6213669f88f584bd Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 12 Jun 2018 15:06:02 -0400
Subject: [PATCH 36/49] fix ability to call mutating methods on zero size
structs
closes #838
---
src/ir.cpp | 37 ++++++++++++++++++++++++++++++++++++-
test/cases/struct.zig | 17 +++++++++++++++++
2 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 4b6d5fdcf1..4cebc488b8 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -8151,6 +8151,17 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
}
+ // implicit T to *T where T is zero bits
+ if (expected_type->id == TypeTableEntryIdPointer && expected_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, expected_type->data.pointer.child_type,
+ actual_type, source_node).id == ConstCastResultIdOk)
+ {
+ type_ensure_zero_bits_known(ira->codegen, actual_type);
+ if (!type_has_bits(actual_type)) {
+ return ImplicitCastMatchResultYes;
+ }
+ }
+
// implicit undefined literal to anything
if (actual_type->id == TypeTableEntryIdUndefined) {
return ImplicitCastMatchResultYes;
@@ -8820,7 +8831,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
TypeTableEntry *wanted_type, CastOp cast_op, bool need_alloca)
{
- if (value->value.special != ConstValSpecialRuntime &&
+ if ((instr_is_comptime(value) || !type_has_bits(wanted_type)) &&
cast_op != CastOpResizeSlice && cast_op != CastOpBytesToSlice)
{
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
@@ -9382,9 +9393,19 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
if (value->id == IrInstructionIdLoadPtr) {
IrInstructionLoadPtr *load_ptr_inst = (IrInstructionLoadPtr *) value;
+
if (load_ptr_inst->ptr->value.type->data.pointer.is_const) {
return load_ptr_inst->ptr;
}
+
+ type_ensure_zero_bits_known(ira->codegen, value->value.type);
+ if (type_is_invalid(value->value.type)) {
+ return ira->codegen->invalid_instruction;
+ }
+
+ if (!type_has_bits(value->value.type)) {
+ return load_ptr_inst->ptr;
+ }
}
if (instr_is_comptime(value)) {
@@ -10340,6 +10361,20 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+ // explicit cast from T to *T where T is zero bits
+ if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type, source_node).id == ConstCastResultIdOk)
+ {
+ type_ensure_zero_bits_known(ira->codegen, actual_type);
+ if (type_is_invalid(actual_type)) {
+ return ira->codegen->invalid_instruction;
+ }
+ if (!type_has_bits(actual_type)) {
+ return ir_get_ref(ira, source_instr, value, false, false);
+ }
+ }
+
// explicit cast from undefined to anything
if (actual_type->id == TypeTableEntryIdUndefined) {
diff --git a/test/cases/struct.zig b/test/cases/struct.zig
index 6f7d44e09b..6952611a8c 100644
--- a/test/cases/struct.zig
+++ b/test/cases/struct.zig
@@ -421,3 +421,20 @@ const Expr = union(enum) {
fn alloc(comptime T: type) []T {
return []T{};
}
+
+test "call method with mutable reference to struct with no fields" {
+ const S = struct {
+ fn doC(s: *const this) bool {
+ return true;
+ }
+ fn do(s: *this) bool {
+ return true;
+ }
+ };
+
+ var s = S{};
+ assert(S.doC(&s));
+ assert(s.doC());
+ assert(S.do(&s));
+ assert(s.do());
+}
From fdd9cf09287b5e397ad9b2c960c833a7de075e4c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 12 Jun 2018 15:14:32 -0400
Subject: [PATCH 37/49] better debugging for CI failures of std.atomic
---
std/atomic/queue.zig | 14 ++++++++++++--
std/atomic/stack.zig | 14 ++++++++++++--
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 4f856d9e01..2a48407383 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -94,8 +94,18 @@ test "std.atomic.queue" {
for (getters) |t|
t.wait();
- std.debug.assert(context.put_sum == context.get_sum);
- std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
+ if (context.put_sum != context.get_sum) {
+ std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
+ }
+
+ if (context.get_count != puts_per_thread * put_thread_count) {
+ std.debug.panic(
+ "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
+ context.get_count,
+ u32(puts_per_thread),
+ u32(put_thread_count),
+ );
+ }
}
fn startPuts(ctx: *Context) u8 {
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 77fa1a9100..c6b368b990 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -97,8 +97,18 @@ test "std.atomic.stack" {
for (getters) |t|
t.wait();
- std.debug.assert(context.put_sum == context.get_sum);
- std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
+ if (context.put_sum != context.get_sum) {
+ std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
+ }
+
+ if (context.get_count != puts_per_thread * put_thread_count) {
+ std.debug.panic(
+ "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
+ context.get_count,
+ u32(puts_per_thread),
+ u32(put_thread_count),
+ );
+ }
}
fn startPuts(ctx: *Context) u8 {
From 13d3255e2a71836b9981aa4115676c79d41f275f Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 12 Jun 2018 15:21:14 -0400
Subject: [PATCH 38/49] docgen: don't leave garbage .h files lying around
closes #1100
---
doc/docgen.zig | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/doc/docgen.zig b/doc/docgen.zig
index d74c5a4615..dfda54567f 100644
--- a/doc/docgen.zig
+++ b/doc/docgen.zig
@@ -958,6 +958,9 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var
var build_args = std.ArrayList([]const u8).init(allocator);
defer build_args.deinit();
+ const name_plus_h_ext = try std.fmt.allocPrint(allocator, "{}.h", code.name);
+ const output_h_file_name = try os.path.join(allocator, tmp_dir_name, name_plus_h_ext);
+
try build_args.appendSlice([][]const u8{
zig_exe,
"build-obj",
@@ -966,6 +969,8 @@ fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var
"on",
"--output",
tmp_obj_file_name,
+ "--output-h",
+ output_h_file_name,
});
if (!code.is_inline) {
From 86adc1ef39ed12ebe9eb6d3b7cf8eea481dd060d Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Tue, 12 Jun 2018 19:38:59 -0400
Subject: [PATCH 39/49] add docs and missing test case for merging error sets
See #367
---
doc/langref.html.in | 45 ++++++++++++++++++++++++++++++++-
test/behavior.zig | 1 +
test/cases/merge_error_sets.zig | 21 +++++++++++++++
3 files changed, 66 insertions(+), 1 deletion(-)
create mode 100644 test/cases/merge_error_sets.zig
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 3a7dbd1e90..290ed77e7d 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -3117,7 +3117,50 @@ test "error union" {
comptime assert(@typeOf(foo).ErrorSet == error);
}
{#code_end#}
- TODO the || operator for error sets
+ {#header_open|Merging Error Sets#}
+
+ Use the || operator to merge two error sets together. The resulting
+ error set contains the errors of both error sets. Doc comments from the left-hand
+ side override doc comments from the right-hand side. In this example, the doc
+ comments for C.PathNotFound is A doc comment.
+
+
+ This is especially useful for functions which return different error sets depending
+ on {#link|comptime#} branches. For example, the Zig standard library uses
+ LinuxFileOpenError || WindowsFileOpenError for the error set of opening
+ files.
+
+ {#code_begin|test#}
+const A = error{
+ NotDir,
+
+ /// A doc comment
+ PathNotFound,
+};
+const B = error{
+ OutOfMemory,
+
+ /// B doc comment
+ PathNotFound,
+};
+
+const C = A || B;
+
+fn foo() C!void {
+ return error.NotDir;
+}
+
+test "merge error sets" {
+ if (foo()) {
+ @panic("unexpected");
+ } else |err| switch (err) {
+ error.OutOfMemory => @panic("unexpected"),
+ error.PathNotFound => @panic("unexpected"),
+ error.NotDir => {},
+ }
+}
+ {#code_end#}
+ {#header_close#}
{#header_open|Inferred Error Sets#}
Because many functions in Zig return a possible error, Zig supports inferring the error set.
diff --git a/test/behavior.zig b/test/behavior.zig
index 3341fe717d..eb8b643bb7 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -31,6 +31,7 @@ comptime {
_ = @import("cases/incomplete_struct_param_tld.zig");
_ = @import("cases/ir_block_deps.zig");
_ = @import("cases/math.zig");
+ _ = @import("cases/merge_error_sets.zig");
_ = @import("cases/misc.zig");
_ = @import("cases/namespace_depends_on_compile_var/index.zig");
_ = @import("cases/new_stack_call.zig");
diff --git a/test/cases/merge_error_sets.zig b/test/cases/merge_error_sets.zig
new file mode 100644
index 0000000000..189bd16a4d
--- /dev/null
+++ b/test/cases/merge_error_sets.zig
@@ -0,0 +1,21 @@
+const A = error{
+ PathNotFound,
+ NotDir,
+};
+const B = error{OutOfMemory};
+
+const C = A || B;
+
+fn foo() C!void {
+ return error.NotDir;
+}
+
+test "merge error sets" {
+ if (foo()) {
+ @panic("unexpected");
+ } else |err| switch (err) {
+ error.OutOfMemory => @panic("unexpected"),
+ error.PathNotFound => @panic("unexpected"),
+ error.NotDir => {},
+ }
+}
From 911014051487e83177689893e57491b86e72589b Mon Sep 17 00:00:00 2001
From: Marc Tiehuis
Date: Wed, 13 Jun 2018 22:25:04 +1200
Subject: [PATCH 40/49] Add i128 compiler-rt div/mul support
---
CMakeLists.txt | 2 +
std/special/compiler_rt/divti3.zig | 16 +++++
std/special/compiler_rt/divti3_test.zig | 21 +++++++
std/special/compiler_rt/index.zig | 3 +
std/special/compiler_rt/muloti4.zig | 45 ++++++++++++++
std/special/compiler_rt/muloti4_test.zig | 76 ++++++++++++++++++++++++
6 files changed, 163 insertions(+)
create mode 100644 std/special/compiler_rt/divti3.zig
create mode 100644 std/special/compiler_rt/divti3_test.zig
create mode 100644 std/special/compiler_rt/muloti4.zig
create mode 100644 std/special/compiler_rt/muloti4_test.zig
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 64abb67a8f..cfa0146bb1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -556,6 +556,7 @@ set(ZIG_STD_FILES
"special/compiler_rt/aulldiv.zig"
"special/compiler_rt/aullrem.zig"
"special/compiler_rt/comparetf2.zig"
+ "special/compiler_rt/divti3.zig"
"special/compiler_rt/fixuint.zig"
"special/compiler_rt/fixunsdfdi.zig"
"special/compiler_rt/fixunsdfsi.zig"
@@ -566,6 +567,7 @@ set(ZIG_STD_FILES
"special/compiler_rt/fixunstfdi.zig"
"special/compiler_rt/fixunstfsi.zig"
"special/compiler_rt/fixunstfti.zig"
+ "special/compiler_rt/muloti4.zig"
"special/compiler_rt/index.zig"
"special/compiler_rt/udivmod.zig"
"special/compiler_rt/udivmoddi4.zig"
diff --git a/std/special/compiler_rt/divti3.zig b/std/special/compiler_rt/divti3.zig
new file mode 100644
index 0000000000..f3fccf3746
--- /dev/null
+++ b/std/special/compiler_rt/divti3.zig
@@ -0,0 +1,16 @@
+const udivmod = @import("udivmod.zig").udivmod;
+const builtin = @import("builtin");
+
+pub extern fn __divti3(a: i128, b: i128) i128 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const s_a = a >> (i128.bit_count - 1);
+ const s_b = b >> (i128.bit_count - 1);
+
+ const an = (a ^ s_a) -% s_a;
+ const bn = (b ^ s_b) -% s_b;
+
+ const r = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), null);
+ const s = s_a ^ s_b;
+ return (i128(r) ^ s) -% s;
+}
diff --git a/std/special/compiler_rt/divti3_test.zig b/std/special/compiler_rt/divti3_test.zig
new file mode 100644
index 0000000000..eef5a9b812
--- /dev/null
+++ b/std/special/compiler_rt/divti3_test.zig
@@ -0,0 +1,21 @@
+const __divti3 = @import("divti3.zig").__divti3;
+const assert = @import("std").debug.assert;
+
+fn test__divti3(a: i128, b: i128, expected: i128) void {
+ const x = __divti3(a, b);
+ assert(x == expected);
+}
+
+test "divti3" {
+ test__divti3(0, 1, 0);
+ test__divti3(0, -1, 0);
+ test__divti3(2, 1, 2);
+ test__divti3(2, -1, -2);
+ test__divti3(-2, 1, -2);
+ test__divti3(-2, -1, 2);
+
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), 1, @bitCast(i128, u128(0x8 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), -1, @bitCast(i128, u128(0x8 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), -2, @bitCast(i128, u128(0x4 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), 2, @bitCast(i128, u128(0xc << 124)));
+}
diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig
index d328324320..0573854c91 100644
--- a/std/special/compiler_rt/index.zig
+++ b/std/special/compiler_rt/index.zig
@@ -38,6 +38,9 @@ comptime {
@export("__umoddi3", __umoddi3, linkage);
@export("__udivmodsi4", __udivmodsi4, linkage);
+ @export("__divti3", @import("divti3.zig").__divti3, linkage);
+ @export("__muloti4", @import("muloti4.zig").__muloti4, linkage);
+
if (isArmArch()) {
@export("__aeabi_uldivmod", __aeabi_uldivmod, linkage);
@export("__aeabi_uidivmod", __aeabi_uidivmod, linkage);
diff --git a/std/special/compiler_rt/muloti4.zig b/std/special/compiler_rt/muloti4.zig
new file mode 100644
index 0000000000..35d33f4ad4
--- /dev/null
+++ b/std/special/compiler_rt/muloti4.zig
@@ -0,0 +1,45 @@
+const udivmod = @import("udivmod.zig").udivmod;
+const builtin = @import("builtin");
+
+pub extern fn __muloti4(a: i128, b: i128, overflow: *c_int) i128 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const min = @bitCast(i128, u128(1 << (i128.bit_count - 1)));
+ const max = ~min;
+ overflow.* = 0;
+
+ const r = a *% b;
+ if (a == min) {
+ if (b != 0 and b != 1) {
+ overflow.* = 1;
+ }
+ return r;
+ }
+ if (b == min) {
+ if (a != 0 and a != 1) {
+ overflow.* = 1;
+ }
+ return r;
+ }
+
+ const sa = a >> (i128.bit_count - 1);
+ const abs_a = (a ^ sa) -% sa;
+ const sb = b >> (i128.bit_count - 1);
+ const abs_b = (b ^ sb) -% sb;
+
+ if (abs_a < 2 or abs_b < 2) {
+ return r;
+ }
+
+ if (sa == sb) {
+ if (abs_a > @divFloor(max, abs_b)) {
+ overflow.* = 1;
+ }
+ } else {
+ if (abs_a > @divFloor(min, -abs_b)) {
+ overflow.* = 1;
+ }
+ }
+
+ return r;
+}
diff --git a/std/special/compiler_rt/muloti4_test.zig b/std/special/compiler_rt/muloti4_test.zig
new file mode 100644
index 0000000000..b61655aaec
--- /dev/null
+++ b/std/special/compiler_rt/muloti4_test.zig
@@ -0,0 +1,76 @@
+const __muloti4 = @import("muloti4.zig").__muloti4;
+const assert = @import("std").debug.assert;
+
+fn test__muloti4(a: i128, b: i128, expected: i128, expected_overflow: c_int) void {
+ var overflow: c_int = undefined;
+ const x = __muloti4(a, b, &overflow);
+ assert(overflow == expected_overflow and (overflow != 0 or x == expected));
+}
+
+test "muloti4" {
+ test__muloti4(0, 0, 0, 0);
+ test__muloti4(0, 1, 0, 0);
+ test__muloti4(1, 0, 0, 0);
+ test__muloti4(0, 10, 0, 0);
+ test__muloti4(10, 0, 0, 0);
+
+ test__muloti4(0, 81985529216486895, 0, 0);
+ test__muloti4(81985529216486895, 0, 0, 0);
+
+ test__muloti4(0, -1, 0, 0);
+ test__muloti4(-1, 0, 0, 0);
+ test__muloti4(0, -10, 0, 0);
+ test__muloti4(-10, 0, 0, 0);
+ test__muloti4(0, -81985529216486895, 0, 0);
+ test__muloti4(-81985529216486895, 0, 0, 0);
+
+ test__muloti4(3037000499, 3037000499, 9223372030926249001, 0);
+ test__muloti4(-3037000499, 3037000499, -9223372030926249001, 0);
+ test__muloti4(3037000499, -3037000499, -9223372030926249001, 0);
+ test__muloti4(-3037000499, -3037000499, 9223372030926249001, 0);
+
+ test__muloti4(4398046511103, 2097152, 9223372036852678656, 0);
+ test__muloti4(-4398046511103, 2097152, -9223372036852678656, 0);
+ test__muloti4(4398046511103, -2097152, -9223372036852678656, 0);
+ test__muloti4(-4398046511103, -2097152, 9223372036852678656, 0);
+
+ test__muloti4(2097152, 4398046511103, 9223372036852678656, 0);
+ test__muloti4(-2097152, 4398046511103, -9223372036852678656, 0);
+ test__muloti4(2097152, -4398046511103, -9223372036852678656, 0);
+ test__muloti4(-2097152, -4398046511103, 9223372036852678656, 0);
+
+ test__muloti4(@bitCast(i128, u128(0x00000000000000B504F333F9DE5BE000)), @bitCast(i128, u128(0x000000000000000000B504F333F9DE5B)), @bitCast(i128, u128(0x7FFFFFFFFFFFF328DF915DA296E8A000)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -1, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(-1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), -2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), -1, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(-1, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x80000000000000000000000000000000)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 1, @bitCast(i128, u128(0x80000000000000000000000000000000)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), -2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), -1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(-1, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 1, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+}
From 8dd24796c43b5241a5dcd5508e4be00483ebc25b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 13 Jun 2018 11:04:09 -0400
Subject: [PATCH 41/49] disallow implicit casts that break rules for optionals
closes #1102
---
src/ir.cpp | 332 ++++++++++++++++++++++------------------
test/compile_errors.zig | 13 ++
2 files changed, 197 insertions(+), 148 deletions(-)
diff --git a/src/ir.cpp b/src/ir.cpp
index 4cebc488b8..e5e8dcbb9d 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -7647,23 +7647,24 @@ static TypeTableEntry *get_error_set_intersection(IrAnalyze *ira, TypeTableEntry
}
-static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *expected_type,
- TypeTableEntry *actual_type, AstNode *source_node)
+static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *wanted_type,
+ TypeTableEntry *actual_type, AstNode *source_node, bool wanted_is_mutable)
{
CodeGen *g = ira->codegen;
ConstCastOnly result = {};
result.id = ConstCastResultIdOk;
- if (expected_type == actual_type)
+ if (wanted_type == actual_type)
return result;
// * and [*] can do a const-cast-only to ?* and ?[*], respectively
- if (expected_type->id == TypeTableEntryIdOptional &&
- expected_type->data.maybe.child_type->id == TypeTableEntryIdPointer &&
+ // but not if there is a mutable parent pointer
+ if (!wanted_is_mutable && wanted_type->id == TypeTableEntryIdOptional &&
+ wanted_type->data.maybe.child_type->id == TypeTableEntryIdPointer &&
actual_type->id == TypeTableEntryIdPointer)
{
ConstCastOnly child = types_match_const_cast_only(ira,
- expected_type->data.maybe.child_type, actual_type, source_node);
+ wanted_type->data.maybe.child_type, actual_type, source_node, wanted_is_mutable);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdNullWrapPtr;
result.data.null_wrap_ptr_child = allocate_nonzero(1);
@@ -7673,16 +7674,17 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// pointer const
- if (expected_type->id == TypeTableEntryIdPointer &&
+ if (wanted_type->id == TypeTableEntryIdPointer &&
actual_type->id == TypeTableEntryIdPointer &&
- (actual_type->data.pointer.ptr_len == expected_type->data.pointer.ptr_len) &&
- (!actual_type->data.pointer.is_const || expected_type->data.pointer.is_const) &&
- (!actual_type->data.pointer.is_volatile || expected_type->data.pointer.is_volatile) &&
- actual_type->data.pointer.bit_offset == expected_type->data.pointer.bit_offset &&
- actual_type->data.pointer.unaligned_bit_count == expected_type->data.pointer.unaligned_bit_count &&
- actual_type->data.pointer.alignment >= expected_type->data.pointer.alignment)
+ (actual_type->data.pointer.ptr_len == wanted_type->data.pointer.ptr_len) &&
+ (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) &&
+ (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile) &&
+ actual_type->data.pointer.bit_offset == wanted_type->data.pointer.bit_offset &&
+ actual_type->data.pointer.unaligned_bit_count == wanted_type->data.pointer.unaligned_bit_count &&
+ actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.pointer.child_type, actual_type->data.pointer.child_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type, source_node, !wanted_type->data.pointer.is_const);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdPointerChild;
result.data.pointer_child = allocate_nonzero(1);
@@ -7692,17 +7694,17 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// slice const
- if (is_slice(expected_type) && is_slice(actual_type)) {
+ if (is_slice(wanted_type) && is_slice(actual_type)) {
TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry;
- TypeTableEntry *expected_ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
- if ((!actual_ptr_type->data.pointer.is_const || expected_ptr_type->data.pointer.is_const) &&
- (!actual_ptr_type->data.pointer.is_volatile || expected_ptr_type->data.pointer.is_volatile) &&
- actual_ptr_type->data.pointer.bit_offset == expected_ptr_type->data.pointer.bit_offset &&
- actual_ptr_type->data.pointer.unaligned_bit_count == expected_ptr_type->data.pointer.unaligned_bit_count &&
- actual_ptr_type->data.pointer.alignment >= expected_ptr_type->data.pointer.alignment)
+ TypeTableEntry *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ if ((!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) &&
+ (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile) &&
+ actual_ptr_type->data.pointer.bit_offset == wanted_ptr_type->data.pointer.bit_offset &&
+ actual_ptr_type->data.pointer.unaligned_bit_count == wanted_ptr_type->data.pointer.unaligned_bit_count &&
+ actual_ptr_type->data.pointer.alignment >= wanted_ptr_type->data.pointer.alignment)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_ptr_type->data.pointer.child_type,
- actual_ptr_type->data.pointer.child_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type,
+ actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdSliceChild;
result.data.slice_child = allocate_nonzero(1);
@@ -7713,8 +7715,9 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// maybe
- if (expected_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.maybe.child_type, actual_type->data.maybe.child_type, source_node);
+ if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.maybe.child_type,
+ actual_type->data.maybe.child_type, source_node, wanted_is_mutable);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdOptionalChild;
result.data.optional_child = allocate_nonzero(1);
@@ -7724,15 +7727,17 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// error union
- if (expected_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) {
- ConstCastOnly payload_child = types_match_const_cast_only(ira, expected_type->data.error_union.payload_type, actual_type->data.error_union.payload_type, source_node);
+ if (wanted_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) {
+ ConstCastOnly payload_child = types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type,
+ actual_type->data.error_union.payload_type, source_node, wanted_is_mutable);
if (payload_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionPayload;
result.data.error_union_payload = allocate_nonzero(1);
*result.data.error_union_payload = payload_child;
return result;
}
- ConstCastOnly error_set_child = types_match_const_cast_only(ira, expected_type->data.error_union.err_set_type, actual_type->data.error_union.err_set_type, source_node);
+ ConstCastOnly error_set_child = types_match_const_cast_only(ira, wanted_type->data.error_union.err_set_type,
+ actual_type->data.error_union.err_set_type, source_node, wanted_is_mutable);
if (error_set_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionErrorSet;
result.data.error_union_error_set = allocate_nonzero(1);
@@ -7743,9 +7748,9 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// error set
- if (expected_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) {
+ if (wanted_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) {
TypeTableEntry *contained_set = actual_type;
- TypeTableEntry *container_set = expected_type;
+ TypeTableEntry *container_set = wanted_type;
// if the container set is inferred, then this will always work.
if (container_set->data.error_set.infer_fn != nullptr) {
@@ -7786,36 +7791,37 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
- if (expected_type == ira->codegen->builtin_types.entry_promise &&
+ if (wanted_type == ira->codegen->builtin_types.entry_promise &&
actual_type->id == TypeTableEntryIdPromise)
{
return result;
}
// fn
- if (expected_type->id == TypeTableEntryIdFn &&
+ if (wanted_type->id == TypeTableEntryIdFn &&
actual_type->id == TypeTableEntryIdFn)
{
- if (expected_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
+ if (wanted_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
result.id = ConstCastResultIdFnAlign;
return result;
}
- if (expected_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
+ if (wanted_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
result.id = ConstCastResultIdFnCC;
return result;
}
- if (expected_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
+ if (wanted_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
result.id = ConstCastResultIdFnVarArgs;
return result;
}
- if (expected_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
+ if (wanted_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
result.id = ConstCastResultIdFnIsGeneric;
return result;
}
- if (!expected_type->data.fn.is_generic &&
+ if (!wanted_type->data.fn.is_generic &&
actual_type->data.fn.fn_type_id.return_type->id != TypeTableEntryIdUnreachable)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.fn.fn_type_id.return_type, actual_type->data.fn.fn_type_id.return_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.fn.fn_type_id.return_type,
+ actual_type->data.fn.fn_type_id.return_type, source_node, false);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdFnReturnType;
result.data.return_type = allocate_nonzero(1);
@@ -7823,9 +7829,11 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
}
- if (!expected_type->data.fn.is_generic && expected_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- ConstCastOnly child = types_match_const_cast_only(ira, actual_type->data.fn.fn_type_id.async_allocator_type,
- expected_type->data.fn.fn_type_id.async_allocator_type, source_node);
+ if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
+ ConstCastOnly child = types_match_const_cast_only(ira,
+ actual_type->data.fn.fn_type_id.async_allocator_type,
+ wanted_type->data.fn.fn_type_id.async_allocator_type,
+ source_node, false);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdAsyncAllocatorType;
result.data.async_allocator_type = allocate_nonzero(1);
@@ -7833,22 +7841,23 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
}
- if (expected_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
+ if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
result.id = ConstCastResultIdFnArgCount;
return result;
}
- if (expected_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
+ if (wanted_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
result.id = ConstCastResultIdFnGenericArgCount;
return result;
}
- assert(expected_type->data.fn.is_generic ||
- expected_type->data.fn.fn_type_id.next_param_index == expected_type->data.fn.fn_type_id.param_count);
- for (size_t i = 0; i < expected_type->data.fn.fn_type_id.next_param_index; i += 1) {
+ assert(wanted_type->data.fn.is_generic ||
+ wanted_type->data.fn.fn_type_id.next_param_index == wanted_type->data.fn.fn_type_id.param_count);
+ for (size_t i = 0; i < wanted_type->data.fn.fn_type_id.next_param_index; i += 1) {
// note it's reversed for parameters
FnTypeParamInfo *actual_param_info = &actual_type->data.fn.fn_type_id.param_info[i];
- FnTypeParamInfo *expected_param_info = &expected_type->data.fn.fn_type_id.param_info[i];
+ FnTypeParamInfo *expected_param_info = &wanted_type->data.fn.fn_type_id.param_info[i];
- ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type, expected_param_info->type, source_node);
+ ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type,
+ expected_param_info->type, source_node, false);
if (arg_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdFnArg;
result.data.fn_arg.arg_index = i;
@@ -7876,11 +7885,12 @@ enum ImplicitCastMatchResult {
ImplicitCastMatchResultReportedError,
};
-static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, TypeTableEntry *expected_type,
+static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, TypeTableEntry *wanted_type,
TypeTableEntry *actual_type, IrInstruction *value)
{
AstNode *source_node = value->source_node;
- ConstCastOnly const_cast_result = types_match_const_cast_only(ira, expected_type, actual_type, source_node);
+ ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
+ source_node, false);
if (const_cast_result.id == ConstCastResultIdOk) {
return ImplicitCastMatchResultYes;
}
@@ -7895,21 +7905,21 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
missing_errors = &const_cast_result.data.error_union_error_set->data.error_set.missing_errors;
} else if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSetGlobal) {
ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
+ buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
add_error_note(ira->codegen, msg, value->source_node,
buf_sprintf("unable to cast global error set into smaller set"));
return ImplicitCastMatchResultReportedError;
}
} else if (const_cast_result.id == ConstCastResultIdErrSetGlobal) {
ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
+ buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
add_error_note(ira->codegen, msg, value->source_node,
buf_sprintf("unable to cast global error set into smaller set"));
return ImplicitCastMatchResultReportedError;
}
if (missing_errors != nullptr) {
ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
+ buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
for (size_t i = 0; i < missing_errors->length; i += 1) {
ErrorTableEntry *error_entry = missing_errors->at(i);
add_error_note(ira->codegen, msg, error_entry->decl_node,
@@ -7920,162 +7930,168 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit conversion from ?T to ?U
- if (expected_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
- ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type,
+ if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
+ ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, wanted_type->data.maybe.child_type,
actual_type->data.maybe.child_type, value);
if (res != ImplicitCastMatchResultNo)
return res;
}
// implicit conversion from non maybe type to maybe type
- if (expected_type->id == TypeTableEntryIdOptional) {
- ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type,
+ if (wanted_type->id == TypeTableEntryIdOptional) {
+ ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, wanted_type->data.maybe.child_type,
actual_type, value);
if (res != ImplicitCastMatchResultNo)
return res;
}
// implicit conversion from null literal to maybe type
- if (expected_type->id == TypeTableEntryIdOptional &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
actual_type->id == TypeTableEntryIdNull)
{
return ImplicitCastMatchResultYes;
}
// implicit T to U!T
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
- ir_types_match_with_implicit_cast(ira, expected_type->data.error_union.payload_type, actual_type, value))
+ if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ ir_types_match_with_implicit_cast(ira, wanted_type->data.error_union.payload_type, actual_type, value))
{
return ImplicitCastMatchResultYes;
}
// implicit conversion from error set to error union type
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
+ if (wanted_type->id == TypeTableEntryIdErrorUnion &&
actual_type->id == TypeTableEntryIdErrorSet)
{
return ImplicitCastMatchResultYes;
}
// implicit conversion from T to U!?T
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
- expected_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
+ if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
ir_types_match_with_implicit_cast(ira,
- expected_type->data.error_union.payload_type->data.maybe.child_type,
+ wanted_type->data.error_union.payload_type->data.maybe.child_type,
actual_type, value))
{
return ImplicitCastMatchResultYes;
}
// implicit widening conversion
- if (expected_type->id == TypeTableEntryIdInt &&
+ if (wanted_type->id == TypeTableEntryIdInt &&
actual_type->id == TypeTableEntryIdInt &&
- expected_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
- expected_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
+ wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
+ wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
{
return ImplicitCastMatchResultYes;
}
// small enough unsigned ints can get casted to large enough signed ints
- if (expected_type->id == TypeTableEntryIdInt && expected_type->data.integral.is_signed &&
+ if (wanted_type->id == TypeTableEntryIdInt && wanted_type->data.integral.is_signed &&
actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed &&
- expected_type->data.integral.bit_count > actual_type->data.integral.bit_count)
+ wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
{
return ImplicitCastMatchResultYes;
}
// implicit float widening conversion
- if (expected_type->id == TypeTableEntryIdFloat &&
+ if (wanted_type->id == TypeTableEntryIdFloat &&
actual_type->id == TypeTableEntryIdFloat &&
- expected_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
+ wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
{
return ImplicitCastMatchResultYes;
}
// implicit [N]T to []const T
- if (is_slice(expected_type) && actual_type->id == TypeTableEntryIdArray) {
- TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
+ if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) {
+ TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
}
// implicit &const [N]T to []const T
- if (is_slice(expected_type) &&
+ if (is_slice(wanted_type) &&
actual_type->id == TypeTableEntryIdPointer &&
actual_type->data.pointer.ptr_len == PtrLenSingle &&
actual_type->data.pointer.is_const &&
actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
{
- TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
+ TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *array_type = actual_type->data.pointer.child_type;
if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
}
// implicit [N]T to &const []const T
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.is_const &&
- expected_type->data.pointer.ptr_len == PtrLenSingle &&
- is_slice(expected_type->data.pointer.child_type) &&
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.is_const &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ is_slice(wanted_type->data.pointer.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
TypeTableEntry *ptr_type =
- expected_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type,
+ actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
}
// implicit *[N]T to [*]T
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.ptr_len == PtrLenUnknown &&
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
actual_type->id == TypeTableEntryIdPointer &&
actual_type->data.pointer.ptr_len == PtrLenSingle &&
actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
- types_match_const_cast_only(ira, expected_type->data.pointer.child_type,
- actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
// implicit *[N]T to []T
- if (is_slice(expected_type) &&
+ if (is_slice(wanted_type) &&
actual_type->id == TypeTableEntryIdPointer &&
actual_type->data.pointer.ptr_len == PtrLenSingle &&
actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
{
- TypeTableEntry *slice_ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
+ TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(slice_ptr_type->id == TypeTableEntryIdPointer);
if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
- actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
}
// implicit [N]T to ?[]const T
- if (expected_type->id == TypeTableEntryIdOptional &&
- is_slice(expected_type->data.maybe.child_type) &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
+ is_slice(wanted_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
TypeTableEntry *ptr_type =
- expected_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type,
+ actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
@@ -8087,16 +8103,16 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
if (actual_type->id == TypeTableEntryIdComptimeFloat ||
actual_type->id == TypeTableEntryIdComptimeInt)
{
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.ptr_len == PtrLenSingle &&
- expected_type->data.pointer.is_const)
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ wanted_type->data.pointer.is_const)
{
- if (ir_num_lit_fits_in_other_type(ira, value, expected_type->data.pointer.child_type, false)) {
+ if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.pointer.child_type, false)) {
return ImplicitCastMatchResultYes;
} else {
return ImplicitCastMatchResultReportedError;
}
- } else if (ir_num_lit_fits_in_other_type(ira, value, expected_type, false)) {
+ } else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, false)) {
return ImplicitCastMatchResultYes;
} else {
return ImplicitCastMatchResultReportedError;
@@ -8106,41 +8122,41 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicit typed number to integer or float literal.
// works when the number is known
if (value->value.special == ConstValSpecialStatic) {
- if (actual_type->id == TypeTableEntryIdInt && expected_type->id == TypeTableEntryIdComptimeInt) {
+ if (actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) {
return ImplicitCastMatchResultYes;
- } else if (actual_type->id == TypeTableEntryIdFloat && expected_type->id == TypeTableEntryIdComptimeFloat) {
+ } else if (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat) {
return ImplicitCastMatchResultYes;
}
}
// implicit union to its enum tag type
- if (expected_type->id == TypeTableEntryIdEnum && actual_type->id == TypeTableEntryIdUnion &&
+ if (wanted_type->id == TypeTableEntryIdEnum && actual_type->id == TypeTableEntryIdUnion &&
(actual_type->data.unionation.decl_node->data.container_decl.auto_enum ||
actual_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
{
type_ensure_zero_bits_known(ira->codegen, actual_type);
- if (actual_type->data.unionation.tag_type == expected_type) {
+ if (actual_type->data.unionation.tag_type == wanted_type) {
return ImplicitCastMatchResultYes;
}
}
// implicit enum to union which has the enum as the tag type
- if (expected_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
- (expected_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- expected_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
+ if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
+ (wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
+ wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
{
- type_ensure_zero_bits_known(ira->codegen, expected_type);
- if (expected_type->data.unionation.tag_type == actual_type) {
+ type_ensure_zero_bits_known(ira->codegen, wanted_type);
+ if (wanted_type->data.unionation.tag_type == actual_type) {
return ImplicitCastMatchResultYes;
}
}
// implicit enum to &const union which has the enum as the tag type
if (actual_type->id == TypeTableEntryIdEnum &&
- expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.ptr_len == PtrLenSingle)
+ wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle)
{
- TypeTableEntry *union_type = expected_type->data.pointer.child_type;
+ TypeTableEntry *union_type = wanted_type->data.pointer.child_type;
if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
{
@@ -8152,9 +8168,9 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit T to *T where T is zero bits
- if (expected_type->id == TypeTableEntryIdPointer && expected_type->data.pointer.ptr_len == PtrLenSingle &&
- types_match_const_cast_only(ira, expected_type->data.pointer.child_type,
- actual_type, source_node).id == ConstCastResultIdOk)
+ if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type, source_node, false).id == ConstCastResultIdOk)
{
type_ensure_zero_bits_known(ira->codegen, actual_type);
if (!type_has_bits(actual_type)) {
@@ -8170,10 +8186,10 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicitly take a const pointer to something
if (!type_requires_comptime(actual_type)) {
TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.ptr_len == PtrLenSingle &&
- types_match_const_cast_only(ira, expected_type, const_ptr_actual,
- source_node).id == ConstCastResultIdOk)
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, wanted_type, const_ptr_actual,
+ source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
@@ -8415,9 +8431,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
TypeTableEntry *cur_payload_type = cur_type->data.error_union.payload_type;
bool const_cast_prev = types_match_const_cast_only(ira, prev_payload_type, cur_payload_type,
- source_node).id == ConstCastResultIdOk;
+ source_node, false).id == ConstCastResultIdOk;
bool const_cast_cur = types_match_const_cast_only(ira, cur_payload_type, prev_payload_type,
- source_node).id == ConstCastResultIdOk;
+ source_node, false).id == ConstCastResultIdOk;
if (const_cast_prev || const_cast_cur) {
if (const_cast_cur) {
@@ -8504,11 +8520,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
continue;
}
- if (types_match_const_cast_only(ira, prev_type, cur_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, prev_type, cur_type, source_node, false).id == ConstCastResultIdOk) {
continue;
}
- if (types_match_const_cast_only(ira, cur_type, prev_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, cur_type, prev_type, source_node, false).id == ConstCastResultIdOk) {
prev_inst = cur_inst;
continue;
}
@@ -8531,13 +8547,15 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (prev_type->id == TypeTableEntryIdErrorUnion &&
- types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type,
+ source_node, false).id == ConstCastResultIdOk)
{
continue;
}
if (cur_type->id == TypeTableEntryIdErrorUnion &&
- types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type,
+ source_node, false).id == ConstCastResultIdOk)
{
if (err_set_type != nullptr) {
TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
@@ -8559,13 +8577,15 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (prev_type->id == TypeTableEntryIdOptional &&
- types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type,
+ source_node, false).id == ConstCastResultIdOk)
{
continue;
}
if (cur_type->id == TypeTableEntryIdOptional &&
- types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type,
+ source_node, false).id == ConstCastResultIdOk)
{
prev_inst = cur_inst;
continue;
@@ -8602,8 +8622,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray &&
- cur_type->data.array.len != prev_type->data.array.len &&
- types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ cur_type->data.array.len != prev_type->data.array.len &&
+ types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = true;
prev_inst = cur_inst;
@@ -8611,8 +8632,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray &&
- cur_type->data.array.len != prev_type->data.array.len &&
- types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ cur_type->data.array.len != prev_type->data.array.len &&
+ types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = true;
continue;
@@ -8621,8 +8643,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (cur_type->id == TypeTableEntryIdArray && is_slice(prev_type) &&
(prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
cur_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
- cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira,
+ prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
+ cur_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = false;
continue;
@@ -8631,8 +8654,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (prev_type->id == TypeTableEntryIdArray && is_slice(cur_type) &&
(cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
prev_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
- prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira,
+ cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
+ prev_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
prev_inst = cur_inst;
convert_to_const_slice = false;
@@ -9913,7 +9937,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit match or non-const to const
- if (types_match_const_cast_only(ira, wanted_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type, actual_type, source_node, false).id == ConstCastResultIdOk) {
return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false);
}
@@ -9959,7 +9983,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
}
@@ -9977,7 +10002,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *array_type = actual_type->data.pointer.child_type;
if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
}
@@ -9993,7 +10019,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value);
if (type_is_invalid(cast1->value.type))
@@ -10016,7 +10043,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value);
if (type_is_invalid(cast1->value.type))
@@ -10084,7 +10112,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment &&
types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
- actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
}
@@ -10098,7 +10127,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(slice_ptr_type->id == TypeTableEntryIdPointer);
if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
- actual_type->data.pointer.child_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type);
}
@@ -10109,7 +10139,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
if (wanted_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
- if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node,
+ false).id == ConstCastResultIdOk)
+ {
return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
} else if (actual_type->id == TypeTableEntryIdComptimeInt ||
actual_type->id == TypeTableEntryIdComptimeFloat)
@@ -10144,7 +10176,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// explicit cast from child type of error type to error type
if (wanted_type->id == TypeTableEntryIdErrorUnion) {
- if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
+ source_node, false).id == ConstCastResultIdOk)
+ {
return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
} else if (actual_type->id == TypeTableEntryIdComptimeInt ||
actual_type->id == TypeTableEntryIdComptimeFloat)
@@ -10166,7 +10200,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.error_union.payload_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
if (type_is_invalid(cast1->value.type))
@@ -10193,7 +10228,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
actual_type->id != TypeTableEntryIdOptional)
{
TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
- if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk ||
+ if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk ||
actual_type->id == TypeTableEntryIdNull ||
actual_type->id == TypeTableEntryIdComptimeInt ||
actual_type->id == TypeTableEntryIdComptimeFloat)
@@ -10345,7 +10380,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
if (array_type->id == TypeTableEntryIdArray && array_type->data.array.len == 1 &&
types_match_const_cast_only(ira, array_type->data.array.child_type,
- actual_type->data.pointer.child_type, source_node).id == ConstCastResultIdOk)
+ actual_type->data.pointer.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) {
ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
@@ -10364,7 +10400,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// explicit cast from T to *T where T is zero bits
if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
- actual_type, source_node).id == ConstCastResultIdOk)
+ actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
type_ensure_zero_bits_known(ira->codegen, actual_type);
if (type_is_invalid(actual_type)) {
@@ -10384,7 +10420,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// explicit cast from something to const pointer of it
if (!type_requires_comptime(actual_type)) {
TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node, false).id == ConstCastResultIdOk) {
return ir_analyze_cast_ref(ira, source_instr, value, wanted_type);
}
}
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 5ec2759032..06f17a37ee 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,19 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "use implicit casts to assign null to non-nullable pointer",
+ \\export fn entry() void {
+ \\ var x: i32 = 1234;
+ \\ var p: *i32 = &x;
+ \\ var pp: *?*i32 = &p;
+ \\ pp.* = null;
+ \\ var y = p.*;
+ \\}
+ ,
+ ".tmp_source.zig:4:23: error: expected type '*?*i32', found '**i32'",
+ );
+
cases.add(
"attempted implicit cast from T to [*]const T",
\\export fn entry() void {
From 41e6c664d8b751b81effd2341cf1bd5950c40d8c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 13 Jun 2018 11:09:41 -0400
Subject: [PATCH 42/49] langref: add merge error sets operator to operator
table
---
doc/langref.html.in | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 290ed77e7d..1fccd6e351 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -1261,6 +1261,22 @@ const ptr = &x;
x.* == 1234
|
+
+ a || b
|
+
+
+ - {#link|Error Set Type#}
+
+ |
+
+ {#link|Merging Error Sets#}
+ |
+
+ const A = error{One};
+const B = error{Two};
+(A || B) == error{One, Two}
+ |
+
{#header_close#}
@@ -1269,7 +1285,7 @@ x.* == 1234
a!b
!x -x -%x ~x &x ?x
x{} x.* x.?
-! * / % ** *%
+! * / % ** *% ||
+ - ++ +% -%
<< >>
&
From e1f56c9af6fd9ab495e0c499e2c545e7d048fa9e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 13 Jun 2018 11:48:06 -0400
Subject: [PATCH 43/49] std.zig.ast: add test for iterate
closes #1101
---
std/zig/ast.zig | 38 +++++++++++++++++++++++++++-----------
1 file changed, 27 insertions(+), 11 deletions(-)
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index defaded78a..4246a50861 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -734,7 +734,7 @@ pub const Node = struct {
var i = index;
if (self.doc_comments) |comments| {
- if (i < 1) return *comments.base;
+ if (i < 1) return &comments.base;
i -= 1;
}
@@ -1243,7 +1243,7 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return *@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
@@ -1296,7 +1296,7 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return *@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
@@ -1347,7 +1347,7 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return *@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
@@ -1536,22 +1536,27 @@ pub const Node = struct {
var i = index;
switch (self.op) {
+ // TODO https://github.com/ziglang/zig/issues/1107
Op.SliceType => |addr_of_info| {
if (addr_of_info.align_info) |align_info| {
if (i < 1) return align_info.node;
i -= 1;
}
},
- Op.AddrOf => |addr_of_info| {
+
+ Op.PtrType => |addr_of_info| {
if (addr_of_info.align_info) |align_info| {
if (i < 1) return align_info.node;
i -= 1;
}
},
+
Op.ArrayType => |size_expr| {
if (i < 1) return size_expr;
i -= 1;
},
+
+ Op.AddressOf,
Op.Await,
Op.BitNot,
Op.BoolNot,
@@ -1561,8 +1566,6 @@ pub const Node = struct {
Op.NegationWrap,
Op.Try,
Op.Resume,
- Op.UnwrapOptional,
- Op.PointerType,
=> {},
}
@@ -1667,7 +1670,9 @@ pub const Node = struct {
if (i < fields.len) return fields.at(i).*;
i -= fields.len;
},
- Op.Deref => {},
+ Op.UnwrapOptional,
+ Op.Deref,
+ => {},
}
return null;
@@ -2022,7 +2027,7 @@ pub const Node = struct {
switch (self.kind) {
Kind.Variable => |variable_name| {
- if (i < 1) return *variable_name.base;
+ if (i < 1) return &variable_name.base;
i -= 1;
},
Kind.Return => |return_type| {
@@ -2092,10 +2097,10 @@ pub const Node = struct {
pub fn iterate(self: *Asm, index: usize) ?*Node {
var i = index;
- if (i < self.outputs.len) return *(self.outputs.at(index).*).base;
+ if (i < self.outputs.len) return &self.outputs.at(index).*.base;
i -= self.outputs.len;
- if (i < self.inputs.len) return *(self.inputs.at(index).*).base;
+ if (i < self.inputs.len) return &self.inputs.at(index).*.base;
i -= self.inputs.len;
return null;
@@ -2205,3 +2210,14 @@ pub const Node = struct {
}
};
};
+
+test "iterate" {
+ var root = Node.Root{
+ .base = Node{ .id = Node.Id.Root },
+ .doc_comments = null,
+ .decls = Node.Root.DeclList.init(std.debug.global_allocator),
+ .eof_token = 0,
+ };
+ var base = &root.base;
+ assert(base.iterate(0) == null);
+}
From fc87f6e417d206a88b581b77d3a5494ae4c978dd Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 13 Jun 2018 11:57:57 -0400
Subject: [PATCH 44/49] fix race condition bug in test harness of std.atomic
---
std/atomic/queue.zig | 7 +++----
std/atomic/stack.zig | 7 +++----
2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 2a48407383..3dc64dbea2 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -124,15 +124,14 @@ fn startPuts(ctx: *Context) u8 {
fn startGets(ctx: *Context) u8 {
while (true) {
+ const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+
while (ctx.queue.get()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
}
- if (@atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1) {
- break;
- }
+ if (last) return 0;
}
- return 0;
}
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index c6b368b990..9e81d89257 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -127,15 +127,14 @@ fn startPuts(ctx: *Context) u8 {
fn startGets(ctx: *Context) u8 {
while (true) {
+ const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+
while (ctx.stack.pop()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
}
- if (@atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1) {
- break;
- }
+ if (last) return 0;
}
- return 0;
}
From a369d69c5144c2a4186e4f8d20bfda0c3f86605a Mon Sep 17 00:00:00 2001
From: Marc Tiehuis
Date: Thu, 14 Jun 2018 21:18:36 +1200
Subject: [PATCH 45/49] Add windows x86_64 i128 abi workaround
---
std/special/compiler_rt/divti3.zig | 10 ++++++++++
std/special/compiler_rt/index.zig | 7 ++++---
std/special/compiler_rt/muloti4.zig | 10 ++++++++++
std/special/compiler_rt/muloti4_test.zig | 2 +-
4 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/std/special/compiler_rt/divti3.zig b/std/special/compiler_rt/divti3.zig
index f3fccf3746..60460ea62d 100644
--- a/std/special/compiler_rt/divti3.zig
+++ b/std/special/compiler_rt/divti3.zig
@@ -1,5 +1,6 @@
const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
pub extern fn __divti3(a: i128, b: i128) i128 {
@setRuntimeSafety(builtin.is_test);
@@ -14,3 +15,12 @@ pub extern fn __divti3(a: i128, b: i128) i128 {
const s = s_a ^ s_b;
return (i128(r) ^ s) -% s;
}
+
+pub extern fn __divti3_windows_x86_64(a: *const i128, b: *const i128) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(i128, __divti3(a.*, b.*));
+}
+
+test "import divti3" {
+ _ = @import("divti3_test.zig");
+}
diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig
index 0573854c91..f952730353 100644
--- a/std/special/compiler_rt/index.zig
+++ b/std/special/compiler_rt/index.zig
@@ -38,9 +38,6 @@ comptime {
@export("__umoddi3", __umoddi3, linkage);
@export("__udivmodsi4", __udivmodsi4, linkage);
- @export("__divti3", @import("divti3.zig").__divti3, linkage);
- @export("__muloti4", @import("muloti4.zig").__muloti4, linkage);
-
if (isArmArch()) {
@export("__aeabi_uldivmod", __aeabi_uldivmod, linkage);
@export("__aeabi_uidivmod", __aeabi_uidivmod, linkage);
@@ -61,6 +58,8 @@ comptime {
@export("__chkstk", __chkstk, strong_linkage);
@export("___chkstk_ms", ___chkstk_ms, linkage);
}
+ @export("__divti3", @import("divti3.zig").__divti3_windows_x86_64, linkage);
+ @export("__muloti4", @import("muloti4.zig").__muloti4_windows_x86_64, linkage);
@export("__udivti3", @import("udivti3.zig").__udivti3_windows_x86_64, linkage);
@export("__udivmodti4", @import("udivmodti4.zig").__udivmodti4_windows_x86_64, linkage);
@export("__umodti3", @import("umodti3.zig").__umodti3_windows_x86_64, linkage);
@@ -68,6 +67,8 @@ comptime {
else => {},
}
} else {
+ @export("__divti3", @import("divti3.zig").__divti3, linkage);
+ @export("__muloti4", @import("muloti4.zig").__muloti4, linkage);
@export("__udivti3", @import("udivti3.zig").__udivti3, linkage);
@export("__udivmodti4", @import("udivmodti4.zig").__udivmodti4, linkage);
@export("__umodti3", @import("umodti3.zig").__umodti3, linkage);
diff --git a/std/special/compiler_rt/muloti4.zig b/std/special/compiler_rt/muloti4.zig
index 35d33f4ad4..866077c80c 100644
--- a/std/special/compiler_rt/muloti4.zig
+++ b/std/special/compiler_rt/muloti4.zig
@@ -1,5 +1,6 @@
const udivmod = @import("udivmod.zig").udivmod;
const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
pub extern fn __muloti4(a: i128, b: i128, overflow: *c_int) i128 {
@setRuntimeSafety(builtin.is_test);
@@ -43,3 +44,12 @@ pub extern fn __muloti4(a: i128, b: i128, overflow: *c_int) i128 {
return r;
}
+
+pub extern fn __muloti4_windows_x86_64(a: *const i128, b: *const i128, overflow: *c_int) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(i128, __muloti4(a.*, b.*, overflow));
+}
+
+test "import muloti4" {
+ _ = @import("muloti4_test.zig");
+}
diff --git a/std/special/compiler_rt/muloti4_test.zig b/std/special/compiler_rt/muloti4_test.zig
index b61655aaec..6b3671323f 100644
--- a/std/special/compiler_rt/muloti4_test.zig
+++ b/std/special/compiler_rt/muloti4_test.zig
@@ -4,7 +4,7 @@ const assert = @import("std").debug.assert;
fn test__muloti4(a: i128, b: i128, expected: i128, expected_overflow: c_int) void {
var overflow: c_int = undefined;
const x = __muloti4(a, b, &overflow);
- assert(overflow == expected_overflow and (overflow != 0 or x == expected));
+ assert(overflow == expected_overflow and (expected_overflow != 0 or x == expected));
}
test "muloti4" {
From 4ec09ac243afa0b784669e618ec09e9e444a0275 Mon Sep 17 00:00:00 2001
From: Alexandros Naskos
Date: Thu, 14 Jun 2018 17:57:28 +0300
Subject: [PATCH 46/49] Enabled optional types of zero bit types with no LLVM
DI type. (#1110)
* Zero bit optional types do not need a LLVM DI type
---
src/analyze.cpp | 3 ++-
test/cases/null.zig | 11 +++++++++++
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/src/analyze.cpp b/src/analyze.cpp
index 0aa5ea5dcb..cbeac7bc21 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -522,7 +522,6 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional);
assert(child_type->type_ref || child_type->zero_bits);
- assert(child_type->di_type);
entry->is_copyable = type_is_copyable(g, child_type);
buf_resize(&entry->name, 0);
@@ -532,12 +531,14 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
entry->type_ref = LLVMInt1Type();
entry->di_type = g->builtin_types.entry_bool->di_type;
} else if (type_is_codegen_pointer(child_type)) {
+ assert(child_type->di_type);
// this is an optimization but also is necessary for calling C
// functions where all pointers are maybe pointers
// function types are technically pointers
entry->type_ref = child_type->type_ref;
entry->di_type = child_type->di_type;
} else {
+ assert(child_type->di_type);
// create a struct with a boolean whether this is the null value
LLVMTypeRef elem_types[] = {
child_type->type_ref,
diff --git a/test/cases/null.zig b/test/cases/null.zig
index cdcfd23efb..d2a9aaed55 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -143,3 +143,14 @@ test "null with default unwrap" {
const x: i32 = null orelse 1;
assert(x == 1);
}
+
+test "optional types" {
+ comptime {
+ const opt_type_struct = StructWithOptionalType { .t=u8, };
+ assert(opt_type_struct.t != null and opt_type_struct.t.? == u8);
+ }
+}
+
+const StructWithOptionalType = struct {
+ t: ?type,
+};
From 6943cefebf94631ff02d6e28436c07f4030924c6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 14 Jun 2018 16:15:32 -0400
Subject: [PATCH 47/49] std.os.path.dirname: return null instead of empty slice
for when there is no directory component. Makes it harder
to write bugs.
closes #1017
---
src-self-hosted/introspect.zig | 2 +-
src-self-hosted/main.zig | 2 +-
std/build.zig | 16 +++++-----
std/os/index.zig | 12 ++++----
std/os/path.zig | 54 +++++++++++++++++++++-------------
5 files changed, 51 insertions(+), 35 deletions(-)
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 56b56c0c78..74084b48c6 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -27,7 +27,7 @@ pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
var cur_path: []const u8 = self_exe_path;
while (true) {
- const test_dir = os.path.dirname(cur_path);
+ const test_dir = os.path.dirname(cur_path) orelse ".";
if (mem.eql(u8, test_dir, cur_path)) {
break;
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 1c91ab9cbe..ffe23d2ffe 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -249,7 +249,7 @@ fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
defer build_args.deinit();
const build_file_basename = os.path.basename(build_file_abs);
- const build_file_dirname = os.path.dirname(build_file_abs);
+ const build_file_dirname = os.path.dirname(build_file_abs) orelse ".";
var full_cache_dir: []u8 = undefined;
if (flags.single("cache-dir")) |cache_dir| {
diff --git a/std/build.zig b/std/build.zig
index 5733aec17d..16ce426bcb 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -617,7 +617,7 @@ pub const Builder = struct {
warn("cp {} {}\n", source_path, dest_path);
}
- const dirname = os.path.dirname(dest_path);
+ const dirname = os.path.dirname(dest_path) orelse ".";
const abs_source_path = self.pathFromRoot(source_path);
os.makePath(self.allocator, dirname) catch |err| {
warn("Unable to create path {}: {}\n", dirname, @errorName(err));
@@ -1395,8 +1395,9 @@ pub const LibExeObjStep = struct {
cc_args.append(abs_source_file) catch unreachable;
const cache_o_src = os.path.join(builder.allocator, builder.cache_root, source_file) catch unreachable;
- const cache_o_dir = os.path.dirname(cache_o_src);
- try builder.makePath(cache_o_dir);
+ if (os.path.dirname(cache_o_src)) |cache_o_dir| {
+ try builder.makePath(cache_o_dir);
+ }
const cache_o_file = builder.fmt("{}{}", cache_o_src, self.target.oFileExt());
cc_args.append("-o") catch unreachable;
cc_args.append(builder.pathFromRoot(cache_o_file)) catch unreachable;
@@ -1509,8 +1510,9 @@ pub const LibExeObjStep = struct {
cc_args.append(abs_source_file) catch unreachable;
const cache_o_src = os.path.join(builder.allocator, builder.cache_root, source_file) catch unreachable;
- const cache_o_dir = os.path.dirname(cache_o_src);
- try builder.makePath(cache_o_dir);
+ if (os.path.dirname(cache_o_src)) |cache_o_dir| {
+ try builder.makePath(cache_o_dir);
+ }
const cache_o_file = builder.fmt("{}{}", cache_o_src, self.target.oFileExt());
cc_args.append("-o") catch unreachable;
cc_args.append(builder.pathFromRoot(cache_o_file)) catch unreachable;
@@ -1855,7 +1857,7 @@ pub const WriteFileStep = struct {
fn make(step: *Step) !void {
const self = @fieldParentPtr(WriteFileStep, "step", step);
const full_path = self.builder.pathFromRoot(self.file_path);
- const full_path_dir = os.path.dirname(full_path);
+ const full_path_dir = os.path.dirname(full_path) orelse ".";
os.makePath(self.builder.allocator, full_path_dir) catch |err| {
warn("unable to make path {}: {}\n", full_path_dir, @errorName(err));
return err;
@@ -1945,7 +1947,7 @@ pub const Step = struct {
};
fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
- const out_dir = os.path.dirname(output_path);
+ const out_dir = os.path.dirname(output_path) orelse ".";
const out_basename = os.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
const major_only_path = os.path.join(allocator, out_dir, filename_major_only) catch unreachable;
diff --git a/std/os/index.zig b/std/os/index.zig
index 612301d25d..46f5e76d98 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -714,7 +714,7 @@ pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path:
else => return err, // TODO zig should know this set does not include PathAlreadyExists
}
- const dirname = os.path.dirname(new_path);
+ const dirname = os.path.dirname(new_path) orelse ".";
var rand_buf: [12]u8 = undefined;
const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64.Base64Encoder.calcSize(rand_buf.len));
@@ -860,14 +860,14 @@ pub const AtomicFile = struct {
var rand_buf: [12]u8 = undefined;
- const dirname_component_len = if (dirname.len == 0) 0 else dirname.len + 1;
+ const dirname_component_len = if (dirname) |d| d.len + 1 else 0;
const tmp_path = try allocator.alloc(u8, dirname_component_len +
base64.Base64Encoder.calcSize(rand_buf.len));
errdefer allocator.free(tmp_path);
- if (dirname.len != 0) {
- mem.copy(u8, tmp_path[0..], dirname);
- tmp_path[dirname.len] = os.path.sep;
+ if (dirname) |dir| {
+ mem.copy(u8, tmp_path[0..], dir);
+ tmp_path[dir.len] = os.path.sep;
}
while (true) {
@@ -1965,7 +1965,7 @@ pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
// the executable was in when it was run.
const full_exe_path = try readLink(allocator, "/proc/self/exe");
errdefer allocator.free(full_exe_path);
- const dir = path.dirname(full_exe_path);
+ const dir = path.dirname(full_exe_path) orelse ".";
return allocator.shrink(u8, full_exe_path, dir.len);
},
Os.windows, Os.macosx, Os.ios => {
diff --git a/std/os/path.zig b/std/os/path.zig
index a3ad23b1a9..d3ab0c519f 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -648,8 +648,8 @@ fn testResolvePosix(paths: []const []const u8) []u8 {
}
/// If the path is a file in the current directory (no directory component)
-/// then the returned slice has .len = 0.
-pub fn dirname(path: []const u8) []const u8 {
+/// then returns null
+pub fn dirname(path: []const u8) ?[]const u8 {
if (is_windows) {
return dirnameWindows(path);
} else {
@@ -657,9 +657,9 @@ pub fn dirname(path: []const u8) []const u8 {
}
}
-pub fn dirnameWindows(path: []const u8) []const u8 {
+pub fn dirnameWindows(path: []const u8) ?[]const u8 {
if (path.len == 0)
- return path[0..0];
+ return null;
const root_slice = diskDesignatorWindows(path);
if (path.len == root_slice.len)
@@ -671,13 +671,13 @@ pub fn dirnameWindows(path: []const u8) []const u8 {
while ((path[end_index] == '/' or path[end_index] == '\\') and end_index > root_slice.len) {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
while (path[end_index] != '/' and path[end_index] != '\\' and end_index > root_slice.len) {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
@@ -685,12 +685,15 @@ pub fn dirnameWindows(path: []const u8) []const u8 {
end_index += 1;
}
+ if (end_index == 0)
+ return null;
+
return path[0..end_index];
}
-pub fn dirnamePosix(path: []const u8) []const u8 {
+pub fn dirnamePosix(path: []const u8) ?[]const u8 {
if (path.len == 0)
- return path[0..0];
+ return null;
var end_index: usize = path.len - 1;
while (path[end_index] == '/') {
@@ -701,13 +704,16 @@ pub fn dirnamePosix(path: []const u8) []const u8 {
while (path[end_index] != '/') {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
if (end_index == 0 and path[end_index] == '/')
return path[0..1];
+ if (end_index == 0)
+ return null;
+
return path[0..end_index];
}
@@ -717,10 +723,10 @@ test "os.path.dirnamePosix" {
testDirnamePosix("/a", "/");
testDirnamePosix("/", "/");
testDirnamePosix("////", "/");
- testDirnamePosix("", "");
- testDirnamePosix("a", "");
- testDirnamePosix("a/", "");
- testDirnamePosix("a//", "");
+ testDirnamePosix("", null);
+ testDirnamePosix("a", null);
+ testDirnamePosix("a/", null);
+ testDirnamePosix("a//", null);
}
test "os.path.dirnameWindows" {
@@ -742,7 +748,7 @@ test "os.path.dirnameWindows" {
testDirnameWindows("c:foo\\bar", "c:foo");
testDirnameWindows("c:foo\\bar\\", "c:foo");
testDirnameWindows("c:foo\\bar\\baz", "c:foo\\bar");
- testDirnameWindows("file:stream", "");
+ testDirnameWindows("file:stream", null);
testDirnameWindows("dir\\file:stream", "dir");
testDirnameWindows("\\\\unc\\share", "\\\\unc\\share");
testDirnameWindows("\\\\unc\\share\\foo", "\\\\unc\\share\\");
@@ -753,18 +759,26 @@ test "os.path.dirnameWindows" {
testDirnameWindows("/a/b/", "/a");
testDirnameWindows("/a/b", "/a");
testDirnameWindows("/a", "/");
- testDirnameWindows("", "");
+ testDirnameWindows("", null);
testDirnameWindows("/", "/");
testDirnameWindows("////", "/");
- testDirnameWindows("foo", "");
+ testDirnameWindows("foo", null);
}
-fn testDirnamePosix(input: []const u8, expected_output: []const u8) void {
- assert(mem.eql(u8, dirnamePosix(input), expected_output));
+fn testDirnamePosix(input: []const u8, expected_output: ?[]const u8) void {
+ if (dirnamePosix(input)) |output| {
+ assert(mem.eql(u8, output, expected_output.?));
+ } else {
+ assert(expected_output == null);
+ }
}
-fn testDirnameWindows(input: []const u8, expected_output: []const u8) void {
- assert(mem.eql(u8, dirnameWindows(input), expected_output));
+fn testDirnameWindows(input: []const u8, expected_output: ?[]const u8) void {
+ if (dirnameWindows(input)) |output| {
+ assert(mem.eql(u8, output, expected_output.?));
+ } else {
+ assert(expected_output == null);
+ }
}
pub fn basename(path: []const u8) []const u8 {
From cdf1e366f9c36af111cf41b001a58635d94a7714 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 14 Jun 2018 16:36:07 -0400
Subject: [PATCH 48/49] fix build on windows, broken by previous commit
---
std/os/index.zig | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/std/os/index.zig b/std/os/index.zig
index 46f5e76d98..62eeb7e43e 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -1971,7 +1971,7 @@ pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
Os.windows, Os.macosx, Os.ios => {
const self_exe_path = try selfExePath(allocator);
errdefer allocator.free(self_exe_path);
- const dirname = os.path.dirname(self_exe_path);
+ const dirname = os.path.dirname(self_exe_path) orelse ".";
return allocator.shrink(u8, self_exe_path, dirname.len);
},
else => @compileError("unimplemented: std.os.selfExeDirPath for " ++ @tagName(builtin.os)),
From f0697c28f80d64c544302aea576e41ebc443b41c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Thu, 14 Jun 2018 18:12:05 -0400
Subject: [PATCH 49/49] langref: docs for error return traces
See #367
---
doc/langref.html.in | 214 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 206 insertions(+), 8 deletions(-)
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 1fccd6e351..814de721a6 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -590,6 +590,7 @@ test "initialization" {
x = 1;
}
{#code_end#}
+ {#header_open|undefined#}
Use undefined to leave variables uninitialized:
{#code_begin|test#}
const assert = @import("std").debug.assert;
@@ -602,6 +603,7 @@ test "init with undefined" {
{#code_end#}
{#header_close#}
{#header_close#}
+ {#header_close#}
{#header_open|Integers#}
{#header_open|Integer Literals#}
{#code_begin|syntax#}
@@ -2999,6 +3001,7 @@ test "parse u64" {
You know with complete certainty it will not return an error, so want to unconditionally unwrap it.
You want to take a different action for each possible error.
+ {#header_open|catch#}
If you want to provide a default value, you can use the catch binary operator:
{#code_begin|syntax#}
fn doAThing(str: []u8) void {
@@ -3011,6 +3014,8 @@ fn doAThing(str: []u8) void {
a default value of 13. The type of the right hand side of the binary catch operator must
match the unwrapped error union type, or be of type noreturn.
+ {#header_close#}
+ {#header_open|try#}
Let's say you wanted to return the error if you got one, otherwise continue with the
function logic:
{#code_begin|syntax#}
@@ -3033,6 +3038,7 @@ fn doAThing(str: []u8) !void {
from the current function with the same error. Otherwise, the expression results in
the unwrapped value.
+ {#header_close#}
Maybe you know with complete certainty that an expression will never be an error.
In this case you can do this:
@@ -3047,7 +3053,7 @@ fn doAThing(str: []u8) !void {
Finally, you may want to take a different action for every situation. For that, we combine
- the if and switch expression:
+ the {#link|if#} and {#link|switch#} expression:
{#code_begin|syntax#}
fn doAThing(str: []u8) void {
@@ -3062,9 +3068,10 @@ fn doAThing(str: []u8) void {
}
}
{#code_end#}
+ {#header_open|errdefer#}
The other component to error handling is defer statements.
- In addition to an unconditional defer, Zig has errdefer,
+ In addition to an unconditional {#link|defer#}, Zig has errdefer,
which evaluates the deferred expression on block exit path if and only if
the function returned with an error from the block.
@@ -3095,6 +3102,7 @@ fn createFoo(param: i32) !Foo {
the verbosity and cognitive overhead of trying to make sure every exit path
is covered. The deallocation code is always directly following the allocation code.
+ {#header_close#}
A couple of other tidbits about error handling:
@@ -3223,7 +3231,174 @@ test "inferred error set" {
{#header_close#}
{#header_close#}
{#header_open|Error Return Traces#}
- TODO
+
+ Error Return Traces show all the points in the code at which an error was returned to the calling function. This makes it practical to use {#link|try#} everywhere and then still be able to know what happened if an error ends up bubbling all the way out of your application.
+
+ {#code_begin|exe_err#}
+pub fn main() !void {
+ try foo(12);
+}
+
+fn foo(x: i32) !void {
+ if (x >= 5) {
+ try bar();
+ } else {
+ try bang2();
+ }
+}
+
+fn bar() !void {
+ if (baz()) {
+ try quux();
+ } else |err| switch (err) {
+ error.FileNotFound => try hello(),
+ else => try another(),
+ }
+}
+
+fn baz() !void {
+ try bang1();
+}
+
+fn quux() !void {
+ try bang2();
+}
+
+fn hello() !void {
+ try bang2();
+}
+
+fn another() !void {
+ try bang1();
+}
+
+fn bang1() !void {
+ return error.FileNotFound;
+}
+
+fn bang2() !void {
+ return error.PermissionDenied;
+}
+ {#code_end#}
+
+ Look closely at this example. This is no stack trace.
+
+
+ You can see that the final error bubbled up was PermissionDenied,
+ but the original error that started this whole thing was FileNotFound. In the bar function, the code handles the original error code,
+ and then returns another one, from the switch statement. Error Return Traces make this clear, whereas a stack trace would look like this:
+
+ {#code_begin|exe_err#}
+pub fn main() void {
+ foo(12);
+}
+
+fn foo(x: i32) void {
+ if (x >= 5) {
+ bar();
+ } else {
+ bang2();
+ }
+}
+
+fn bar() void {
+ if (baz()) {
+ quux();
+ } else {
+ hello();
+ }
+}
+
+fn baz() bool {
+ return bang1();
+}
+
+fn quux() void {
+ bang2();
+}
+
+fn hello() void {
+ bang2();
+}
+
+fn bang1() bool {
+ return false;
+}
+
+fn bang2() void {
+ @panic("PermissionDenied");
+}
+ {#code_end#}
+
+ Here, the stack trace does not explain how the control
+ flow in bar got to the hello() call.
+ One would have to open a debugger or further instrument the application
+ in order to find out. The error return trace, on the other hand,
+ shows exactly how the error bubbled up.
+
+
+ This debugging feature makes it easier to iterate quickly on code that
+ robustly handles all error conditions. This means that Zig developers
+ will naturally find themselves writing correct, robust code in order
+ to increase their development pace.
+
+
+ Error Return Traces are enabled by default in {#link|Debug#} and {#link|ReleaseSafe#} builds and disabled by default in {#link|ReleaseFast#} and {#link|ReleaseSmall#} builds.
+
+
+ There are a few ways to activate this error return tracing feature:
+
+
+ - Return an error from main
+ - An error makes its way to
catch unreachable and you have not overridden the default panic handler
+ - Use {#link|errorReturnTrace#} to access the current return trace. You can use
std.debug.dumpStackTrace to print it. This function returns comptime-known {#link|null#} when building without error return tracing support.
+
+ {#header_open|Implementation Details#}
+
+ To analyze performance cost, there are two cases:
+
+
+ - when no errors are returned
+ - when returning errors
+
+
+ For the case when no errors are returned, the cost is a single memory write operation, only in the first non-failable function in the call graph that calls a failable function, i.e. when a function returning void calls a function returning error.
+ This is to initialize this struct in the stack memory:
+
+ {#code_begin|syntax#}
+pub const StackTrace = struct {
+ index: usize,
+ instruction_addresses: [N]usize,
+};
+ {#code_end#}
+
+ Here, N is the maximum function call depth as determined by call graph analysis. Recursion is ignored and counts for 2.
+
+
+ A pointer to StackTrace is passed as a secret parameter to every function that can return an error, but it's always the first parameter, so it can likely sit in a register and stay there.
+
+
+ That's it for the path when no errors occur. It's practically free in terms of performance.
+
+
+ When generating the code for a function that returns an error, just before the return statement (only for the return statements that return errors), Zig generates a call to this function:
+
+ {#code_begin|syntax#}
+// marked as "no-inline" in LLVM IR
+fn __zig_return_error(stack_trace: *StackTrace) void {
+ stack_trace.instruction_addresses[stack_trace.index] = @returnAddress();
+ stack_trace.index = (stack_trace.index + 1) % N;
+}
+ {#code_end#}
+
+ The cost is 2 math operations plus some memory reads and writes. The memory accessed is constrained and should remain cached for the duration of the error return bubbling.
+
+
+ As for code size cost, 1 function call before a return statement is no big deal. Even so,
+ I have a plan to make the call to
+ __zig_return_error a tail call, which brings the code size cost down to actually zero. What is a return statement in code without error return tracing can become a jump instruction in code with error return tracing.
+
+ {#header_close#}
{#header_close#}
{#header_close#}
{#header_open|Optionals#}
@@ -3342,6 +3517,15 @@ test "optional type" {
// Use compile-time reflection to access the child type of the optional:
comptime assert(@typeOf(foo).Child == i32);
}
+ {#code_end#}
+ {#header_close#}
+ {#header_open|null#}
+
+ Just like {#link|undefined#}, null has its own type, and the only way to use it is to
+ cast it to a different type:
+
+ {#code_begin|syntax#}
+const optional_value: ?i32 = null;
{#code_end#}
{#header_close#}
{#header_close#}
@@ -5426,12 +5610,13 @@ pub const TypeInfo = union(TypeId) {
{#header_close#}
{#header_open|Build Mode#}
- Zig has three build modes:
+ Zig has four build modes:
- {#link|Debug#} (default)
- {#link|ReleaseFast#}
- {#link|ReleaseSafe#}
+ - {#link|ReleaseSmall#}
To add standard build options to a build.zig file:
@@ -5448,14 +5633,16 @@ pub fn build(b: &Builder) void {
This causes these options to be available:
- -Drelease-safe=(bool) optimizations on and safety on
- -Drelease-fast=(bool) optimizations on and safety off
+ -Drelease-safe=[bool] optimizations on and safety on
+ -Drelease-fast=[bool] optimizations on and safety off
+ -Drelease-small=[bool] size optimizations on and safety off
{#header_open|Debug#}
$ zig build-exe example.zig
- Fast compilation speed
- Safety checks enabled
- Slow runtime performance
+ - Large binary size
{#header_close#}
{#header_open|ReleaseFast#}
@@ -5464,6 +5651,7 @@ pub fn build(b: &Builder) void {
Fast runtime performance
Safety checks disabled
Slow compilation speed
+ Large binary size
{#header_close#}
{#header_open|ReleaseSafe#}
@@ -5472,9 +5660,19 @@ pub fn build(b: &Builder) void {
Medium runtime performance
Safety checks enabled
Slow compilation speed
+ Large binary size
- {#see_also|Compile Variables|Zig Build System|Undefined Behavior#}
{#header_close#}
+ {#header_open|ReleaseSmall#}
+ $ zig build-exe example.zig --release-small
+
+ - Medium runtime performance
+ - Safety checks disabled
+ - Slow compilation speed
+ - Small binary size
+
+ {#header_close#}
+ {#see_also|Compile Variables|Zig Build System|Undefined Behavior#}
{#header_close#}
{#header_open|Undefined Behavior#}
@@ -5482,7 +5680,7 @@ pub fn build(b: &Builder) void {
detected at compile-time, Zig emits an error. Most undefined behavior that
cannot be detected at compile-time can be detected at runtime. In these cases,
Zig has safety checks. Safety checks can be disabled on a per-block basis
- with @setRuntimeSafety. The {#link|ReleaseFast#}
+ with {#link|setRuntimeSafety#}. The {#link|ReleaseFast#}
build mode disables all safety checks in order to facilitate optimizations.