+ A promise handle must be consumed exactly once after it is created, either by cancel or await.
+
+ amain() catch unreachable;
+ seq('f');
+ resume a_promise;
+ seq('i');
+ assert(final_result == 1234);
+ assert(std.mem.eql(u8, seq_points, "abcdefghi"));
+}
+async fn amain() void {
+ seq('b');
+ const p = async another() catch unreachable;
+ seq('e');
+ final_result = await p;
+ seq('h');
+}
+async fn another() i32 {
+ seq('c');
+ suspend |p| {
+ seq('d');
+ a_promise = p;
+ }
+ seq('g');
+ return 1234;
+}
+
+var seq_points = []u8{0} ** "abcdefghi".len;
+var seq_index: usize = 0;
+
+fn seq(c: u8) void {
+ seq_points[seq_index] = c;
+ seq_index += 1;
+}
+ {#code_end#}
+
+ In general, suspend is lower level than await. Most application
+ code will use only async and await, but event loop
+ implementations will make use of suspend internally.
+
+ {#header_close#}
+ {#header_open|Open Issues#}
+
+ There are a few issues with coroutines that are considered unresolved. Best be aware of them,
+ as the situation is likely to change before 1.0.0:
+
+
+ - Async functions have optimizations disabled - even in release modes - due to an
+ LLVM bug.
+
+ -
+ There are some situations where we can know statically that there will not be
+ memory allocation failure, but Zig still forces us to handle it.
+ TODO file an issue for this and link it here.
+
+ -
+ Zig does not take advantage of LLVM's allocation elision optimization for
+ coroutines. It crashed LLVM when I tried to do it the first time. This is
+ related to the other 2 bullet points here. See
+ #802.
+
+
+ {#header_close#}
+
{#header_close#}
{#header_open|Builtin Functions#}
@@ -4102,12 +4613,6 @@ comptime {
{#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#}
{#header_close#}
- {#header_open|@canImplicitCast#}
- @canImplicitCast(comptime T: type, value) bool
-
- Returns whether a value can be implicitly casted to a given type.
-
- {#header_close#}
{#header_open|@clz#}
@clz(x: T) U
@@ -4897,7 +5402,7 @@ pub const TypeId = enum {
ComptimeInt,
Undefined,
Null,
- Nullable,
+ Optional,
ErrorUnion,
Error,
Enum,
@@ -4931,7 +5436,7 @@ pub const TypeInfo = union(TypeId) {
ComptimeInt: void,
Undefined: void,
Null: void,
- Nullable: Nullable,
+ Optional: Optional,
ErrorUnion: ErrorUnion,
ErrorSet: ErrorSet,
Enum: Enum,
@@ -4984,7 +5489,7 @@ pub const TypeInfo = union(TypeId) {
defs: []Definition,
};
- pub const Nullable = struct {
+ pub const Optional = struct {
child: type,
};
@@ -5105,12 +5610,13 @@ pub const TypeInfo = union(TypeId) {
{#header_close#}
{#header_open|Build Mode#}
- Zig has three build modes:
+ Zig has four build modes:
- {#link|Debug#} (default)
- {#link|ReleaseFast#}
- {#link|ReleaseSafe#}
+ - {#link|ReleaseSmall#}
To add standard build options to a build.zig file:
@@ -5127,14 +5633,16 @@ pub fn build(b: &Builder) void {
This causes these options to be available:
- -Drelease-safe=(bool) optimizations on and safety on
- -Drelease-fast=(bool) optimizations on and safety off
+ -Drelease-safe=[bool] optimizations on and safety on
+ -Drelease-fast=[bool] optimizations on and safety off
+ -Drelease-small=[bool] size optimizations on and safety off
{#header_open|Debug#}
$ zig build-exe example.zig
- Fast compilation speed
- Safety checks enabled
- Slow runtime performance
+ - Large binary size
{#header_close#}
{#header_open|ReleaseFast#}
@@ -5143,6 +5651,7 @@ pub fn build(b: &Builder) void {
Fast runtime performance
Safety checks disabled
Slow compilation speed
+ Large binary size
{#header_close#}
{#header_open|ReleaseSafe#}
@@ -5151,9 +5660,19 @@ pub fn build(b: &Builder) void {
Medium runtime performance
Safety checks enabled
Slow compilation speed
+ Large binary size
- {#see_also|Compile Variables|Zig Build System|Undefined Behavior#}
{#header_close#}
+ {#header_open|ReleaseSmall#}
+ $ zig build-exe example.zig --release-small
+
+ - Medium runtime performance
+ - Safety checks disabled
+ - Slow compilation speed
+ - Small binary size
+
+ {#header_close#}
+ {#see_also|Compile Variables|Zig Build System|Undefined Behavior#}
{#header_close#}
{#header_open|Undefined Behavior#}
@@ -5161,7 +5680,7 @@ pub fn build(b: &Builder) void {
detected at compile-time, Zig emits an error. Most undefined behavior that
cannot be detected at compile-time can be detected at runtime. In these cases,
Zig has safety checks. Safety checks can be disabled on a per-block basis
- with @setRuntimeSafety. The {#link|ReleaseFast#}
+ with {#link|setRuntimeSafety#}. The {#link|ReleaseFast#}
build mode disables all safety checks in order to facilitate optimizations.
@@ -5375,8 +5894,8 @@ comptime {
At compile-time:
{#code_begin|test_err|unable to unwrap null#}
comptime {
- const nullable_number: ?i32 = null;
- const number = ??nullable_number;
+ const optional_number: ?i32 = null;
+ const number = optional_number.?;
}
{#code_end#}
At runtime crashes with the message attempt to unwrap null and a stack trace.
@@ -5385,9 +5904,9 @@ comptime {
{#code_begin|exe|test#}
const warn = @import("std").debug.warn;
pub fn main() void {
- const nullable_number: ?i32 = null;
+ const optional_number: ?i32 = null;
- if (nullable_number) |number| {
+ if (optional_number) |number| {
warn("got number: {}\n", number);
} else {
warn("it's null\n");
@@ -5474,425 +5993,7 @@ const separator = if (builtin.os == builtin.Os.windows) '\\' else '/';
Example of what is imported with @import("builtin"):
- {#code_begin|syntax#}
-pub const StackTrace = struct {
- index: usize,
- instruction_addresses: []usize,
-};
-
-pub const Os = enum {
- freestanding,
- ananas,
- cloudabi,
- dragonfly,
- freebsd,
- fuchsia,
- ios,
- kfreebsd,
- linux,
- lv2,
- macosx,
- netbsd,
- openbsd,
- solaris,
- windows,
- haiku,
- minix,
- rtems,
- nacl,
- cnk,
- aix,
- cuda,
- nvcl,
- amdhsa,
- ps4,
- elfiamcu,
- tvos,
- watchos,
- mesa3d,
- contiki,
- amdpal,
- zen,
-};
-
-pub const Arch = enum {
- armv8_3a,
- armv8_2a,
- armv8_1a,
- armv8,
- armv8r,
- armv8m_baseline,
- armv8m_mainline,
- armv7,
- armv7em,
- armv7m,
- armv7s,
- armv7k,
- armv7ve,
- armv6,
- armv6m,
- armv6k,
- armv6t2,
- armv5,
- armv5te,
- armv4t,
- armebv8_3a,
- armebv8_2a,
- armebv8_1a,
- armebv8,
- armebv8r,
- armebv8m_baseline,
- armebv8m_mainline,
- armebv7,
- armebv7em,
- armebv7m,
- armebv7s,
- armebv7k,
- armebv7ve,
- armebv6,
- armebv6m,
- armebv6k,
- armebv6t2,
- armebv5,
- armebv5te,
- armebv4t,
- aarch64,
- aarch64_be,
- arc,
- avr,
- bpfel,
- bpfeb,
- hexagon,
- mips,
- mipsel,
- mips64,
- mips64el,
- msp430,
- nios2,
- powerpc,
- powerpc64,
- powerpc64le,
- r600,
- amdgcn,
- riscv32,
- riscv64,
- sparc,
- sparcv9,
- sparcel,
- s390x,
- tce,
- tcele,
- thumb,
- thumbeb,
- i386,
- x86_64,
- xcore,
- nvptx,
- nvptx64,
- le32,
- le64,
- amdil,
- amdil64,
- hsail,
- hsail64,
- spir,
- spir64,
- kalimbav3,
- kalimbav4,
- kalimbav5,
- shave,
- lanai,
- wasm32,
- wasm64,
- renderscript32,
- renderscript64,
-};
-
-pub const Environ = enum {
- unknown,
- gnu,
- gnuabin32,
- gnuabi64,
- gnueabi,
- gnueabihf,
- gnux32,
- code16,
- eabi,
- eabihf,
- android,
- musl,
- musleabi,
- musleabihf,
- msvc,
- itanium,
- cygnus,
- amdopencl,
- coreclr,
- opencl,
- simulator,
-};
-
-pub const ObjectFormat = enum {
- unknown,
- coff,
- elf,
- macho,
- wasm,
-};
-
-pub const GlobalLinkage = enum {
- Internal,
- Strong,
- Weak,
- LinkOnce,
-};
-
-pub const AtomicOrder = enum {
- Unordered,
- Monotonic,
- Acquire,
- Release,
- AcqRel,
- SeqCst,
-};
-
-pub const AtomicRmwOp = enum {
- Xchg,
- Add,
- Sub,
- And,
- Nand,
- Or,
- Xor,
- Max,
- Min,
-};
-
-pub const Mode = enum {
- Debug,
- ReleaseSafe,
- ReleaseFast,
- ReleaseSmall,
-};
-
-pub const TypeId = enum {
- Type,
- Void,
- Bool,
- NoReturn,
- Int,
- Float,
- Pointer,
- Array,
- Struct,
- ComptimeFloat,
- ComptimeInt,
- Undefined,
- Null,
- Nullable,
- ErrorUnion,
- ErrorSet,
- Enum,
- Union,
- Fn,
- Namespace,
- Block,
- BoundFn,
- ArgTuple,
- Opaque,
- Promise,
-};
-
-pub const TypeInfo = union(TypeId) {
- Type: void,
- Void: void,
- Bool: void,
- NoReturn: void,
- Int: Int,
- Float: Float,
- Pointer: Pointer,
- Array: Array,
- Struct: Struct,
- ComptimeFloat: void,
- ComptimeInt: void,
- Undefined: void,
- Null: void,
- Nullable: Nullable,
- ErrorUnion: ErrorUnion,
- ErrorSet: ErrorSet,
- Enum: Enum,
- Union: Union,
- Fn: Fn,
- Namespace: void,
- Block: void,
- BoundFn: Fn,
- ArgTuple: void,
- Opaque: void,
- Promise: Promise,
-
-
- pub const Int = struct {
- is_signed: bool,
- bits: u8,
- };
-
- pub const Float = struct {
- bits: u8,
- };
-
- pub const Pointer = struct {
- is_const: bool,
- is_volatile: bool,
- alignment: u32,
- child: type,
- };
-
- pub const Array = struct {
- len: usize,
- child: type,
- };
-
- pub const ContainerLayout = enum {
- Auto,
- Extern,
- Packed,
- };
-
- pub const StructField = struct {
- name: []const u8,
- offset: ?usize,
- field_type: type,
- };
-
- pub const Struct = struct {
- layout: ContainerLayout,
- fields: []StructField,
- defs: []Definition,
- };
-
- pub const Nullable = struct {
- child: type,
- };
-
- pub const ErrorUnion = struct {
- error_set: type,
- payload: type,
- };
-
- pub const Error = struct {
- name: []const u8,
- value: usize,
- };
-
- pub const ErrorSet = struct {
- errors: []Error,
- };
-
- pub const EnumField = struct {
- name: []const u8,
- value: usize,
- };
-
- pub const Enum = struct {
- layout: ContainerLayout,
- tag_type: type,
- fields: []EnumField,
- defs: []Definition,
- };
-
- pub const UnionField = struct {
- name: []const u8,
- enum_field: ?EnumField,
- field_type: type,
- };
-
- pub const Union = struct {
- layout: ContainerLayout,
- tag_type: type,
- fields: []UnionField,
- defs: []Definition,
- };
-
- pub const CallingConvention = enum {
- Unspecified,
- C,
- Cold,
- Naked,
- Stdcall,
- Async,
- };
-
- pub const FnArg = struct {
- is_generic: bool,
- is_noalias: bool,
- arg_type: type,
- };
-
- pub const Fn = struct {
- calling_convention: CallingConvention,
- is_generic: bool,
- is_var_args: bool,
- return_type: type,
- async_allocator_type: type,
- args: []FnArg,
- };
-
- pub const Promise = struct {
- child: type,
- };
-
- pub const Definition = struct {
- name: []const u8,
- is_pub: bool,
- data: Data,
-
- pub const Data = union(enum) {
- Type: type,
- Var: type,
- Fn: FnDef,
-
- pub const FnDef = struct {
- fn_type: type,
- inline_type: Inline,
- calling_convention: CallingConvention,
- is_var_args: bool,
- is_extern: bool,
- is_export: bool,
- lib_name: ?[]const u8,
- return_type: type,
- arg_names: [][] const u8,
-
- pub const Inline = enum {
- Auto,
- Always,
- Never,
- };
- };
- };
- };
-};
-
-pub const FloatMode = enum {
- Optimized,
- Strict,
-};
-
-pub const Endian = enum {
- Big,
- Little,
-};
-
-pub const endian = Endian.Little;
-pub const is_test = true;
-pub const os = Os.linux;
-pub const arch = Arch.x86_64;
-pub const environ = Environ.gnu;
-pub const object_format = ObjectFormat.elf;
-pub const mode = Mode.Debug;
-pub const link_libc = false;
-pub const have_error_return_tracing = true;
-pub const __zig_test_fn_slice = {}; // overwritten later
- {#code_end#}
+ {#builtin#}
{#see_also|Build Mode#}
{#header_close#}
{#header_open|Root Source File#}
@@ -6053,8 +6154,7 @@ pub fn build(b: *Builder) void {
b.default_step.dependOn(&exe.step);
}
{#code_end#}
- {#header_close#}
- {#header_open|Terminal#}
+ terminal
$ zig build
$ ./test
all your base are belong to us
@@ -6367,9 +6467,9 @@ AsmInputItem = "[" Symbol "]" String "(" Expression ")"
AsmClobbers= ":" list(String, ",")
-UnwrapExpression = BoolOrExpression (UnwrapNullable | UnwrapError) | BoolOrExpression
+UnwrapExpression = BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
-UnwrapNullable = "??" Expression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
@@ -6443,12 +6543,10 @@ MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%"
PrefixOpExpression = PrefixOp TypeExpr | SuffixOpExpression
-SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | PtrDerefExpression)
+SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?")
FieldAccessExpression = "." Symbol
-PtrDerefExpression = ".*"
-
FnCallExpression = "(" list(Expression, ",") ")"
ArrayAccessExpression = "[" Expression "]"
@@ -6461,7 +6559,7 @@ ContainerInitBody = list(StructLiteralField, ",") | list(Expression, ",")
StructLiteralField = "." Symbol "=" Expression
-PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await"
+PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "-%" | "try" | "await"
PrimaryExpression = Integer | Float | String | CharLiteral | KeywordLiteral | GroupedExpression | BlockExpression(BlockOrExpression) | Symbol | ("@" Symbol FnCallExpression) | ArrayType | FnProto | AsmExpression | ContainerDecl | ("continue" option(":" Symbol)) | ErrorSetDecl | PromiseType
@@ -6554,8 +6652,8 @@ hljs.registerLanguage("zig", function(t) {
},
a = t.IR + "\\s*\\(",
c = {
- keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong",
- built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall",
+ keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse",
+ built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall",
literal: "true false null undefined"
},
n = [e, t.CLCM, t.CBCM, s, r];
diff --git a/example/cat/main.zig b/example/cat/main.zig
index 1b34cb22eb..27690d2695 100644
--- a/example/cat/main.zig
+++ b/example/cat/main.zig
@@ -7,7 +7,7 @@ const allocator = std.debug.global_allocator;
pub fn main() !void {
var args_it = os.args();
- const exe = try unwrapArg(??args_it.next(allocator));
+ const exe = try unwrapArg(args_it.next(allocator).?);
var catted_anything = false;
var stdout_file = try io.getStdOut();
diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig
index df2c04ef1f..dc89483213 100644
--- a/src-self-hosted/arg.zig
+++ b/src-self-hosted/arg.zig
@@ -99,7 +99,7 @@ pub const Args = struct {
error.ArgumentNotInAllowedSet => {
std.debug.warn("argument '{}' is invalid for flag '{}'\n", args[i], arg);
std.debug.warn("allowed options are ");
- for (??flag.allowed_set) |possible| {
+ for (flag.allowed_set.?) |possible| {
std.debug.warn("'{}' ", possible);
}
std.debug.warn("\n");
@@ -276,14 +276,14 @@ test "parse arguments" {
debug.assert(!args.present("help2"));
debug.assert(!args.present("init"));
- debug.assert(mem.eql(u8, ??args.single("build-file"), "build.zig"));
- debug.assert(mem.eql(u8, ??args.single("color"), "on"));
+ debug.assert(mem.eql(u8, args.single("build-file").?, "build.zig"));
+ debug.assert(mem.eql(u8, args.single("color").?, "on"));
- const objects = ??args.many("object");
+ const objects = args.many("object").?;
debug.assert(mem.eql(u8, objects[0], "obj1"));
debug.assert(mem.eql(u8, objects[1], "obj2"));
- debug.assert(mem.eql(u8, ??args.single("library"), "lib2"));
+ debug.assert(mem.eql(u8, args.single("library").?, "lib2"));
const pos = args.positionals.toSliceConst();
debug.assert(mem.eql(u8, pos[0], "build"));
diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig
index 56b56c0c78..74084b48c6 100644
--- a/src-self-hosted/introspect.zig
+++ b/src-self-hosted/introspect.zig
@@ -27,7 +27,7 @@ pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 {
var cur_path: []const u8 = self_exe_path;
while (true) {
- const test_dir = os.path.dirname(cur_path);
+ const test_dir = os.path.dirname(cur_path) orelse ".";
if (mem.eql(u8, test_dir, cur_path)) {
break;
diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig
index 16c359adcf..391a92cd63 100644
--- a/src-self-hosted/llvm.zig
+++ b/src-self-hosted/llvm.zig
@@ -8,6 +8,6 @@ pub const ContextRef = removeNullability(c.LLVMContextRef);
pub const BuilderRef = removeNullability(c.LLVMBuilderRef);
fn removeNullability(comptime T: type) type {
- comptime assert(@typeId(T) == builtin.TypeId.Nullable);
+ comptime assert(@typeId(T) == builtin.TypeId.Optional);
return T.Child;
}
diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig
index 7a62f4985b..ffe23d2ffe 100644
--- a/src-self-hosted/main.zig
+++ b/src-self-hosted/main.zig
@@ -212,7 +212,7 @@ fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig");
defer allocator.free(build_runner_path);
- const build_file = flags.single("build-file") ?? "build.zig";
+ const build_file = flags.single("build-file") orelse "build.zig";
const build_file_abs = try os.path.resolve(allocator, ".", build_file);
defer allocator.free(build_file_abs);
@@ -249,7 +249,7 @@ fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void {
defer build_args.deinit();
const build_file_basename = os.path.basename(build_file_abs);
- const build_file_dirname = os.path.dirname(build_file_abs);
+ const build_file_dirname = os.path.dirname(build_file_abs) orelse ".";
var full_cache_dir: []u8 = undefined;
if (flags.single("cache-dir")) |cache_dir| {
@@ -490,7 +490,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
try stderr.print("encountered --pkg-end with no matching --pkg-begin\n");
os.exit(1);
}
- cur_pkg = ??cur_pkg.parent;
+ cur_pkg = cur_pkg.parent.?;
}
}
@@ -514,28 +514,28 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
},
}
- const basename = os.path.basename(??in_file);
+ const basename = os.path.basename(in_file.?);
var it = mem.split(basename, ".");
- const root_name = it.next() ?? {
+ const root_name = it.next() orelse {
try stderr.write("file name cannot be empty\n");
os.exit(1);
};
const asm_a = flags.many("assembly");
const obj_a = flags.many("object");
- if (in_file == null and (obj_a == null or (??obj_a).len == 0) and (asm_a == null or (??asm_a).len == 0)) {
+ if (in_file == null and (obj_a == null or obj_a.?.len == 0) and (asm_a == null or asm_a.?.len == 0)) {
try stderr.write("Expected source file argument or at least one --object or --assembly argument\n");
os.exit(1);
}
- if (out_type == Module.Kind.Obj and (obj_a != null and (??obj_a).len != 0)) {
+ if (out_type == Module.Kind.Obj and (obj_a != null and obj_a.?.len != 0)) {
try stderr.write("When building an object file, --object arguments are invalid\n");
os.exit(1);
}
const zig_root_source_file = in_file;
- const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") ?? "zig-cache"[0..]) catch {
+ const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") orelse "zig-cache"[0..]) catch {
os.exit(1);
};
defer allocator.free(full_cache_dir);
@@ -555,9 +555,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
);
defer module.destroy();
- module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") ?? "0", 10);
- module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") ?? "0", 10);
- module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") ?? "0", 10);
+ module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10);
+ module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10);
+ module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10);
module.is_test = false;
@@ -652,7 +652,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo
}
try module.build();
- try module.link(flags.single("out-file") ?? null);
+ try module.link(flags.single("out-file") orelse null);
if (flags.present("print-timing-info")) {
// codegen_print_timing_info(g, stderr);
@@ -734,7 +734,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
defer file.close();
const source_code = io.readFileAlloc(allocator, file_path) catch |err| {
- try stderr.print("unable to open '{}': {}", file_path, err);
+ try stderr.print("unable to open '{}': {}\n", file_path, err);
fmt_errors = true;
continue;
};
diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig
index a7ddf3f9e9..575105f25f 100644
--- a/src-self-hosted/module.zig
+++ b/src-self-hosted/module.zig
@@ -130,13 +130,13 @@ pub const Module = struct {
var name_buffer = try Buffer.init(allocator, name);
errdefer name_buffer.deinit();
- const context = c.LLVMContextCreate() ?? return error.OutOfMemory;
+ const context = c.LLVMContextCreate() orelse return error.OutOfMemory;
errdefer c.LLVMContextDispose(context);
- const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) ?? return error.OutOfMemory;
+ const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeModule(module);
- const builder = c.LLVMCreateBuilderInContext(context) ?? return error.OutOfMemory;
+ const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory;
errdefer c.LLVMDisposeBuilder(builder);
const module_ptr = try allocator.create(Module);
@@ -223,7 +223,7 @@ pub const Module = struct {
c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr);
}
- const root_src_path = self.root_src_path ?? @panic("TODO handle null root src path");
+ const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path");
const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| {
try printError("unable to get real path '{}': {}", root_src_path, err);
return err;
diff --git a/src/all_types.hpp b/src/all_types.hpp
index f05a44ad2d..683f64b93f 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -144,6 +144,9 @@ enum ConstPtrSpecial {
// understand the value of pointee at compile time. However, we will still
// emit a binary with a compile time known address.
// In this case index is the numeric address value.
+ // We also use this for null pointer. We need the data layout for ConstCastOnly == true
+ // types to be the same, so all optionals of pointer types use x_ptr
+ // instead of x_optional
ConstPtrSpecialHardCodedAddr,
// This means that the pointer represents memory of assigning to _.
// That is, storing discards the data, and loading is invalid.
@@ -219,10 +222,10 @@ enum RuntimeHintErrorUnion {
RuntimeHintErrorUnionNonError,
};
-enum RuntimeHintMaybe {
- RuntimeHintMaybeUnknown,
- RuntimeHintMaybeNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known.
- RuntimeHintMaybeNonNull,
+enum RuntimeHintOptional {
+ RuntimeHintOptionalUnknown,
+ RuntimeHintOptionalNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known.
+ RuntimeHintOptionalNonNull,
};
enum RuntimeHintPtr {
@@ -251,7 +254,7 @@ struct ConstExprValue {
bool x_bool;
ConstBoundFnValue x_bound_fn;
TypeTableEntry *x_type;
- ConstExprValue *x_maybe;
+ ConstExprValue *x_optional;
ConstErrValue x_err_union;
ErrorTableEntry *x_err_set;
BigInt x_enum_tag;
@@ -265,7 +268,7 @@ struct ConstExprValue {
// populated if special == ConstValSpecialRuntime
RuntimeHintErrorUnion rh_error_union;
- RuntimeHintMaybe rh_maybe;
+ RuntimeHintOptional rh_maybe;
RuntimeHintPtr rh_ptr;
} data;
};
@@ -384,6 +387,7 @@ enum NodeType {
NodeTypeSliceExpr,
NodeTypeFieldAccessExpr,
NodeTypePtrDeref,
+ NodeTypeUnwrapOptional,
NodeTypeUse,
NodeTypeBoolLiteral,
NodeTypeNullLiteral,
@@ -553,7 +557,7 @@ enum BinOpType {
BinOpTypeMultWrap,
BinOpTypeDiv,
BinOpTypeMod,
- BinOpTypeUnwrapMaybe,
+ BinOpTypeUnwrapOptional,
BinOpTypeArrayCat,
BinOpTypeArrayMult,
BinOpTypeErrorUnion,
@@ -572,6 +576,10 @@ struct AstNodeCatchExpr {
AstNode *op2;
};
+struct AstNodeUnwrapOptional {
+ AstNode *expr;
+};
+
enum CastOp {
CastOpNoCast, // signifies the function call expression is not a cast
CastOpNoop, // fn call expr is a cast, but does nothing
@@ -583,6 +591,7 @@ enum CastOp {
CastOpNumLitToConcrete,
CastOpErrSet,
CastOpBitCast,
+ CastOpPtrOfArrayToSlice,
};
struct AstNodeFnCallExpr {
@@ -619,8 +628,7 @@ enum PrefixOp {
PrefixOpBinNot,
PrefixOpNegation,
PrefixOpNegationWrap,
- PrefixOpMaybe,
- PrefixOpUnwrapMaybe,
+ PrefixOpOptional,
PrefixOpAddrOf,
};
@@ -905,6 +913,7 @@ struct AstNode {
AstNodeTestDecl test_decl;
AstNodeBinOpExpr bin_op_expr;
AstNodeCatchExpr unwrap_err_expr;
+ AstNodeUnwrapOptional unwrap_optional;
AstNodePrefixOpExpr prefix_op_expr;
AstNodePointerType pointer_type;
AstNodeFnCallExpr fn_call_expr;
@@ -1037,6 +1046,10 @@ struct TypeTableEntryStruct {
// whether we've finished resolving it
bool complete;
+ // whether any of the fields require comptime
+ // the value is not valid until zero_bits_known == true
+ bool requires_comptime;
+
bool zero_bits_loop_flag;
bool zero_bits_known;
uint32_t abi_alignment; // also figured out with zero_bits pass
@@ -1044,7 +1057,7 @@ struct TypeTableEntryStruct {
HashMap fields_by_name;
};
-struct TypeTableEntryMaybe {
+struct TypeTableEntryOptional {
TypeTableEntry *child_type;
};
@@ -1078,8 +1091,7 @@ struct TypeTableEntryEnum {
bool zero_bits_loop_flag;
bool zero_bits_known;
- bool generate_name_table;
- LLVMValueRef name_table;
+ LLVMValueRef name_function;
HashMap fields_by_name;
};
@@ -1105,6 +1117,10 @@ struct TypeTableEntryUnion {
// whether we've finished resolving it
bool complete;
+ // whether any of the fields require comptime
+ // the value is not valid until zero_bits_known == true
+ bool requires_comptime;
+
bool zero_bits_loop_flag;
bool zero_bits_known;
uint32_t abi_alignment; // also figured out with zero_bits pass
@@ -1163,7 +1179,7 @@ enum TypeTableEntryId {
TypeTableEntryIdComptimeInt,
TypeTableEntryIdUndefined,
TypeTableEntryIdNull,
- TypeTableEntryIdMaybe,
+ TypeTableEntryIdOptional,
TypeTableEntryIdErrorUnion,
TypeTableEntryIdErrorSet,
TypeTableEntryIdEnum,
@@ -1194,7 +1210,7 @@ struct TypeTableEntry {
TypeTableEntryFloat floating;
TypeTableEntryArray array;
TypeTableEntryStruct structure;
- TypeTableEntryMaybe maybe;
+ TypeTableEntryOptional maybe;
TypeTableEntryErrorUnion error_union;
TypeTableEntryErrorSet error_set;
TypeTableEntryEnum enumeration;
@@ -1346,7 +1362,6 @@ enum BuiltinFnId {
BuiltinFnIdSetRuntimeSafety,
BuiltinFnIdSetFloatMode,
BuiltinFnIdTypeName,
- BuiltinFnIdCanImplicitCast,
BuiltinFnIdPanic,
BuiltinFnIdPtrCast,
BuiltinFnIdBitCast,
@@ -1391,10 +1406,11 @@ enum PanicMsgId {
PanicMsgIdRemainderDivisionByZero,
PanicMsgIdExactDivisionRemainder,
PanicMsgIdSliceWidenRemainder,
- PanicMsgIdUnwrapMaybeFail,
+ PanicMsgIdUnwrapOptionalFail,
PanicMsgIdInvalidErrorCode,
PanicMsgIdIncorrectAlignment,
PanicMsgIdBadUnionField,
+ PanicMsgIdBadEnumValue,
PanicMsgIdCount,
};
@@ -1712,8 +1728,6 @@ struct CodeGen {
ZigList link_objects;
ZigList assembly_files;
- ZigList name_table_enums;
-
Buf *test_filter;
Buf *test_name_prefix;
@@ -2003,8 +2017,8 @@ enum IrInstructionId {
IrInstructionIdAsm,
IrInstructionIdSizeOf,
IrInstructionIdTestNonNull,
- IrInstructionIdUnwrapMaybe,
- IrInstructionIdMaybeWrap,
+ IrInstructionIdUnwrapOptional,
+ IrInstructionIdOptionalWrap,
IrInstructionIdUnionTag,
IrInstructionIdClz,
IrInstructionIdCtz,
@@ -2055,7 +2069,6 @@ enum IrInstructionId {
IrInstructionIdCheckSwitchProngs,
IrInstructionIdCheckStatementIsVoid,
IrInstructionIdTypeName,
- IrInstructionIdCanImplicitCast,
IrInstructionIdDeclRef,
IrInstructionIdPanic,
IrInstructionIdTagName,
@@ -2172,7 +2185,7 @@ enum IrUnOp {
IrUnOpNegation,
IrUnOpNegationWrap,
IrUnOpDereference,
- IrUnOpMaybe,
+ IrUnOpOptional,
};
struct IrInstructionUnOp {
@@ -2475,7 +2488,7 @@ struct IrInstructionTestNonNull {
IrInstruction *value;
};
-struct IrInstructionUnwrapMaybe {
+struct IrInstructionUnwrapOptional {
IrInstruction base;
IrInstruction *value;
@@ -2733,7 +2746,7 @@ struct IrInstructionUnwrapErrPayload {
bool safety_check_on;
};
-struct IrInstructionMaybeWrap {
+struct IrInstructionOptionalWrap {
IrInstruction base;
IrInstruction *value;
@@ -2848,13 +2861,6 @@ struct IrInstructionTypeName {
IrInstruction *type_value;
};
-struct IrInstructionCanImplicitCast {
- IrInstruction base;
-
- IrInstruction *type_value;
- IrInstruction *target_value;
-};
-
struct IrInstructionDeclRef {
IrInstruction base;
@@ -2949,10 +2955,10 @@ struct IrInstructionExport {
struct IrInstructionErrorReturnTrace {
IrInstruction base;
- enum Nullable {
+ enum Optional {
Null,
NonNull,
- } nullable;
+ } optional;
};
struct IrInstructionErrorUnion {
diff --git a/src/analyze.cpp b/src/analyze.cpp
index b0f0196020..cbeac7bc21 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -236,7 +236,7 @@ bool type_is_complete(TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -272,7 +272,7 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -384,6 +384,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type
bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count)
{
assert(!type_is_invalid(child_type));
+ assert(ptr_len == PtrLenSingle || child_type->id != TypeTableEntryIdOpaque);
TypeId type_id = {};
TypeTableEntry **parent_pointer = nullptr;
@@ -519,9 +520,8 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
} else {
ensure_complete_type(g, child_type);
- TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdMaybe);
+ TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional);
assert(child_type->type_ref || child_type->zero_bits);
- assert(child_type->di_type);
entry->is_copyable = type_is_copyable(g, child_type);
buf_resize(&entry->name, 0);
@@ -531,12 +531,14 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) {
entry->type_ref = LLVMInt1Type();
entry->di_type = g->builtin_types.entry_bool->di_type;
} else if (type_is_codegen_pointer(child_type)) {
+ assert(child_type->di_type);
// this is an optimization but also is necessary for calling C
// functions where all pointers are maybe pointers
// function types are technically pointers
entry->type_ref = child_type->type_ref;
entry->di_type = child_type->di_type;
} else {
+ assert(child_type->di_type);
// create a struct with a boolean whether this is the null value
LLVMTypeRef elem_types[] = {
child_type->type_ref,
@@ -1360,7 +1362,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) {
return type_entry->data.structure.layout == ContainerLayoutPacked;
case TypeTableEntryIdUnion:
return type_entry->data.unionation.layout == ContainerLayoutPacked;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
return type_is_codegen_pointer(child_type);
@@ -1414,7 +1416,7 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) {
return type_allowed_in_extern(g, type_entry->data.pointer.child_type);
case TypeTableEntryIdStruct:
return type_entry->data.structure.layout == ContainerLayoutExtern || type_entry->data.structure.layout == ContainerLayoutPacked;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn;
@@ -1537,7 +1539,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -1631,7 +1633,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -2532,6 +2534,10 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) {
continue;
}
+ if (type_requires_comptime(field_type)) {
+ struct_type->data.structure.requires_comptime = true;
+ }
+
if (!type_has_bits(field_type))
continue;
@@ -2723,6 +2729,11 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) {
}
union_field->type_entry = field_type;
+ if (type_requires_comptime(field_type)) {
+ union_type->data.unionation.requires_comptime = true;
+ }
+
+
if (field_node->data.struct_field.value != nullptr && !decl_node->data.container_decl.auto_enum) {
ErrorMsg *msg = add_node_error(g, field_node->data.struct_field.value,
buf_sprintf("non-enum union field assignment"));
@@ -2975,8 +2986,8 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
- TypeTableEntry *nullable_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g));
- if (fn_type_id->param_info[1].type != nullable_ptr_to_stack_trace_type) {
+ TypeTableEntry *optional_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g));
+ if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) {
return wrong_panic_prototype(g, proto_node, fn_type);
}
@@ -3298,6 +3309,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeAsmExpr:
case NodeTypeFieldAccessExpr:
case NodeTypePtrDeref:
+ case NodeTypeUnwrapOptional:
case NodeTypeStructField:
case NodeTypeContainerInitExpr:
case NodeTypeStructValueField:
@@ -3358,7 +3370,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -3736,7 +3748,7 @@ static bool is_container(TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -3751,14 +3763,24 @@ static bool is_container(TypeTableEntry *type_entry) {
zig_unreachable();
}
+bool is_ref(TypeTableEntry *type_entry) {
+ return type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle;
+}
+
+bool is_array_ref(TypeTableEntry *type_entry) {
+ TypeTableEntry *array = is_ref(type_entry) ?
+ type_entry->data.pointer.child_type : type_entry;
+ return array->id == TypeTableEntryIdArray;
+}
+
bool is_container_ref(TypeTableEntry *type_entry) {
- return (type_entry->id == TypeTableEntryIdPointer) ?
+ return is_ref(type_entry) ?
is_container(type_entry->data.pointer.child_type) : is_container(type_entry);
}
TypeTableEntry *container_ref_type(TypeTableEntry *type_entry) {
assert(is_container_ref(type_entry));
- return (type_entry->id == TypeTableEntryIdPointer) ?
+ return is_ref(type_entry) ?
type_entry->data.pointer.child_type : type_entry;
}
@@ -3785,7 +3807,7 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
@@ -3804,7 +3826,7 @@ TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type) {
if (type->id == TypeTableEntryIdPointer) return type;
if (type->id == TypeTableEntryIdFn) return type;
if (type->id == TypeTableEntryIdPromise) return type;
- if (type->id == TypeTableEntryIdMaybe) {
+ if (type->id == TypeTableEntryIdOptional) {
if (type->data.maybe.child_type->id == TypeTableEntryIdPointer) return type->data.maybe.child_type;
if (type->data.maybe.child_type->id == TypeTableEntryIdFn) return type->data.maybe.child_type;
if (type->data.maybe.child_type->id == TypeTableEntryIdPromise) return type->data.maybe.child_type;
@@ -4311,7 +4333,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) {
return type_has_bits(type_entry);
case TypeTableEntryIdErrorUnion:
return type_has_bits(type_entry->data.error_union.payload_type);
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return type_has_bits(type_entry->data.maybe.child_type) &&
!type_is_codegen_pointer(type_entry->data.maybe.child_type);
case TypeTableEntryIdUnion:
@@ -4558,6 +4580,52 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
return true;
}
+static uint32_t hash_const_val_ptr(ConstExprValue *const_val) {
+ uint32_t hash_val = 0;
+ switch (const_val->data.x_ptr.mut) {
+ case ConstPtrMutRuntimeVar:
+ hash_val += (uint32_t)3500721036;
+ break;
+ case ConstPtrMutComptimeConst:
+ hash_val += (uint32_t)4214318515;
+ break;
+ case ConstPtrMutComptimeVar:
+ hash_val += (uint32_t)1103195694;
+ break;
+ }
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ hash_val += (uint32_t)2478261866;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee);
+ return hash_val;
+ case ConstPtrSpecialBaseArray:
+ hash_val += (uint32_t)1764906839;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
+ hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
+ hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 1297263887 : 200363492;
+ return hash_val;
+ case ConstPtrSpecialBaseStruct:
+ hash_val += (uint32_t)3518317043;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);
+ hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index);
+ return hash_val;
+ case ConstPtrSpecialHardCodedAddr:
+ hash_val += (uint32_t)4048518294;
+ hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr);
+ return hash_val;
+ case ConstPtrSpecialDiscard:
+ hash_val += 2010123162;
+ return hash_val;
+ case ConstPtrSpecialFunction:
+ hash_val += (uint32_t)2590901619;
+ hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
+ return hash_val;
+ }
+ zig_unreachable();
+}
+
static uint32_t hash_const_val(ConstExprValue *const_val) {
assert(const_val->special == ConstValSpecialStatic);
switch (const_val->type->id) {
@@ -4626,51 +4694,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
assert(const_val->data.x_ptr.special == ConstPtrSpecialFunction);
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
case TypeTableEntryIdPointer:
- {
- uint32_t hash_val = 0;
- switch (const_val->data.x_ptr.mut) {
- case ConstPtrMutRuntimeVar:
- hash_val += (uint32_t)3500721036;
- break;
- case ConstPtrMutComptimeConst:
- hash_val += (uint32_t)4214318515;
- break;
- case ConstPtrMutComptimeVar:
- hash_val += (uint32_t)1103195694;
- break;
- }
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- hash_val += (uint32_t)2478261866;
- hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee);
- return hash_val;
- case ConstPtrSpecialBaseArray:
- hash_val += (uint32_t)1764906839;
- hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
- hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
- hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 1297263887 : 200363492;
- return hash_val;
- case ConstPtrSpecialBaseStruct:
- hash_val += (uint32_t)3518317043;
- hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);
- hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index);
- return hash_val;
- case ConstPtrSpecialHardCodedAddr:
- hash_val += (uint32_t)4048518294;
- hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr);
- return hash_val;
- case ConstPtrSpecialDiscard:
- hash_val += 2010123162;
- return hash_val;
- case ConstPtrSpecialFunction:
- hash_val += (uint32_t)2590901619;
- hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
- return hash_val;
- }
- zig_unreachable();
- }
+ return hash_const_val_ptr(const_val);
case TypeTableEntryIdPromise:
// TODO better hashing algorithm
return 223048345;
@@ -4687,11 +4711,15 @@ static uint32_t hash_const_val(ConstExprValue *const_val) {
case TypeTableEntryIdUnion:
// TODO better hashing algorithm
return 2709806591;
- case TypeTableEntryIdMaybe:
- if (const_val->data.x_maybe) {
- return hash_const_val(const_val->data.x_maybe) * 1992916303;
+ case TypeTableEntryIdOptional:
+ if (get_codegen_ptr_type(const_val->type) != nullptr) {
+ return hash_const_val(const_val) * 1992916303;
} else {
- return 4016830364;
+ if (const_val->data.x_optional) {
+ return hash_const_val(const_val->data.x_optional) * 1992916303;
+ } else {
+ return 4016830364;
+ }
}
case TypeTableEntryIdErrorUnion:
// TODO better hashing algorithm
@@ -4791,10 +4819,12 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) {
}
return false;
- case TypeTableEntryIdMaybe:
- if (value->data.x_maybe == nullptr)
+ case TypeTableEntryIdOptional:
+ if (get_codegen_ptr_type(value->type) != nullptr)
+ return value->data.x_ptr.mut == ConstPtrMutComptimeVar;
+ if (value->data.x_optional == nullptr)
return false;
- return can_mutate_comptime_var_state(value->data.x_maybe);
+ return can_mutate_comptime_var_state(value->data.x_optional);
case TypeTableEntryIdErrorUnion:
if (value->data.x_err_union.err != nullptr)
@@ -4841,7 +4871,7 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) {
case TypeTableEntryIdUnion:
return false;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return return_type_is_cacheable(return_type->data.maybe.child_type);
case TypeTableEntryIdErrorUnion:
@@ -4943,17 +4973,29 @@ bool type_requires_comptime(TypeTableEntry *type_entry) {
case TypeTableEntryIdArgTuple:
return true;
case TypeTableEntryIdArray:
+ return type_requires_comptime(type_entry->data.array.child_type);
case TypeTableEntryIdStruct:
+ assert(type_has_zero_bits_known(type_entry));
+ return type_entry->data.structure.requires_comptime;
case TypeTableEntryIdUnion:
- case TypeTableEntryIdMaybe:
+ assert(type_has_zero_bits_known(type_entry));
+ return type_entry->data.unionation.requires_comptime;
+ case TypeTableEntryIdOptional:
+ return type_requires_comptime(type_entry->data.maybe.child_type);
case TypeTableEntryIdErrorUnion:
+ return type_requires_comptime(type_entry->data.error_union.payload_type);
+ case TypeTableEntryIdPointer:
+ if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) {
+ return false;
+ } else {
+ return type_requires_comptime(type_entry->data.pointer.child_type);
+ }
case TypeTableEntryIdEnum:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdFn:
case TypeTableEntryIdBool:
case TypeTableEntryIdInt:
case TypeTableEntryIdFloat:
- case TypeTableEntryIdPointer:
case TypeTableEntryIdVoid:
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdPromise:
@@ -5308,6 +5350,52 @@ bool ir_get_var_is_comptime(VariableTableEntry *var) {
return var->is_comptime->value.data.x_bool;
}
+bool const_values_equal_ptr(ConstExprValue *a, ConstExprValue *b) {
+ if (a->data.x_ptr.special != b->data.x_ptr.special)
+ return false;
+ if (a->data.x_ptr.mut != b->data.x_ptr.mut)
+ return false;
+ switch (a->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee)
+ return false;
+ return true;
+ case ConstPtrSpecialBaseArray:
+ if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val &&
+ a->data.x_ptr.data.base_array.array_val->global_refs !=
+ b->data.x_ptr.data.base_array.array_val->global_refs)
+ {
+ return false;
+ }
+ if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index)
+ return false;
+ if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr)
+ return false;
+ return true;
+ case ConstPtrSpecialBaseStruct:
+ if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val &&
+ a->data.x_ptr.data.base_struct.struct_val->global_refs !=
+ b->data.x_ptr.data.base_struct.struct_val->global_refs)
+ {
+ return false;
+ }
+ if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index)
+ return false;
+ return true;
+ case ConstPtrSpecialHardCodedAddr:
+ if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr)
+ return false;
+ return true;
+ case ConstPtrSpecialDiscard:
+ return true;
+ case ConstPtrSpecialFunction:
+ return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry;
+ }
+ zig_unreachable();
+}
+
bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
assert(a->type->id == b->type->id);
assert(a->special == ConstValSpecialStatic);
@@ -5359,49 +5447,7 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
return bigint_cmp(&a->data.x_bigint, &b->data.x_bigint) == CmpEQ;
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
- if (a->data.x_ptr.special != b->data.x_ptr.special)
- return false;
- if (a->data.x_ptr.mut != b->data.x_ptr.mut)
- return false;
- switch (a->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee)
- return false;
- return true;
- case ConstPtrSpecialBaseArray:
- if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val &&
- a->data.x_ptr.data.base_array.array_val->global_refs !=
- b->data.x_ptr.data.base_array.array_val->global_refs)
- {
- return false;
- }
- if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index)
- return false;
- if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr)
- return false;
- return true;
- case ConstPtrSpecialBaseStruct:
- if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val &&
- a->data.x_ptr.data.base_struct.struct_val->global_refs !=
- b->data.x_ptr.data.base_struct.struct_val->global_refs)
- {
- return false;
- }
- if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index)
- return false;
- return true;
- case ConstPtrSpecialHardCodedAddr:
- if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr)
- return false;
- return true;
- case ConstPtrSpecialDiscard:
- return true;
- case ConstPtrSpecialFunction:
- return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry;
- }
- zig_unreachable();
+ return const_values_equal_ptr(a, b);
case TypeTableEntryIdArray:
zig_panic("TODO");
case TypeTableEntryIdStruct:
@@ -5416,11 +5462,13 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) {
zig_panic("TODO");
case TypeTableEntryIdNull:
zig_panic("TODO");
- case TypeTableEntryIdMaybe:
- if (a->data.x_maybe == nullptr || b->data.x_maybe == nullptr) {
- return (a->data.x_maybe == nullptr && b->data.x_maybe == nullptr);
+ case TypeTableEntryIdOptional:
+ if (get_codegen_ptr_type(a->type) != nullptr)
+ return const_values_equal_ptr(a, b);
+ if (a->data.x_optional == nullptr || b->data.x_optional == nullptr) {
+ return (a->data.x_optional == nullptr && b->data.x_optional == nullptr);
} else {
- return const_values_equal(a->data.x_maybe, b->data.x_maybe);
+ return const_values_equal(a->data.x_optional, b->data.x_optional);
}
case TypeTableEntryIdErrorUnion:
zig_panic("TODO");
@@ -5493,6 +5541,41 @@ void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue *
}
}
+void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeTableEntry *type_entry) {
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ case ConstPtrSpecialBaseStruct:
+ buf_appendf(buf, "*");
+ render_const_value(g, buf, const_ptr_pointee(g, const_val));
+ return;
+ case ConstPtrSpecialBaseArray:
+ if (const_val->data.x_ptr.data.base_array.is_cstr) {
+ buf_appendf(buf, "*(c str lit)");
+ return;
+ } else {
+ buf_appendf(buf, "*");
+ render_const_value(g, buf, const_ptr_pointee(g, const_val));
+ return;
+ }
+ case ConstPtrSpecialHardCodedAddr:
+ buf_appendf(buf, "(*%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name),
+ const_val->data.x_ptr.data.hard_coded_addr.addr);
+ return;
+ case ConstPtrSpecialDiscard:
+ buf_append_str(buf, "*_");
+ return;
+ case ConstPtrSpecialFunction:
+ {
+ FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry;
+ buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name));
+ return;
+ }
+ }
+ zig_unreachable();
+}
+
void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
switch (const_val->special) {
case ConstValSpecialRuntime:
@@ -5569,38 +5652,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
return;
}
case TypeTableEntryIdPointer:
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- zig_unreachable();
- case ConstPtrSpecialRef:
- case ConstPtrSpecialBaseStruct:
- buf_appendf(buf, "&");
- render_const_value(g, buf, const_ptr_pointee(g, const_val));
- return;
- case ConstPtrSpecialBaseArray:
- if (const_val->data.x_ptr.data.base_array.is_cstr) {
- buf_appendf(buf, "&(c str lit)");
- return;
- } else {
- buf_appendf(buf, "&");
- render_const_value(g, buf, const_ptr_pointee(g, const_val));
- return;
- }
- case ConstPtrSpecialHardCodedAddr:
- buf_appendf(buf, "(&%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name),
- const_val->data.x_ptr.data.hard_coded_addr.addr);
- return;
- case ConstPtrSpecialDiscard:
- buf_append_str(buf, "&_");
- return;
- case ConstPtrSpecialFunction:
- {
- FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry;
- buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name));
- return;
- }
- }
- zig_unreachable();
+ return render_const_val_ptr(g, buf, const_val, type_entry);
case TypeTableEntryIdBlock:
{
AstNode *node = const_val->data.x_block->source_node;
@@ -5658,10 +5710,12 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
buf_appendf(buf, "undefined");
return;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
- if (const_val->data.x_maybe) {
- render_const_value(g, buf, const_val->data.x_maybe);
+ if (get_codegen_ptr_type(const_val->type) != nullptr)
+ return render_const_val_ptr(g, buf, const_val, type_entry->data.maybe.child_type);
+ if (const_val->data.x_optional) {
+ render_const_value(g, buf, const_val->data.x_optional);
} else {
buf_appendf(buf, "null");
}
@@ -5767,7 +5821,7 @@ uint32_t type_id_hash(TypeId x) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
case TypeTableEntryIdUnion:
@@ -5813,7 +5867,7 @@ bool type_id_eql(TypeId a, TypeId b) {
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdPromise:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -5935,7 +5989,7 @@ static const TypeTableEntryId all_type_ids[] = {
TypeTableEntryIdComptimeInt,
TypeTableEntryIdUndefined,
TypeTableEntryIdNull,
- TypeTableEntryIdMaybe,
+ TypeTableEntryIdOptional,
TypeTableEntryIdErrorUnion,
TypeTableEntryIdErrorSet,
TypeTableEntryIdEnum,
@@ -5980,7 +6034,7 @@ size_t type_id_index(TypeTableEntry *entry) {
return 7;
case TypeTableEntryIdStruct:
if (entry->data.structure.is_slice)
- return 25;
+ return 6;
return 8;
case TypeTableEntryIdComptimeFloat:
return 9;
@@ -5990,7 +6044,7 @@ size_t type_id_index(TypeTableEntry *entry) {
return 11;
case TypeTableEntryIdNull:
return 12;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
return 13;
case TypeTableEntryIdErrorUnion:
return 14;
@@ -6048,8 +6102,8 @@ const char *type_id_name(TypeTableEntryId id) {
return "Undefined";
case TypeTableEntryIdNull:
return "Null";
- case TypeTableEntryIdMaybe:
- return "Nullable";
+ case TypeTableEntryIdOptional:
+ return "Optional";
case TypeTableEntryIdErrorUnion:
return "ErrorUnion";
case TypeTableEntryIdErrorSet:
diff --git a/src/analyze.hpp b/src/analyze.hpp
index 25bda198d6..88e06b2390 100644
--- a/src/analyze.hpp
+++ b/src/analyze.hpp
@@ -70,6 +70,8 @@ TypeUnionField *find_union_type_field(TypeTableEntry *type_entry, Buf *name);
TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag);
TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt *tag);
+bool is_ref(TypeTableEntry *type_entry);
+bool is_array_ref(TypeTableEntry *type_entry);
bool is_container_ref(TypeTableEntry *type_entry);
void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node);
void scan_import(CodeGen *g, ImportTableEntry *import);
diff --git a/src/ast_render.cpp b/src/ast_render.cpp
index 3785cb6ca1..2ace00885d 100644
--- a/src/ast_render.cpp
+++ b/src/ast_render.cpp
@@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) {
case BinOpTypeAssignBitXor: return "^=";
case BinOpTypeAssignBitOr: return "|=";
case BinOpTypeAssignMergeErrorSets: return "||=";
- case BinOpTypeUnwrapMaybe: return "??";
+ case BinOpTypeUnwrapOptional: return "orelse";
case BinOpTypeArrayCat: return "++";
case BinOpTypeArrayMult: return "**";
case BinOpTypeErrorUnion: return "!";
@@ -66,8 +66,7 @@ static const char *prefix_op_str(PrefixOp prefix_op) {
case PrefixOpNegationWrap: return "-%";
case PrefixOpBoolNot: return "!";
case PrefixOpBinNot: return "~";
- case PrefixOpMaybe: return "?";
- case PrefixOpUnwrapMaybe: return "??";
+ case PrefixOpOptional: return "?";
case PrefixOpAddrOf: return "&";
}
zig_unreachable();
@@ -222,6 +221,8 @@ static const char *node_type_str(NodeType node_type) {
return "FieldAccessExpr";
case NodeTypePtrDeref:
return "PtrDerefExpr";
+ case NodeTypeUnwrapOptional:
+ return "UnwrapOptional";
case NodeTypeContainerDecl:
return "ContainerDecl";
case NodeTypeStructField:
@@ -711,6 +712,13 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) {
fprintf(ar->f, ".*");
break;
}
+ case NodeTypeUnwrapOptional:
+ {
+ AstNode *lhs = node->data.unwrap_optional.expr;
+ render_node_ungrouped(ar, lhs);
+ fprintf(ar->f, ".?");
+ break;
+ }
case NodeTypeUndefinedLiteral:
fprintf(ar->f, "undefined");
break;
diff --git a/src/codegen.cpp b/src/codegen.cpp
index ac73db7b26..f675411979 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -869,7 +869,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("exact division produced remainder");
case PanicMsgIdSliceWidenRemainder:
return buf_create_from_str("slice widening size mismatch");
- case PanicMsgIdUnwrapMaybeFail:
+ case PanicMsgIdUnwrapOptionalFail:
return buf_create_from_str("attempt to unwrap null");
case PanicMsgIdUnreachable:
return buf_create_from_str("reached unreachable code");
@@ -879,6 +879,8 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) {
return buf_create_from_str("incorrect alignment");
case PanicMsgIdBadUnionField:
return buf_create_from_str("access of inactive union field");
+ case PanicMsgIdBadEnumValue:
+ return buf_create_from_str("invalid enum value");
}
zig_unreachable();
}
@@ -2497,7 +2499,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
assert(wanted_type->data.structure.is_slice);
assert(actual_type->id == TypeTableEntryIdArray);
- TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[0].type_entry;
+ TypeTableEntry *wanted_pointer_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
TypeTableEntry *wanted_child_type = wanted_pointer_type->data.pointer.child_type;
@@ -2543,6 +2545,29 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
return expr_val;
case CastOpBitCast:
return LLVMBuildBitCast(g->builder, expr_val, wanted_type->type_ref, "");
+ case CastOpPtrOfArrayToSlice: {
+ assert(cast_instruction->tmp_ptr);
+ assert(actual_type->id == TypeTableEntryIdPointer);
+ TypeTableEntry *array_type = actual_type->data.pointer.child_type;
+ assert(array_type->id == TypeTableEntryIdArray);
+
+ LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr,
+ slice_ptr_index, "");
+ LLVMValueRef indices[] = {
+ LLVMConstNull(g->builtin_types.entry_usize->type_ref),
+ LLVMConstInt(g->builtin_types.entry_usize->type_ref, 0, false),
+ };
+ LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, "");
+ gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
+
+ LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr,
+ slice_len_index, "");
+ LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->type_ref,
+ array_type->data.array.len, false);
+ gen_store_untyped(g, len_value, len_field_ptr, 0, false);
+
+ return cast_instruction->tmp_ptr;
+ }
}
zig_unreachable();
}
@@ -2678,7 +2703,7 @@ static LLVMValueRef ir_render_un_op(CodeGen *g, IrExecutable *executable, IrInst
switch (op_id) {
case IrUnOpInvalid:
- case IrUnOpMaybe:
+ case IrUnOpOptional:
case IrUnOpDereference:
zig_unreachable();
case IrUnOpNegation:
@@ -3249,7 +3274,7 @@ static LLVMValueRef ir_render_asm(CodeGen *g, IrExecutable *executable, IrInstru
}
static LLVMValueRef gen_non_null_bit(CodeGen *g, TypeTableEntry *maybe_type, LLVMValueRef maybe_handle) {
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
if (child_type->zero_bits) {
return maybe_handle;
@@ -3271,23 +3296,23 @@ static LLVMValueRef ir_render_test_non_null(CodeGen *g, IrExecutable *executable
}
static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable,
- IrInstructionUnwrapMaybe *instruction)
+ IrInstructionUnwrapOptional *instruction)
{
TypeTableEntry *ptr_type = instruction->value->value.type;
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *maybe_type = ptr_type->data.pointer.child_type;
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
LLVMValueRef maybe_ptr = ir_llvm_value(g, instruction->value);
LLVMValueRef maybe_handle = get_handle_value(g, maybe_ptr, maybe_type, ptr_type);
if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on) {
LLVMValueRef non_null_bit = gen_non_null_bit(g, maybe_type, maybe_handle);
- LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeOk");
- LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeFail");
+ LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalOk");
+ LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalFail");
LLVMBuildCondBr(g->builder, non_null_bit, ok_block, fail_block);
LLVMPositionBuilderAtEnd(g->builder, fail_block);
- gen_safety_crash(g, PanicMsgIdUnwrapMaybeFail);
+ gen_safety_crash(g, PanicMsgIdUnwrapOptionalFail);
LLVMPositionBuilderAtEnd(g->builder, ok_block);
}
@@ -3432,34 +3457,112 @@ static LLVMValueRef ir_render_err_name(CodeGen *g, IrExecutable *executable, IrI
return LLVMBuildInBoundsGEP(g->builder, g->err_name_table, indices, 2, "");
}
+static LLVMValueRef get_enum_tag_name_function(CodeGen *g, TypeTableEntry *enum_type) {
+ assert(enum_type->id == TypeTableEntryIdEnum);
+ if (enum_type->data.enumeration.name_function)
+ return enum_type->data.enumeration.name_function;
+
+ TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false,
+ PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
+ TypeTableEntry *u8_slice_type = get_slice_type(g, u8_ptr_type);
+ TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
+
+ LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(u8_slice_type->type_ref, 0),
+ &tag_int_type->type_ref, 1, false);
+
+ Buf *fn_name = get_mangled_name(g, buf_sprintf("__zig_tag_name_%s", buf_ptr(&enum_type->name)), false);
+ LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref);
+ LLVMSetLinkage(fn_val, LLVMInternalLinkage);
+ LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified));
+ addLLVMFnAttr(fn_val, "nounwind");
+ add_uwtable_attr(g, fn_val);
+ if (g->build_mode == BuildModeDebug) {
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true");
+ ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", nullptr);
+ }
+
+ LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder);
+ LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder);
+ FnTableEntry *prev_cur_fn = g->cur_fn;
+ LLVMValueRef prev_cur_fn_val = g->cur_fn_val;
+
+ LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry");
+ LLVMPositionBuilderAtEnd(g->builder, entry_block);
+ ZigLLVMClearCurrentDebugLocation(g->builder);
+ g->cur_fn = nullptr;
+ g->cur_fn_val = fn_val;
+
+ size_t field_count = enum_type->data.enumeration.src_field_count;
+ LLVMBasicBlockRef bad_value_block = LLVMAppendBasicBlock(g->cur_fn_val, "BadValue");
+ LLVMValueRef tag_int_value = LLVMGetParam(fn_val, 0);
+ LLVMValueRef switch_instr = LLVMBuildSwitch(g->builder, tag_int_value, bad_value_block, field_count);
+
+
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ LLVMValueRef array_ptr_indices[] = {
+ LLVMConstNull(usize->type_ref),
+ LLVMConstNull(usize->type_ref),
+ };
+
+ for (size_t field_i = 0; field_i < field_count; field_i += 1) {
+ Buf *name = enum_type->data.enumeration.fields[field_i].name;
+ LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true);
+ LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), "");
+ LLVMSetInitializer(str_global, str_init);
+ LLVMSetLinkage(str_global, LLVMPrivateLinkage);
+ LLVMSetGlobalConstant(str_global, true);
+ LLVMSetUnnamedAddr(str_global, true);
+ LLVMSetAlignment(str_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init)));
+
+ LLVMValueRef fields[] = {
+ LLVMConstGEP(str_global, array_ptr_indices, 2),
+ LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false),
+ };
+ LLVMValueRef slice_init_value = LLVMConstNamedStruct(u8_slice_type->type_ref, fields, 2);
+
+ LLVMValueRef slice_global = LLVMAddGlobal(g->module, LLVMTypeOf(slice_init_value), "");
+ LLVMSetInitializer(slice_global, slice_init_value);
+ LLVMSetLinkage(slice_global, LLVMPrivateLinkage);
+ LLVMSetGlobalConstant(slice_global, true);
+ LLVMSetUnnamedAddr(slice_global, true);
+ LLVMSetAlignment(slice_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(slice_init_value)));
+
+ LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(g->cur_fn_val, "Name");
+ LLVMValueRef this_tag_int_value = bigint_to_llvm_const(tag_int_type->type_ref,
+ &enum_type->data.enumeration.fields[field_i].value);
+ LLVMAddCase(switch_instr, this_tag_int_value, return_block);
+
+ LLVMPositionBuilderAtEnd(g->builder, return_block);
+ LLVMBuildRet(g->builder, slice_global);
+ }
+
+ LLVMPositionBuilderAtEnd(g->builder, bad_value_block);
+ if (g->build_mode == BuildModeDebug || g->build_mode == BuildModeSafeRelease) {
+ gen_safety_crash(g, PanicMsgIdBadEnumValue);
+ } else {
+ LLVMBuildUnreachable(g->builder);
+ }
+
+ g->cur_fn = prev_cur_fn;
+ g->cur_fn_val = prev_cur_fn_val;
+ LLVMPositionBuilderAtEnd(g->builder, prev_block);
+ LLVMSetCurrentDebugLocation(g->builder, prev_debug_location);
+
+ enum_type->data.enumeration.name_function = fn_val;
+ return fn_val;
+}
+
static LLVMValueRef ir_render_enum_tag_name(CodeGen *g, IrExecutable *executable,
IrInstructionTagName *instruction)
{
TypeTableEntry *enum_type = instruction->target->value.type;
assert(enum_type->id == TypeTableEntryIdEnum);
- assert(enum_type->data.enumeration.generate_name_table);
- TypeTableEntry *tag_int_type = enum_type->data.enumeration.tag_int_type;
+ LLVMValueRef enum_name_function = get_enum_tag_name_function(g, enum_type);
+
LLVMValueRef enum_tag_value = ir_llvm_value(g, instruction->target);
- if (ir_want_runtime_safety(g, &instruction->base)) {
- size_t field_count = enum_type->data.enumeration.src_field_count;
-
- // if the field_count can't fit in the bits of the enum_type, then it can't possibly
- // be the wrong value
- BigInt field_bi;
- bigint_init_unsigned(&field_bi, field_count);
- if (bigint_fits_in_bits(&field_bi, tag_int_type->data.integral.bit_count, false)) {
- LLVMValueRef end_val = LLVMConstInt(LLVMTypeOf(enum_tag_value), field_count, false);
- add_bounds_check(g, enum_tag_value, LLVMIntEQ, nullptr, LLVMIntULT, end_val);
- }
- }
-
- LLVMValueRef indices[] = {
- LLVMConstNull(g->builtin_types.entry_usize->type_ref),
- gen_widen_or_shorten(g, false, tag_int_type,
- g->builtin_types.entry_usize, enum_tag_value),
- };
- return LLVMBuildInBoundsGEP(g->builder, enum_type->data.enumeration.name_table, indices, 2, "");
+ return ZigLLVMBuildCall(g->builder, enum_name_function, &enum_tag_value, 1,
+ get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
}
static LLVMValueRef ir_render_field_parent_ptr(CodeGen *g, IrExecutable *executable,
@@ -3509,17 +3612,17 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I
} else if (target_type->id == TypeTableEntryIdFn) {
align_bytes = target_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPointer)
{
align_bytes = target_type->data.maybe.child_type->data.pointer.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdFn)
{
align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment;
ptr_val = target_val;
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPromise)
{
zig_panic("TODO audit this function");
@@ -3621,7 +3724,7 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrIn
success_order, failure_order, instruction->is_weak);
TypeTableEntry *maybe_type = instruction->base.value.type;
- assert(maybe_type->id == TypeTableEntryIdMaybe);
+ assert(maybe_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = maybe_type->data.maybe.child_type;
if (type_is_codegen_pointer(child_type)) {
@@ -3730,7 +3833,6 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst
} else {
end_val = LLVMConstInt(g->builtin_types.entry_usize->type_ref, array_type->data.array.len, false);
}
-
if (want_runtime_safety) {
add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val);
if (instruction->end) {
@@ -4008,10 +4110,10 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu
}
}
-static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionMaybeWrap *instruction) {
+static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionOptionalWrap *instruction) {
TypeTableEntry *wanted_type = instruction->base.value.type;
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
TypeTableEntry *child_type = wanted_type->data.maybe.child_type;
@@ -4540,7 +4642,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
case IrInstructionIdCheckSwitchProngs:
case IrInstructionIdCheckStatementIsVoid:
case IrInstructionIdTypeName:
- case IrInstructionIdCanImplicitCast:
case IrInstructionIdDeclRef:
case IrInstructionIdSwitchVar:
case IrInstructionIdOffsetOf:
@@ -4593,8 +4694,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_asm(g, executable, (IrInstructionAsm *)instruction);
case IrInstructionIdTestNonNull:
return ir_render_test_non_null(g, executable, (IrInstructionTestNonNull *)instruction);
- case IrInstructionIdUnwrapMaybe:
- return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapOptional *)instruction);
case IrInstructionIdClz:
return ir_render_clz(g, executable, (IrInstructionClz *)instruction);
case IrInstructionIdCtz:
@@ -4635,8 +4736,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_unwrap_err_code(g, executable, (IrInstructionUnwrapErrCode *)instruction);
case IrInstructionIdUnwrapErrPayload:
return ir_render_unwrap_err_payload(g, executable, (IrInstructionUnwrapErrPayload *)instruction);
- case IrInstructionIdMaybeWrap:
- return ir_render_maybe_wrap(g, executable, (IrInstructionMaybeWrap *)instruction);
+ case IrInstructionIdOptionalWrap:
+ return ir_render_maybe_wrap(g, executable, (IrInstructionOptionalWrap *)instruction);
case IrInstructionIdErrWrapCode:
return ir_render_err_wrap_code(g, executable, (IrInstructionErrWrapCode *)instruction);
case IrInstructionIdErrWrapPayload:
@@ -4866,7 +4967,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con
}
case TypeTableEntryIdPointer:
case TypeTableEntryIdFn:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdPromise:
{
LLVMValueRef ptr_val = gen_const_val(g, const_val, "");
@@ -4914,6 +5015,79 @@ static bool is_llvm_value_unnamed_type(TypeTableEntry *type_entry, LLVMValueRef
return LLVMTypeOf(val) != type_entry->type_ref;
}
+static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) {
+ render_const_val_global(g, const_val, name);
+ switch (const_val->data.x_ptr.special) {
+ case ConstPtrSpecialInvalid:
+ case ConstPtrSpecialDiscard:
+ zig_unreachable();
+ case ConstPtrSpecialRef:
+ {
+ ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
+ render_const_val(g, pointee, "");
+ render_const_val_global(g, pointee, "");
+ ConstExprValue *other_val = pointee;
+ const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ case ConstPtrSpecialBaseArray:
+ {
+ ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
+ size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
+ assert(array_const_val->type->id == TypeTableEntryIdArray);
+ if (array_const_val->type->zero_bits) {
+ // make this a null pointer
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val,
+ elem_index);
+ LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
+ const_val->global_refs->llvm_value = ptr_val;
+ render_const_val_global(g, const_val, "");
+ return ptr_val;
+ }
+ case ConstPtrSpecialBaseStruct:
+ {
+ ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
+ assert(struct_const_val->type->id == TypeTableEntryIdStruct);
+ if (struct_const_val->type->zero_bits) {
+ // make this a null pointer
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index;
+ size_t gen_field_index =
+ struct_const_val->type->data.structure.fields[src_field_index].gen_index;
+ LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val,
+ gen_field_index);
+ LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
+ const_val->global_refs->llvm_value = ptr_val;
+ render_const_val_global(g, const_val, "");
+ return ptr_val;
+ }
+ case ConstPtrSpecialHardCodedAddr:
+ {
+ uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
+ TypeTableEntry *usize = g->builtin_types.entry_usize;
+ const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
+ const_val->type->type_ref);
+ render_const_val_global(g, const_val, "");
+ return const_val->global_refs->llvm_value;
+ }
+ case ConstPtrSpecialFunction:
+ return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref);
+ }
+ zig_unreachable();
+}
+
static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const char *name) {
TypeTableEntry *type_entry = const_val->type;
assert(!type_entry->zero_bits);
@@ -4958,23 +5132,19 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
} else {
return LLVMConstNull(LLVMInt1Type());
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
if (child_type->zero_bits) {
- return LLVMConstInt(LLVMInt1Type(), const_val->data.x_maybe ? 1 : 0, false);
+ return LLVMConstInt(LLVMInt1Type(), const_val->data.x_optional ? 1 : 0, false);
} else if (type_is_codegen_pointer(child_type)) {
- if (const_val->data.x_maybe) {
- return gen_const_val(g, const_val->data.x_maybe, "");
- } else {
- return LLVMConstNull(child_type->type_ref);
- }
+ return gen_const_val_ptr(g, const_val, name);
} else {
LLVMValueRef child_val;
LLVMValueRef maybe_val;
bool make_unnamed_struct;
- if (const_val->data.x_maybe) {
- child_val = gen_const_val(g, const_val->data.x_maybe, "");
+ if (const_val->data.x_optional) {
+ child_val = gen_const_val(g, const_val->data.x_optional, "");
maybe_val = LLVMConstAllOnes(LLVMInt1Type());
make_unnamed_struct = is_llvm_value_unnamed_type(const_val->type, child_val);
@@ -5164,78 +5334,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
assert(const_val->data.x_ptr.mut == ConstPtrMutComptimeConst);
return fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry);
case TypeTableEntryIdPointer:
- {
- render_const_val_global(g, const_val, name);
- switch (const_val->data.x_ptr.special) {
- case ConstPtrSpecialInvalid:
- case ConstPtrSpecialDiscard:
- zig_unreachable();
- case ConstPtrSpecialRef:
- {
- ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee;
- render_const_val(g, pointee, "");
- render_const_val_global(g, pointee, "");
- ConstExprValue *other_val = pointee;
- const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- case ConstPtrSpecialBaseArray:
- {
- ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val;
- size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index;
- assert(array_const_val->type->id == TypeTableEntryIdArray);
- if (array_const_val->type->zero_bits) {
- // make this a null pointer
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val,
- elem_index);
- LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
- const_val->global_refs->llvm_value = ptr_val;
- render_const_val_global(g, const_val, "");
- return ptr_val;
- }
- case ConstPtrSpecialBaseStruct:
- {
- ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val;
- assert(struct_const_val->type->id == TypeTableEntryIdStruct);
- if (struct_const_val->type->zero_bits) {
- // make this a null pointer
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index;
- size_t gen_field_index =
- struct_const_val->type->data.structure.fields[src_field_index].gen_index;
- LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val,
- gen_field_index);
- LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref);
- const_val->global_refs->llvm_value = ptr_val;
- render_const_val_global(g, const_val, "");
- return ptr_val;
- }
- case ConstPtrSpecialHardCodedAddr:
- {
- uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr;
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false),
- const_val->type->type_ref);
- render_const_val_global(g, const_val, "");
- return const_val->global_refs->llvm_value;
- }
- case ConstPtrSpecialFunction:
- return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref);
- }
- }
- zig_unreachable();
+ return gen_const_val_ptr(g, const_val, name);
case TypeTableEntryIdErrorUnion:
{
TypeTableEntry *payload_type = type_entry->data.error_union.payload_type;
@@ -5367,55 +5466,6 @@ static void generate_error_name_table(CodeGen *g) {
LLVMSetAlignment(g->err_name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(err_name_table_init)));
}
-static void generate_enum_name_tables(CodeGen *g) {
- TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false,
- PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0);
- TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type);
-
- TypeTableEntry *usize = g->builtin_types.entry_usize;
- LLVMValueRef array_ptr_indices[] = {
- LLVMConstNull(usize->type_ref),
- LLVMConstNull(usize->type_ref),
- };
-
-
- for (size_t enum_i = 0; enum_i < g->name_table_enums.length; enum_i += 1) {
- TypeTableEntry *enum_type = g->name_table_enums.at(enum_i);
- assert(enum_type->id == TypeTableEntryIdEnum);
-
- size_t field_count = enum_type->data.enumeration.src_field_count;
- LLVMValueRef *values = allocate(field_count);
- for (size_t field_i = 0; field_i < field_count; field_i += 1) {
- Buf *name = enum_type->data.enumeration.fields[field_i].name;
-
- LLVMValueRef str_init = LLVMConstString(buf_ptr(name), (unsigned)buf_len(name), true);
- LLVMValueRef str_global = LLVMAddGlobal(g->module, LLVMTypeOf(str_init), "");
- LLVMSetInitializer(str_global, str_init);
- LLVMSetLinkage(str_global, LLVMPrivateLinkage);
- LLVMSetGlobalConstant(str_global, true);
- LLVMSetUnnamedAddr(str_global, true);
- LLVMSetAlignment(str_global, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(str_init)));
-
- LLVMValueRef fields[] = {
- LLVMConstGEP(str_global, array_ptr_indices, 2),
- LLVMConstInt(g->builtin_types.entry_usize->type_ref, buf_len(name), false),
- };
- values[field_i] = LLVMConstNamedStruct(str_type->type_ref, fields, 2);
- }
-
- LLVMValueRef name_table_init = LLVMConstArray(str_type->type_ref, values, (unsigned)field_count);
-
- Buf *table_name = get_mangled_name(g, buf_sprintf("%s_name_table", buf_ptr(&enum_type->name)), false);
- LLVMValueRef name_table = LLVMAddGlobal(g->module, LLVMTypeOf(name_table_init), buf_ptr(table_name));
- LLVMSetInitializer(name_table, name_table_init);
- LLVMSetLinkage(name_table, LLVMPrivateLinkage);
- LLVMSetGlobalConstant(name_table, true);
- LLVMSetUnnamedAddr(name_table, true);
- LLVMSetAlignment(name_table, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(name_table_init)));
- enum_type->data.enumeration.name_table = name_table;
- }
-}
-
static void build_all_basic_blocks(CodeGen *g, FnTableEntry *fn) {
IrExecutable *executable = &fn->analyzed_executable;
assert(executable->basic_block_list.length > 0);
@@ -5512,7 +5562,6 @@ static void do_code_gen(CodeGen *g) {
}
generate_error_name_table(g);
- generate_enum_name_tables(g);
// Generate module level variables
for (size_t i = 0; i < g->global_vars.length; i += 1) {
@@ -5651,8 +5700,8 @@ static void do_code_gen(CodeGen *g) {
} else if (instruction->id == IrInstructionIdSlice) {
IrInstructionSlice *slice_instruction = (IrInstructionSlice *)instruction;
slot = &slice_instruction->tmp_ptr;
- } else if (instruction->id == IrInstructionIdMaybeWrap) {
- IrInstructionMaybeWrap *maybe_wrap_instruction = (IrInstructionMaybeWrap *)instruction;
+ } else if (instruction->id == IrInstructionIdOptionalWrap) {
+ IrInstructionOptionalWrap *maybe_wrap_instruction = (IrInstructionOptionalWrap *)instruction;
slot = &maybe_wrap_instruction->tmp_ptr;
} else if (instruction->id == IrInstructionIdErrWrapPayload) {
IrInstructionErrWrapPayload *err_wrap_payload_instruction = (IrInstructionErrWrapPayload *)instruction;
@@ -6192,7 +6241,6 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdCImport, "cImport", 1);
create_builtin_fn(g, BuiltinFnIdErrName, "errorName", 1);
create_builtin_fn(g, BuiltinFnIdTypeName, "typeName", 1);
- create_builtin_fn(g, BuiltinFnIdCanImplicitCast, "canImplicitCast", 2);
create_builtin_fn(g, BuiltinFnIdEmbedFile, "embedFile", 1);
create_builtin_fn(g, BuiltinFnIdCmpxchgWeak, "cmpxchgWeak", 6);
create_builtin_fn(g, BuiltinFnIdCmpxchgStrong, "cmpxchgStrong", 6);
@@ -6250,13 +6298,7 @@ static const char *build_mode_to_str(BuildMode build_mode) {
zig_unreachable();
}
-static void define_builtin_compile_vars(CodeGen *g) {
- if (g->std_package == nullptr)
- return;
-
- const char *builtin_zig_basename = "builtin.zig";
- Buf *builtin_zig_path = buf_alloc();
- os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path);
+Buf *codegen_generate_builtin_source(CodeGen *g) {
Buf *contents = buf_alloc();
// Modifications to this struct must be coordinated with code that does anything with
@@ -6396,7 +6438,6 @@ static void define_builtin_compile_vars(CodeGen *g) {
const TypeTableEntryId id = type_id_at_index(i);
buf_appendf(contents, " %s,\n", type_id_name(id));
}
- buf_appendf(contents, " Slice,\n");
buf_appendf(contents, "};\n\n");
}
{
@@ -6409,14 +6450,13 @@ static void define_builtin_compile_vars(CodeGen *g) {
" Int: Int,\n"
" Float: Float,\n"
" Pointer: Pointer,\n"
- " Slice: Slice,\n"
" Array: Array,\n"
" Struct: Struct,\n"
" ComptimeFloat: void,\n"
" ComptimeInt: void,\n"
" Undefined: void,\n"
" Null: void,\n"
- " Nullable: Nullable,\n"
+ " Optional: Optional,\n"
" ErrorUnion: ErrorUnion,\n"
" ErrorSet: ErrorSet,\n"
" Enum: Enum,\n"
@@ -6439,13 +6479,18 @@ static void define_builtin_compile_vars(CodeGen *g) {
" };\n"
"\n"
" pub const Pointer = struct {\n"
+ " size: Size,\n"
" is_const: bool,\n"
" is_volatile: bool,\n"
" alignment: u32,\n"
" child: type,\n"
- " };\n"
"\n"
- " pub const Slice = Pointer;\n"
+ " pub const Size = enum {\n"
+ " One,\n"
+ " Many,\n"
+ " Slice,\n"
+ " };\n"
+ " };\n"
"\n"
" pub const Array = struct {\n"
" len: usize,\n"
@@ -6470,7 +6515,7 @@ static void define_builtin_compile_vars(CodeGen *g) {
" defs: []Definition,\n"
" };\n"
"\n"
- " pub const Nullable = struct {\n"
+ " pub const Optional = struct {\n"
" child: type,\n"
" };\n"
"\n"
@@ -6619,6 +6664,19 @@ static void define_builtin_compile_vars(CodeGen *g) {
buf_appendf(contents, "pub const __zig_test_fn_slice = {}; // overwritten later\n");
+
+ return contents;
+}
+
+static void define_builtin_compile_vars(CodeGen *g) {
+ if (g->std_package == nullptr)
+ return;
+
+ const char *builtin_zig_basename = "builtin.zig";
+ Buf *builtin_zig_path = buf_alloc();
+ os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path);
+
+ Buf *contents = codegen_generate_builtin_source(g);
ensure_cache_dir(g);
os_write_file(builtin_zig_path, contents);
@@ -7032,7 +7090,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry
case TypeTableEntryIdArray:
prepend_c_type_to_decl_list(g, gen_h, type_entry->data.array.child_type);
return;
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
prepend_c_type_to_decl_list(g, gen_h, type_entry->data.maybe.child_type);
return;
case TypeTableEntryIdFn:
@@ -7121,7 +7179,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf
buf_appendf(out_buf, "%s%s *", const_str, buf_ptr(&child_buf));
break;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
if (child_type->zero_bits) {
@@ -7335,7 +7393,7 @@ static void gen_h_file(CodeGen *g) {
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdFn:
case TypeTableEntryIdPromise:
zig_unreachable();
diff --git a/src/codegen.hpp b/src/codegen.hpp
index a7a4b748c4..b5f3374ec4 100644
--- a/src/codegen.hpp
+++ b/src/codegen.hpp
@@ -59,5 +59,7 @@ void codegen_add_object(CodeGen *g, Buf *object_path);
void codegen_translate_c(CodeGen *g, Buf *path);
+Buf *codegen_generate_builtin_source(CodeGen *g);
+
#endif
diff --git a/src/ir.cpp b/src/ir.cpp
index 9578795fcc..e5e8dcbb9d 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -47,7 +47,7 @@ enum ConstCastResultId {
ConstCastResultIdErrSetGlobal,
ConstCastResultIdPointerChild,
ConstCastResultIdSliceChild,
- ConstCastResultIdNullableChild,
+ ConstCastResultIdOptionalChild,
ConstCastResultIdErrorUnionPayload,
ConstCastResultIdErrorUnionErrorSet,
ConstCastResultIdFnAlign,
@@ -62,6 +62,7 @@ enum ConstCastResultId {
ConstCastResultIdType,
ConstCastResultIdUnresolvedInferredErrSet,
ConstCastResultIdAsyncAllocatorType,
+ ConstCastResultIdNullWrapPtr,
};
struct ConstCastErrSetMismatch {
@@ -85,11 +86,12 @@ struct ConstCastOnly {
ConstCastErrSetMismatch error_set;
ConstCastOnly *pointer_child;
ConstCastOnly *slice_child;
- ConstCastOnly *nullable_child;
+ ConstCastOnly *optional_child;
ConstCastOnly *error_union_payload;
ConstCastOnly *error_union_error_set;
ConstCastOnly *return_type;
ConstCastOnly *async_allocator_type;
+ ConstCastOnly *null_wrap_ptr_child;
ConstCastArg fn_arg;
ConstCastArgNoAlias arg_no_alias;
} data;
@@ -108,9 +110,10 @@ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction,
static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op);
static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval);
static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align);
+static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align);
ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) {
- assert(const_val->type->id == TypeTableEntryIdPointer);
+ assert(get_codegen_ptr_type(const_val->type) != nullptr);
assert(const_val->special == ConstValSpecialStatic);
switch (const_val->data.x_ptr.special) {
case ConstPtrSpecialInvalid:
@@ -369,8 +372,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestNonNull *) {
return IrInstructionIdTestNonNull;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapMaybe *) {
- return IrInstructionIdUnwrapMaybe;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapOptional *) {
+ return IrInstructionIdUnwrapOptional;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionClz *) {
@@ -521,8 +524,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapErrPayload
return IrInstructionIdUnwrapErrPayload;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionMaybeWrap *) {
- return IrInstructionIdMaybeWrap;
+static constexpr IrInstructionId ir_instruction_id(IrInstructionOptionalWrap *) {
+ return IrInstructionIdOptionalWrap;
}
static constexpr IrInstructionId ir_instruction_id(IrInstructionErrWrapPayload *) {
@@ -585,10 +588,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeName *) {
return IrInstructionIdTypeName;
}
-static constexpr IrInstructionId ir_instruction_id(IrInstructionCanImplicitCast *) {
- return IrInstructionIdCanImplicitCast;
-}
-
static constexpr IrInstructionId ir_instruction_id(IrInstructionDeclRef *) {
return IrInstructionIdDeclRef;
}
@@ -1572,7 +1571,7 @@ static IrInstruction *ir_build_test_nonnull_from(IrBuilder *irb, IrInstruction *
static IrInstruction *ir_build_unwrap_maybe(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value,
bool safety_check_on)
{
- IrInstructionUnwrapMaybe *instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionUnwrapOptional *instruction = ir_build_instruction(irb, scope, source_node);
instruction->value = value;
instruction->safety_check_on = safety_check_on;
@@ -1591,7 +1590,7 @@ static IrInstruction *ir_build_unwrap_maybe_from(IrBuilder *irb, IrInstruction *
}
static IrInstruction *ir_build_maybe_wrap(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) {
- IrInstructionMaybeWrap *instruction = ir_build_instruction(irb, scope, source_node);
+ IrInstructionOptionalWrap *instruction = ir_build_instruction(irb, scope, source_node);
instruction->value = value;
ir_ref_instruction(value, irb->current_basic_block);
@@ -2348,20 +2347,6 @@ static IrInstruction *ir_build_type_name(IrBuilder *irb, Scope *scope, AstNode *
return &instruction->base;
}
-static IrInstruction *ir_build_can_implicit_cast(IrBuilder *irb, Scope *scope, AstNode *source_node,
- IrInstruction *type_value, IrInstruction *target_value)
-{
- IrInstructionCanImplicitCast *instruction = ir_build_instruction(
- irb, scope, source_node);
- instruction->type_value = type_value;
- instruction->target_value = target_value;
-
- ir_ref_instruction(type_value, irb->current_basic_block);
- ir_ref_instruction(target_value, irb->current_basic_block);
-
- return &instruction->base;
-}
-
static IrInstruction *ir_build_decl_ref(IrBuilder *irb, Scope *scope, AstNode *source_node,
Tld *tld, LVal lval)
{
@@ -2511,9 +2496,9 @@ static IrInstruction *ir_build_arg_type(IrBuilder *irb, Scope *scope, AstNode *s
return &instruction->base;
}
-static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Nullable nullable) {
+static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Optional optional) {
IrInstructionErrorReturnTrace *instruction = ir_build_instruction(irb, scope, source_node);
- instruction->nullable = nullable;
+ instruction->optional = optional;
return &instruction->base;
}
@@ -3310,9 +3295,9 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As
is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_non_null);
}
- IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "MaybeNonNull");
- IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "MaybeNull");
- IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "MaybeEnd");
+ IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull");
+ IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull");
+ IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd");
ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime);
ir_set_cursor_at_end_and_append_block(irb, null_block);
@@ -3441,7 +3426,7 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node)
return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult);
case BinOpTypeMergeErrorSets:
return ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets);
- case BinOpTypeUnwrapMaybe:
+ case BinOpTypeUnwrapOptional:
return ir_gen_maybe_ok_or(irb, scope, node);
case BinOpTypeErrorUnion:
return ir_gen_error_union(irb, scope, node);
@@ -4132,21 +4117,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
IrInstruction *type_name = ir_build_type_name(irb, scope, node, arg0_value);
return ir_lval_wrap(irb, scope, type_name, lval);
}
- case BuiltinFnIdCanImplicitCast:
- {
- AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
- IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
- if (arg0_value == irb->codegen->invalid_instruction)
- return arg0_value;
-
- AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
- IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
- if (arg1_value == irb->codegen->invalid_instruction)
- return arg1_value;
-
- IrInstruction *can_implicit_cast = ir_build_can_implicit_cast(irb, scope, node, arg0_value, arg1_value);
- return ir_lval_wrap(irb, scope, can_implicit_cast, lval);
- }
case BuiltinFnIdPanic:
{
AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -4620,11 +4590,8 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *
static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypePointerType);
- // The null check here is for C imports which don't set a token on the AST node. We could potentially
- // update that code to create a fake token and then remove this check.
- PtrLen ptr_len = (node->data.pointer_type.star_token != nullptr &&
- (node->data.pointer_type.star_token->id == TokenIdStar ||
- node->data.pointer_type.star_token->id == TokenIdStarStar)) ? PtrLenSingle : PtrLenUnknown;
+ PtrLen ptr_len = (node->data.pointer_type.star_token->id == TokenIdStar ||
+ node->data.pointer_type.star_token->id == TokenIdStarStar) ? PtrLenSingle : PtrLenUnknown;
bool is_const = node->data.pointer_type.is_const;
bool is_volatile = node->data.pointer_type.is_volatile;
AstNode *expr_node = node->data.pointer_type.op_expr;
@@ -4694,21 +4661,6 @@ static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode
return ir_build_load_ptr(irb, scope, source_node, payload_ptr);
}
-static IrInstruction *ir_gen_maybe_assert_ok(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) {
- assert(node->type == NodeTypePrefixOpExpr);
- AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
-
- IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
- if (maybe_ptr == irb->codegen->invalid_instruction)
- return irb->codegen->invalid_instruction;
-
- IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
- if (lval.is_ptr)
- return unwrapped_ptr;
-
- return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
-}
-
static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *node) {
assert(node->type == NodeTypePrefixOpExpr);
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
@@ -4736,10 +4688,8 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval);
case PrefixOpNegationWrap:
return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval);
- case PrefixOpMaybe:
- return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval);
- case PrefixOpUnwrapMaybe:
- return ir_gen_maybe_assert_ok(irb, scope, node, lval);
+ case PrefixOpOptional:
+ return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval);
case PrefixOpAddrOf: {
AstNode *expr_node = node->data.prefix_op_expr.primary_expr;
return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval);
@@ -5403,9 +5353,9 @@ static IrInstruction *ir_gen_test_expr(IrBuilder *irb, Scope *scope, AstNode *no
IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node, maybe_val_ptr);
IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_val);
- IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "MaybeThen");
- IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "MaybeElse");
- IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "MaybeEndIf");
+ IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "OptionalThen");
+ IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "OptionalElse");
+ IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "OptionalEndIf");
IrInstruction *is_comptime;
if (ir_should_inline(irb->exec, scope)) {
@@ -6574,7 +6524,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_load_ptr(irb, scope, node, ptr_instruction);
}
case NodeTypePtrDeref: {
- assert(node->type == NodeTypePtrDeref);
AstNode *expr_node = node->data.ptr_deref_expr.target;
IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval);
if (value == irb->codegen->invalid_instruction)
@@ -6582,6 +6531,19 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
return ir_build_un_op(irb, scope, node, IrUnOpDereference, value);
}
+ case NodeTypeUnwrapOptional: {
+ AstNode *expr_node = node->data.unwrap_optional.expr;
+
+ IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR);
+ if (maybe_ptr == irb->codegen->invalid_instruction)
+ return irb->codegen->invalid_instruction;
+
+ IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true);
+ if (lval.is_ptr)
+ return unwrapped_ptr;
+
+ return ir_build_load_ptr(irb, scope, node, unwrapped_ptr);
+ }
case NodeTypeThisLiteral:
return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval);
case NodeTypeBoolLiteral:
@@ -7552,7 +7514,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc
}
} else if (const_val_fits_in_num_lit(const_val, other_type)) {
return true;
- } else if (other_type->id == TypeTableEntryIdMaybe) {
+ } else if (other_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *child_type = other_type->data.maybe.child_type;
if (const_val_fits_in_num_lit(const_val, child_type)) {
return true;
@@ -7685,27 +7647,44 @@ static TypeTableEntry *get_error_set_intersection(IrAnalyze *ira, TypeTableEntry
}
-static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *expected_type,
- TypeTableEntry *actual_type, AstNode *source_node)
+static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry *wanted_type,
+ TypeTableEntry *actual_type, AstNode *source_node, bool wanted_is_mutable)
{
CodeGen *g = ira->codegen;
ConstCastOnly result = {};
result.id = ConstCastResultIdOk;
- if (expected_type == actual_type)
+ if (wanted_type == actual_type)
return result;
- // pointer const
- if (expected_type->id == TypeTableEntryIdPointer &&
- actual_type->id == TypeTableEntryIdPointer &&
- (actual_type->data.pointer.ptr_len == expected_type->data.pointer.ptr_len) &&
- (!actual_type->data.pointer.is_const || expected_type->data.pointer.is_const) &&
- (!actual_type->data.pointer.is_volatile || expected_type->data.pointer.is_volatile) &&
- actual_type->data.pointer.bit_offset == expected_type->data.pointer.bit_offset &&
- actual_type->data.pointer.unaligned_bit_count == expected_type->data.pointer.unaligned_bit_count &&
- actual_type->data.pointer.alignment >= expected_type->data.pointer.alignment)
+ // * and [*] can do a const-cast-only to ?* and ?[*], respectively
+ // but not if there is a mutable parent pointer
+ if (!wanted_is_mutable && wanted_type->id == TypeTableEntryIdOptional &&
+ wanted_type->data.maybe.child_type->id == TypeTableEntryIdPointer &&
+ actual_type->id == TypeTableEntryIdPointer)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.pointer.child_type, actual_type->data.pointer.child_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira,
+ wanted_type->data.maybe.child_type, actual_type, source_node, wanted_is_mutable);
+ if (child.id != ConstCastResultIdOk) {
+ result.id = ConstCastResultIdNullWrapPtr;
+ result.data.null_wrap_ptr_child = allocate_nonzero(1);
+ *result.data.null_wrap_ptr_child = child;
+ }
+ return result;
+ }
+
+ // pointer const
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ (actual_type->data.pointer.ptr_len == wanted_type->data.pointer.ptr_len) &&
+ (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) &&
+ (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile) &&
+ actual_type->data.pointer.bit_offset == wanted_type->data.pointer.bit_offset &&
+ actual_type->data.pointer.unaligned_bit_count == wanted_type->data.pointer.unaligned_bit_count &&
+ actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment)
+ {
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type, source_node, !wanted_type->data.pointer.is_const);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdPointerChild;
result.data.pointer_child = allocate_nonzero(1);
@@ -7715,17 +7694,17 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// slice const
- if (is_slice(expected_type) && is_slice(actual_type)) {
+ if (is_slice(wanted_type) && is_slice(actual_type)) {
TypeTableEntry *actual_ptr_type = actual_type->data.structure.fields[slice_ptr_index].type_entry;
- TypeTableEntry *expected_ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
- if ((!actual_ptr_type->data.pointer.is_const || expected_ptr_type->data.pointer.is_const) &&
- (!actual_ptr_type->data.pointer.is_volatile || expected_ptr_type->data.pointer.is_volatile) &&
- actual_ptr_type->data.pointer.bit_offset == expected_ptr_type->data.pointer.bit_offset &&
- actual_ptr_type->data.pointer.unaligned_bit_count == expected_ptr_type->data.pointer.unaligned_bit_count &&
- actual_ptr_type->data.pointer.alignment >= expected_ptr_type->data.pointer.alignment)
+ TypeTableEntry *wanted_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ if ((!actual_ptr_type->data.pointer.is_const || wanted_ptr_type->data.pointer.is_const) &&
+ (!actual_ptr_type->data.pointer.is_volatile || wanted_ptr_type->data.pointer.is_volatile) &&
+ actual_ptr_type->data.pointer.bit_offset == wanted_ptr_type->data.pointer.bit_offset &&
+ actual_ptr_type->data.pointer.unaligned_bit_count == wanted_ptr_type->data.pointer.unaligned_bit_count &&
+ actual_ptr_type->data.pointer.alignment >= wanted_ptr_type->data.pointer.alignment)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_ptr_type->data.pointer.child_type,
- actual_ptr_type->data.pointer.child_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_ptr_type->data.pointer.child_type,
+ actual_ptr_type->data.pointer.child_type, source_node, !wanted_ptr_type->data.pointer.is_const);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdSliceChild;
result.data.slice_child = allocate_nonzero(1);
@@ -7736,26 +7715,29 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// maybe
- if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) {
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.maybe.child_type, actual_type->data.maybe.child_type, source_node);
+ if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.maybe.child_type,
+ actual_type->data.maybe.child_type, source_node, wanted_is_mutable);
if (child.id != ConstCastResultIdOk) {
- result.id = ConstCastResultIdNullableChild;
- result.data.nullable_child = allocate_nonzero(1);
- *result.data.nullable_child = child;
+ result.id = ConstCastResultIdOptionalChild;
+ result.data.optional_child = allocate_nonzero(1);
+ *result.data.optional_child = child;
}
return result;
}
// error union
- if (expected_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) {
- ConstCastOnly payload_child = types_match_const_cast_only(ira, expected_type->data.error_union.payload_type, actual_type->data.error_union.payload_type, source_node);
+ if (wanted_type->id == TypeTableEntryIdErrorUnion && actual_type->id == TypeTableEntryIdErrorUnion) {
+ ConstCastOnly payload_child = types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type,
+ actual_type->data.error_union.payload_type, source_node, wanted_is_mutable);
if (payload_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionPayload;
result.data.error_union_payload = allocate_nonzero(1);
*result.data.error_union_payload = payload_child;
return result;
}
- ConstCastOnly error_set_child = types_match_const_cast_only(ira, expected_type->data.error_union.err_set_type, actual_type->data.error_union.err_set_type, source_node);
+ ConstCastOnly error_set_child = types_match_const_cast_only(ira, wanted_type->data.error_union.err_set_type,
+ actual_type->data.error_union.err_set_type, source_node, wanted_is_mutable);
if (error_set_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdErrorUnionErrorSet;
result.data.error_union_error_set = allocate_nonzero(1);
@@ -7766,9 +7748,9 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
}
// error set
- if (expected_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) {
+ if (wanted_type->id == TypeTableEntryIdErrorSet && actual_type->id == TypeTableEntryIdErrorSet) {
TypeTableEntry *contained_set = actual_type;
- TypeTableEntry *container_set = expected_type;
+ TypeTableEntry *container_set = wanted_type;
// if the container set is inferred, then this will always work.
if (container_set->data.error_set.infer_fn != nullptr) {
@@ -7809,36 +7791,37 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
- if (expected_type == ira->codegen->builtin_types.entry_promise &&
+ if (wanted_type == ira->codegen->builtin_types.entry_promise &&
actual_type->id == TypeTableEntryIdPromise)
{
return result;
}
// fn
- if (expected_type->id == TypeTableEntryIdFn &&
+ if (wanted_type->id == TypeTableEntryIdFn &&
actual_type->id == TypeTableEntryIdFn)
{
- if (expected_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
+ if (wanted_type->data.fn.fn_type_id.alignment > actual_type->data.fn.fn_type_id.alignment) {
result.id = ConstCastResultIdFnAlign;
return result;
}
- if (expected_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
+ if (wanted_type->data.fn.fn_type_id.cc != actual_type->data.fn.fn_type_id.cc) {
result.id = ConstCastResultIdFnCC;
return result;
}
- if (expected_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
+ if (wanted_type->data.fn.fn_type_id.is_var_args != actual_type->data.fn.fn_type_id.is_var_args) {
result.id = ConstCastResultIdFnVarArgs;
return result;
}
- if (expected_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
+ if (wanted_type->data.fn.is_generic != actual_type->data.fn.is_generic) {
result.id = ConstCastResultIdFnIsGeneric;
return result;
}
- if (!expected_type->data.fn.is_generic &&
+ if (!wanted_type->data.fn.is_generic &&
actual_type->data.fn.fn_type_id.return_type->id != TypeTableEntryIdUnreachable)
{
- ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.fn.fn_type_id.return_type, actual_type->data.fn.fn_type_id.return_type, source_node);
+ ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.fn.fn_type_id.return_type,
+ actual_type->data.fn.fn_type_id.return_type, source_node, false);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdFnReturnType;
result.data.return_type = allocate_nonzero(1);
@@ -7846,9 +7829,11 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
}
- if (!expected_type->data.fn.is_generic && expected_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
- ConstCastOnly child = types_match_const_cast_only(ira, actual_type->data.fn.fn_type_id.async_allocator_type,
- expected_type->data.fn.fn_type_id.async_allocator_type, source_node);
+ if (!wanted_type->data.fn.is_generic && wanted_type->data.fn.fn_type_id.cc == CallingConventionAsync) {
+ ConstCastOnly child = types_match_const_cast_only(ira,
+ actual_type->data.fn.fn_type_id.async_allocator_type,
+ wanted_type->data.fn.fn_type_id.async_allocator_type,
+ source_node, false);
if (child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdAsyncAllocatorType;
result.data.async_allocator_type = allocate_nonzero(1);
@@ -7856,22 +7841,23 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
return result;
}
}
- if (expected_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
+ if (wanted_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) {
result.id = ConstCastResultIdFnArgCount;
return result;
}
- if (expected_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
+ if (wanted_type->data.fn.fn_type_id.next_param_index != actual_type->data.fn.fn_type_id.next_param_index) {
result.id = ConstCastResultIdFnGenericArgCount;
return result;
}
- assert(expected_type->data.fn.is_generic ||
- expected_type->data.fn.fn_type_id.next_param_index == expected_type->data.fn.fn_type_id.param_count);
- for (size_t i = 0; i < expected_type->data.fn.fn_type_id.next_param_index; i += 1) {
+ assert(wanted_type->data.fn.is_generic ||
+ wanted_type->data.fn.fn_type_id.next_param_index == wanted_type->data.fn.fn_type_id.param_count);
+ for (size_t i = 0; i < wanted_type->data.fn.fn_type_id.next_param_index; i += 1) {
// note it's reversed for parameters
FnTypeParamInfo *actual_param_info = &actual_type->data.fn.fn_type_id.param_info[i];
- FnTypeParamInfo *expected_param_info = &expected_type->data.fn.fn_type_id.param_info[i];
+ FnTypeParamInfo *expected_param_info = &wanted_type->data.fn.fn_type_id.param_info[i];
- ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type, expected_param_info->type, source_node);
+ ConstCastOnly arg_child = types_match_const_cast_only(ira, actual_param_info->type,
+ expected_param_info->type, source_node, false);
if (arg_child.id != ConstCastResultIdOk) {
result.id = ConstCastResultIdFnArg;
result.data.fn_arg.arg_index = i;
@@ -7899,11 +7885,12 @@ enum ImplicitCastMatchResult {
ImplicitCastMatchResultReportedError,
};
-static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, TypeTableEntry *expected_type,
+static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, TypeTableEntry *wanted_type,
TypeTableEntry *actual_type, IrInstruction *value)
{
AstNode *source_node = value->source_node;
- ConstCastOnly const_cast_result = types_match_const_cast_only(ira, expected_type, actual_type, source_node);
+ ConstCastOnly const_cast_result = types_match_const_cast_only(ira, wanted_type, actual_type,
+ source_node, false);
if (const_cast_result.id == ConstCastResultIdOk) {
return ImplicitCastMatchResultYes;
}
@@ -7918,21 +7905,21 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
missing_errors = &const_cast_result.data.error_union_error_set->data.error_set.missing_errors;
} else if (const_cast_result.data.error_union_error_set->id == ConstCastResultIdErrSetGlobal) {
ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
+ buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
add_error_note(ira->codegen, msg, value->source_node,
buf_sprintf("unable to cast global error set into smaller set"));
return ImplicitCastMatchResultReportedError;
}
} else if (const_cast_result.id == ConstCastResultIdErrSetGlobal) {
ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
+ buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
add_error_note(ira->codegen, msg, value->source_node,
buf_sprintf("unable to cast global error set into smaller set"));
return ImplicitCastMatchResultReportedError;
}
if (missing_errors != nullptr) {
ErrorMsg *msg = ir_add_error(ira, value,
- buf_sprintf("expected '%s', found '%s'", buf_ptr(&expected_type->name), buf_ptr(&actual_type->name)));
+ buf_sprintf("expected '%s', found '%s'", buf_ptr(&wanted_type->name), buf_ptr(&actual_type->name)));
for (size_t i = 0; i < missing_errors->length; i += 1) {
ErrorTableEntry *error_entry = missing_errors->at(i);
add_error_note(ira->codegen, msg, error_entry->decl_node,
@@ -7943,133 +7930,168 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
// implicit conversion from ?T to ?U
- if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) {
- ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type,
+ if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) {
+ ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, wanted_type->data.maybe.child_type,
actual_type->data.maybe.child_type, value);
if (res != ImplicitCastMatchResultNo)
return res;
}
// implicit conversion from non maybe type to maybe type
- if (expected_type->id == TypeTableEntryIdMaybe) {
- ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type,
+ if (wanted_type->id == TypeTableEntryIdOptional) {
+ ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, wanted_type->data.maybe.child_type,
actual_type, value);
if (res != ImplicitCastMatchResultNo)
return res;
}
// implicit conversion from null literal to maybe type
- if (expected_type->id == TypeTableEntryIdMaybe &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
actual_type->id == TypeTableEntryIdNull)
{
return ImplicitCastMatchResultYes;
}
// implicit T to U!T
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
- ir_types_match_with_implicit_cast(ira, expected_type->data.error_union.payload_type, actual_type, value))
+ if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ ir_types_match_with_implicit_cast(ira, wanted_type->data.error_union.payload_type, actual_type, value))
{
return ImplicitCastMatchResultYes;
}
// implicit conversion from error set to error union type
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
+ if (wanted_type->id == TypeTableEntryIdErrorUnion &&
actual_type->id == TypeTableEntryIdErrorSet)
{
return ImplicitCastMatchResultYes;
}
// implicit conversion from T to U!?T
- if (expected_type->id == TypeTableEntryIdErrorUnion &&
- expected_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe &&
+ if (wanted_type->id == TypeTableEntryIdErrorUnion &&
+ wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
ir_types_match_with_implicit_cast(ira,
- expected_type->data.error_union.payload_type->data.maybe.child_type,
+ wanted_type->data.error_union.payload_type->data.maybe.child_type,
actual_type, value))
{
return ImplicitCastMatchResultYes;
}
// implicit widening conversion
- if (expected_type->id == TypeTableEntryIdInt &&
+ if (wanted_type->id == TypeTableEntryIdInt &&
actual_type->id == TypeTableEntryIdInt &&
- expected_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
- expected_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
+ wanted_type->data.integral.is_signed == actual_type->data.integral.is_signed &&
+ wanted_type->data.integral.bit_count >= actual_type->data.integral.bit_count)
{
return ImplicitCastMatchResultYes;
}
// small enough unsigned ints can get casted to large enough signed ints
- if (expected_type->id == TypeTableEntryIdInt && expected_type->data.integral.is_signed &&
+ if (wanted_type->id == TypeTableEntryIdInt && wanted_type->data.integral.is_signed &&
actual_type->id == TypeTableEntryIdInt && !actual_type->data.integral.is_signed &&
- expected_type->data.integral.bit_count > actual_type->data.integral.bit_count)
+ wanted_type->data.integral.bit_count > actual_type->data.integral.bit_count)
{
return ImplicitCastMatchResultYes;
}
// implicit float widening conversion
- if (expected_type->id == TypeTableEntryIdFloat &&
+ if (wanted_type->id == TypeTableEntryIdFloat &&
actual_type->id == TypeTableEntryIdFloat &&
- expected_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
+ wanted_type->data.floating.bit_count >= actual_type->data.floating.bit_count)
{
return ImplicitCastMatchResultYes;
}
// implicit [N]T to []const T
- if (is_slice(expected_type) && actual_type->id == TypeTableEntryIdArray) {
- TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
+ if (is_slice(wanted_type) && actual_type->id == TypeTableEntryIdArray) {
+ TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
}
// implicit &const [N]T to []const T
- if (is_slice(expected_type) &&
+ if (is_slice(wanted_type) &&
actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
actual_type->data.pointer.is_const &&
actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
{
- TypeTableEntry *ptr_type = expected_type->data.structure.fields[slice_ptr_index].type_entry;
+ TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
TypeTableEntry *array_type = actual_type->data.pointer.child_type;
if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
}
// implicit [N]T to &const []const T
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.is_const &&
- is_slice(expected_type->data.pointer.child_type) &&
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.is_const &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ is_slice(wanted_type->data.pointer.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
TypeTableEntry *ptr_type =
- expected_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type,
+ actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
+ {
+ return ImplicitCastMatchResultYes;
+ }
+ }
+
+ // implicit *[N]T to [*]T
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ return ImplicitCastMatchResultYes;
+ }
+
+ // implicit *[N]T to []T
+ if (is_slice(wanted_type) &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ {
+ TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ assert(slice_ptr_type->id == TypeTableEntryIdPointer);
+ if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
}
// implicit [N]T to ?[]const T
- if (expected_type->id == TypeTableEntryIdMaybe &&
- is_slice(expected_type->data.maybe.child_type) &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
+ is_slice(wanted_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
TypeTableEntry *ptr_type =
- expected_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
+ wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type,
+ actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
return ImplicitCastMatchResultYes;
}
@@ -8081,15 +8103,16 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
if (actual_type->id == TypeTableEntryIdComptimeFloat ||
actual_type->id == TypeTableEntryIdComptimeInt)
{
- if (expected_type->id == TypeTableEntryIdPointer &&
- expected_type->data.pointer.is_const)
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ wanted_type->data.pointer.is_const)
{
- if (ir_num_lit_fits_in_other_type(ira, value, expected_type->data.pointer.child_type, false)) {
+ if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.pointer.child_type, false)) {
return ImplicitCastMatchResultYes;
} else {
return ImplicitCastMatchResultReportedError;
}
- } else if (ir_num_lit_fits_in_other_type(ira, value, expected_type, false)) {
+ } else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, false)) {
return ImplicitCastMatchResultYes;
} else {
return ImplicitCastMatchResultReportedError;
@@ -8099,38 +8122,41 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicit typed number to integer or float literal.
// works when the number is known
if (value->value.special == ConstValSpecialStatic) {
- if (actual_type->id == TypeTableEntryIdInt && expected_type->id == TypeTableEntryIdComptimeInt) {
+ if (actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) {
return ImplicitCastMatchResultYes;
- } else if (actual_type->id == TypeTableEntryIdFloat && expected_type->id == TypeTableEntryIdComptimeFloat) {
+ } else if (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat) {
return ImplicitCastMatchResultYes;
}
}
// implicit union to its enum tag type
- if (expected_type->id == TypeTableEntryIdEnum && actual_type->id == TypeTableEntryIdUnion &&
+ if (wanted_type->id == TypeTableEntryIdEnum && actual_type->id == TypeTableEntryIdUnion &&
(actual_type->data.unionation.decl_node->data.container_decl.auto_enum ||
actual_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
{
type_ensure_zero_bits_known(ira->codegen, actual_type);
- if (actual_type->data.unionation.tag_type == expected_type) {
+ if (actual_type->data.unionation.tag_type == wanted_type) {
return ImplicitCastMatchResultYes;
}
}
// implicit enum to union which has the enum as the tag type
- if (expected_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
- (expected_type->data.unionation.decl_node->data.container_decl.auto_enum ||
- expected_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
+ if (wanted_type->id == TypeTableEntryIdUnion && actual_type->id == TypeTableEntryIdEnum &&
+ (wanted_type->data.unionation.decl_node->data.container_decl.auto_enum ||
+ wanted_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr))
{
- type_ensure_zero_bits_known(ira->codegen, expected_type);
- if (expected_type->data.unionation.tag_type == actual_type) {
+ type_ensure_zero_bits_known(ira->codegen, wanted_type);
+ if (wanted_type->data.unionation.tag_type == actual_type) {
return ImplicitCastMatchResultYes;
}
}
// implicit enum to &const union which has the enum as the tag type
- if (actual_type->id == TypeTableEntryIdEnum && expected_type->id == TypeTableEntryIdPointer) {
- TypeTableEntry *union_type = expected_type->data.pointer.child_type;
+ if (actual_type->id == TypeTableEntryIdEnum &&
+ wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle)
+ {
+ TypeTableEntry *union_type = wanted_type->data.pointer.child_type;
if (union_type->data.unionation.decl_node->data.container_decl.auto_enum ||
union_type->data.unionation.decl_node->data.container_decl.init_arg_expr != nullptr)
{
@@ -8141,6 +8167,17 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
}
}
+ // implicit T to *T where T is zero bits
+ if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type, source_node, false).id == ConstCastResultIdOk)
+ {
+ type_ensure_zero_bits_known(ira->codegen, actual_type);
+ if (!type_has_bits(actual_type)) {
+ return ImplicitCastMatchResultYes;
+ }
+ }
+
// implicit undefined literal to anything
if (actual_type->id == TypeTableEntryIdUndefined) {
return ImplicitCastMatchResultYes;
@@ -8149,7 +8186,11 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira,
// implicitly take a const pointer to something
if (!type_requires_comptime(actual_type)) {
TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (types_match_const_cast_only(ira, expected_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) {
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, wanted_type, const_ptr_actual,
+ source_node, false).id == ConstCastResultIdOk)
+ {
return ImplicitCastMatchResultYes;
}
}
@@ -8390,9 +8431,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
TypeTableEntry *cur_payload_type = cur_type->data.error_union.payload_type;
bool const_cast_prev = types_match_const_cast_only(ira, prev_payload_type, cur_payload_type,
- source_node).id == ConstCastResultIdOk;
+ source_node, false).id == ConstCastResultIdOk;
bool const_cast_cur = types_match_const_cast_only(ira, cur_payload_type, prev_payload_type,
- source_node).id == ConstCastResultIdOk;
+ source_node, false).id == ConstCastResultIdOk;
if (const_cast_prev || const_cast_cur) {
if (const_cast_cur) {
@@ -8479,11 +8520,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
continue;
}
- if (types_match_const_cast_only(ira, prev_type, cur_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, prev_type, cur_type, source_node, false).id == ConstCastResultIdOk) {
continue;
}
- if (types_match_const_cast_only(ira, cur_type, prev_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, cur_type, prev_type, source_node, false).id == ConstCastResultIdOk) {
prev_inst = cur_inst;
continue;
}
@@ -8506,13 +8547,15 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (prev_type->id == TypeTableEntryIdErrorUnion &&
- types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, prev_type->data.error_union.payload_type, cur_type,
+ source_node, false).id == ConstCastResultIdOk)
{
continue;
}
if (cur_type->id == TypeTableEntryIdErrorUnion &&
- types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, cur_type->data.error_union.payload_type, prev_type,
+ source_node, false).id == ConstCastResultIdOk)
{
if (err_set_type != nullptr) {
TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
@@ -8533,14 +8576,16 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
continue;
}
- if (prev_type->id == TypeTableEntryIdMaybe &&
- types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type, source_node).id == ConstCastResultIdOk)
+ if (prev_type->id == TypeTableEntryIdOptional &&
+ types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type,
+ source_node, false).id == ConstCastResultIdOk)
{
continue;
}
- if (cur_type->id == TypeTableEntryIdMaybe &&
- types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type, source_node).id == ConstCastResultIdOk)
+ if (cur_type->id == TypeTableEntryIdOptional &&
+ types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type,
+ source_node, false).id == ConstCastResultIdOk)
{
prev_inst = cur_inst;
continue;
@@ -8577,8 +8622,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray &&
- cur_type->data.array.len != prev_type->data.array.len &&
- types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ cur_type->data.array.len != prev_type->data.array.len &&
+ types_match_const_cast_only(ira, cur_type->data.array.child_type, prev_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = true;
prev_inst = cur_inst;
@@ -8586,8 +8632,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
}
if (cur_type->id == TypeTableEntryIdArray && prev_type->id == TypeTableEntryIdArray &&
- cur_type->data.array.len != prev_type->data.array.len &&
- types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ cur_type->data.array.len != prev_type->data.array.len &&
+ types_match_const_cast_only(ira, prev_type->data.array.child_type, cur_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = true;
continue;
@@ -8596,8 +8643,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (cur_type->id == TypeTableEntryIdArray && is_slice(prev_type) &&
(prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
cur_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
- cur_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira,
+ prev_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
+ cur_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
convert_to_const_slice = false;
continue;
@@ -8606,8 +8654,9 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
if (prev_type->id == TypeTableEntryIdArray && is_slice(cur_type) &&
(cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const ||
prev_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
- prev_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira,
+ cur_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.child_type,
+ prev_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk)
{
prev_inst = cur_inst;
convert_to_const_slice = false;
@@ -8692,7 +8741,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
ir_add_error_node(ira, source_node,
buf_sprintf("unable to make maybe out of number literal"));
return ira->codegen->builtin_types.entry_invalid;
- } else if (prev_inst->value.type->id == TypeTableEntryIdMaybe) {
+ } else if (prev_inst->value.type->id == TypeTableEntryIdOptional) {
return prev_inst->value.type;
} else {
return get_maybe_type(ira->codegen, prev_inst->value.type);
@@ -8735,10 +8784,12 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
zig_unreachable();
case CastOpErrSet:
case CastOpBitCast:
+ case CastOpPtrOfArrayToSlice:
zig_panic("TODO");
case CastOpNoop:
{
- copy_const_val(const_val, other_val, other_val->special == ConstValSpecialStatic);
+ bool same_global_refs = other_val->special == ConstValSpecialStatic;
+ copy_const_val(const_val, other_val, same_global_refs);
const_val->type = new_type;
break;
}
@@ -8804,7 +8855,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op,
static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
TypeTableEntry *wanted_type, CastOp cast_op, bool need_alloca)
{
- if (value->value.special != ConstValSpecialRuntime &&
+ if ((instr_is_comptime(value) || !type_has_bits(wanted_type)) &&
cast_op != CastOpResizeSlice && cast_op != CastOpBytesToSlice)
{
IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
@@ -8822,6 +8873,63 @@ static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+static IrInstruction *ir_resolve_ptr_of_array_to_unknown_len_ptr(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, TypeTableEntry *wanted_type)
+{
+ assert(value->value.type->id == TypeTableEntryIdPointer);
+ wanted_type = adjust_ptr_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment);
+
+ if (instr_is_comptime(value)) {
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ if (pointee->special != ConstValSpecialRuntime) {
+ IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, wanted_type);
+ result->value.type = wanted_type;
+ result->value.data.x_ptr.special = ConstPtrSpecialBaseArray;
+ result->value.data.x_ptr.mut = value->value.data.x_ptr.mut;
+ result->value.data.x_ptr.data.base_array.array_val = pointee;
+ result->value.data.x_ptr.data.base_array.elem_index = 0;
+ result->value.data.x_ptr.data.base_array.is_cstr = false;
+ return result;
+ }
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpBitCast);
+ result->value.type = wanted_type;
+ return result;
+}
+
+static IrInstruction *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruction *source_instr,
+ IrInstruction *value, TypeTableEntry *wanted_type)
+{
+ wanted_type = adjust_slice_align(ira->codegen, wanted_type, value->value.type->data.pointer.alignment);
+
+ if (instr_is_comptime(value)) {
+ ConstExprValue *pointee = const_ptr_pointee(ira->codegen, &value->value);
+ if (pointee->special != ConstValSpecialRuntime) {
+ assert(value->value.type->id == TypeTableEntryIdPointer);
+ TypeTableEntry *array_type = value->value.type->data.pointer.child_type;
+ assert(is_slice(wanted_type));
+ bool is_const = wanted_type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const;
+
+ IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
+ source_instr->source_node, wanted_type);
+ init_const_slice(ira->codegen, &result->value, pointee, 0, array_type->data.array.len, is_const);
+ result->value.data.x_struct.fields[slice_ptr_index].data.x_ptr.mut =
+ value->value.data.x_ptr.mut;
+ result->value.type = wanted_type;
+ return result;
+ }
+ }
+
+ IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
+ wanted_type, value, CastOpPtrOfArrayToSlice);
+ result->value.type = wanted_type;
+ ir_add_alloca(ira, result, wanted_type);
+ return result;
+}
+
static bool is_container(TypeTableEntry *type) {
return type->id == TypeTableEntryIdStruct ||
type->id == TypeTableEntryIdEnum ||
@@ -9115,7 +9223,7 @@ static FnTableEntry *ir_resolve_fn(IrAnalyze *ira, IrInstruction *fn_value) {
}
static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) {
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
if (instr_is_comptime(value)) {
TypeTableEntry *payload_type = wanted_type->data.maybe.child_type;
@@ -9129,15 +9237,19 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc
IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb,
source_instr->scope, source_instr->source_node);
- const_instruction->base.value.type = wanted_type;
const_instruction->base.value.special = ConstValSpecialStatic;
- const_instruction->base.value.data.x_maybe = val;
+ if (get_codegen_ptr_type(wanted_type) != nullptr) {
+ copy_const_val(&const_instruction->base.value, val, val->data.x_ptr.mut == ConstPtrMutComptimeConst);
+ } else {
+ const_instruction->base.value.data.x_optional = val;
+ }
+ const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
}
IrInstruction *result = ir_build_maybe_wrap(&ira->new_irb, source_instr->scope, source_instr->source_node, value);
result->value.type = wanted_type;
- result->value.data.rh_maybe = RuntimeHintMaybeNonNull;
+ result->value.data.rh_maybe = RuntimeHintOptionalNonNull;
ir_add_alloca(ira, result, wanted_type);
return result;
}
@@ -9279,16 +9391,21 @@ static IrInstruction *ir_analyze_cast_ref(IrAnalyze *ira, IrInstruction *source_
}
static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) {
- assert(wanted_type->id == TypeTableEntryIdMaybe);
+ assert(wanted_type->id == TypeTableEntryIdOptional);
assert(instr_is_comptime(value));
ConstExprValue *val = ir_resolve_const(ira, value, UndefBad);
assert(val);
IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb, source_instr->scope, source_instr->source_node);
- const_instruction->base.value.type = wanted_type;
const_instruction->base.value.special = ConstValSpecialStatic;
- const_instruction->base.value.data.x_maybe = nullptr;
+ if (get_codegen_ptr_type(wanted_type) != nullptr) {
+ const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
+ const_instruction->base.value.data.x_ptr.data.hard_coded_addr.addr = 0;
+ } else {
+ const_instruction->base.value.data.x_optional = nullptr;
+ }
+ const_instruction->base.value.type = wanted_type;
return &const_instruction->base;
}
@@ -9300,9 +9417,19 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi
if (value->id == IrInstructionIdLoadPtr) {
IrInstructionLoadPtr *load_ptr_inst = (IrInstructionLoadPtr *) value;
+
if (load_ptr_inst->ptr->value.type->data.pointer.is_const) {
return load_ptr_inst->ptr;
}
+
+ type_ensure_zero_bits_known(ira->codegen, value->value.type);
+ if (type_is_invalid(value->value.type)) {
+ return ira->codegen->invalid_instruction;
+ }
+
+ if (!type_has_bits(value->value.type)) {
+ return load_ptr_inst->ptr;
+ }
}
if (instr_is_comptime(value)) {
@@ -9810,7 +9937,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit match or non-const to const
- if (types_match_const_cast_only(ira, wanted_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type, actual_type, source_node, false).id == ConstCastResultIdOk) {
return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false);
}
@@ -9856,7 +9983,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
}
@@ -9874,7 +10002,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *array_type = actual_type->data.pointer.child_type;
if ((ptr_type->data.pointer.is_const || array_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, array_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type);
}
@@ -9890,7 +10019,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.pointer.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.pointer.child_type, value);
if (type_is_invalid(cast1->value.type))
@@ -9905,7 +10035,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit cast from [N]T to ?[]const N
- if (wanted_type->id == TypeTableEntryIdMaybe &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
is_slice(wanted_type->data.maybe.child_type) &&
actual_type->id == TypeTableEntryIdArray)
{
@@ -9913,7 +10043,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.maybe.child_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value);
if (type_is_invalid(cast1->value.type))
@@ -9973,10 +10104,44 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from child type of maybe type to maybe type
- if (wanted_type->id == TypeTableEntryIdMaybe) {
+ // explicit *[N]T to [*]T
+ if (wanted_type->id == TypeTableEntryIdPointer &&
+ wanted_type->data.pointer.ptr_len == PtrLenUnknown &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray &&
+ actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ return ir_resolve_ptr_of_array_to_unknown_len_ptr(ira, source_instr, value, wanted_type);
+ }
+
+ // explicit *[N]T to []T
+ if (is_slice(wanted_type) &&
+ actual_type->id == TypeTableEntryIdPointer &&
+ actual_type->data.pointer.ptr_len == PtrLenSingle &&
+ actual_type->data.pointer.child_type->id == TypeTableEntryIdArray)
+ {
+ TypeTableEntry *slice_ptr_type = wanted_type->data.structure.fields[slice_ptr_index].type_entry;
+ assert(slice_ptr_type->id == TypeTableEntryIdPointer);
+ if (types_match_const_cast_only(ira, slice_ptr_type->data.pointer.child_type,
+ actual_type->data.pointer.child_type->data.array.child_type, source_node,
+ !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type);
+ }
+ }
+
+
+ // explicit cast from T to ?T
+ // note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism
+ if (wanted_type->id == TypeTableEntryIdOptional) {
TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type;
- if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node,
+ false).id == ConstCastResultIdOk)
+ {
return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type);
} else if (actual_type->id == TypeTableEntryIdComptimeInt ||
actual_type->id == TypeTableEntryIdComptimeFloat)
@@ -10003,7 +10168,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit cast from null literal to maybe type
- if (wanted_type->id == TypeTableEntryIdMaybe &&
+ if (wanted_type->id == TypeTableEntryIdOptional &&
actual_type->id == TypeTableEntryIdNull)
{
return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type);
@@ -10011,7 +10176,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// explicit cast from child type of error type to error type
if (wanted_type->id == TypeTableEntryIdErrorUnion) {
- if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type,
+ source_node, false).id == ConstCastResultIdOk)
+ {
return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type);
} else if (actual_type->id == TypeTableEntryIdComptimeInt ||
actual_type->id == TypeTableEntryIdComptimeFloat)
@@ -10024,7 +10191,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
- // explicit cast from [N]T to %[]const T
+ // explicit cast from [N]T to E![]const T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
is_slice(wanted_type->data.error_union.payload_type) &&
actual_type->id == TypeTableEntryIdArray)
@@ -10033,7 +10200,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
wanted_type->data.error_union.payload_type->data.structure.fields[slice_ptr_index].type_entry;
assert(ptr_type->id == TypeTableEntryIdPointer);
if ((ptr_type->data.pointer.is_const || actual_type->data.array.len == 0) &&
- types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node).id == ConstCastResultIdOk)
+ types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type,
+ source_node, false).id == ConstCastResultIdOk)
{
IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value);
if (type_is_invalid(cast1->value.type))
@@ -10054,13 +10222,13 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type);
}
- // explicit cast from T to %?T
+ // explicit cast from T to E!?T
if (wanted_type->id == TypeTableEntryIdErrorUnion &&
- wanted_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe &&
- actual_type->id != TypeTableEntryIdMaybe)
+ wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional &&
+ actual_type->id != TypeTableEntryIdOptional)
{
TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type;
- if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk ||
+ if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk ||
actual_type->id == TypeTableEntryIdNull ||
actual_type->id == TypeTableEntryIdComptimeInt ||
actual_type->id == TypeTableEntryIdComptimeFloat)
@@ -10078,7 +10246,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
// explicit cast from number literal to another type
- // explicit cast from number literal to &const integer
+ // explicit cast from number literal to *const integer
if (actual_type->id == TypeTableEntryIdComptimeFloat ||
actual_type->id == TypeTableEntryIdComptimeInt)
{
@@ -10212,7 +10380,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
TypeTableEntry *array_type = wanted_type->data.pointer.child_type;
if (array_type->id == TypeTableEntryIdArray && array_type->data.array.len == 1 &&
types_match_const_cast_only(ira, array_type->data.array.child_type,
- actual_type->data.pointer.child_type, source_node).id == ConstCastResultIdOk)
+ actual_type->data.pointer.child_type, source_node,
+ !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
{
if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) {
ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment"));
@@ -10228,6 +10397,20 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
}
}
+ // explicit cast from T to *T where T is zero bits
+ if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle &&
+ types_match_const_cast_only(ira, wanted_type->data.pointer.child_type,
+ actual_type, source_node, !wanted_type->data.pointer.is_const).id == ConstCastResultIdOk)
+ {
+ type_ensure_zero_bits_known(ira->codegen, actual_type);
+ if (type_is_invalid(actual_type)) {
+ return ira->codegen->invalid_instruction;
+ }
+ if (!type_has_bits(actual_type)) {
+ return ir_get_ref(ira, source_instr, value, false, false);
+ }
+ }
+
// explicit cast from undefined to anything
if (actual_type->id == TypeTableEntryIdUndefined) {
@@ -10237,7 +10420,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst
// explicit cast from something to const pointer of it
if (!type_requires_comptime(actual_type)) {
TypeTableEntry *const_ptr_actual = get_pointer_to_type(ira->codegen, actual_type, true);
- if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node).id == ConstCastResultIdOk) {
+ if (types_match_const_cast_only(ira, wanted_type, const_ptr_actual, source_node, false).id == ConstCastResultIdOk) {
return ir_analyze_cast_ref(ira, source_instr, value, wanted_type);
}
}
@@ -10302,6 +10485,7 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope,
source_instruction->source_node, child_type);
copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst);
+ result->value.type = child_type;
return result;
}
}
@@ -10619,6 +10803,16 @@ static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) {
}
}
+static bool optional_value_is_null(ConstExprValue *val) {
+ assert(val->special == ConstValSpecialStatic);
+ if (get_codegen_ptr_type(val->type) != nullptr) {
+ return val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr &&
+ val->data.x_ptr.data.hard_coded_addr.addr == 0;
+ } else {
+ return val->data.x_optional == nullptr;
+ }
+}
+
static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
IrInstruction *op1 = bin_op_instruction->op1->other;
IrInstruction *op2 = bin_op_instruction->op2->other;
@@ -10627,8 +10821,8 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
IrBinOp op_id = bin_op_instruction->op_id;
bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq);
if (is_equality_cmp &&
- ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdMaybe) ||
- (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdMaybe) ||
+ ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdOptional) ||
+ (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdOptional) ||
(op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull)))
{
if (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull) {
@@ -10648,7 +10842,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
ConstExprValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad);
if (!maybe_val)
return ira->codegen->builtin_types.entry_invalid;
- bool is_null = (maybe_val->data.x_maybe == nullptr);
+ bool is_null = optional_value_is_null(maybe_val);
ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base);
out_val->data.x_bool = (op_id == IrBinOpCmpEq) ? is_null : !is_null;
return ira->codegen->builtin_types.entry_bool;
@@ -10797,7 +10991,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
case TypeTableEntryIdStruct:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdUnion:
ir_add_error_node(ira, source_node,
@@ -11135,7 +11329,13 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp *
static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
IrInstruction *op1 = bin_op_instruction->op1->other;
+ if (type_is_invalid(op1->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *op2 = bin_op_instruction->op2->other;
+ if (type_is_invalid(op2->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrBinOp op_id = bin_op_instruction->op_id;
// look for pointer math
@@ -11621,61 +11821,6 @@ static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructi
zig_unreachable();
}
-enum VarClassRequired {
- VarClassRequiredAny,
- VarClassRequiredConst,
- VarClassRequiredIllegal,
-};
-
-static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) {
- switch (type_entry->id) {
- case TypeTableEntryIdInvalid:
- zig_unreachable();
- case TypeTableEntryIdUnreachable:
- return VarClassRequiredIllegal;
- case TypeTableEntryIdBool:
- case TypeTableEntryIdInt:
- case TypeTableEntryIdFloat:
- case TypeTableEntryIdVoid:
- case TypeTableEntryIdErrorSet:
- case TypeTableEntryIdFn:
- case TypeTableEntryIdPromise:
- return VarClassRequiredAny;
- case TypeTableEntryIdComptimeFloat:
- case TypeTableEntryIdComptimeInt:
- case TypeTableEntryIdUndefined:
- case TypeTableEntryIdBlock:
- case TypeTableEntryIdNull:
- case TypeTableEntryIdOpaque:
- case TypeTableEntryIdMetaType:
- case TypeTableEntryIdNamespace:
- case TypeTableEntryIdBoundFn:
- case TypeTableEntryIdArgTuple:
- return VarClassRequiredConst;
-
- case TypeTableEntryIdPointer:
- if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) {
- return VarClassRequiredAny;
- } else {
- return get_var_class_required(type_entry->data.pointer.child_type);
- }
- case TypeTableEntryIdArray:
- return get_var_class_required(type_entry->data.array.child_type);
- case TypeTableEntryIdMaybe:
- return get_var_class_required(type_entry->data.maybe.child_type);
- case TypeTableEntryIdErrorUnion:
- return get_var_class_required(type_entry->data.error_union.payload_type);
-
- case TypeTableEntryIdStruct:
- case TypeTableEntryIdEnum:
- case TypeTableEntryIdUnion:
- // TODO check the fields of these things and make sure that they don't recursively
- // contain any of the other variable classes
- return VarClassRequiredAny;
- }
- zig_unreachable();
-}
-
static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstructionDeclVar *decl_var_instruction) {
VariableTableEntry *var = decl_var_instruction->var;
@@ -11710,36 +11855,41 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc
if (type_is_invalid(result_type)) {
result_type = ira->codegen->builtin_types.entry_invalid;
} else {
- switch (get_var_class_required(result_type)) {
- case VarClassRequiredIllegal:
+ type_ensure_zero_bits_known(ira->codegen, result_type);
+ if (type_is_invalid(result_type)) {
+ result_type = ira->codegen->builtin_types.entry_invalid;
+ }
+ }
+
+ if (!type_is_invalid(result_type)) {
+ if (result_type->id == TypeTableEntryIdUnreachable ||
+ result_type->id == TypeTableEntryIdOpaque)
+ {
+ ir_add_error_node(ira, source_node,
+ buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name)));
+ result_type = ira->codegen->builtin_types.entry_invalid;
+ } else if (type_requires_comptime(result_type)) {
+ var_class_requires_const = true;
+ if (!var->src_is_const && !is_comptime_var) {
ir_add_error_node(ira, source_node,
- buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name)));
+ buf_sprintf("variable of type '%s' must be const or comptime",
+ buf_ptr(&result_type->name)));
result_type = ira->codegen->builtin_types.entry_invalid;
- break;
- case VarClassRequiredConst:
+ }
+ } else {
+ if (casted_init_value->value.special == ConstValSpecialStatic &&
+ casted_init_value->value.type->id == TypeTableEntryIdFn &&
+ casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
+ {
var_class_requires_const = true;
if (!var->src_is_const && !is_comptime_var) {
- ir_add_error_node(ira, source_node,
- buf_sprintf("variable of type '%s' must be const or comptime",
- buf_ptr(&result_type->name)));
+ ErrorMsg *msg = ir_add_error_node(ira, source_node,
+ buf_sprintf("functions marked inline must be stored in const or comptime var"));
+ AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node;
+ add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
result_type = ira->codegen->builtin_types.entry_invalid;
}
- break;
- case VarClassRequiredAny:
- if (casted_init_value->value.special == ConstValSpecialStatic &&
- casted_init_value->value.type->id == TypeTableEntryIdFn &&
- casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways)
- {
- var_class_requires_const = true;
- if (!var->src_is_const && !is_comptime_var) {
- ErrorMsg *msg = ir_add_error_node(ira, source_node,
- buf_sprintf("functions marked inline must be stored in const or comptime var"));
- AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node;
- add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here"));
- result_type = ira->codegen->builtin_types.entry_invalid;
- }
- }
- break;
+ }
}
}
@@ -11914,7 +12064,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdNamespace:
@@ -11938,7 +12088,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
zig_panic("TODO export const value of type %s", buf_ptr(&target->value.type->name));
@@ -11965,22 +12115,24 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) {
static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira,
IrInstructionErrorReturnTrace *instruction)
{
- if (instruction->nullable == IrInstructionErrorReturnTrace::Null) {
+ if (instruction->optional == IrInstructionErrorReturnTrace::Null) {
TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen);
- TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type);
+ TypeTableEntry *optional_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type);
if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) {
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_maybe = nullptr;
- return nullable_type;
+ assert(get_codegen_ptr_type(optional_type) != nullptr);
+ out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr;
+ out_val->data.x_ptr.data.hard_coded_addr.addr = 0;
+ return optional_type;
}
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, instruction->nullable);
+ instruction->base.source_node, instruction->optional);
ir_link_new_instruction(new_instruction, &instruction->base);
- return nullable_type;
+ return optional_type;
} else {
assert(ira->codegen->have_err_ret_tracing);
IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope,
- instruction->base.source_node, instruction->nullable);
+ instruction->base.source_node, instruction->optional);
ir_link_new_instruction(new_instruction, &instruction->base);
return get_ptr_to_stack_trace_type(ira->codegen);
}
@@ -12620,6 +12772,10 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
inst_fn_type_id.return_type = specified_return_type;
}
+ type_ensure_zero_bits_known(ira->codegen, specified_return_type);
+ if (type_is_invalid(specified_return_type))
+ return ira->codegen->builtin_types.entry_invalid;
+
if (type_requires_comptime(specified_return_type)) {
// Throw out our work and call the function as if it were comptime.
return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, FnInlineAuto);
@@ -12854,6 +13010,12 @@ static TypeTableEntry *ir_analyze_dereference(IrAnalyze *ira, IrInstructionUnOp
if (type_is_invalid(ptr_type)) {
return ira->codegen->builtin_types.entry_invalid;
} else if (ptr_type->id == TypeTableEntryIdPointer) {
+ if (ptr_type->data.pointer.ptr_len == PtrLenUnknown) {
+ ir_add_error_node(ira, un_op_instruction->base.source_node,
+ buf_sprintf("index syntax required for unknown-length pointer type '%s'",
+ buf_ptr(&ptr_type->name)));
+ return ira->codegen->builtin_types.entry_invalid;
+ }
child_type = ptr_type->data.pointer.child_type;
} else {
ir_add_error_node(ira, un_op_instruction->base.source_node,
@@ -12902,7 +13064,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -12921,7 +13083,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op
case TypeTableEntryIdUnreachable:
case TypeTableEntryIdOpaque:
ir_add_error_node(ira, un_op_instruction->base.source_node,
- buf_sprintf("type '%s' not nullable", buf_ptr(&type_entry->name)));
+ buf_sprintf("type '%s' not optional", buf_ptr(&type_entry->name)));
return ira->codegen->builtin_types.entry_invalid;
}
zig_unreachable();
@@ -13013,7 +13175,7 @@ static TypeTableEntry *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstructio
return ir_analyze_negation(ira, un_op_instruction);
case IrUnOpDereference:
return ir_analyze_dereference(ira, un_op_instruction);
- case IrUnOpMaybe:
+ case IrUnOpOptional:
return ir_analyze_maybe(ira, un_op_instruction);
}
zig_unreachable();
@@ -13220,6 +13382,13 @@ static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, ui
ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count);
}
+static TypeTableEntry *adjust_slice_align(CodeGen *g, TypeTableEntry *slice_type, uint32_t new_align) {
+ assert(is_slice(slice_type));
+ TypeTableEntry *ptr_type = adjust_ptr_align(g, slice_type->data.structure.fields[slice_ptr_index].type_entry,
+ new_align);
+ return get_slice_type(g, ptr_type);
+}
+
static TypeTableEntry *adjust_ptr_len(CodeGen *g, TypeTableEntry *ptr_type, PtrLen ptr_len) {
assert(ptr_type->id == TypeTableEntryIdPointer);
return get_pointer_to_type_extra(g,
@@ -13794,10 +13963,14 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
ir_link_new_instruction(result, &field_ptr_instruction->base);
return result->value.type;
}
- } else if (container_type->id == TypeTableEntryIdArray) {
+ } else if (is_array_ref(container_type)) {
if (buf_eql_str(field_name, "len")) {
ConstExprValue *len_val = create_const_vals(1);
- init_const_usize(ira->codegen, len_val, container_type->data.array.len);
+ if (container_type->id == TypeTableEntryIdPointer) {
+ init_const_usize(ira->codegen, len_val, container_type->data.pointer.child_type->data.array.len);
+ } else {
+ init_const_usize(ira->codegen, len_val, container_type->data.array.len);
+ }
TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize;
bool ptr_is_const = true;
@@ -14048,7 +14221,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
buf_ptr(&child_type->name), buf_ptr(field_name)));
return ira->codegen->builtin_types.entry_invalid;
}
- } else if (child_type->id == TypeTableEntryIdMaybe) {
+ } else if (child_type->id == TypeTableEntryIdOptional) {
if (buf_eql_str(field_name, "Child")) {
bool ptr_is_const = true;
bool ptr_is_volatile = false;
@@ -14141,6 +14314,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
static TypeTableEntry *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstructionLoadPtr *load_ptr_instruction) {
IrInstruction *ptr = load_ptr_instruction->ptr->other;
+ if (type_is_invalid(ptr->value.type))
+ return ira->codegen->builtin_types.entry_invalid;
+
IrInstruction *result = ir_get_deref(ira, &load_ptr_instruction->base, ptr);
ir_link_new_instruction(result, &load_ptr_instruction->base);
assert(result->value.type);
@@ -14229,7 +14405,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14497,7 +14673,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira,
case TypeTableEntryIdStruct:
case TypeTableEntryIdComptimeFloat:
case TypeTableEntryIdComptimeInt:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14605,7 +14781,7 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira,
case TypeTableEntryIdStruct:
case TypeTableEntryIdComptimeFloat:
case TypeTableEntryIdComptimeInt:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14676,7 +14852,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira,
case TypeTableEntryIdPointer:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -14700,14 +14876,14 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn
TypeTableEntry *type_entry = value->value.type;
- if (type_entry->id == TypeTableEntryIdMaybe) {
+ if (type_entry->id == TypeTableEntryIdOptional) {
if (instr_is_comptime(value)) {
ConstExprValue *maybe_val = ir_resolve_const(ira, value, UndefBad);
if (!maybe_val)
return ira->codegen->builtin_types.entry_invalid;
ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = (maybe_val->data.x_maybe != nullptr);
+ out_val->data.x_bool = !optional_value_is_null(maybe_val);
return ira->codegen->builtin_types.entry_bool;
}
@@ -14725,7 +14901,7 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn
}
static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
- IrInstructionUnwrapMaybe *unwrap_maybe_instruction)
+ IrInstructionUnwrapOptional *unwrap_maybe_instruction)
{
IrInstruction *value = unwrap_maybe_instruction->value->other;
if (type_is_invalid(value->value.type))
@@ -14737,25 +14913,9 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
TypeTableEntry *type_entry = ptr_type->data.pointer.child_type;
if (type_is_invalid(type_entry)) {
return ira->codegen->builtin_types.entry_invalid;
- } else if (type_entry->id == TypeTableEntryIdMetaType) {
- // surprise! actually this is just ??T not an unwrap maybe instruction
- ConstExprValue *ptr_val = const_ptr_pointee(ira->codegen, &value->value);
- assert(ptr_val->type->id == TypeTableEntryIdMetaType);
- TypeTableEntry *child_type = ptr_val->data.x_type;
-
- type_ensure_zero_bits_known(ira->codegen, child_type);
- TypeTableEntry *layer1 = get_maybe_type(ira->codegen, child_type);
- TypeTableEntry *layer2 = get_maybe_type(ira->codegen, layer1);
-
- IrInstruction *const_instr = ir_build_const_type(&ira->new_irb, unwrap_maybe_instruction->base.scope,
- unwrap_maybe_instruction->base.source_node, layer2);
- IrInstruction *result_instr = ir_get_ref(ira, &unwrap_maybe_instruction->base, const_instr,
- ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile);
- ir_link_new_instruction(result_instr, &unwrap_maybe_instruction->base);
- return result_instr->value.type;
- } else if (type_entry->id != TypeTableEntryIdMaybe) {
+ } else if (type_entry->id != TypeTableEntryIdOptional) {
ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node,
- buf_sprintf("expected nullable type, found '%s'", buf_ptr(&type_entry->name)));
+ buf_sprintf("expected optional type, found '%s'", buf_ptr(&type_entry->name)));
return ira->codegen->builtin_types.entry_invalid;
}
TypeTableEntry *child_type = type_entry->data.maybe.child_type;
@@ -14771,13 +14931,18 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira,
ConstExprValue *maybe_val = const_ptr_pointee(ira->codegen, val);
if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) {
- if (!maybe_val->data.x_maybe) {
+ if (optional_value_is_null(maybe_val)) {
ir_add_error(ira, &unwrap_maybe_instruction->base, buf_sprintf("unable to unwrap null"));
return ira->codegen->builtin_types.entry_invalid;
}
ConstExprValue *out_val = ir_build_const_from(ira, &unwrap_maybe_instruction->base);
out_val->data.x_ptr.special = ConstPtrSpecialRef;
- out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_maybe;
+ out_val->data.x_ptr.mut = val->data.x_ptr.mut;
+ if (type_is_codegen_pointer(child_type)) {
+ out_val->data.x_ptr.data.ref.pointee = maybe_val;
+ } else {
+ out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_optional;
+ }
return result_type;
}
}
@@ -15101,7 +15266,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira,
case TypeTableEntryIdStruct:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdBlock:
case TypeTableEntryIdBoundFn:
case TypeTableEntryIdArgTuple:
@@ -15622,7 +15787,7 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_
case TypeTableEntryIdComptimeInt:
case TypeTableEntryIdUndefined:
case TypeTableEntryIdNull:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdUnion:
@@ -15743,11 +15908,6 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn
return out_val->type;
}
- if (!target->value.type->data.enumeration.generate_name_table) {
- target->value.type->data.enumeration.generate_name_table = true;
- ira->codegen->name_table_enums.append(target->value.type);
- }
-
IrInstruction *result = ir_build_tag_name(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, target);
ir_link_new_instruction(result, &instruction->base);
@@ -16140,12 +16300,12 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
0, 0);
fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr));
if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) {
- fn_def_fields[6].data.x_maybe = create_const_vals(1);
+ fn_def_fields[6].data.x_optional = create_const_vals(1);
ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name);
- init_const_slice(ira->codegen, fn_def_fields[6].data.x_maybe, lib_name, 0, buf_len(fn_node->lib_name), true);
+ init_const_slice(ira->codegen, fn_def_fields[6].data.x_optional, lib_name, 0, buf_len(fn_node->lib_name), true);
+ } else {
+ fn_def_fields[6].data.x_optional = nullptr;
}
- else
- fn_def_fields[6].data.x_maybe = nullptr;
// return_type: type
ensure_field_index(fn_def_val->type, "return_type", 7);
fn_def_fields[7].special = ConstValSpecialStatic;
@@ -16213,8 +16373,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop
return true;
}
-static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry)
-{
+static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry) {
assert(type_entry != nullptr);
assert(!type_is_invalid(type_entry));
@@ -16239,38 +16398,67 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
enum_field_val->data.x_struct.fields = inner_fields;
};
- const auto create_ptr_like_type_info = [ira](const char *name, TypeTableEntry *ptr_type_entry) {
+ const auto create_ptr_like_type_info = [ira](TypeTableEntry *ptr_type_entry) {
+ TypeTableEntry *attrs_type;
+ uint32_t size_enum_index;
+ if (is_slice(ptr_type_entry)) {
+ attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index].type_entry;
+ size_enum_index = 2;
+ } else if (ptr_type_entry->id == TypeTableEntryIdPointer) {
+ attrs_type = ptr_type_entry;
+ size_enum_index = (ptr_type_entry->data.pointer.ptr_len == PtrLenSingle) ? 0 : 1;
+ } else {
+ zig_unreachable();
+ }
+
+ TypeTableEntry *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer");
+ ensure_complete_type(ira->codegen, type_info_pointer_type);
+ assert(!type_is_invalid(type_info_pointer_type));
+
ConstExprValue *result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, name);
+ result->type = type_info_pointer_type;
- ConstExprValue *fields = create_const_vals(4);
+ ConstExprValue *fields = create_const_vals(5);
result->data.x_struct.fields = fields;
- // is_const: bool
- ensure_field_index(result->type, "is_const", 0);
+ // size: Size
+ ensure_field_index(result->type, "size", 0);
+ TypeTableEntry *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type);
+ ensure_complete_type(ira->codegen, type_info_pointer_size_type);
+ assert(!type_is_invalid(type_info_pointer_size_type));
fields[0].special = ConstValSpecialStatic;
- fields[0].type = ira->codegen->builtin_types.entry_bool;
- fields[0].data.x_bool = ptr_type_entry->data.pointer.is_const;
- // is_volatile: bool
- ensure_field_index(result->type, "is_volatile", 1);
+ fields[0].type = type_info_pointer_size_type;
+ bigint_init_unsigned(&fields[0].data.x_enum_tag, size_enum_index);
+
+ // is_const: bool
+ ensure_field_index(result->type, "is_const", 1);
fields[1].special = ConstValSpecialStatic;
fields[1].type = ira->codegen->builtin_types.entry_bool;
- fields[1].data.x_bool = ptr_type_entry->data.pointer.is_volatile;
- // alignment: u32
- ensure_field_index(result->type, "alignment", 2);
+ fields[1].data.x_bool = attrs_type->data.pointer.is_const;
+ // is_volatile: bool
+ ensure_field_index(result->type, "is_volatile", 2);
fields[2].special = ConstValSpecialStatic;
- fields[2].type = ira->codegen->builtin_types.entry_u32;
- bigint_init_unsigned(&fields[2].data.x_bigint, ptr_type_entry->data.pointer.alignment);
- // child: type
- ensure_field_index(result->type, "child", 3);
+ fields[2].type = ira->codegen->builtin_types.entry_bool;
+ fields[2].data.x_bool = attrs_type->data.pointer.is_volatile;
+ // alignment: u32
+ ensure_field_index(result->type, "alignment", 3);
fields[3].special = ConstValSpecialStatic;
- fields[3].type = ira->codegen->builtin_types.entry_type;
- fields[3].data.x_type = ptr_type_entry->data.pointer.child_type;
+ fields[3].type = ira->codegen->builtin_types.entry_u32;
+ bigint_init_unsigned(&fields[3].data.x_bigint, attrs_type->data.pointer.alignment);
+ // child: type
+ ensure_field_index(result->type, "child", 4);
+ fields[4].special = ConstValSpecialStatic;
+ fields[4].type = ira->codegen->builtin_types.entry_type;
+ fields[4].data.x_type = attrs_type->data.pointer.child_type;
return result;
};
+ if (type_entry == ira->codegen->builtin_types.entry_global_error_set) {
+ zig_panic("TODO implement @typeInfo for global error set");
+ }
+
ConstExprValue *result = nullptr;
switch (type_entry->id)
{
@@ -16339,7 +16527,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
}
case TypeTableEntryIdPointer:
{
- result = create_ptr_like_type_info("Pointer", type_entry);
+ result = create_ptr_like_type_info(type_entry);
break;
}
case TypeTableEntryIdArray:
@@ -16364,11 +16552,11 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
break;
}
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
{
result = create_const_vals(1);
result->special = ConstValSpecialStatic;
- result->type = ir_type_info_get_type(ira, "Nullable");
+ result->type = ir_type_info_get_type(ira, "Optional");
ConstExprValue *fields = create_const_vals(1);
result->data.x_struct.fields = fields;
@@ -16570,8 +16758,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField");
- for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++)
- {
+ for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) {
TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index];
ConstExprValue *union_field_val = &union_field_array->data.x_array.s_none.elements[union_field_index];
@@ -16582,12 +16769,11 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
inner_fields[1].special = ConstValSpecialStatic;
inner_fields[1].type = get_maybe_type(ira->codegen, type_info_enum_field_type);
- if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef)
- inner_fields[1].data.x_maybe = nullptr;
- else
- {
- inner_fields[1].data.x_maybe = create_const_vals(1);
- make_enum_field_val(inner_fields[1].data.x_maybe, union_field->enum_field, type_info_enum_field_type);
+ if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef) {
+ inner_fields[1].data.x_optional = nullptr;
+ } else {
+ inner_fields[1].data.x_optional = create_const_vals(1);
+ make_enum_field_val(inner_fields[1].data.x_optional, union_field->enum_field, type_info_enum_field_type);
}
inner_fields[2].special = ConstValSpecialStatic;
@@ -16612,15 +16798,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
case TypeTableEntryIdStruct:
{
if (type_entry->data.structure.is_slice) {
- Buf ptr_field_name = BUF_INIT;
- buf_init_from_str(&ptr_field_name, "ptr");
- TypeTableEntry *ptr_type = type_entry->data.structure.fields_by_name.get(&ptr_field_name)->type_entry;
- ensure_complete_type(ira->codegen, ptr_type);
- if (type_is_invalid(ptr_type))
- return nullptr;
- buf_deinit(&ptr_field_name);
-
- result = create_ptr_like_type_info("Slice", ptr_type);
+ result = create_ptr_like_type_info(type_entry);
break;
}
@@ -16651,8 +16829,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
init_const_slice(ira->codegen, &fields[1], struct_field_array, 0, struct_field_count, false);
- for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++)
- {
+ for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) {
TypeStructField *struct_field = &type_entry->data.structure.fields[struct_field_index];
ConstExprValue *struct_field_val = &struct_field_array->data.x_array.s_none.elements[struct_field_index];
@@ -16663,15 +16840,14 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t
inner_fields[1].special = ConstValSpecialStatic;
inner_fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_usize);
- if (!type_has_bits(struct_field->type_entry))
- inner_fields[1].data.x_maybe = nullptr;
- else
- {
+ if (!type_has_bits(struct_field->type_entry)) {
+ inner_fields[1].data.x_optional = nullptr;
+ } else {
size_t byte_offset = LLVMOffsetOfElement(ira->codegen->target_data_ref, type_entry->type_ref, struct_field->gen_index);
- inner_fields[1].data.x_maybe = create_const_vals(1);
- inner_fields[1].data.x_maybe->special = ConstValSpecialStatic;
- inner_fields[1].data.x_maybe->type = ira->codegen->builtin_types.entry_usize;
- bigint_init_unsigned(&inner_fields[1].data.x_maybe->data.x_bigint, byte_offset);
+ inner_fields[1].data.x_optional = create_const_vals(1);
+ inner_fields[1].data.x_optional->special = ConstValSpecialStatic;
+ inner_fields[1].data.x_optional->type = ira->codegen->builtin_types.entry_usize;
+ bigint_init_unsigned(&inner_fields[1].data.x_optional->data.x_bigint, byte_offset);
}
inner_fields[2].special = ConstValSpecialStatic;
@@ -17896,7 +18072,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc
case TypeTableEntryIdPromise:
case TypeTableEntryIdArray:
case TypeTableEntryIdStruct:
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
case TypeTableEntryIdErrorUnion:
case TypeTableEntryIdErrorSet:
case TypeTableEntryIdEnum:
@@ -18422,30 +18598,6 @@ static TypeTableEntry *ir_analyze_instruction_check_statement_is_void(IrAnalyze
return ira->codegen->builtin_types.entry_void;
}
-static TypeTableEntry *ir_analyze_instruction_can_implicit_cast(IrAnalyze *ira,
- IrInstructionCanImplicitCast *instruction)
-{
- IrInstruction *type_value = instruction->type_value->other;
- TypeTableEntry *type_entry = ir_resolve_type(ira, type_value);
- if (type_is_invalid(type_entry))
- return ira->codegen->builtin_types.entry_invalid;
-
- IrInstruction *target_value = instruction->target_value->other;
- if (type_is_invalid(target_value->value.type))
- return ira->codegen->builtin_types.entry_invalid;
-
- ImplicitCastMatchResult result = ir_types_match_with_implicit_cast(ira, type_entry, target_value->value.type,
- target_value);
-
- if (result == ImplicitCastMatchResultReportedError) {
- zig_panic("TODO refactor implicit cast tester to return bool without reporting errors");
- }
-
- ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
- out_val->data.x_bool = (result == ImplicitCastMatchResultYes);
- return ira->codegen->builtin_types.entry_bool;
-}
-
static TypeTableEntry *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstructionPanic *instruction) {
IrInstruction *msg = instruction->msg->other;
if (type_is_invalid(msg->value.type))
@@ -18484,7 +18636,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
old_align_bytes = fn_type_id.alignment;
fn_type_id.alignment = align_bytes;
result_type = get_fn_type(ira->codegen, &fn_type_id);
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdPointer)
{
TypeTableEntry *ptr_type = target_type->data.maybe.child_type;
@@ -18492,7 +18644,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3
TypeTableEntry *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes);
result_type = get_maybe_type(ira->codegen, better_ptr_type);
- } else if (target_type->id == TypeTableEntryIdMaybe &&
+ } else if (target_type->id == TypeTableEntryIdOptional &&
target_type->data.maybe.child_type->id == TypeTableEntryIdFn)
{
FnTypeId fn_type_id = target_type->data.maybe.child_type->data.fn.fn_type_id;
@@ -18650,7 +18802,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
return;
case TypeTableEntryIdStruct:
zig_panic("TODO buf_write_value_bytes struct type");
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
zig_panic("TODO buf_write_value_bytes maybe type");
case TypeTableEntryIdErrorUnion:
zig_panic("TODO buf_write_value_bytes error union");
@@ -18708,7 +18860,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue
zig_panic("TODO buf_read_value_bytes array type");
case TypeTableEntryIdStruct:
zig_panic("TODO buf_read_value_bytes struct type");
- case TypeTableEntryIdMaybe:
+ case TypeTableEntryIdOptional:
zig_panic("TODO buf_read_value_bytes maybe type");
case TypeTableEntryIdErrorUnion:
zig_panic("TODO buf_read_value_bytes error union");
@@ -18946,9 +19098,6 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr
ConstExprValue *val = ir_resolve_const(ira, target, UndefBad);
if (!val)
return ira->codegen->builtin_types.entry_invalid;
- if (target->value.type->id == TypeTableEntryIdMaybe) {
- val = val->data.x_maybe;
- }
if (val->type->id == TypeTableEntryIdPointer && val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) {
IrInstruction *result = ir_create_const(&ira->new_irb, instruction->base.scope,
instruction->base.source_node, usize);
@@ -18973,6 +19122,9 @@ static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstruc
if (child_type->id == TypeTableEntryIdUnreachable) {
ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed"));
return ira->codegen->builtin_types.entry_invalid;
+ } else if (child_type->id == TypeTableEntryIdOpaque && instruction->ptr_len == PtrLenUnknown) {
+ ir_add_error(ira, &instruction->base, buf_sprintf("unknown-length pointer to opaque"));
+ return ira->codegen->builtin_types.entry_invalid;
}
uint32_t align_bytes;
@@ -19624,7 +19776,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
case IrInstructionIdUnionInit:
case IrInstructionIdStructFieldPtr:
case IrInstructionIdUnionFieldPtr:
- case IrInstructionIdMaybeWrap:
+ case IrInstructionIdOptionalWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
case IrInstructionIdCast:
@@ -19684,8 +19836,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction);
case IrInstructionIdTestNonNull:
return ir_analyze_instruction_test_non_null(ira, (IrInstructionTestNonNull *)instruction);
- case IrInstructionIdUnwrapMaybe:
- return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapOptional *)instruction);
case IrInstructionIdClz:
return ir_analyze_instruction_clz(ira, (IrInstructionClz *)instruction);
case IrInstructionIdCtz:
@@ -19776,8 +19928,6 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
return ir_analyze_instruction_check_switch_prongs(ira, (IrInstructionCheckSwitchProngs *)instruction);
case IrInstructionIdCheckStatementIsVoid:
return ir_analyze_instruction_check_statement_is_void(ira, (IrInstructionCheckStatementIsVoid *)instruction);
- case IrInstructionIdCanImplicitCast:
- return ir_analyze_instruction_can_implicit_cast(ira, (IrInstructionCanImplicitCast *)instruction);
case IrInstructionIdDeclRef:
return ir_analyze_instruction_decl_ref(ira, (IrInstructionDeclRef *)instruction);
case IrInstructionIdPanic:
@@ -19873,6 +20023,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi
static TypeTableEntry *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *instruction) {
TypeTableEntry *instruction_type = ir_analyze_instruction_nocast(ira, instruction);
instruction->value.type = instruction_type;
+
if (instruction->other) {
instruction->other->value.type = instruction_type;
} else {
@@ -20022,7 +20173,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdSliceType:
case IrInstructionIdSizeOf:
case IrInstructionIdTestNonNull:
- case IrInstructionIdUnwrapMaybe:
+ case IrInstructionIdUnwrapOptional:
case IrInstructionIdClz:
case IrInstructionIdCtz:
case IrInstructionIdSwitchVar:
@@ -20044,7 +20195,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdFrameAddress:
case IrInstructionIdTestErr:
case IrInstructionIdUnwrapErrCode:
- case IrInstructionIdMaybeWrap:
+ case IrInstructionIdOptionalWrap:
case IrInstructionIdErrWrapCode:
case IrInstructionIdErrWrapPayload:
case IrInstructionIdFnProto:
@@ -20057,7 +20208,6 @@ bool ir_has_side_effects(IrInstruction *instruction) {
case IrInstructionIdIntToEnum:
case IrInstructionIdIntToErr:
case IrInstructionIdErrToInt:
- case IrInstructionIdCanImplicitCast:
case IrInstructionIdDeclRef:
case IrInstructionIdErrName:
case IrInstructionIdTypeName:
diff --git a/src/ir_print.cpp b/src/ir_print.cpp
index 3c177a8bbf..43907fa9d4 100644
--- a/src/ir_print.cpp
+++ b/src/ir_print.cpp
@@ -148,7 +148,7 @@ static const char *ir_un_op_id_str(IrUnOp op_id) {
return "-%";
case IrUnOpDereference:
return "*";
- case IrUnOpMaybe:
+ case IrUnOpOptional:
return "?";
}
zig_unreachable();
@@ -481,7 +481,7 @@ static void ir_print_test_null(IrPrint *irp, IrInstructionTestNonNull *instructi
fprintf(irp->f, " != null");
}
-static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapMaybe *instruction) {
+static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapOptional *instruction) {
fprintf(irp->f, "&??*");
ir_print_other_instruction(irp, instruction->value);
if (!instruction->safety_check_on) {
@@ -777,7 +777,7 @@ static void ir_print_unwrap_err_payload(IrPrint *irp, IrInstructionUnwrapErrPayl
}
}
-static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionMaybeWrap *instruction) {
+static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionOptionalWrap *instruction) {
fprintf(irp->f, "@maybeWrap(");
ir_print_other_instruction(irp, instruction->value);
fprintf(irp->f, ")");
@@ -913,14 +913,6 @@ static void ir_print_tag_name(IrPrint *irp, IrInstructionTagName *instruction) {
ir_print_other_instruction(irp, instruction->target);
}
-static void ir_print_can_implicit_cast(IrPrint *irp, IrInstructionCanImplicitCast *instruction) {
- fprintf(irp->f, "@canImplicitCast(");
- ir_print_other_instruction(irp, instruction->type_value);
- fprintf(irp->f, ",");
- ir_print_other_instruction(irp, instruction->target_value);
- fprintf(irp->f, ")");
-}
-
static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) {
fprintf(irp->f, "&");
if (instruction->align_value != nullptr) {
@@ -1040,7 +1032,7 @@ static void ir_print_export(IrPrint *irp, IrInstructionExport *instruction) {
static void ir_print_error_return_trace(IrPrint *irp, IrInstructionErrorReturnTrace *instruction) {
fprintf(irp->f, "@errorReturnTrace(");
- switch (instruction->nullable) {
+ switch (instruction->optional) {
case IrInstructionErrorReturnTrace::Null:
fprintf(irp->f, "Null");
break;
@@ -1356,8 +1348,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTestNonNull:
ir_print_test_null(irp, (IrInstructionTestNonNull *)instruction);
break;
- case IrInstructionIdUnwrapMaybe:
- ir_print_unwrap_maybe(irp, (IrInstructionUnwrapMaybe *)instruction);
+ case IrInstructionIdUnwrapOptional:
+ ir_print_unwrap_maybe(irp, (IrInstructionUnwrapOptional *)instruction);
break;
case IrInstructionIdCtz:
ir_print_ctz(irp, (IrInstructionCtz *)instruction);
@@ -1473,8 +1465,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdUnwrapErrPayload:
ir_print_unwrap_err_payload(irp, (IrInstructionUnwrapErrPayload *)instruction);
break;
- case IrInstructionIdMaybeWrap:
- ir_print_maybe_wrap(irp, (IrInstructionMaybeWrap *)instruction);
+ case IrInstructionIdOptionalWrap:
+ ir_print_maybe_wrap(irp, (IrInstructionOptionalWrap *)instruction);
break;
case IrInstructionIdErrWrapCode:
ir_print_err_wrap_code(irp, (IrInstructionErrWrapCode *)instruction);
@@ -1524,9 +1516,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) {
case IrInstructionIdTagName:
ir_print_tag_name(irp, (IrInstructionTagName *)instruction);
break;
- case IrInstructionIdCanImplicitCast:
- ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction);
- break;
case IrInstructionIdPtrType:
ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction);
break;
diff --git a/src/link.cpp b/src/link.cpp
index d454d77aae..d2925cb5a8 100644
--- a/src/link.cpp
+++ b/src/link.cpp
@@ -391,6 +391,19 @@ static void construct_linker_job_elf(LinkJob *lj) {
}
}
+static void construct_linker_job_wasm(LinkJob *lj) {
+ CodeGen *g = lj->codegen;
+
+ lj->args.append("--relocatable"); // So lld doesn't look for _start.
+ lj->args.append("-o");
+ lj->args.append(buf_ptr(&lj->out_file));
+
+ // .o files
+ for (size_t i = 0; i < g->link_objects.length; i += 1) {
+ lj->args.append((const char *)buf_ptr(g->link_objects.at(i)));
+ }
+}
+
//static bool is_target_cyg_mingw(const ZigTarget *target) {
// return (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_Cygnus) ||
// (target->os == ZigLLVM_Win32 && target->env_type == ZigLLVM_GNU);
@@ -924,7 +937,7 @@ static void construct_linker_job(LinkJob *lj) {
case ZigLLVM_MachO:
return construct_linker_job_macho(lj);
case ZigLLVM_Wasm:
- zig_panic("TODO link wasm");
+ return construct_linker_job_wasm(lj);
}
}
diff --git a/src/main.cpp b/src/main.cpp
index 9c36f9b091..c63a143bff 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -23,6 +23,7 @@ static int usage(const char *arg0) {
" build-exe [source] create executable from source or object files\n"
" build-lib [source] create library from source or object files\n"
" build-obj [source] create object from source or assembly\n"
+ " builtin show the source code of that @import(\"builtin\")\n"
" run [source] create executable and run immediately\n"
" translate-c [source] convert c code to zig code\n"
" targets list available compilation targets\n"
@@ -214,6 +215,7 @@ static Buf *resolve_zig_lib_dir(void) {
enum Cmd {
CmdInvalid,
CmdBuild,
+ CmdBuiltin,
CmdRun,
CmdTest,
CmdVersion,
@@ -664,6 +666,8 @@ int main(int argc, char **argv) {
out_type = OutTypeExe;
} else if (strcmp(arg, "targets") == 0) {
cmd = CmdTargets;
+ } else if (strcmp(arg, "builtin") == 0) {
+ cmd = CmdBuiltin;
} else {
fprintf(stderr, "Unrecognized command: %s\n", arg);
return usage(arg0);
@@ -681,6 +685,7 @@ int main(int argc, char **argv) {
return usage(arg0);
}
break;
+ case CmdBuiltin:
case CmdVersion:
case CmdZen:
case CmdTargets:
@@ -727,6 +732,16 @@ int main(int argc, char **argv) {
}
switch (cmd) {
+ case CmdBuiltin: {
+ Buf *zig_lib_dir_buf = resolve_zig_lib_dir();
+ CodeGen *g = codegen_create(nullptr, target, out_type, build_mode, zig_lib_dir_buf);
+ Buf *builtin_source = codegen_generate_builtin_source(g);
+ if (fwrite(buf_ptr(builtin_source), 1, buf_len(builtin_source), stdout) != buf_len(builtin_source)) {
+ fprintf(stderr, "unable to write to stdout: %s\n", strerror(ferror(stdout)));
+ return EXIT_FAILURE;
+ }
+ return EXIT_SUCCESS;
+ }
case CmdRun:
case CmdBuild:
case CmdTranslateC:
diff --git a/src/parser.cpp b/src/parser.cpp
index 3ad2de906b..adb1633f5d 100644
--- a/src/parser.cpp
+++ b/src/parser.cpp
@@ -1046,12 +1046,11 @@ static AstNode *ast_parse_fn_proto_partial(ParseContext *pc, size_t *token_index
}
/*
-SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | PtrDerefExpression | SliceExpression)
+SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?")
FnCallExpression : token(LParen) list(Expression, token(Comma)) token(RParen)
ArrayAccessExpression : token(LBracket) Expression token(RBracket)
SliceExpression = "[" Expression ".." option(Expression) "]"
FieldAccessExpression : token(Dot) token(Symbol)
-PtrDerefExpression = ".*"
StructLiteralField : token(Dot) token(Symbol) token(Eq) Expression
*/
static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
@@ -1148,6 +1147,13 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index,
AstNode *node = ast_create_node(pc, NodeTypePtrDeref, first_token);
node->data.ptr_deref_expr.target = primary_expr;
+ primary_expr = node;
+ } else if (token->id == TokenIdQuestion) {
+ *token_index += 1;
+
+ AstNode *node = ast_create_node(pc, NodeTypeUnwrapOptional, first_token);
+ node->data.unwrap_optional.expr = primary_expr;
+
primary_expr = node;
} else {
ast_invalid_token_error(pc, token);
@@ -1165,8 +1171,7 @@ static PrefixOp tok_to_prefix_op(Token *token) {
case TokenIdDash: return PrefixOpNegation;
case TokenIdMinusPercent: return PrefixOpNegationWrap;
case TokenIdTilde: return PrefixOpBinNot;
- case TokenIdMaybe: return PrefixOpMaybe;
- case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe;
+ case TokenIdQuestion: return PrefixOpOptional;
case TokenIdAmpersand: return PrefixOpAddrOf;
default: return PrefixOpInvalid;
}
@@ -2304,8 +2309,8 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma
}
/*
-UnwrapExpression : BoolOrExpression (UnwrapMaybe | UnwrapError) | BoolOrExpression
-UnwrapMaybe : "??" BoolOrExpression
+UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression
+UnwrapOptional = "orelse" Expression
UnwrapError = "catch" option("|" Symbol "|") Expression
*/
static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) {
@@ -2315,14 +2320,14 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo
Token *token = &pc->tokens->at(*token_index);
- if (token->id == TokenIdDoubleQuestion) {
+ if (token->id == TokenIdKeywordOrElse) {
*token_index += 1;
AstNode *rhs = ast_parse_expression(pc, token_index, true);
AstNode *node = ast_create_node(pc, NodeTypeBinOpExpr, token);
node->data.bin_op_expr.op1 = lhs;
- node->data.bin_op_expr.bin_op = BinOpTypeUnwrapMaybe;
+ node->data.bin_op_expr.bin_op = BinOpTypeUnwrapOptional;
node->data.bin_op_expr.op2 = rhs;
return node;
@@ -3028,6 +3033,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont
case NodeTypePtrDeref:
visit_field(&node->data.ptr_deref_expr.target, visit, context);
break;
+ case NodeTypeUnwrapOptional:
+ visit_field(&node->data.unwrap_optional.expr, visit, context);
+ break;
case NodeTypeUse:
visit_field(&node->data.use.expr, visit, context);
break;
diff --git a/src/target.cpp b/src/target.cpp
index 563eb66bca..65c8b45e1b 100644
--- a/src/target.cpp
+++ b/src/target.cpp
@@ -596,12 +596,15 @@ void resolve_target_object_format(ZigTarget *target) {
case ZigLLVM_tce:
case ZigLLVM_tcele:
case ZigLLVM_thumbeb:
- case ZigLLVM_wasm32:
- case ZigLLVM_wasm64:
case ZigLLVM_xcore:
target->oformat= ZigLLVM_ELF;
return;
+ case ZigLLVM_wasm32:
+ case ZigLLVM_wasm64:
+ target->oformat = ZigLLVM_Wasm;
+ return;
+
case ZigLLVM_ppc:
case ZigLLVM_ppc64:
if (is_os_darwin(target)) {
diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp
index badbd695ec..2950b4eb49 100644
--- a/src/tokenizer.cpp
+++ b/src/tokenizer.cpp
@@ -134,6 +134,7 @@ static const struct ZigKeyword zig_keywords[] = {
{"noalias", TokenIdKeywordNoAlias},
{"null", TokenIdKeywordNull},
{"or", TokenIdKeywordOr},
+ {"orelse", TokenIdKeywordOrElse},
{"packed", TokenIdKeywordPacked},
{"promise", TokenIdKeywordPromise},
{"pub", TokenIdKeywordPub},
@@ -215,7 +216,6 @@ enum TokenizeState {
TokenizeStateSawGreaterThanGreaterThan,
TokenizeStateSawDot,
TokenizeStateSawDotDot,
- TokenizeStateSawQuestionMark,
TokenizeStateSawAtSign,
TokenizeStateCharCode,
TokenizeStateError,
@@ -532,6 +532,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdComma);
end_token(&t);
break;
+ case '?':
+ begin_token(&t, TokenIdQuestion);
+ end_token(&t);
+ break;
case '{':
begin_token(&t, TokenIdLBrace);
end_token(&t);
@@ -624,33 +628,10 @@ void tokenize(Buf *buf, Tokenization *out) {
begin_token(&t, TokenIdDot);
t.state = TokenizeStateSawDot;
break;
- case '?':
- begin_token(&t, TokenIdMaybe);
- t.state = TokenizeStateSawQuestionMark;
- break;
default:
invalid_char_error(&t, c);
}
break;
- case TokenizeStateSawQuestionMark:
- switch (c) {
- case '?':
- set_token_id(&t, t.cur_tok, TokenIdDoubleQuestion);
- end_token(&t);
- t.state = TokenizeStateStart;
- break;
- case '=':
- set_token_id(&t, t.cur_tok, TokenIdMaybeAssign);
- end_token(&t);
- t.state = TokenizeStateStart;
- break;
- default:
- t.pos -= 1;
- end_token(&t);
- t.state = TokenizeStateStart;
- continue;
- }
- break;
case TokenizeStateSawDot:
switch (c) {
case '.':
@@ -1485,7 +1466,6 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawGreaterThan:
case TokenizeStateSawGreaterThanGreaterThan:
case TokenizeStateSawDot:
- case TokenizeStateSawQuestionMark:
case TokenizeStateSawAtSign:
case TokenizeStateSawStarPercent:
case TokenizeStateSawPlusPercent:
@@ -1550,7 +1530,6 @@ const char * token_name(TokenId id) {
case TokenIdDash: return "-";
case TokenIdDivEq: return "/=";
case TokenIdDot: return ".";
- case TokenIdDoubleQuestion: return "??";
case TokenIdEllipsis2: return "..";
case TokenIdEllipsis3: return "...";
case TokenIdEof: return "EOF";
@@ -1587,6 +1566,7 @@ const char * token_name(TokenId id) {
case TokenIdKeywordNoAlias: return "noalias";
case TokenIdKeywordNull: return "null";
case TokenIdKeywordOr: return "or";
+ case TokenIdKeywordOrElse: return "orelse";
case TokenIdKeywordPacked: return "packed";
case TokenIdKeywordPromise: return "promise";
case TokenIdKeywordPub: return "pub";
@@ -1609,8 +1589,7 @@ const char * token_name(TokenId id) {
case TokenIdLBrace: return "{";
case TokenIdLBracket: return "[";
case TokenIdLParen: return "(";
- case TokenIdMaybe: return "?";
- case TokenIdMaybeAssign: return "?=";
+ case TokenIdQuestion: return "?";
case TokenIdMinusEq: return "-=";
case TokenIdMinusPercent: return "-%";
case TokenIdMinusPercentEq: return "-%=";
diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp
index d659c0a772..75c7feb476 100644
--- a/src/tokenizer.hpp
+++ b/src/tokenizer.hpp
@@ -41,7 +41,6 @@ enum TokenId {
TokenIdDash,
TokenIdDivEq,
TokenIdDot,
- TokenIdDoubleQuestion,
TokenIdEllipsis2,
TokenIdEllipsis3,
TokenIdEof,
@@ -76,6 +75,7 @@ enum TokenId {
TokenIdKeywordNoAlias,
TokenIdKeywordNull,
TokenIdKeywordOr,
+ TokenIdKeywordOrElse,
TokenIdKeywordPacked,
TokenIdKeywordPromise,
TokenIdKeywordPub,
@@ -100,8 +100,7 @@ enum TokenId {
TokenIdLBrace,
TokenIdLBracket,
TokenIdLParen,
- TokenIdMaybe,
- TokenIdMaybeAssign,
+ TokenIdQuestion,
TokenIdMinusEq,
TokenIdMinusPercent,
TokenIdMinusPercentEq,
@@ -170,6 +169,8 @@ struct Token {
TokenCharLit char_lit;
} data;
};
+// work around conflicting name Token which is also found in libclang
+typedef Token ZigToken;
struct Tokenization {
ZigList *tokens;
diff --git a/src/translate_c.cpp b/src/translate_c.cpp
index 6d641d2680..482b5e3878 100644
--- a/src/translate_c.cpp
+++ b/src/translate_c.cpp
@@ -260,6 +260,12 @@ static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *ch
return node;
}
+static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child_node) {
+ AstNode *node = trans_create_node(c, NodeTypeUnwrapOptional);
+ node->data.unwrap_optional.expr = child_node;
+ return node;
+}
+
static AstNode *trans_create_node_bin_op(Context *c, AstNode *lhs_node, BinOpType op, AstNode *rhs_node) {
AstNode *node = trans_create_node(c, NodeTypeBinOpExpr);
node->data.bin_op_expr.op1 = lhs_node;
@@ -276,8 +282,11 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod
node);
}
-static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node) {
+static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node, PtrLen ptr_len) {
AstNode *node = trans_create_node(c, NodeTypePointerType);
+ node->data.pointer_type.star_token = allocate(1);
+ node->data.pointer_type.star_token->id = (ptr_len == PtrLenSingle) ? TokenIdStar: TokenIdBracketStarBracket;
+ node->data.pointer_type.is_const = is_const;
node->data.pointer_type.is_const = is_const;
node->data.pointer_type.is_volatile = is_volatile;
node->data.pointer_type.op_expr = child_node;
@@ -379,7 +388,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
fn_def->data.fn_def.fn_proto = fn_proto;
fn_proto->data.fn_proto.fn_def_node = fn_def;
- AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, ref_node);
+ AstNode *unwrap_node = trans_create_node_unwrap_null(c, ref_node);
AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr);
fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node;
@@ -406,10 +415,6 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r
return fn_def;
}
-static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) {
- return trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, child);
-}
-
static AstNode *get_global(Context *c, Buf *name) {
{
auto entry = c->global_table.maybe_get(name);
@@ -731,6 +736,30 @@ static bool qual_type_has_wrapping_overflow(Context *c, QualType qt) {
}
}
+static bool type_is_opaque(Context *c, const Type *ty, const SourceLocation &source_loc) {
+ switch (ty->getTypeClass()) {
+ case Type::Builtin: {
+ const BuiltinType *builtin_ty = static_cast(ty);
+ return builtin_ty->getKind() == BuiltinType::Void;
+ }
+ case Type::Record: {
+ const RecordType *record_ty = static_cast(ty);
+ return record_ty->getDecl()->getDefinition() == nullptr;
+ }
+ case Type::Elaborated: {
+ const ElaboratedType *elaborated_ty = static_cast(ty);
+ return type_is_opaque(c, elaborated_ty->getNamedType().getTypePtr(), source_loc);
+ }
+ case Type::Typedef: {
+ const TypedefType *typedef_ty = static_cast(ty);
+ const TypedefNameDecl *typedef_decl = typedef_ty->getDecl();
+ return type_is_opaque(c, typedef_decl->getUnderlyingType().getTypePtr(), source_loc);
+ }
+ default:
+ return false;
+ }
+}
+
static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &source_loc) {
switch (ty->getTypeClass()) {
case Type::Builtin:
@@ -859,12 +888,14 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
}
if (qual_type_child_is_fn_proto(child_qt)) {
- return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node);
+ return trans_create_node_prefix_op(c, PrefixOpOptional, child_node);
}
+ PtrLen ptr_len = type_is_opaque(c, child_qt.getTypePtr(), source_loc) ? PtrLenSingle : PtrLenUnknown;
+
AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
- child_qt.isVolatileQualified(), child_node);
- return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node);
+ child_qt.isVolatileQualified(), child_node, ptr_len);
+ return trans_create_node_prefix_op(c, PrefixOpOptional, pointer_node);
}
case Type::Typedef:
{
@@ -1048,7 +1079,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou
return nullptr;
}
AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(),
- child_qt.isVolatileQualified(), child_type_node);
+ child_qt.isVolatileQualified(), child_type_node, PtrLenUnknown);
return pointer_node;
}
case Type::BlockPointer:
@@ -1941,7 +1972,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc
bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType());
if (is_fn_ptr)
return value_node;
- AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, value_node);
+ AstNode *unwrapped = trans_create_node_unwrap_null(c, value_node);
return trans_create_node_ptr_deref(c, unwrapped);
}
case UO_Plus:
@@ -2572,7 +2603,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope *
}
}
if (callee_node == nullptr) {
- callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, callee_raw_node);
+ callee_node = trans_create_node_unwrap_null(c, callee_raw_node);
}
} else {
callee_node = callee_raw_node;
@@ -4286,7 +4317,7 @@ static AstNode *trans_lookup_ast_maybe_fn(Context *c, AstNode *ref_node) {
return nullptr;
if (prefix_node->type != NodeTypePrefixOpExpr)
return nullptr;
- if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpMaybe)
+ if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpOptional)
return nullptr;
AstNode *fn_proto_node = prefix_node->data.prefix_op_expr.primary_expr;
@@ -4462,7 +4493,7 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t
} else if (first_tok->id == CTokIdAsterisk) {
*tok_i += 1;
- node = trans_create_node_ptr_type(c, false, false, node);
+ node = trans_create_node_ptr_type(c, false, false, node, PtrLenUnknown);
} else {
return node;
}
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index 8cd3e6fb78..b990ae2310 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -853,7 +853,7 @@ bool ZigLLDLink(ZigLLVM_ObjectFormatType oformat, const char **args, size_t arg_
return lld::mach_o::link(array_ref_args, diag);
case ZigLLVM_Wasm:
- assert(false); // TODO ZigLLDLink for Wasm
+ return lld::wasm::link(array_ref_args, false, diag);
}
assert(false); // unreachable
abort();
diff --git a/std/array_list.zig b/std/array_list.zig
index 07a1db6451..1a235d28a3 100644
--- a/std/array_list.zig
+++ b/std/array_list.zig
@@ -1,6 +1,7 @@
const std = @import("index.zig");
const debug = std.debug;
const assert = debug.assert;
+const assertError = debug.assertError;
const mem = std.mem;
const Allocator = mem.Allocator;
@@ -28,20 +29,33 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
};
}
- pub fn deinit(l: *const Self) void {
- l.allocator.free(l.items);
+ pub fn deinit(self: *const Self) void {
+ self.allocator.free(self.items);
}
- pub fn toSlice(l: *const Self) []align(A) T {
- return l.items[0..l.len];
+ pub fn toSlice(self: *const Self) []align(A) T {
+ return self.items[0..self.len];
}
- pub fn toSliceConst(l: *const Self) []align(A) const T {
- return l.items[0..l.len];
+ pub fn toSliceConst(self: *const Self) []align(A) const T {
+ return self.items[0..self.len];
}
- pub fn at(l: *const Self, n: usize) T {
- return l.toSliceConst()[n];
+ pub fn at(self: *const Self, n: usize) T {
+ return self.toSliceConst()[n];
+ }
+
+ /// Sets the value at index `i`, or returns `error.OutOfBounds` if
+ /// the index is not in range.
+ pub fn setOrError(self: *const Self, i: usize, item: *const T) !void {
+ if (i >= self.len) return error.OutOfBounds;
+ self.items[i] = item.*;
+ }
+
+ /// Sets the value at index `i`, asserting that the value is in range.
+ pub fn set(self: *const Self, i: usize, item: *const T) void {
+ assert(i < self.len);
+ self.items[i] = item.*;
}
pub fn count(self: *const Self) usize {
@@ -67,58 +81,58 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return result;
}
- pub fn insert(l: *Self, n: usize, item: *const T) !void {
- try l.ensureCapacity(l.len + 1);
- l.len += 1;
+ pub fn insert(self: *Self, n: usize, item: *const T) !void {
+ try self.ensureCapacity(self.len + 1);
+ self.len += 1;
- mem.copy(T, l.items[n + 1 .. l.len], l.items[n .. l.len - 1]);
- l.items[n] = item.*;
+ mem.copy(T, self.items[n + 1 .. self.len], self.items[n .. self.len - 1]);
+ self.items[n] = item.*;
}
- pub fn insertSlice(l: *Self, n: usize, items: []align(A) const T) !void {
- try l.ensureCapacity(l.len + items.len);
- l.len += items.len;
+ pub fn insertSlice(self: *Self, n: usize, items: []align(A) const T) !void {
+ try self.ensureCapacity(self.len + items.len);
+ self.len += items.len;
- mem.copy(T, l.items[n + items.len .. l.len], l.items[n .. l.len - items.len]);
- mem.copy(T, l.items[n .. n + items.len], items);
+ mem.copy(T, self.items[n + items.len .. self.len], self.items[n .. self.len - items.len]);
+ mem.copy(T, self.items[n .. n + items.len], items);
}
- pub fn append(l: *Self, item: *const T) !void {
- const new_item_ptr = try l.addOne();
+ pub fn append(self: *Self, item: *const T) !void {
+ const new_item_ptr = try self.addOne();
new_item_ptr.* = item.*;
}
- pub fn appendSlice(l: *Self, items: []align(A) const T) !void {
- try l.ensureCapacity(l.len + items.len);
- mem.copy(T, l.items[l.len..], items);
- l.len += items.len;
+ pub fn appendSlice(self: *Self, items: []align(A) const T) !void {
+ try self.ensureCapacity(self.len + items.len);
+ mem.copy(T, self.items[self.len..], items);
+ self.len += items.len;
}
- pub fn resize(l: *Self, new_len: usize) !void {
- try l.ensureCapacity(new_len);
- l.len = new_len;
+ pub fn resize(self: *Self, new_len: usize) !void {
+ try self.ensureCapacity(new_len);
+ self.len = new_len;
}
- pub fn shrink(l: *Self, new_len: usize) void {
- assert(new_len <= l.len);
- l.len = new_len;
+ pub fn shrink(self: *Self, new_len: usize) void {
+ assert(new_len <= self.len);
+ self.len = new_len;
}
- pub fn ensureCapacity(l: *Self, new_capacity: usize) !void {
- var better_capacity = l.items.len;
+ pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
+ var better_capacity = self.items.len;
if (better_capacity >= new_capacity) return;
while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
- l.items = try l.allocator.alignedRealloc(T, A, l.items, better_capacity);
+ self.items = try self.allocator.alignedRealloc(T, A, self.items, better_capacity);
}
- pub fn addOne(l: *Self) !*T {
- const new_length = l.len + 1;
- try l.ensureCapacity(new_length);
- const result = &l.items[l.len];
- l.len = new_length;
+ pub fn addOne(self: *Self) !*T {
+ const new_length = self.len + 1;
+ try self.ensureCapacity(new_length);
+ const result = &self.items[self.len];
+ self.len = new_length;
return result;
}
@@ -159,9 +173,15 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
}
test "basic ArrayList test" {
- var list = ArrayList(i32).init(debug.global_allocator);
+ var bytes: [1024]u8 = undefined;
+ const allocator = &std.heap.FixedBufferAllocator.init(bytes[0..]).allocator;
+
+ var list = ArrayList(i32).init(allocator);
defer list.deinit();
+ // setting on empty list is out of bounds
+ assertError(list.setOrError(0, 1), error.OutOfBounds);
+
{
var i: usize = 0;
while (i < 10) : (i += 1) {
@@ -200,6 +220,16 @@ test "basic ArrayList test" {
list.appendSlice([]const i32{}) catch unreachable;
assert(list.len == 9);
+
+ // can only set on indices < self.len
+ list.set(7, 33);
+ list.set(8, 42);
+
+ assertError(list.setOrError(9, 99), error.OutOfBounds);
+ assertError(list.setOrError(10, 123), error.OutOfBounds);
+
+ assert(list.pop() == 42);
+ assert(list.pop() == 33);
}
test "iterator ArrayList test" {
@@ -228,7 +258,7 @@ test "iterator ArrayList test" {
}
it.reset();
- assert(??it.next() == 1);
+ assert(it.next().? == 1);
}
test "insert ArrayList test" {
diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
index 142c958173..3dc64dbea2 100644
--- a/std/atomic/queue.zig
+++ b/std/atomic/queue.zig
@@ -33,8 +33,8 @@ pub fn Queue(comptime T: type) type {
pub fn get(self: *Self) ?*Node {
var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst);
while (true) {
- const node = head.next ?? return null;
- head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node;
+ const node = head.next orelse return null;
+ head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node;
}
}
};
@@ -94,8 +94,18 @@ test "std.atomic.queue" {
for (getters) |t|
t.wait();
- std.debug.assert(context.put_sum == context.get_sum);
- std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
+ if (context.put_sum != context.get_sum) {
+ std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
+ }
+
+ if (context.get_count != puts_per_thread * put_thread_count) {
+ std.debug.panic(
+ "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
+ context.get_count,
+ u32(puts_per_thread),
+ u32(put_thread_count),
+ );
+ }
}
fn startPuts(ctx: *Context) u8 {
@@ -114,15 +124,14 @@ fn startPuts(ctx: *Context) u8 {
fn startGets(ctx: *Context) u8 {
while (true) {
+ const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+
while (ctx.queue.get()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
}
- if (@atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1) {
- break;
- }
+ if (last) return 0;
}
- return 0;
}
diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 15611188d2..9e81d89257 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -28,14 +28,14 @@ pub fn Stack(comptime T: type) type {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
node.next = root;
- root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
+ root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse break;
}
}
pub fn pop(self: *Self) ?*Node {
var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst);
while (true) {
- root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
+ root = @cmpxchgWeak(?*Node, &self.root, root, (root orelse return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return root;
}
}
@@ -97,8 +97,18 @@ test "std.atomic.stack" {
for (getters) |t|
t.wait();
- std.debug.assert(context.put_sum == context.get_sum);
- std.debug.assert(context.get_count == puts_per_thread * put_thread_count);
+ if (context.put_sum != context.get_sum) {
+ std.debug.panic("failure\nput_sum:{} != get_sum:{}", context.put_sum, context.get_sum);
+ }
+
+ if (context.get_count != puts_per_thread * put_thread_count) {
+ std.debug.panic(
+ "failure\nget_count:{} != puts_per_thread:{} * put_thread_count:{}",
+ context.get_count,
+ u32(puts_per_thread),
+ u32(put_thread_count),
+ );
+ }
}
fn startPuts(ctx: *Context) u8 {
@@ -117,15 +127,14 @@ fn startPuts(ctx: *Context) u8 {
fn startGets(ctx: *Context) u8 {
while (true) {
+ const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1;
+
while (ctx.stack.pop()) |node| {
std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
_ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
_ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst);
}
- if (@atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1) {
- break;
- }
+ if (last) return 0;
}
- return 0;
}
diff --git a/std/buf_map.zig b/std/buf_map.zig
index 22d821ae7b..a82d1b731a 100644
--- a/std/buf_map.zig
+++ b/std/buf_map.zig
@@ -19,7 +19,7 @@ pub const BufMap = struct {
pub fn deinit(self: *const BufMap) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
self.free(entry.value);
}
@@ -37,12 +37,12 @@ pub const BufMap = struct {
}
pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 {
- const entry = self.hash_map.get(key) ?? return null;
+ const entry = self.hash_map.get(key) orelse return null;
return entry.value;
}
pub fn delete(self: *BufMap, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
self.free(entry.value);
}
@@ -72,15 +72,15 @@ test "BufMap" {
defer bufmap.deinit();
try bufmap.set("x", "1");
- assert(mem.eql(u8, ??bufmap.get("x"), "1"));
+ assert(mem.eql(u8, bufmap.get("x").?, "1"));
assert(1 == bufmap.count());
try bufmap.set("x", "2");
- assert(mem.eql(u8, ??bufmap.get("x"), "2"));
+ assert(mem.eql(u8, bufmap.get("x").?, "2"));
assert(1 == bufmap.count());
try bufmap.set("x", "3");
- assert(mem.eql(u8, ??bufmap.get("x"), "3"));
+ assert(mem.eql(u8, bufmap.get("x").?, "3"));
assert(1 == bufmap.count());
bufmap.delete("x");
diff --git a/std/buf_set.zig b/std/buf_set.zig
index 03a050ed8b..ab2d8e7c34 100644
--- a/std/buf_set.zig
+++ b/std/buf_set.zig
@@ -17,7 +17,7 @@ pub const BufSet = struct {
pub fn deinit(self: *const BufSet) void {
var it = self.hash_map.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
self.free(entry.key);
}
@@ -33,7 +33,7 @@ pub const BufSet = struct {
}
pub fn delete(self: *BufSet, key: []const u8) void {
- const entry = self.hash_map.remove(key) ?? return;
+ const entry = self.hash_map.remove(key) orelse return;
self.free(entry.key);
}
diff --git a/std/buffer.zig b/std/buffer.zig
index 3b2936d223..0d82918580 100644
--- a/std/buffer.zig
+++ b/std/buffer.zig
@@ -28,7 +28,6 @@ pub const Buffer = struct {
/// Must deinitialize with deinit.
/// None of the other operations are valid until you do one of these:
/// * ::replaceContents
- /// * ::replaceContentsBuffer
/// * ::resize
pub fn initNull(allocator: *Allocator) Buffer {
return Buffer{ .list = ArrayList(u8).init(allocator) };
@@ -42,9 +41,9 @@ pub const Buffer = struct {
/// Buffer takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Must deinitialize with deinit.
- pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) Buffer {
+ pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) !Buffer {
var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) };
- self.list.append(0);
+ try self.list.append(0);
return self;
}
@@ -116,7 +115,7 @@ pub const Buffer = struct {
return mem.eql(u8, self.list.items[start..l], m);
}
- pub fn replaceContents(self: *const Buffer, m: []const u8) !void {
+ pub fn replaceContents(self: *Buffer, m: []const u8) !void {
try self.resize(m.len);
mem.copy(u8, self.list.toSlice(), m);
}
diff --git a/std/build.zig b/std/build.zig
index fed02e0815..16ce426bcb 100644
--- a/std/build.zig
+++ b/std/build.zig
@@ -136,7 +136,7 @@ pub const Builder = struct {
}
pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void {
- self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default
+ self.prefix = maybe_prefix orelse "/usr/local"; // TODO better default
self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable;
self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable;
}
@@ -312,9 +312,9 @@ pub const Builder = struct {
if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| {
var it = mem.split(nix_cflags_compile, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-isystem")) {
- const include_path = it.next() ?? {
+ const include_path = it.next() orelse {
warn("Expected argument after -isystem in NIX_CFLAGS_COMPILE\n");
break;
};
@@ -330,9 +330,9 @@ pub const Builder = struct {
if (os.getEnvVarOwned(self.allocator, "NIX_LDFLAGS")) |nix_ldflags| {
var it = mem.split(nix_ldflags, " ");
while (true) {
- const word = it.next() ?? break;
+ const word = it.next() orelse break;
if (mem.eql(u8, word, "-rpath")) {
- const rpath = it.next() ?? {
+ const rpath = it.next() orelse {
warn("Expected argument after -rpath in NIX_LDFLAGS\n");
break;
};
@@ -362,7 +362,7 @@ pub const Builder = struct {
}
self.available_options_list.append(available_option) catch unreachable;
- const entry = self.user_input_options.get(name) ?? return null;
+ const entry = self.user_input_options.get(name) orelse return null;
entry.value.used = true;
switch (type_id) {
TypeId.Bool => switch (entry.value.value) {
@@ -416,9 +416,9 @@ pub const Builder = struct {
pub fn standardReleaseOptions(self: *Builder) builtin.Mode {
if (self.release_mode) |mode| return mode;
- const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false;
- const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") ?? false;
- const release_small = self.option(bool, "release-small", "size optimizations on and safety off") ?? false;
+ const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") orelse false;
+ const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") orelse false;
+ const release_small = self.option(bool, "release-small", "size optimizations on and safety off") orelse false;
const mode = if (release_safe and !release_fast and !release_small) builtin.Mode.ReleaseSafe else if (release_fast and !release_safe and !release_small) builtin.Mode.ReleaseFast else if (release_small and !release_fast and !release_safe) builtin.Mode.ReleaseSmall else if (!release_fast and !release_safe and !release_small) builtin.Mode.Debug else x: {
warn("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)");
@@ -518,7 +518,7 @@ pub const Builder = struct {
// make sure all args are used
var it = self.user_input_options.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
if (!entry.value.used) {
warn("Invalid option: -D{}\n\n", entry.key);
self.markInvalidUserInput();
@@ -617,7 +617,7 @@ pub const Builder = struct {
warn("cp {} {}\n", source_path, dest_path);
}
- const dirname = os.path.dirname(dest_path);
+ const dirname = os.path.dirname(dest_path) orelse ".";
const abs_source_path = self.pathFromRoot(source_path);
os.makePath(self.allocator, dirname) catch |err| {
warn("Unable to create path {}: {}\n", dirname, @errorName(err));
@@ -1246,7 +1246,7 @@ pub const LibExeObjStep = struct {
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
zig_args.append("--library") catch unreachable;
zig_args.append(entry.key) catch unreachable;
}
@@ -1395,8 +1395,9 @@ pub const LibExeObjStep = struct {
cc_args.append(abs_source_file) catch unreachable;
const cache_o_src = os.path.join(builder.allocator, builder.cache_root, source_file) catch unreachable;
- const cache_o_dir = os.path.dirname(cache_o_src);
- try builder.makePath(cache_o_dir);
+ if (os.path.dirname(cache_o_src)) |cache_o_dir| {
+ try builder.makePath(cache_o_dir);
+ }
const cache_o_file = builder.fmt("{}{}", cache_o_src, self.target.oFileExt());
cc_args.append("-o") catch unreachable;
cc_args.append(builder.pathFromRoot(cache_o_file)) catch unreachable;
@@ -1509,8 +1510,9 @@ pub const LibExeObjStep = struct {
cc_args.append(abs_source_file) catch unreachable;
const cache_o_src = os.path.join(builder.allocator, builder.cache_root, source_file) catch unreachable;
- const cache_o_dir = os.path.dirname(cache_o_src);
- try builder.makePath(cache_o_dir);
+ if (os.path.dirname(cache_o_src)) |cache_o_dir| {
+ try builder.makePath(cache_o_dir);
+ }
const cache_o_file = builder.fmt("{}{}", cache_o_src, self.target.oFileExt());
cc_args.append("-o") catch unreachable;
cc_args.append(builder.pathFromRoot(cache_o_file)) catch unreachable;
@@ -1696,7 +1698,7 @@ pub const TestStep = struct {
{
var it = self.link_libs.iterator();
while (true) {
- const entry = it.next() ?? break;
+ const entry = it.next() orelse break;
try zig_args.append("--library");
try zig_args.append(entry.key);
}
@@ -1855,7 +1857,7 @@ pub const WriteFileStep = struct {
fn make(step: *Step) !void {
const self = @fieldParentPtr(WriteFileStep, "step", step);
const full_path = self.builder.pathFromRoot(self.file_path);
- const full_path_dir = os.path.dirname(full_path);
+ const full_path_dir = os.path.dirname(full_path) orelse ".";
os.makePath(self.builder.allocator, full_path_dir) catch |err| {
warn("unable to make path {}: {}\n", full_path_dir, @errorName(err));
return err;
@@ -1945,7 +1947,7 @@ pub const Step = struct {
};
fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void {
- const out_dir = os.path.dirname(output_path);
+ const out_dir = os.path.dirname(output_path) orelse ".";
const out_basename = os.path.basename(output_path);
// sym link for libfoo.so.1 to libfoo.so.1.2.3
const major_only_path = os.path.join(allocator, out_dir, filename_major_only) catch unreachable;
diff --git a/std/c/index.zig b/std/c/index.zig
index ade37f36c1..7de8634d07 100644
--- a/std/c/index.zig
+++ b/std/c/index.zig
@@ -20,11 +20,11 @@ pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int;
pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize;
pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int;
pub extern "c" fn raise(sig: c_int) c_int;
-pub extern "c" fn read(fd: c_int, buf: [*]c_void, nbyte: usize) isize;
+pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize;
pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int;
-pub extern "c" fn write(fd: c_int, buf: [*]const c_void, nbyte: usize) isize;
-pub extern "c" fn mmap(addr: ?[*]c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?[*]c_void;
-pub extern "c" fn munmap(addr: [*]c_void, len: usize) c_int;
+pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize;
+pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void;
+pub extern "c" fn munmap(addr: *c_void, len: usize) c_int;
pub extern "c" fn unlink(path: [*]const u8) c_int;
pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8;
pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int;
@@ -48,15 +48,15 @@ pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int;
pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int;
pub extern "c" fn rmdir(path: [*]const u8) c_int;
-pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?[*]c_void;
-pub extern "c" fn malloc(usize) ?[*]c_void;
-pub extern "c" fn realloc([*]c_void, usize) ?[*]c_void;
-pub extern "c" fn free([*]c_void) void;
-pub extern "c" fn posix_memalign(memptr: *[*]c_void, alignment: usize, size: usize) c_int;
+pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
+pub extern "c" fn malloc(usize) ?*c_void;
+pub extern "c" fn realloc(*c_void, usize) ?*c_void;
+pub extern "c" fn free(*c_void) void;
+pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;
pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int;
pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int;
-pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: [*]c_void, stacksize: usize) c_int;
+pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int;
pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int;
pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int;
diff --git a/std/debug/index.zig b/std/debug/index.zig
index 00d9bef121..25f7a58b25 100644
--- a/std/debug/index.zig
+++ b/std/debug/index.zig
@@ -88,6 +88,16 @@ pub fn assert(ok: bool) void {
}
}
+/// TODO: add `==` operator for `error_union == error_set`, and then
+/// remove this function
+pub fn assertError(value: var, expected_error: error) void {
+ if (value) {
+ @panic("expected error");
+ } else |actual_error| {
+ assert(actual_error == expected_error);
+ }
+}
+
/// Call this function when you want to panic if the condition is not true.
/// If `ok` is `false`, this function will panic in every release mode.
pub fn assertOrPanic(ok: bool) void {
@@ -198,7 +208,7 @@ fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: us
.name = "???",
.address = address,
};
- const symbol = debug_info.symbol_table.search(address) ?? &unknown;
+ const symbol = debug_info.symbol_table.search(address) orelse &unknown;
try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++ DIM ++ ptr_hex ++ " in ??? (???)" ++ RESET ++ "\n", symbol.name, address);
},
else => {
@@ -258,10 +268,10 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace {
try st.elf.openFile(allocator, &st.self_exe_file);
errdefer st.elf.close();
- st.debug_info = (try st.elf.findSection(".debug_info")) ?? return error.MissingDebugInfo;
- st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) ?? return error.MissingDebugInfo;
- st.debug_str = (try st.elf.findSection(".debug_str")) ?? return error.MissingDebugInfo;
- st.debug_line = (try st.elf.findSection(".debug_line")) ?? return error.MissingDebugInfo;
+ st.debug_info = (try st.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo;
+ st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo;
+ st.debug_str = (try st.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo;
+ st.debug_line = (try st.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo;
st.debug_ranges = (try st.elf.findSection(".debug_ranges"));
try scanAllCompileUnits(st);
return st;
@@ -433,7 +443,7 @@ const Die = struct {
}
fn getAttrAddr(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Address => |value| value,
else => error.InvalidDebugInfo,
@@ -441,7 +451,7 @@ const Die = struct {
}
fn getAttrSecOffset(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
FormValue.SecOffset => |value| value,
@@ -450,7 +460,7 @@ const Die = struct {
}
fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.Const => |value| value.asUnsignedLe(),
else => error.InvalidDebugInfo,
@@ -458,7 +468,7 @@ const Die = struct {
}
fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 {
- const form_value = self.getAttr(id) ?? return error.MissingDebugInfo;
+ const form_value = self.getAttr(id) orelse return error.MissingDebugInfo;
return switch (form_value.*) {
FormValue.String => |value| value,
FormValue.StrPtr => |offset| getString(st, offset),
@@ -738,7 +748,7 @@ fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !
var in_file_stream = io.FileInStream.init(in_file);
const in_stream = &in_file_stream.stream;
const abbrev_code = try readULeb128(in_stream);
- const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) ?? return error.InvalidDebugInfo;
+ const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo;
var result = Die{
.tag_id = table_entry.tag_id,
diff --git a/std/event.zig b/std/event.zig
index 89ab816bb6..0821c789b7 100644
--- a/std/event.zig
+++ b/std/event.zig
@@ -40,9 +40,9 @@ pub const TcpServer = struct {
self.listen_address = std.net.Address.initPosix(try std.os.posixGetSockName(self.sockfd));
self.accept_coro = try async TcpServer.handler(self);
- errdefer cancel ??self.accept_coro;
+ errdefer cancel self.accept_coro.?;
- try self.loop.addFd(self.sockfd, ??self.accept_coro);
+ try self.loop.addFd(self.sockfd, self.accept_coro.?);
errdefer self.loop.removeFd(self.sockfd);
}
diff --git a/std/fmt/index.zig b/std/fmt/index.zig
index 047a154bb8..cfc0948d2c 100644
--- a/std/fmt/index.zig
+++ b/std/fmt/index.zig
@@ -97,7 +97,11 @@ pub fn formatType(
output: fn (@typeOf(context), []const u8) Errors!void,
) Errors!void {
const T = @typeOf(value);
- switch (@typeId(T)) {
+ if (T == error) {
+ try output(context, "error.");
+ return output(context, @errorName(value));
+ }
+ switch (@typeInfo(T)) {
builtin.TypeId.Int, builtin.TypeId.Float => {
return formatValue(value, fmt, context, Errors, output);
},
@@ -107,7 +111,7 @@ pub fn formatType(
builtin.TypeId.Bool => {
return output(context, if (value) "true" else "false");
},
- builtin.TypeId.Nullable => {
+ builtin.TypeId.Optional => {
if (value) |payload| {
return formatType(payload, fmt, context, Errors, output);
} else {
@@ -125,12 +129,13 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
- builtin.TypeId.Pointer => {
- switch (@typeId(T.Child)) {
- builtin.TypeId.Array => {
- if (T.Child.Child == u8) {
+ builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) {
+ builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) {
+ builtin.TypeId.Array => |info| {
+ if (info.child == u8) {
return formatText(value, fmt, context, Errors, output);
}
+ return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
},
builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => {
const has_cust_fmt = comptime cf: {
@@ -154,14 +159,24 @@ pub fn formatType(
return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
},
else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)),
- }
- },
- else => if (@canImplicitCast([]const u8, value)) {
- const casted_value = ([]const u8)(value);
- return output(context, casted_value);
- } else {
- @compileError("Unable to format type '" ++ @typeName(T) ++ "'");
+ },
+ builtin.TypeInfo.Pointer.Size.Many => {
+ if (ptr_info.child == u8) {
+ // This is a bit of a hack, but it made more sense to
+ // do this check here than have formatText do it
+ if (fmt[0] == 's') {
+ const len = std.cstr.len(value);
+ return formatText(value[0..len], fmt, context, Errors, output);
+ }
+ }
+ return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value));
+ },
+ builtin.TypeInfo.Pointer.Size.Slice => {
+ const casted_value = ([]const u8)(value);
+ return output(context, casted_value);
+ },
},
+ else => @compileError("Unable to format type '" ++ @typeName(T) ++ "'"),
}
}
@@ -293,7 +308,7 @@ pub fn formatBuf(
var leftover_padding = if (width > buf.len) (width - buf.len) else return;
const pad_byte: u8 = ' ';
while (leftover_padding > 0) : (leftover_padding -= 1) {
- try output(context, (&pad_byte)[0..1]);
+ try output(context, (*[1]u8)(&pad_byte)[0..1]);
}
}
@@ -552,14 +567,19 @@ pub fn formatBytes(
return output(context, "0B");
}
- const mags = " KMGTPEZY";
+ const mags_si = " kMGTPEZY";
+ const mags_iec = " KMGTPEZY";
const magnitude = switch (radix) {
- 1000 => math.min(math.log2(value) / comptime math.log2(1000), mags.len - 1),
- 1024 => math.min(math.log2(value) / 10, mags.len - 1),
+ 1000 => math.min(math.log2(value) / comptime math.log2(1000), mags_si.len - 1),
+ 1024 => math.min(math.log2(value) / 10, mags_iec.len - 1),
else => unreachable,
};
const new_value = f64(value) / math.pow(f64, f64(radix), f64(magnitude));
- const suffix = mags[magnitude];
+ const suffix = switch (radix) {
+ 1000 => mags_si[magnitude],
+ 1024 => mags_iec[magnitude],
+ else => unreachable,
+ };
try formatFloatDecimal(new_value, width, context, Errors, output);
@@ -807,11 +827,11 @@ test "parse unsigned comptime" {
test "fmt.format" {
{
const value: ?i32 = 1234;
- try testFmt("nullable: 1234\n", "nullable: {}\n", value);
+ try testFmt("optional: 1234\n", "optional: {}\n", value);
}
{
const value: ?i32 = null;
- try testFmt("nullable: null\n", "nullable: {}\n", value);
+ try testFmt("optional: null\n", "optional: {}\n", value);
}
{
const value: error!i32 = 1234;
@@ -829,6 +849,10 @@ test "fmt.format" {
const value: u8 = 'a';
try testFmt("u8: a\n", "u8: {c}\n", value);
}
+ try testFmt("buf: Test \n", "buf: {s5}\n", "Test");
+ try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", "Test");
+ try testFmt("cstr: Test C\n", "cstr: {s}\n", c"Test C");
+ try testFmt("cstr: Test C \n", "cstr: {s10}\n", c"Test C");
try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024));
try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024));
{
diff --git a/std/hash_map.zig b/std/hash_map.zig
index a323cdc197..3bd03d4f28 100644
--- a/std/hash_map.zig
+++ b/std/hash_map.zig
@@ -265,11 +265,11 @@ test "basic hash map usage" {
assert((map.put(4, 44) catch unreachable) == null);
assert((map.put(5, 55) catch unreachable) == null);
- assert(??(map.put(5, 66) catch unreachable) == 55);
- assert(??(map.put(5, 55) catch unreachable) == 66);
+ assert((map.put(5, 66) catch unreachable).? == 55);
+ assert((map.put(5, 55) catch unreachable).? == 66);
assert(map.contains(2));
- assert((??map.get(2)).value == 22);
+ assert(map.get(2).?.value == 22);
_ = map.remove(2);
assert(map.remove(2) == null);
assert(map.get(2) == null);
@@ -317,7 +317,7 @@ test "iterator hash map" {
}
it.reset();
- var entry = ??it.next();
+ var entry = it.next().?;
assert(entry.key == keys[0]);
assert(entry.value == values[0]);
}
diff --git a/std/heap.zig b/std/heap.zig
index 4444a2307a..172bc24118 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -22,7 +22,7 @@ fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
}
fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
- const old_ptr = @ptrCast([*]c_void, old_mem.ptr);
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
if (c.realloc(old_ptr, new_size)) |buf| {
return @ptrCast([*]u8, buf)[0..new_size];
} else if (new_size <= old_mem.len) {
@@ -33,7 +33,7 @@ fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![
}
fn cFree(self: *Allocator, old_mem: []u8) void {
- const old_ptr = @ptrCast([*]c_void, old_mem.ptr);
+ const old_ptr = @ptrCast(*c_void, old_mem.ptr);
c.free(old_ptr);
}
@@ -97,12 +97,12 @@ pub const DirectAllocator = struct {
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
- const heap_handle = self.heap_handle ?? blk: {
- const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) ?? return error.OutOfMemory;
+ const heap_handle = self.heap_handle orelse blk: {
+ const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) orelse return error.OutOfMemory;
self.heap_handle = hh;
break :blk hh;
};
- const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) ?? return error.OutOfMemory;
+ const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
const rem = @rem(root_addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
@@ -140,9 +140,9 @@ pub const DirectAllocator = struct {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
- const old_ptr = @intToPtr([*]c_void, root_addr);
+ const old_ptr = @intToPtr(*c_void, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
- const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
+ const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
@intToPtr(*align(1) usize, new_record_addr).* = root_addr;
@@ -170,8 +170,8 @@ pub const DirectAllocator = struct {
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
const root_addr = @intToPtr(*align(1) usize, record_addr).*;
- const ptr = @intToPtr([*]c_void, root_addr);
- _ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
+ const ptr = @intToPtr(*c_void, root_addr);
+ _ = os.windows.HeapFree(self.heap_handle.?, 0, ptr);
},
else => @compileError("Unsupported OS"),
}
@@ -343,7 +343,7 @@ pub const ThreadSafeFixedBufferAllocator = struct {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
- end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index..new_end_index];
+ end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
}
}
diff --git a/std/json.zig b/std/json.zig
index 71673ad20f..75ea2eee1c 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -3,6 +3,7 @@
// https://tools.ietf.org/html/rfc8259
const std = @import("index.zig");
+const debug = std.debug;
const mem = std.mem;
const u1 = @IntType(false, 1);
@@ -86,7 +87,9 @@ pub const Token = struct {
// parsing state requires ~40-50 bytes of stack space.
//
// Conforms strictly to RFC8529.
-pub const StreamingJsonParser = struct {
+//
+// For a non-byte-based wrapper, consider using TokenStream instead.
+pub const StreamingParser = struct {
// Current state
state: State,
// How many bytes we have counted for the current token
@@ -109,13 +112,13 @@ pub const StreamingJsonParser = struct {
const array_bit = 1;
const max_stack_size = @maxValue(u8);
- pub fn init() StreamingJsonParser {
- var p: StreamingJsonParser = undefined;
+ pub fn init() StreamingParser {
+ var p: StreamingParser = undefined;
p.reset();
return p;
}
- pub fn reset(p: *StreamingJsonParser) void {
+ pub fn reset(p: *StreamingParser) void {
p.state = State.TopLevelBegin;
p.count = 0;
// Set before ever read in main transition function
@@ -175,7 +178,7 @@ pub const StreamingJsonParser = struct {
// Only call this function to generate array/object final state.
pub fn fromInt(x: var) State {
- std.debug.assert(x == 0 or x == 1);
+ debug.assert(x == 0 or x == 1);
const T = @TagType(State);
return State(T(x));
}
@@ -205,7 +208,7 @@ pub const StreamingJsonParser = struct {
// tokens. token2 is always null if token1 is null.
//
// There is currently no error recovery on a bad stream.
- pub fn feed(p: *StreamingJsonParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
+ pub fn feed(p: *StreamingParser, c: u8, token1: *?Token, token2: *?Token) Error!void {
token1.* = null;
token2.* = null;
p.count += 1;
@@ -217,7 +220,7 @@ pub const StreamingJsonParser = struct {
}
// Perform a single transition on the state machine and return any possible token.
- fn transition(p: *StreamingJsonParser, c: u8, token: *?Token) Error!bool {
+ fn transition(p: *StreamingParser, c: u8, token: *?Token) Error!bool {
switch (p.state) {
State.TopLevelBegin => switch (c) {
'{' => {
@@ -321,7 +324,9 @@ pub const StreamingJsonParser = struct {
p.complete = true;
p.state = State.TopLevelEnd;
},
- else => {},
+ else => {
+ p.state = State.ValueEnd;
+ },
}
token.* = Token.initMarker(Token.Id.ObjectEnd);
@@ -345,7 +350,9 @@ pub const StreamingJsonParser = struct {
p.complete = true;
p.state = State.TopLevelEnd;
},
- else => {},
+ else => {
+ p.state = State.ValueEnd;
+ },
}
token.* = Token.initMarker(Token.Id.ArrayEnd);
@@ -852,16 +859,122 @@ pub const StreamingJsonParser = struct {
}
};
+// A small wrapper over a StreamingParser for full slices. Returns a stream of json Tokens.
+pub const TokenStream = struct {
+ i: usize,
+ slice: []const u8,
+ parser: StreamingParser,
+ token: ?Token,
+
+ pub fn init(slice: []const u8) TokenStream {
+ return TokenStream{
+ .i = 0,
+ .slice = slice,
+ .parser = StreamingParser.init(),
+ .token = null,
+ };
+ }
+
+ pub fn next(self: *TokenStream) !?Token {
+ if (self.token) |token| {
+ self.token = null;
+ return token;
+ }
+
+ var t1: ?Token = undefined;
+ var t2: ?Token = undefined;
+
+ while (self.i < self.slice.len) {
+ try self.parser.feed(self.slice[self.i], &t1, &t2);
+ self.i += 1;
+
+ if (t1) |token| {
+ self.token = t2;
+ return token;
+ }
+ }
+
+ if (self.i > self.slice.len) {
+ try self.parser.feed(' ', &t1, &t2);
+ self.i += 1;
+
+ if (t1) |token| {
+ return token;
+ }
+ }
+
+ return null;
+ }
+};
+
+fn checkNext(p: *TokenStream, id: Token.Id) void {
+ const token = (p.next() catch unreachable).?;
+ debug.assert(token.id == id);
+}
+
+test "token" {
+ const s =
+ \\{
+ \\ "Image": {
+ \\ "Width": 800,
+ \\ "Height": 600,
+ \\ "Title": "View from 15th Floor",
+ \\ "Thumbnail": {
+ \\ "Url": "http://www.example.com/image/481989943",
+ \\ "Height": 125,
+ \\ "Width": 100
+ \\ },
+ \\ "Animated" : false,
+ \\ "IDs": [116, 943, 234, 38793]
+ \\ }
+ \\}
+ ;
+
+ var p = TokenStream.init(s);
+
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Image
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Width
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Height
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Title
+ checkNext(&p, Token.Id.String);
+ checkNext(&p, Token.Id.String); // Thumbnail
+ checkNext(&p, Token.Id.ObjectBegin);
+ checkNext(&p, Token.Id.String); // Url
+ checkNext(&p, Token.Id.String);
+ checkNext(&p, Token.Id.String); // Height
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.String); // Width
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.ObjectEnd);
+ checkNext(&p, Token.Id.String); // Animated
+ checkNext(&p, Token.Id.False);
+ checkNext(&p, Token.Id.String); // IDs
+ checkNext(&p, Token.Id.ArrayBegin);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.Number);
+ checkNext(&p, Token.Id.ArrayEnd);
+ checkNext(&p, Token.Id.ObjectEnd);
+ checkNext(&p, Token.Id.ObjectEnd);
+
+ debug.assert((try p.next()) == null);
+}
+
// Validate a JSON string. This does not limit number precision so a decoder may not necessarily
// be able to decode the string even if this returns true.
pub fn validate(s: []const u8) bool {
- var p = StreamingJsonParser.init();
+ var p = StreamingParser.init();
for (s) |c, i| {
var token1: ?Token = undefined;
var token2: ?Token = undefined;
- p.feed(c, *token1, *token2) catch |err| {
+ p.feed(c, &token1, &token2) catch |err| {
return false;
};
}
@@ -869,6 +982,10 @@ pub fn validate(s: []const u8) bool {
return p.complete;
}
+test "json validate" {
+ debug.assert(validate("{}"));
+}
+
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayList = std.ArrayList;
@@ -897,46 +1014,46 @@ pub const Value = union(enum) {
pub fn dump(self: *const Value) void {
switch (self.*) {
Value.Null => {
- std.debug.warn("null");
+ debug.warn("null");
},
Value.Bool => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Integer => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Float => |inner| {
- std.debug.warn("{.5}", inner);
+ debug.warn("{.5}", inner);
},
Value.String => |inner| {
- std.debug.warn("\"{}\"", inner);
+ debug.warn("\"{}\"", inner);
},
Value.Array => |inner| {
var not_first = false;
- std.debug.warn("[");
+ debug.warn("[");
for (inner.toSliceConst()) |value| {
if (not_first) {
- std.debug.warn(",");
+ debug.warn(",");
}
not_first = true;
value.dump();
}
- std.debug.warn("]");
+ debug.warn("]");
},
Value.Object => |inner| {
var not_first = false;
- std.debug.warn("{{");
+ debug.warn("{{");
var it = inner.iterator();
while (it.next()) |entry| {
if (not_first) {
- std.debug.warn(",");
+ debug.warn(",");
}
not_first = true;
- std.debug.warn("\"{}\":", entry.key);
+ debug.warn("\"{}\":", entry.key);
entry.value.dump();
}
- std.debug.warn("}}");
+ debug.warn("}}");
},
}
}
@@ -952,53 +1069,53 @@ pub const Value = union(enum) {
fn dumpIndentLevel(self: *const Value, indent: usize, level: usize) void {
switch (self.*) {
Value.Null => {
- std.debug.warn("null");
+ debug.warn("null");
},
Value.Bool => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Integer => |inner| {
- std.debug.warn("{}", inner);
+ debug.warn("{}", inner);
},
Value.Float => |inner| {
- std.debug.warn("{.5}", inner);
+ debug.warn("{.5}", inner);
},
Value.String => |inner| {
- std.debug.warn("\"{}\"", inner);
+ debug.warn("\"{}\"", inner);
},
Value.Array => |inner| {
var not_first = false;
- std.debug.warn("[\n");
+ debug.warn("[\n");
for (inner.toSliceConst()) |value| {
if (not_first) {
- std.debug.warn(",\n");
+ debug.warn(",\n");
}
not_first = true;
padSpace(level + indent);
value.dumpIndentLevel(indent, level + indent);
}
- std.debug.warn("\n");
+ debug.warn("\n");
padSpace(level);
- std.debug.warn("]");
+ debug.warn("]");
},
Value.Object => |inner| {
var not_first = false;
- std.debug.warn("{{\n");
+ debug.warn("{{\n");
var it = inner.iterator();
while (it.next()) |entry| {
if (not_first) {
- std.debug.warn(",\n");
+ debug.warn(",\n");
}
not_first = true;
padSpace(level + indent);
- std.debug.warn("\"{}\": ", entry.key);
+ debug.warn("\"{}\": ", entry.key);
entry.value.dumpIndentLevel(indent, level + indent);
}
- std.debug.warn("\n");
+ debug.warn("\n");
padSpace(level);
- std.debug.warn("}}");
+ debug.warn("}}");
},
}
}
@@ -1006,13 +1123,13 @@ pub const Value = union(enum) {
fn padSpace(indent: usize) void {
var i: usize = 0;
while (i < indent) : (i += 1) {
- std.debug.warn(" ");
+ debug.warn(" ");
}
}
};
// A non-stream JSON parser which constructs a tree of Value's.
-pub const JsonParser = struct {
+pub const Parser = struct {
allocator: *Allocator,
state: State,
copy_strings: bool,
@@ -1026,8 +1143,8 @@ pub const JsonParser = struct {
Simple,
};
- pub fn init(allocator: *Allocator, copy_strings: bool) JsonParser {
- return JsonParser{
+ pub fn init(allocator: *Allocator, copy_strings: bool) Parser {
+ return Parser{
.allocator = allocator,
.state = State.Simple,
.copy_strings = copy_strings,
@@ -1035,52 +1152,26 @@ pub const JsonParser = struct {
};
}
- pub fn deinit(p: *JsonParser) void {
+ pub fn deinit(p: *Parser) void {
p.stack.deinit();
}
- pub fn reset(p: *JsonParser) void {
+ pub fn reset(p: *Parser) void {
p.state = State.Simple;
p.stack.shrink(0);
}
- pub fn parse(p: *JsonParser, input: []const u8) !ValueTree {
- var mp = StreamingJsonParser.init();
+ pub fn parse(p: *Parser, input: []const u8) !ValueTree {
+ var s = TokenStream.init(input);
var arena = ArenaAllocator.init(p.allocator);
errdefer arena.deinit();
- for (input) |c, i| {
- var mt1: ?Token = undefined;
- var mt2: ?Token = undefined;
-
- try mp.feed(c, &mt1, &mt2);
- if (mt1) |t1| {
- try p.transition(&arena.allocator, input, i, t1);
-
- if (mt2) |t2| {
- try p.transition(&arena.allocator, input, i, t2);
- }
- }
+ while (try s.next()) |token| {
+ try p.transition(&arena.allocator, input, s.i - 1, token);
}
- // Handle top-level lonely number values.
- {
- const i = input.len;
- var mt1: ?Token = undefined;
- var mt2: ?Token = undefined;
-
- try mp.feed(' ', &mt1, &mt2);
- if (mt1) |t1| {
- try p.transition(&arena.allocator, input, i, t1);
- }
- }
-
- if (!mp.complete) {
- return error.IncompleteJsonInput;
- }
-
- std.debug.assert(p.stack.len == 1);
+ debug.assert(p.stack.len == 1);
return ValueTree{
.arena = arena,
@@ -1090,7 +1181,7 @@ pub const JsonParser = struct {
// Even though p.allocator exists, we take an explicit allocator so that allocation state
// can be cleaned up on error correctly during a `parse` on call.
- fn transition(p: *JsonParser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
+ fn transition(p: *Parser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void {
switch (p.state) {
State.ObjectKey => switch (token.id) {
Token.Id.ObjectEnd => {
@@ -1147,7 +1238,7 @@ pub const JsonParser = struct {
_ = p.stack.pop();
p.state = State.ObjectKey;
},
- else => {
+ Token.Id.ObjectEnd, Token.Id.ArrayEnd => {
unreachable;
},
}
@@ -1187,7 +1278,7 @@ pub const JsonParser = struct {
Token.Id.Null => {
try array.append(Value.Null);
},
- else => {
+ Token.Id.ObjectEnd => {
unreachable;
},
}
@@ -1223,7 +1314,7 @@ pub const JsonParser = struct {
}
}
- fn pushToParent(p: *JsonParser, value: *const Value) !void {
+ fn pushToParent(p: *Parser, value: *const Value) !void {
switch (p.stack.at(p.stack.len - 1)) {
// Object Parent -> [ ..., object, , value ]
Value.String => |key| {
@@ -1244,14 +1335,14 @@ pub const JsonParser = struct {
}
}
- fn parseString(p: *JsonParser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
+ fn parseString(p: *Parser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value {
// TODO: We don't strictly have to copy values which do not contain any escape
// characters if flagged with the option.
const slice = token.slice(input, i);
return Value{ .String = try mem.dupe(p.allocator, u8, slice) };
}
- fn parseNumber(p: *JsonParser, token: *const Token, input: []const u8, i: usize) !Value {
+ fn parseNumber(p: *Parser, token: *const Token, input: []const u8, i: usize) !Value {
return if (token.number_is_integer)
Value{ .Integer = try std.fmt.parseInt(i64, token.slice(input, i), 10) }
else
@@ -1259,10 +1350,8 @@ pub const JsonParser = struct {
}
};
-const debug = std.debug;
-
test "json parser dynamic" {
- var p = JsonParser.init(std.debug.global_allocator, false);
+ var p = Parser.init(debug.global_allocator, false);
defer p.deinit();
const s =
@@ -1287,17 +1376,17 @@ test "json parser dynamic" {
var root = tree.root;
- var image = (??root.Object.get("Image")).value;
+ var image = root.Object.get("Image").?.value;
- const width = (??image.Object.get("Width")).value;
+ const width = image.Object.get("Width").?.value;
debug.assert(width.Integer == 800);
- const height = (??image.Object.get("Height")).value;
+ const height = image.Object.get("Height").?.value;
debug.assert(height.Integer == 600);
- const title = (??image.Object.get("Title")).value;
+ const title = image.Object.get("Title").?.value;
debug.assert(mem.eql(u8, title.String, "View from 15th Floor"));
- const animated = (??image.Object.get("Animated")).value;
+ const animated = image.Object.get("Animated").?.value;
debug.assert(animated.Bool == false);
}
diff --git a/std/json_test.zig b/std/json_test.zig
index cb054d8e4e..8c8862441a 100644
--- a/std/json_test.zig
+++ b/std/json_test.zig
@@ -17,6 +17,16 @@ fn any(comptime s: []const u8) void {
std.debug.assert(true);
}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+//
+// Additional tests not part of test JSONTestSuite.
+
+test "y_trailing_comma_after_empty" {
+ ok(
+ \\{"1":[],"2":{},"3":"4"}
+ );
+}
+
////////////////////////////////////////////////////////////////////////////////////////////////////
test "y_array_arraysWithSpaces" {
diff --git a/std/linked_list.zig b/std/linked_list.zig
index fbc0a0c42a..9e32b7d9da 100644
--- a/std/linked_list.zig
+++ b/std/linked_list.zig
@@ -169,7 +169,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the last node in the list.
pub fn pop(list: *Self) ?*Node {
- const last = list.last ?? return null;
+ const last = list.last orelse return null;
list.remove(last);
return last;
}
@@ -179,7 +179,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na
/// Returns:
/// A pointer to the first node in the list.
pub fn popFirst(list: *Self) ?*Node {
- const first = list.first ?? return null;
+ const first = list.first orelse return null;
list.remove(first);
return first;
}
@@ -270,8 +270,8 @@ test "basic linked list test" {
var last = list.pop(); // {2, 3, 4}
list.remove(three); // {2, 4}
- assert((??list.first).data == 2);
- assert((??list.last).data == 4);
+ assert(list.first.?.data == 2);
+ assert(list.last.?.data == 4);
assert(list.len == 2);
}
@@ -336,7 +336,7 @@ test "basic intrusive linked list test" {
var last = list.pop(); // {2, 3, 4}
list.remove(&three.link); // {2, 4}
- assert((??list.first).toData().value == 2);
- assert((??list.last).toData().value == 4);
+ assert(list.first.?.toData().value == 2);
+ assert(list.last.?.toData().value == 4);
assert(list.len == 2);
}
diff --git a/std/macho.zig b/std/macho.zig
index d6eef9a325..64f78ae4a3 100644
--- a/std/macho.zig
+++ b/std/macho.zig
@@ -130,7 +130,7 @@ pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable
for (syms) |sym| {
if (!isSymbol(sym)) continue;
const start = sym.n_strx;
- const end = ??mem.indexOfScalarPos(u8, strings, start, 0);
+ const end = mem.indexOfScalarPos(u8, strings, start, 0).?;
const name = strings[start..end];
const address = sym.n_value;
symbols[nsym] = Symbol{ .name = name, .address = address };
diff --git a/std/math/big/index.zig b/std/math/big/index.zig
new file mode 100644
index 0000000000..26fa538c4f
--- /dev/null
+++ b/std/math/big/index.zig
@@ -0,0 +1,5 @@
+pub use @import("int.zig");
+
+test "math.big" {
+ _ = @import("int.zig");
+}
diff --git a/std/math/big/int.zig b/std/math/big/int.zig
new file mode 100644
index 0000000000..19af10e695
--- /dev/null
+++ b/std/math/big/int.zig
@@ -0,0 +1,2023 @@
+const std = @import("../../index.zig");
+const builtin = @import("builtin");
+const debug = std.debug;
+const math = std.math;
+const mem = std.mem;
+const Allocator = mem.Allocator;
+const ArrayList = std.ArrayList;
+
+const TypeId = builtin.TypeId;
+
+pub const Limb = usize;
+pub const DoubleLimb = @IntType(false, 2 * Limb.bit_count);
+pub const Log2Limb = math.Log2Int(Limb);
+
+comptime {
+ debug.assert(math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count);
+ debug.assert(Limb.bit_count <= 64); // u128 set is unsupported
+ debug.assert(Limb.is_signed == false);
+}
+
+const wrapped_buffer_size = 512;
+
+// Converts primitive integer values onto a stack-based big integer, or passes through existing
+// Int types with no modifications. This can fail at runtime if using a very large dynamic
+// integer but it is very unlikely and is considered a user error.
+fn wrapInt(allocator: *Allocator, bn: var) *const Int {
+ const T = @typeOf(bn);
+ switch (@typeInfo(T)) {
+ TypeId.Pointer => |info| {
+ if (info.child == Int) {
+ return bn;
+ } else {
+ @compileError("cannot set Int using type " ++ @typeName(T));
+ }
+ },
+ else => {
+ var s = allocator.create(Int) catch unreachable;
+ s.* = Int{
+ .allocator = allocator,
+ .positive = false,
+ .limbs = block: {
+ var limbs = allocator.alloc(Limb, Int.default_capacity) catch unreachable;
+ limbs[0] = 0;
+ break :block limbs;
+ },
+ .len = 1,
+ };
+ s.set(bn) catch unreachable;
+ return s;
+ },
+ }
+}
+
+pub const Int = struct {
+ allocator: *Allocator,
+ positive: bool,
+ // - little-endian ordered
+ // - len >= 1 always
+ // - zero value -> len == 1 with limbs[0] == 0
+ limbs: []Limb,
+ len: usize,
+
+ const default_capacity = 4;
+
+ pub fn init(allocator: *Allocator) !Int {
+ return try Int.initCapacity(allocator, default_capacity);
+ }
+
+ pub fn initSet(allocator: *Allocator, value: var) !Int {
+ var s = try Int.init(allocator);
+ try s.set(value);
+ return s;
+ }
+
+ pub fn initCapacity(allocator: *Allocator, capacity: usize) !Int {
+ return Int{
+ .allocator = allocator,
+ .positive = true,
+ .limbs = block: {
+ var limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity));
+ limbs[0] = 0;
+ break :block limbs;
+ },
+ .len = 1,
+ };
+ }
+
+ pub fn ensureCapacity(self: *Int, capacity: usize) !void {
+ if (capacity <= self.limbs.len) {
+ return;
+ }
+
+ self.limbs = try self.allocator.realloc(Limb, self.limbs, capacity);
+ }
+
+ pub fn deinit(self: *const Int) void {
+ self.allocator.free(self.limbs);
+ }
+
+ pub fn clone(other: *const Int) !Int {
+ return Int{
+ .allocator = other.allocator,
+ .positive = other.positive,
+ .limbs = block: {
+ var limbs = try other.allocator.alloc(Limb, other.len);
+ mem.copy(Limb, limbs[0..], other.limbs[0..other.len]);
+ break :block limbs;
+ },
+ .len = other.len,
+ };
+ }
+
+ pub fn copy(self: *Int, other: *const Int) !void {
+ if (self == other) {
+ return;
+ }
+
+ self.positive = other.positive;
+ try self.ensureCapacity(other.len);
+ mem.copy(Limb, self.limbs[0..], other.limbs[0..other.len]);
+ self.len = other.len;
+ }
+
+ pub fn swap(self: *Int, other: *Int) void {
+ mem.swap(Int, self, other);
+ }
+
+ pub fn dump(self: *const Int) void {
+ for (self.limbs) |limb| {
+ debug.warn("{x} ", limb);
+ }
+ debug.warn("\n");
+ }
+
+ pub fn negate(r: *Int) void {
+ r.positive = !r.positive;
+ }
+
+ pub fn abs(r: *Int) void {
+ r.positive = true;
+ }
+
+ pub fn isOdd(r: *const Int) bool {
+ return r.limbs[0] & 1 != 0;
+ }
+
+ pub fn isEven(r: *const Int) bool {
+ return !r.isOdd();
+ }
+
+ fn bitcount(self: *const Int) usize {
+ const u_bit_count = (self.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(self.limbs[self.len - 1]));
+ return usize(!self.positive) + u_bit_count;
+ }
+
+ pub fn sizeInBase(self: *const Int, base: usize) usize {
+ return (self.bitcount() / math.log2(base)) + 1;
+ }
+
+ pub fn set(self: *Int, value: var) Allocator.Error!void {
+ const T = @typeOf(value);
+
+ switch (@typeInfo(T)) {
+ TypeId.Int => |info| {
+ const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
+
+ try self.ensureCapacity(@sizeOf(UT) / @sizeOf(Limb));
+ self.positive = value >= 0;
+ self.len = 0;
+
+ var w_value: UT = if (value < 0) UT(-value) else UT(value);
+
+ if (info.bits <= Limb.bit_count) {
+ self.limbs[0] = Limb(w_value);
+ self.len = 1;
+ } else {
+ var i: usize = 0;
+ while (w_value != 0) : (i += 1) {
+ self.limbs[i] = @truncate(Limb, w_value);
+ self.len += 1;
+
+ // TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
+ w_value >>= Limb.bit_count / 2;
+ w_value >>= Limb.bit_count / 2;
+ }
+ }
+ },
+ TypeId.ComptimeInt => {
+ comptime var w_value = if (value < 0) -value else value;
+
+ const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
+ try self.ensureCapacity(req_limbs);
+
+ self.positive = value >= 0;
+ self.len = req_limbs;
+
+ if (w_value <= @maxValue(Limb)) {
+ self.limbs[0] = w_value;
+ } else {
+ const mask = (1 << Limb.bit_count) - 1;
+
+ comptime var i = 0;
+ inline while (w_value != 0) : (i += 1) {
+ self.limbs[i] = w_value & mask;
+
+ w_value >>= Limb.bit_count / 2;
+ w_value >>= Limb.bit_count / 2;
+ }
+ }
+ },
+ else => {
+ @compileError("cannot set Int using type " ++ @typeName(T));
+ },
+ }
+ }
+
+ pub const ConvertError = error{
+ NegativeIntoUnsigned,
+ TargetTooSmall,
+ };
+
+ pub fn to(self: *const Int, comptime T: type) ConvertError!T {
+ switch (@typeId(T)) {
+ TypeId.Int => {
+ const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
+
+ if (self.bitcount() > 8 * @sizeOf(UT)) {
+ return error.TargetTooSmall;
+ }
+
+ var r: UT = 0;
+
+ if (@sizeOf(UT) <= @sizeOf(Limb)) {
+ r = UT(self.limbs[0]);
+ } else {
+ for (self.limbs[0..self.len]) |_, ri| {
+ const limb = self.limbs[self.len - ri - 1];
+ r <<= Limb.bit_count;
+ r |= limb;
+ }
+ }
+
+ if (!T.is_signed) {
+ return if (self.positive) r else error.NegativeIntoUnsigned;
+ } else {
+ return if (self.positive) T(r) else -T(r);
+ }
+ },
+ else => {
+ @compileError("cannot convert Int to type " ++ @typeName(T));
+ },
+ }
+ }
+
+ fn charToDigit(ch: u8, base: u8) !u8 {
+ const d = switch (ch) {
+ '0'...'9' => ch - '0',
+ 'a'...'f' => (ch - 'a') + 0xa,
+ else => return error.InvalidCharForDigit,
+ };
+
+ return if (d < base) d else return error.DigitTooLargeForBase;
+ }
+
+ fn digitToChar(d: u8, base: u8) !u8 {
+ if (d >= base) {
+ return error.DigitTooLargeForBase;
+ }
+
+ return switch (d) {
+ 0...9 => '0' + d,
+ 0xa...0xf => ('a' - 0xa) + d,
+ else => unreachable,
+ };
+ }
+
+ pub fn setString(self: *Int, base: u8, value: []const u8) !void {
+ if (base < 2 or base > 16) {
+ return error.InvalidBase;
+ }
+
+ var i: usize = 0;
+ var positive = true;
+ if (value.len > 0 and value[0] == '-') {
+ positive = false;
+ i += 1;
+ }
+
+ try self.set(0);
+ for (value[i..]) |ch| {
+ const d = try charToDigit(ch, base);
+ try self.mul(self, base);
+ try self.add(self, d);
+ }
+ self.positive = positive;
+ }
+
+ pub fn toString(self: *const Int, allocator: *Allocator, base: u8) ![]const u8 {
+ if (base < 2 or base > 16) {
+ return error.InvalidBase;
+ }
+
+ var digits = ArrayList(u8).init(allocator);
+ try digits.ensureCapacity(self.sizeInBase(base) + 1);
+ defer digits.deinit();
+
+ if (self.eqZero()) {
+ try digits.append('0');
+ return digits.toOwnedSlice();
+ }
+
+ // Power of two: can do a single pass and use masks to extract digits.
+ if (base & (base - 1) == 0) {
+ const base_shift = math.log2_int(Limb, base);
+
+ for (self.limbs[0..self.len]) |limb| {
+ var shift: usize = 0;
+ while (shift < Limb.bit_count) : (shift += base_shift) {
+ const r = u8((limb >> Log2Limb(shift)) & Limb(base - 1));
+ const ch = try digitToChar(r, base);
+ try digits.append(ch);
+ }
+ }
+
+ while (true) {
+ // always will have a non-zero digit somewhere
+ const c = digits.pop();
+ if (c != '0') {
+ digits.append(c) catch unreachable;
+ break;
+ }
+ }
+ } // Non power-of-two: batch divisions per word size.
+ else {
+ const digits_per_limb = math.log(Limb, base, @maxValue(Limb));
+ var limb_base: Limb = 1;
+ var j: usize = 0;
+ while (j < digits_per_limb) : (j += 1) {
+ limb_base *= base;
+ }
+
+ var q = try self.clone();
+ q.positive = true;
+ var r = try Int.init(allocator);
+ var b = try Int.initSet(allocator, limb_base);
+
+ while (q.len >= 2) {
+ try Int.divTrunc(&q, &r, &q, &b);
+
+ var r_word = r.limbs[0];
+ var i: usize = 0;
+ while (i < digits_per_limb) : (i += 1) {
+ const ch = try digitToChar(u8(r_word % base), base);
+ r_word /= base;
+ try digits.append(ch);
+ }
+ }
+
+ {
+ debug.assert(q.len == 1);
+
+ var r_word = q.limbs[0];
+ while (r_word != 0) {
+ const ch = try digitToChar(u8(r_word % base), base);
+ r_word /= base;
+ try digits.append(ch);
+ }
+ }
+ }
+
+ if (!self.positive) {
+ try digits.append('-');
+ }
+
+ var s = digits.toOwnedSlice();
+ mem.reverse(u8, s);
+ return s;
+ }
+
+ // returns -1, 0, 1 if |a| < |b|, |a| == |b| or |a| > |b| respectively.
+ pub fn cmpAbs(a: *const Int, bv: var) i8 {
+ // TODO: Thread-local buffer.
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len < b.len) {
+ return -1;
+ }
+ if (a.len > b.len) {
+ return 1;
+ }
+
+ var i: usize = a.len - 1;
+ while (i != 0) : (i -= 1) {
+ if (a.limbs[i] != b.limbs[i]) {
+ break;
+ }
+ }
+
+ if (a.limbs[i] < b.limbs[i]) {
+ return -1;
+ } else if (a.limbs[i] > b.limbs[i]) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+
+ // returns -1, 0, 1 if a < b, a == b or a > b respectively.
+ pub fn cmp(a: *const Int, bv: var) i8 {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.positive != b.positive) {
+ return if (a.positive) i8(1) else -1;
+ } else {
+ const r = cmpAbs(a, b);
+ return if (a.positive) r else -r;
+ }
+ }
+
+ // if a == 0
+ pub fn eqZero(a: *const Int) bool {
+ return a.len == 1 and a.limbs[0] == 0;
+ }
+
+ // if |a| == |b|
+ pub fn eqAbs(a: *const Int, b: var) bool {
+ return cmpAbs(a, b) == 0;
+ }
+
+ // if a == b
+ pub fn eq(a: *const Int, b: var) bool {
+ return cmp(a, b) == 0;
+ }
+
+ // Normalize for a possible single carry digit.
+ //
+ // [1, 2, 3, 4, 0] -> [1, 2, 3, 4]
+ // [1, 2, 3, 4, 5] -> [1, 2, 3, 4, 5]
+ // [0] -> [0]
+ fn norm1(r: *Int, length: usize) void {
+ debug.assert(length > 0);
+ debug.assert(length <= r.limbs.len);
+
+ if (r.limbs[length - 1] == 0) {
+ r.len = if (length > 1) length - 1 else 1;
+ } else {
+ r.len = length;
+ }
+ }
+
+ // Normalize a possible sequence of leading zeros.
+ //
+ // [1, 2, 3, 4, 0] -> [1, 2, 3, 4]
+ // [1, 2, 0, 0, 0] -> [1, 2]
+ // [0, 0, 0, 0, 0] -> [0]
+ fn normN(r: *Int, length: usize) void {
+ debug.assert(length > 0);
+ debug.assert(length <= r.limbs.len);
+
+ var j = length;
+ while (j > 0) : (j -= 1) {
+ if (r.limbs[j - 1] != 0) {
+ break;
+ }
+ }
+
+ // Handle zero
+ r.len = if (j != 0) j else 1;
+ }
+
+ // r = a + b
+ pub fn add(r: *Int, av: var, bv: var) Allocator.Error!void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.eqZero()) {
+ try r.copy(b);
+ return;
+ } else if (b.eqZero()) {
+ try r.copy(a);
+ return;
+ }
+
+ if (a.positive != b.positive) {
+ if (a.positive) {
+ // (a) + (-b) => a - b
+ const bp = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = b.limbs,
+ .len = b.len,
+ };
+ try r.sub(a, bp);
+ } else {
+ // (-a) + (b) => b - a
+ const ap = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = a.limbs,
+ .len = a.len,
+ };
+ try r.sub(b, ap);
+ }
+ } else {
+ if (a.len >= b.len) {
+ try r.ensureCapacity(a.len + 1);
+ lladd(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.norm1(a.len + 1);
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ lladd(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.norm1(b.len + 1);
+ }
+
+ r.positive = a.positive;
+ }
+ }
+
+ // Knuth 4.3.1, Algorithm A.
+ fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len != 0 and b.len != 0);
+ debug.assert(a.len >= b.len);
+ debug.assert(r.len >= a.len + 1);
+
+ var i: usize = 0;
+ var carry: Limb = 0;
+
+ while (i < b.len) : (i += 1) {
+ var c: Limb = 0;
+ c += Limb(@addWithOverflow(Limb, a[i], b[i], &r[i]));
+ c += Limb(@addWithOverflow(Limb, r[i], carry, &r[i]));
+ carry = c;
+ }
+
+ while (i < a.len) : (i += 1) {
+ carry = Limb(@addWithOverflow(Limb, a[i], carry, &r[i]));
+ }
+
+ r[i] = carry;
+ }
+
+ // r = a - b
+ pub fn sub(r: *Int, av: var, bv: var) !void {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.positive != b.positive) {
+ if (a.positive) {
+ // (a) - (-b) => a + b
+ const bp = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = b.limbs,
+ .len = b.len,
+ };
+ try r.add(a, bp);
+ } else {
+ // (-a) - (b) => -(a + b)
+ const ap = Int{
+ .allocator = undefined,
+ .positive = true,
+ .limbs = a.limbs,
+ .len = a.len,
+ };
+ try r.add(ap, b);
+ r.positive = false;
+ }
+ } else {
+ if (a.positive) {
+ // (a) - (b) => a - b
+ if (a.cmp(b) >= 0) {
+ try r.ensureCapacity(a.len + 1);
+ llsub(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ r.positive = true;
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ llsub(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ r.positive = false;
+ }
+ } else {
+ // (-a) - (-b) => -(a - b)
+ if (a.cmp(b) < 0) {
+ try r.ensureCapacity(a.len + 1);
+ llsub(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ r.positive = false;
+ } else {
+ try r.ensureCapacity(b.len + 1);
+ llsub(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ r.positive = true;
+ }
+ }
+ }
+ }
+
+ // Knuth 4.3.1, Algorithm S.
+ fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len != 0 and b.len != 0);
+ debug.assert(a.len > b.len or (a.len == b.len and a[a.len - 1] >= b[b.len - 1]));
+ debug.assert(r.len >= a.len);
+
+ var i: usize = 0;
+ var borrow: Limb = 0;
+
+ while (i < b.len) : (i += 1) {
+ var c: Limb = 0;
+ c += Limb(@subWithOverflow(Limb, a[i], b[i], &r[i]));
+ c += Limb(@subWithOverflow(Limb, r[i], borrow, &r[i]));
+ borrow = c;
+ }
+
+ while (i < a.len) : (i += 1) {
+ borrow = Limb(@subWithOverflow(Limb, a[i], borrow, &r[i]));
+ }
+
+ debug.assert(borrow == 0);
+ }
+
+ // rma = a * b
+ //
+ // For greatest efficiency, ensure rma does not alias a or b.
+ pub fn mul(rma: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ var r = rma;
+ var aliased = rma == a or rma == b;
+
+ var sr: Int = undefined;
+ if (aliased) {
+ sr = try Int.initCapacity(rma.allocator, a.len + b.len);
+ r = &sr;
+ aliased = true;
+ }
+ defer if (aliased) {
+ rma.swap(r);
+ r.deinit();
+ };
+
+ try r.ensureCapacity(a.len + b.len);
+
+ if (a.len >= b.len) {
+ llmul(r.limbs, a.limbs[0..a.len], b.limbs[0..b.len]);
+ } else {
+ llmul(r.limbs, b.limbs[0..b.len], a.limbs[0..a.len]);
+ }
+
+ r.positive = a.positive == b.positive;
+ r.normN(a.len + b.len);
+ }
+
+ // a + b * c + *carry, sets carry to the overflow bits
+ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
+ var r1: Limb = undefined;
+
+ // r1 = a + *carry
+ const c1 = Limb(@addWithOverflow(Limb, a, carry.*, &r1));
+
+ // r2 = b * c
+ //
+ // We still use a DoubleLimb here since the @mulWithOverflow builtin does not
+ // return the carry and lower bits separately so we would need to perform this
+ // anyway to get the carry bits. The branch on the overflow case costs more than
+ // just computing them unconditionally and splitting.
+ //
+ // This could be a single x86 mul instruction, which stores the carry/lower in rdx:rax.
+ const bc = DoubleLimb(b) * DoubleLimb(c);
+ const r2 = @truncate(Limb, bc);
+ const c2 = @truncate(Limb, bc >> Limb.bit_count);
+
+ // r1 = r1 + r2
+ const c3 = Limb(@addWithOverflow(Limb, r1, r2, &r1));
+
+ // This never overflows, c1, c3 are either 0 or 1 and if both are 1 then
+ // c2 is at least <= @maxValue(Limb) - 2.
+ carry.* = c1 + c2 + c3;
+
+ return r1;
+ }
+
+ // Knuth 4.3.1, Algorithm M.
+ //
+ // r MUST NOT alias any of a or b.
+ fn llmul(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= b.len);
+ debug.assert(r.len >= a.len + b.len);
+
+ mem.set(Limb, r[0 .. a.len + b.len], 0);
+
+ var i: usize = 0;
+ while (i < a.len) : (i += 1) {
+ var carry: Limb = 0;
+ var j: usize = 0;
+ while (j < b.len) : (j += 1) {
+ r[i + j] = @inlineCall(addMulLimbWithCarry, r[i + j], a[i], b[j], &carry);
+ }
+ r[i + j] = carry;
+ }
+ }
+
+ pub fn divFloor(q: *Int, r: *Int, a: var, b: var) !void {
+ try div(q, r, a, b);
+
+ // Trunc -> Floor.
+ if (!q.positive) {
+ try q.sub(q, 1);
+ try r.add(q, 1);
+ }
+ r.positive = b.positive;
+ }
+
+ pub fn divTrunc(q: *Int, r: *Int, a: var, b: var) !void {
+ try div(q, r, a, b);
+ r.positive = a.positive;
+ }
+
+ // Truncates by default.
+ fn div(quo: *Int, rem: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (b.eqZero()) {
+ @panic("division by zero");
+ }
+ if (quo == rem) {
+ @panic("quo and rem cannot be same variable");
+ }
+
+ if (a.cmpAbs(b) < 0) {
+ // quo may alias a so handle rem first
+ try rem.copy(a);
+ rem.positive = a.positive == b.positive;
+
+ quo.positive = true;
+ quo.len = 1;
+ quo.limbs[0] = 0;
+ return;
+ }
+
+ if (b.len == 1) {
+ try quo.ensureCapacity(a.len);
+
+ lldiv1(quo.limbs[0..], &rem.limbs[0], a.limbs[0..a.len], b.limbs[0]);
+ quo.norm1(a.len);
+ quo.positive = a.positive == b.positive;
+
+ rem.len = 1;
+ rem.positive = true;
+ } else {
+ // x and y are modified during division
+ var x = try a.clone();
+ defer x.deinit();
+
+ var y = try b.clone();
+ defer y.deinit();
+
+ // x may grow one limb during normalization
+ try quo.ensureCapacity(a.len + y.len);
+ try divN(quo.allocator, quo, rem, &x, &y);
+
+ quo.positive = a.positive == b.positive;
+ }
+ }
+
+ // Knuth 4.3.1, Exercise 16.
+ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len > 1 or a[0] >= b);
+ debug.assert(quo.len >= a.len);
+
+ rem.* = 0;
+ for (a) |_, ri| {
+ const i = a.len - ri - 1;
+ const pdiv = ((DoubleLimb(rem.*) << Limb.bit_count) | a[i]);
+
+ if (pdiv == 0) {
+ quo[i] = 0;
+ rem.* = 0;
+ } else if (pdiv < b) {
+ quo[i] = 0;
+ rem.* = @truncate(Limb, pdiv);
+ } else if (pdiv == b) {
+ quo[i] = 1;
+ rem.* = 0;
+ } else {
+ quo[i] = @truncate(Limb, @divTrunc(pdiv, b));
+ rem.* = @truncate(Limb, pdiv - (quo[i] *% b));
+ }
+ }
+ }
+
+ // Handbook of Applied Cryptography, 14.20
+ //
+ // x = qy + r where 0 <= r < y
+ fn divN(allocator: *Allocator, q: *Int, r: *Int, x: *Int, y: *Int) !void {
+ debug.assert(y.len >= 2);
+ debug.assert(x.len >= y.len);
+ debug.assert(q.limbs.len >= x.len + y.len - 1);
+ debug.assert(default_capacity >= 3); // see 3.2
+
+ var tmp = try Int.init(allocator);
+ defer tmp.deinit();
+
+ // Normalize so the leading bit of y's top limb is set (i.e. y.limbs[y.len - 1] >= 2^(Limb.bit_count - 1))
+ const norm_shift = @clz(y.limbs[y.len - 1]);
+ try x.shiftLeft(x, norm_shift);
+ try y.shiftLeft(y, norm_shift);
+
+ const n = x.len - 1;
+ const t = y.len - 1;
+
+ // 1.
+ q.len = n - t + 1;
+ mem.set(Limb, q.limbs[0..q.len], 0);
+
+ // 2.
+ try tmp.shiftLeft(y, Limb.bit_count * (n - t));
+ while (x.cmp(&tmp) >= 0) {
+ q.limbs[n - t] += 1;
+ try x.sub(x, tmp);
+ }
+
+ // 3.
+ var i = n;
+ while (i > t) : (i -= 1) {
+ // 3.1
+ if (x.limbs[i] == y.limbs[t]) {
+ q.limbs[i - t - 1] = @maxValue(Limb);
+ } else {
+ const num = (DoubleLimb(x.limbs[i]) << Limb.bit_count) | DoubleLimb(x.limbs[i - 1]);
+ const z = Limb(num / DoubleLimb(y.limbs[t]));
+ q.limbs[i - t - 1] = if (z > @maxValue(Limb)) @maxValue(Limb) else Limb(z);
+ }
+
+ // 3.2
+ tmp.limbs[0] = if (i >= 2) x.limbs[i - 2] else 0;
+ tmp.limbs[1] = if (i >= 1) x.limbs[i - 1] else 0;
+ tmp.limbs[2] = x.limbs[i];
+ tmp.normN(3);
+
+ while (true) {
+ // 2x1 limb multiplication unrolled against single-limb q[i-t-1]
+ var carry: Limb = 0;
+ r.limbs[0] = addMulLimbWithCarry(0, if (t >= 1) y.limbs[t - 1] else 0, q.limbs[i - t - 1], &carry);
+ r.limbs[1] = addMulLimbWithCarry(0, y.limbs[t], q.limbs[i - t - 1], &carry);
+ r.limbs[2] = carry;
+ r.normN(3);
+
+ if (r.cmpAbs(&tmp) <= 0) {
+ break;
+ }
+
+ q.limbs[i - t - 1] -= 1;
+ }
+
+ // 3.3
+ try tmp.set(q.limbs[i - t - 1]);
+ try tmp.mul(&tmp, y);
+ try tmp.shiftLeft(&tmp, Limb.bit_count * (i - t - 1));
+ try x.sub(x, &tmp);
+
+ if (!x.positive) {
+ try tmp.shiftLeft(y, Limb.bit_count * (i - t - 1));
+ try x.add(x, &tmp);
+ q.limbs[i - t - 1] -= 1;
+ }
+ }
+
+ // Denormalize
+ q.normN(q.len);
+
+ try r.shiftRight(x, norm_shift);
+ r.normN(r.len);
+ }
+
+ // r = a << shift, in other words, r = a * 2^shift
+ pub fn shiftLeft(r: *Int, av: var, shift: usize) !void {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+
+ try r.ensureCapacity(a.len + (shift / Limb.bit_count) + 1);
+ llshl(r.limbs[0..], a.limbs[0..a.len], shift);
+ r.norm1(a.len + (shift / Limb.bit_count) + 1);
+ r.positive = a.positive;
+ }
+
+ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= 1);
+ debug.assert(r.len >= a.len + (shift / Limb.bit_count) + 1);
+
+ const limb_shift = shift / Limb.bit_count + 1;
+ const interior_limb_shift = Log2Limb(shift % Limb.bit_count);
+
+ var carry: Limb = 0;
+ var i: usize = 0;
+ while (i < a.len) : (i += 1) {
+ const src_i = a.len - i - 1;
+ const dst_i = src_i + limb_shift;
+
+ const src_digit = a[src_i];
+ r[dst_i] = carry | @inlineCall(math.shr, Limb, src_digit, Limb.bit_count - Limb(interior_limb_shift));
+ carry = (src_digit << interior_limb_shift);
+ }
+
+ r[limb_shift - 1] = carry;
+ mem.set(Limb, r[0 .. limb_shift - 1], 0);
+ }
+
+ // r = a >> shift
+ pub fn shiftRight(r: *Int, av: var, shift: usize) !void {
+ var buffer: [wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+
+ if (a.len <= shift / Limb.bit_count) {
+ r.len = 1;
+ r.limbs[0] = 0;
+ r.positive = true;
+ return;
+ }
+
+ try r.ensureCapacity(a.len - (shift / Limb.bit_count));
+ const r_len = llshr(r.limbs[0..], a.limbs[0..a.len], shift);
+ r.len = a.len - (shift / Limb.bit_count);
+ r.positive = a.positive;
+ }
+
+ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
+ @setRuntimeSafety(false);
+ debug.assert(a.len >= 1);
+ debug.assert(r.len >= a.len - (shift / Limb.bit_count));
+
+ const limb_shift = shift / Limb.bit_count;
+ const interior_limb_shift = Log2Limb(shift % Limb.bit_count);
+
+ var carry: Limb = 0;
+ var i: usize = 0;
+ while (i < a.len - limb_shift) : (i += 1) {
+ const src_i = a.len - i - 1;
+ const dst_i = src_i - limb_shift;
+
+ const src_digit = a[src_i];
+ r[dst_i] = carry | (src_digit >> interior_limb_shift);
+ carry = @inlineCall(math.shl, Limb, src_digit, Limb.bit_count - Limb(interior_limb_shift));
+ }
+ }
+
+ // r = a | b
+ pub fn bitOr(r: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len > b.len) {
+ try r.ensureCapacity(a.len);
+ llor(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.len = a.len;
+ } else {
+ try r.ensureCapacity(b.len);
+ llor(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.len = b.len;
+ }
+ }
+
+ fn llor(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= a.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] | b[i];
+ }
+ while (i < a.len) : (i += 1) {
+ r[i] = a[i];
+ }
+ }
+
+ // r = a & b
+ pub fn bitAnd(r: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len > b.len) {
+ try r.ensureCapacity(b.len);
+ lland(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(b.len);
+ } else {
+ try r.ensureCapacity(a.len);
+ lland(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(a.len);
+ }
+ }
+
+ fn lland(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= b.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] & b[i];
+ }
+ }
+
+ // r = a ^ b
+ pub fn bitXor(r: *Int, av: var, bv: var) !void {
+ var buffer: [2 * wrapped_buffer_size]u8 = undefined;
+ var stack = std.heap.FixedBufferAllocator.init(buffer[0..]);
+ var a = wrapInt(&stack.allocator, av);
+ var b = wrapInt(&stack.allocator, bv);
+
+ if (a.len > b.len) {
+ try r.ensureCapacity(a.len);
+ llxor(r.limbs[0..], a.limbs[0..a.len], b.limbs[0..b.len]);
+ r.normN(a.len);
+ } else {
+ try r.ensureCapacity(b.len);
+ llxor(r.limbs[0..], b.limbs[0..b.len], a.limbs[0..a.len]);
+ r.normN(b.len);
+ }
+ }
+
+ fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void {
+ @setRuntimeSafety(false);
+ debug.assert(r.len >= a.len);
+ debug.assert(a.len >= b.len);
+
+ var i: usize = 0;
+ while (i < b.len) : (i += 1) {
+ r[i] = a[i] ^ b[i];
+ }
+ while (i < a.len) : (i += 1) {
+ r[i] = a[i];
+ }
+ }
+};
+
+// NOTE: All the following tests assume the max machine-word will be 64-bit.
+//
+// They will still run on larger word sizes and should pass, but the multi-limb code-paths
+// may be untested in some cases.
+
+const u256 = @IntType(false, 256);
+var al = debug.global_allocator;
+
+test "big.int comptime_int set" {
+ comptime var s = 0xefffffff00000001eeeeeeefaaaaaaab;
+ var a = try Int.initSet(al, s);
+
+ const s_limb_count = 128 / Limb.bit_count;
+
+ comptime var i: usize = 0;
+ inline while (i < s_limb_count) : (i += 1) {
+ const result = Limb(s & @maxValue(Limb));
+ s >>= Limb.bit_count / 2;
+ s >>= Limb.bit_count / 2;
+ debug.assert(a.limbs[i] == result);
+ }
+}
+
+test "big.int comptime_int set negative" {
+ var a = try Int.initSet(al, -10);
+
+ debug.assert(a.limbs[0] == 10);
+ debug.assert(a.positive == false);
+}
+
+test "big.int int set unaligned small" {
+ var a = try Int.initSet(al, u7(45));
+
+ debug.assert(a.limbs[0] == 45);
+ debug.assert(a.positive == true);
+}
+
+test "big.int comptime_int to" {
+ const a = try Int.initSet(al, 0xefffffff00000001eeeeeeefaaaaaaab);
+
+ debug.assert((try a.to(u128)) == 0xefffffff00000001eeeeeeefaaaaaaab);
+}
+
+test "big.int sub-limb to" {
+ const a = try Int.initSet(al, 10);
+
+ debug.assert((try a.to(u8)) == 10);
+}
+
+test "big.int to target too small error" {
+ const a = try Int.initSet(al, 0xffffffff);
+
+ if (a.to(u8)) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == error.TargetTooSmall);
+ }
+}
+
+test "big.int norm1" {
+ var a = try Int.init(al);
+ try a.ensureCapacity(8);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.limbs[3] = 0;
+ a.norm1(4);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.norm1(3);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 0;
+ a.limbs[1] = 0;
+ a.norm1(2);
+ debug.assert(a.len == 1);
+
+ a.limbs[0] = 0;
+ a.norm1(1);
+ debug.assert(a.len == 1);
+}
+
+test "big.int normN" {
+ var a = try Int.init(al);
+ try a.ensureCapacity(8);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 0;
+ a.limbs[3] = 0;
+ a.normN(4);
+ debug.assert(a.len == 2);
+
+ a.limbs[0] = 1;
+ a.limbs[1] = 2;
+ a.limbs[2] = 3;
+ a.normN(3);
+ debug.assert(a.len == 3);
+
+ a.limbs[0] = 0;
+ a.limbs[1] = 0;
+ a.limbs[2] = 0;
+ a.limbs[3] = 0;
+ a.normN(4);
+ debug.assert(a.len == 1);
+
+ a.limbs[0] = 0;
+ a.normN(1);
+ debug.assert(a.len == 1);
+}
+
+test "big.int parity" {
+ var a = try Int.init(al);
+ try a.set(0);
+ debug.assert(a.isEven());
+ debug.assert(!a.isOdd());
+
+ try a.set(7);
+ debug.assert(!a.isEven());
+ debug.assert(a.isOdd());
+}
+
+test "big.int bitcount + sizeInBase" {
+ var a = try Int.init(al);
+
+ try a.set(0b100);
+ debug.assert(a.bitcount() == 3);
+ debug.assert(a.sizeInBase(2) >= 3);
+ debug.assert(a.sizeInBase(10) >= 1);
+
+ try a.set(0xffffffff);
+ debug.assert(a.bitcount() == 32);
+ debug.assert(a.sizeInBase(2) >= 32);
+ debug.assert(a.sizeInBase(10) >= 10);
+
+ try a.shiftLeft(&a, 5000);
+ debug.assert(a.bitcount() == 5032);
+ debug.assert(a.sizeInBase(2) >= 5032);
+ a.positive = false;
+
+ debug.assert(a.bitcount() == 5033);
+ debug.assert(a.sizeInBase(2) >= 5033);
+}
+
+test "big.int string set" {
+ var a = try Int.init(al);
+ try a.setString(10, "120317241209124781241290847124");
+
+ debug.assert((try a.to(u128)) == 120317241209124781241290847124);
+}
+
+test "big.int string negative" {
+ var a = try Int.init(al);
+ try a.setString(10, "-1023");
+ debug.assert((try a.to(i32)) == -1023);
+}
+
+test "big.int string set bad char error" {
+ var a = try Int.init(al);
+ a.setString(10, "x") catch |err| debug.assert(err == error.InvalidCharForDigit);
+}
+
+test "big.int string set bad base error" {
+ var a = try Int.init(al);
+ a.setString(45, "10") catch |err| debug.assert(err == error.InvalidBase);
+}
+
+test "big.int string to" {
+ const a = try Int.initSet(al, 120317241209124781241290847124);
+
+ const as = try a.toString(al, 10);
+ const es = "120317241209124781241290847124";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int string to base base error" {
+ const a = try Int.initSet(al, 0xffffffff);
+
+ if (a.toString(al, 45)) |_| {
+ unreachable;
+ } else |err| {
+ debug.assert(err == error.InvalidBase);
+ }
+}
+
+test "big.int string to base 2" {
+ const a = try Int.initSet(al, -0b1011);
+
+ const as = try a.toString(al, 2);
+ const es = "-1011";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int string to base 16" {
+ const a = try Int.initSet(al, 0xefffffff00000001eeeeeeefaaaaaaab);
+
+ const as = try a.toString(al, 16);
+ const es = "efffffff00000001eeeeeeefaaaaaaab";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int neg string to" {
+ const a = try Int.initSet(al, -123907434);
+
+ const as = try a.toString(al, 10);
+ const es = "-123907434";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int zero string to" {
+ const a = try Int.initSet(al, 0);
+
+ const as = try a.toString(al, 10);
+ const es = "0";
+
+ debug.assert(mem.eql(u8, as, es));
+}
+
+test "big.int clone" {
+ var a = try Int.initSet(al, 1234);
+ const b = try a.clone();
+
+ debug.assert((try a.to(u32)) == 1234);
+ debug.assert((try b.to(u32)) == 1234);
+
+ try a.set(77);
+ debug.assert((try a.to(u32)) == 77);
+ debug.assert((try b.to(u32)) == 1234);
+}
+
+test "big.int swap" {
+ var a = try Int.initSet(al, 1234);
+ var b = try Int.initSet(al, 5678);
+
+ debug.assert((try a.to(u32)) == 1234);
+ debug.assert((try b.to(u32)) == 5678);
+
+ a.swap(&b);
+
+ debug.assert((try a.to(u32)) == 5678);
+ debug.assert((try b.to(u32)) == 1234);
+}
+
+test "big.int to negative" {
+ var a = try Int.initSet(al, -10);
+
+ debug.assert((try a.to(i32)) == -10);
+}
+
+test "big.int compare" {
+ var a = try Int.initSet(al, -11);
+ var b = try Int.initSet(al, 10);
+
+ debug.assert(a.cmpAbs(&b) == 1);
+ debug.assert(a.cmp(&b) == -1);
+}
+
+test "big.int compare similar" {
+ var a = try Int.initSet(al, 0xffffffffeeeeeeeeffffffffeeeeeeee);
+ var b = try Int.initSet(al, 0xffffffffeeeeeeeeffffffffeeeeeeef);
+
+ debug.assert(a.cmpAbs(&b) == -1);
+ debug.assert(b.cmpAbs(&a) == 1);
+}
+
+test "big.int compare different limb size" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ debug.assert(a.cmpAbs(&b) == 1);
+ debug.assert(b.cmpAbs(&a) == -1);
+}
+
+test "big.int compare multi-limb" {
+ var a = try Int.initSet(al, -0x7777777799999999ffffeeeeffffeeeeffffeeeef);
+ var b = try Int.initSet(al, 0x7777777799999999ffffeeeeffffeeeeffffeeeee);
+
+ debug.assert(a.cmpAbs(&b) == 1);
+ debug.assert(a.cmp(&b) == -1);
+}
+
+test "big.int equality" {
+ var a = try Int.initSet(al, 0xffffffff1);
+ var b = try Int.initSet(al, -0xffffffff1);
+
+ debug.assert(a.eqAbs(&b));
+ debug.assert(!a.eq(&b));
+}
+
+test "big.int abs" {
+ var a = try Int.initSet(al, -5);
+
+ a.abs();
+ debug.assert((try a.to(u32)) == 5);
+
+ a.abs();
+ debug.assert((try a.to(u32)) == 5);
+}
+
+test "big.int negate" {
+ var a = try Int.initSet(al, 5);
+
+ a.negate();
+ debug.assert((try a.to(i32)) == -5);
+
+ a.negate();
+ debug.assert((try a.to(i32)) == 5);
+}
+
+test "big.int add single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.add(&a, &b);
+
+ debug.assert((try c.to(u32)) == 55);
+}
+
+test "big.int add multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ var c = try Int.init(al);
+
+ try c.add(&a, &b);
+ debug.assert((try c.to(DoubleLimb)) == @maxValue(Limb) + 2);
+
+ try c.add(&b, &a);
+ debug.assert((try c.to(DoubleLimb)) == @maxValue(Limb) + 2);
+}
+
+test "big.int add multi-multi" {
+ const op1 = 0xefefefef7f7f7f7f;
+ const op2 = 0xfefefefe9f9f9f9f;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.add(&a, &b);
+
+ debug.assert((try c.to(u128)) == op1 + op2);
+}
+
+test "big.int add zero-zero" {
+ var a = try Int.initSet(al, 0);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.add(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int add alias multi-limb nonzero-zero" {
+ const op1 = 0xffffffff777777771;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, 0);
+
+ try a.add(&a, &b);
+
+ debug.assert((try a.to(u128)) == op1);
+}
+
+test "big.int add sign" {
+ var a = try Int.init(al);
+
+ try a.add(1, 2);
+ debug.assert((try a.to(i32)) == 3);
+
+ try a.add(-1, 2);
+ debug.assert((try a.to(i32)) == 1);
+
+ try a.add(1, -2);
+ debug.assert((try a.to(i32)) == -1);
+
+ try a.add(-1, -2);
+ debug.assert((try a.to(i32)) == -3);
+}
+
+test "big.int sub single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(u32)) == 45);
+}
+
+test "big.int sub multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, 1);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(Limb)) == @maxValue(Limb));
+}
+
+test "big.int sub multi-multi" {
+ const op1 = 0xefefefefefefefefefefefef;
+ const op2 = 0xabababababababababababab;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(u128)) == op1 - op2);
+}
+
+test "big.int sub equal" {
+ var a = try Int.initSet(al, 0x11efefefefefefefefefefefef);
+ var b = try Int.initSet(al, 0x11efefefefefefefefefefefef);
+
+ var c = try Int.init(al);
+ try c.sub(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int sub sign" {
+ var a = try Int.init(al);
+
+ try a.sub(1, 2);
+ debug.assert((try a.to(i32)) == -1);
+
+ try a.sub(-1, 2);
+ debug.assert((try a.to(i32)) == -3);
+
+ try a.sub(1, -2);
+ debug.assert((try a.to(i32)) == 3);
+
+ try a.sub(-1, -2);
+ debug.assert((try a.to(i32)) == 1);
+
+ try a.sub(-2, -1);
+ debug.assert((try a.to(i32)) == -1);
+}
+
+test "big.int mul single-single" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u64)) == 250);
+}
+
+test "big.int mul multi-single" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul multi-multi" {
+ const op1 = 0x998888efefefefefefefef;
+ const op2 = 0x333000abababababababab;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u256)) == op1 * op2);
+}
+
+test "big.int mul alias r with a" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ try a.mul(&a, &b);
+
+ debug.assert((try a.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul alias r with b" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+ var b = try Int.initSet(al, 2);
+
+ try a.mul(&b, &a);
+
+ debug.assert((try a.to(DoubleLimb)) == 2 * @maxValue(Limb));
+}
+
+test "big.int mul alias r with a and b" {
+ var a = try Int.initSet(al, @maxValue(Limb));
+
+ try a.mul(&a, &a);
+
+ debug.assert((try a.to(DoubleLimb)) == @maxValue(Limb) * @maxValue(Limb));
+}
+
+test "big.int mul a*0" {
+ var a = try Int.initSet(al, 0xefefefefefefefef);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int mul 0*0" {
+ var a = try Int.initSet(al, 0);
+ var b = try Int.initSet(al, 0);
+
+ var c = try Int.init(al);
+ try c.mul(&a, &b);
+
+ debug.assert((try c.to(u32)) == 0);
+}
+
+test "big.int div single-single no rem" {
+ var a = try Int.initSet(al, 50);
+ var b = try Int.initSet(al, 5);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u32)) == 10);
+ debug.assert((try r.to(u32)) == 0);
+}
+
+test "big.int div single-single with rem" {
+ var a = try Int.initSet(al, 49);
+ var b = try Int.initSet(al, 5);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u32)) == 9);
+ debug.assert((try r.to(u32)) == 4);
+}
+
+test "big.int div multi-single no rem" {
+ const op1 = 0xffffeeeeddddcccc;
+ const op2 = 34;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == op1 / op2);
+ debug.assert((try r.to(u64)) == 0);
+}
+
+test "big.int div multi-single with rem" {
+ const op1 = 0xffffeeeeddddcccf;
+ const op2 = 34;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == op1 / op2);
+ debug.assert((try r.to(u64)) == 3);
+}
+
+test "big.int div multi>2-single" {
+ const op1 = 0xfefefefefefefefefefefefefefefefe;
+ const op2 = 0xefab8;
+
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == op1 / op2);
+ debug.assert((try r.to(u32)) == 0x3e4e);
+}
+
+test "big.int div single-single q < r" {
+ var a = try Int.initSet(al, 0x0078f432);
+ var b = try Int.initSet(al, 0x01000000);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == 0);
+ debug.assert((try r.to(u64)) == 0x0078f432);
+}
+
+test "big.int div single-single q == r" {
+ var a = try Int.initSet(al, 10);
+ var b = try Int.initSet(al, 10);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u64)) == 1);
+ debug.assert((try r.to(u64)) == 0);
+}
+
+test "big.int div q=0 alias" {
+ var a = try Int.initSet(al, 3);
+ var b = try Int.initSet(al, 10);
+
+ try Int.divTrunc(&a, &b, &a, &b);
+
+ debug.assert((try a.to(u64)) == 0);
+ debug.assert((try b.to(u64)) == 3);
+}
+
+test "big.int div multi-multi q < r" {
+ const op1 = 0x1ffffffff0078f432;
+ const op2 = 0x1ffffffff01000000;
+ var a = try Int.initSet(al, op1);
+ var b = try Int.initSet(al, op2);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0);
+ debug.assert((try r.to(u128)) == op1);
+}
+
+test "big.int div trunc single-single +/+" {
+ const u: i32 = 5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = 1 * 3 + 2
+ const eq = @divTrunc(u, v);
+ const er = @mod(u, v);
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single -/+" {
+ const u: i32 = -5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = 1 * -3 - 2
+ const eq = -1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single +/-" {
+ const u: i32 = 5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = -1 * -3 + 2
+ const eq = -1;
+ const er = 2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div trunc single-single -/-" {
+ const u: i32 = -5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = 1 * -3 - 2
+ const eq = 1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single +/+" {
+ const u: i32 = 5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = 1 * 3 + 2
+ const eq = 1;
+ const er = 2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single -/+" {
+ const u: i32 = -5;
+ const v: i32 = 3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = -2 * 3 + 1
+ const eq = -2;
+ const er = 1;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single +/-" {
+ const u: i32 = 5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // 5 = -2 * -3 - 1
+ const eq = -2;
+ const er = -1;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div floor single-single -/-" {
+ const u: i32 = -5;
+ const v: i32 = -3;
+
+ var a = try Int.initSet(al, u);
+ var b = try Int.initSet(al, v);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divFloor(&q, &r, &a, &b);
+
+ // n = q * d + r
+ // -5 = 2 * -3 + 1
+ const eq = 1;
+ const er = -2;
+
+ debug.assert((try q.to(i32)) == eq);
+ debug.assert((try r.to(i32)) == er);
+}
+
+test "big.int div multi-multi with rem" {
+ var a = try Int.initSet(al, 0x8888999911110000ffffeeeeddddccccbbbbaaaa9999);
+ var b = try Int.initSet(al, 0x99990000111122223333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b);
+ debug.assert((try r.to(u128)) == 0x28de0acacd806823638);
+}
+
+test "big.int div multi-multi no rem" {
+ var a = try Int.initSet(al, 0x8888999911110000ffffeeeedb4fec200ee3a4286361);
+ var b = try Int.initSet(al, 0x99990000111122223333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b);
+ debug.assert((try r.to(u128)) == 0);
+}
+
+test "big.int div multi-multi (2 branch)" {
+ var a = try Int.initSet(al, 0x866666665555555588888887777777761111111111111111);
+ var b = try Int.initSet(al, 0x86666666555555554444444433333333);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0x10000000000000000);
+ debug.assert((try r.to(u128)) == 0x44444443444444431111111111111111);
+}
+
+test "big.int div multi-multi (3.1/3.3 branch)" {
+ var a = try Int.initSet(al, 0x11111111111111111111111111111111111111111111111111111111111111);
+ var b = try Int.initSet(al, 0x1111111111111111111111111111111111111111171);
+
+ var q = try Int.init(al);
+ var r = try Int.init(al);
+ try Int.divTrunc(&q, &r, &a, &b);
+
+ debug.assert((try q.to(u128)) == 0xfffffffffffffffffff);
+ debug.assert((try r.to(u256)) == 0x1111111111111111111110b12222222222222222282);
+}
+
+test "big.int shift-right single" {
+ var a = try Int.initSet(al, 0xffff0000);
+ try a.shiftRight(a, 16);
+
+ debug.assert((try a.to(u32)) == 0xffff);
+}
+
+test "big.int shift-right multi" {
+ var a = try Int.initSet(al, 0xffff0000eeee1111dddd2222cccc3333);
+ try a.shiftRight(a, 67);
+
+ debug.assert((try a.to(u64)) == 0x1fffe0001dddc222);
+}
+
+test "big.int shift-left single" {
+ var a = try Int.initSet(al, 0xffff);
+ try a.shiftLeft(a, 16);
+
+ debug.assert((try a.to(u64)) == 0xffff0000);
+}
+
+test "big.int shift-left multi" {
+ var a = try Int.initSet(al, 0x1fffe0001dddc222);
+ try a.shiftLeft(a, 67);
+
+ debug.assert((try a.to(u128)) == 0xffff0000eeee11100000000000000000);
+}
+
+test "big.int shift-right negative" {
+ var a = try Int.init(al);
+
+ try a.shiftRight(-20, 2);
+ debug.assert((try a.to(i32)) == -20 >> 2);
+
+ try a.shiftRight(-5, 10);
+ debug.assert((try a.to(i32)) == -5 >> 10);
+}
+
+test "big.int shift-left negative" {
+ var a = try Int.init(al);
+
+ try a.shiftRight(-10, 1232);
+ debug.assert((try a.to(i32)) == -10 >> 1232);
+}
+
+test "big.int bitwise and simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitAnd(&a, &b);
+
+ debug.assert((try a.to(u64)) == 0xeeeeeeee00000000);
+}
+
+test "big.int bitwise and multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitAnd(&a, &b);
+
+ debug.assert((try a.to(u128)) == 0);
+}
+
+test "big.int bitwise xor simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitXor(&a, &b);
+
+ debug.assert((try a.to(u64)) == 0x1111111133333333);
+}
+
+test "big.int bitwise xor multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitXor(&a, &b);
+
+ debug.assert((try a.to(DoubleLimb)) == (@maxValue(Limb) + 1) ^ @maxValue(Limb));
+}
+
+test "big.int bitwise or simple" {
+ var a = try Int.initSet(al, 0xffffffff11111111);
+ var b = try Int.initSet(al, 0xeeeeeeee22222222);
+
+ try a.bitOr(&a, &b);
+
+ debug.assert((try a.to(u64)) == 0xffffffff33333333);
+}
+
+test "big.int bitwise or multi-limb" {
+ var a = try Int.initSet(al, @maxValue(Limb) + 1);
+ var b = try Int.initSet(al, @maxValue(Limb));
+
+ try a.bitOr(&a, &b);
+
+ // TODO: the bitwise-or implementation in big.int.cpp is wrong on multi-limb operands.
+ debug.assert((try a.to(DoubleLimb)) == (@maxValue(Limb) + 1) + @maxValue(Limb));
+}
+
+test "big.int var args" {
+ var a = try Int.initSet(al, 5);
+
+ try a.add(&a, 6);
+ debug.assert((try a.to(u64)) == 11);
+
+ debug.assert(a.cmp(11) == 0);
+ debug.assert(a.cmp(14) <= 0);
+}
diff --git a/std/math/index.zig b/std/math/index.zig
index 33bc1082f7..cc1b833a37 100644
--- a/std/math/index.zig
+++ b/std/math/index.zig
@@ -132,6 +132,8 @@ pub const tan = @import("tan.zig").tan;
pub const complex = @import("complex/index.zig");
pub const Complex = complex.Complex;
+pub const big = @import("big/index.zig");
+
test "math" {
_ = @import("nan.zig");
_ = @import("isnan.zig");
@@ -177,6 +179,8 @@ test "math" {
_ = @import("tan.zig");
_ = @import("complex/index.zig");
+
+ _ = @import("big/index.zig");
}
pub fn min(x: var, y: var) @typeOf(x + y) {
@@ -306,7 +310,14 @@ test "math.rotl" {
}
pub fn Log2Int(comptime T: type) type {
- return @IntType(false, log2(T.bit_count));
+ // comptime ceil log2
+ comptime var count: usize = 0;
+ comptime var s = T.bit_count - 1;
+ inline while (s != 0) : (s >>= 1) {
+ count += 1;
+ }
+
+ return @IntType(false, count);
}
test "math overflow functions" {
diff --git a/std/mem.zig b/std/mem.zig
index 423460e73b..f961c7862b 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -304,20 +304,20 @@ pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, nee
}
test "mem.indexOf" {
- assert(??indexOf(u8, "one two three four", "four") == 14);
- assert(??lastIndexOf(u8, "one two three two four", "two") == 14);
+ assert(indexOf(u8, "one two three four", "four").? == 14);
+ assert(lastIndexOf(u8, "one two three two four", "two").? == 14);
assert(indexOf(u8, "one two three four", "gour") == null);
assert(lastIndexOf(u8, "one two three four", "gour") == null);
- assert(??indexOf(u8, "foo", "foo") == 0);
- assert(??lastIndexOf(u8, "foo", "foo") == 0);
+ assert(indexOf(u8, "foo", "foo").? == 0);
+ assert(lastIndexOf(u8, "foo", "foo").? == 0);
assert(indexOf(u8, "foo", "fool") == null);
assert(lastIndexOf(u8, "foo", "lfoo") == null);
assert(lastIndexOf(u8, "foo", "fool") == null);
- assert(??indexOf(u8, "foo foo", "foo") == 0);
- assert(??lastIndexOf(u8, "foo foo", "foo") == 4);
- assert(??lastIndexOfAny(u8, "boo, cat", "abo") == 6);
- assert(??lastIndexOfScalar(u8, "boo", 'o') == 2);
+ assert(indexOf(u8, "foo foo", "foo").? == 0);
+ assert(lastIndexOf(u8, "foo foo", "foo").? == 4);
+ assert(lastIndexOfAny(u8, "boo, cat", "abo").? == 6);
+ assert(lastIndexOfScalar(u8, "boo", 'o').? == 2);
}
/// Reads an integer from memory with size equal to bytes.len.
@@ -432,9 +432,9 @@ pub fn split(buffer: []const u8, split_bytes: []const u8) SplitIterator {
test "mem.split" {
var it = split(" abc def ghi ", " ");
- assert(eql(u8, ??it.next(), "abc"));
- assert(eql(u8, ??it.next(), "def"));
- assert(eql(u8, ??it.next(), "ghi"));
+ assert(eql(u8, it.next().?, "abc"));
+ assert(eql(u8, it.next().?, "def"));
+ assert(eql(u8, it.next().?, "ghi"));
assert(it.next() == null);
}
diff --git a/std/os/child_process.zig b/std/os/child_process.zig
index 822ade2eb8..1e3a732498 100644
--- a/std/os/child_process.zig
+++ b/std/os/child_process.zig
@@ -156,7 +156,7 @@ pub const ChildProcess = struct {
};
}
try self.waitUnwrappedWindows();
- return ??self.term;
+ return self.term.?;
}
pub fn killPosix(self: *ChildProcess) !Term {
@@ -175,7 +175,7 @@ pub const ChildProcess = struct {
};
}
self.waitUnwrapped();
- return ??self.term;
+ return self.term.?;
}
/// Blocks until child process terminates and then cleans up all resources.
@@ -212,8 +212,8 @@ pub const ChildProcess = struct {
defer Buffer.deinit(&stdout);
defer Buffer.deinit(&stderr);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size);
try stderr_file_in_stream.stream.readAllBuffer(&stderr, max_output_size);
@@ -232,7 +232,7 @@ pub const ChildProcess = struct {
}
try self.waitUnwrappedWindows();
- return ??self.term;
+ return self.term.?;
}
fn waitPosix(self: *ChildProcess) !Term {
@@ -242,7 +242,7 @@ pub const ChildProcess = struct {
}
self.waitUnwrapped();
- return ??self.term;
+ return self.term.?;
}
pub fn deinit(self: *ChildProcess) void {
@@ -619,13 +619,13 @@ pub const ChildProcess = struct {
self.term = null;
if (self.stdin_behavior == StdIo.Pipe) {
- os.close(??g_hChildStd_IN_Rd);
+ os.close(g_hChildStd_IN_Rd.?);
}
if (self.stderr_behavior == StdIo.Pipe) {
- os.close(??g_hChildStd_ERR_Wr);
+ os.close(g_hChildStd_ERR_Wr.?);
}
if (self.stdout_behavior == StdIo.Pipe) {
- os.close(??g_hChildStd_OUT_Wr);
+ os.close(g_hChildStd_OUT_Wr.?);
}
}
diff --git a/std/os/darwin.zig b/std/os/darwin.zig
index b8e18561cc..a835959103 100644
--- a/std/os/darwin.zig
+++ b/std/os/darwin.zig
@@ -327,7 +327,7 @@ pub fn raise(sig: i32) usize {
}
pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize {
- return errnoWrap(c.read(fd, @ptrCast([*]c_void, buf), nbyte));
+ return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte));
}
pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
@@ -335,17 +335,17 @@ pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize {
}
pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize {
- return errnoWrap(c.write(fd, @ptrCast([*]const c_void, buf), nbyte));
+ return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte));
}
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize {
- const ptr_result = c.mmap(@ptrCast([*]c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
+ const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset);
const isize_result = @bitCast(isize, @ptrToInt(ptr_result));
return errnoWrap(isize_result);
}
pub fn munmap(address: usize, length: usize) usize {
- return errnoWrap(c.munmap(@intToPtr([*]c_void, address), length));
+ return errnoWrap(c.munmap(@intToPtr(*c_void, address), length));
}
pub fn unlink(path: [*]const u8) usize {
diff --git a/std/os/file.zig b/std/os/file.zig
index 378782507b..56da4f73a6 100644
--- a/std/os/file.zig
+++ b/std/os/file.zig
@@ -96,7 +96,20 @@ pub const File = struct {
return File{ .handle = handle };
}
- pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool {
+ pub const AccessError = error{
+ PermissionDenied,
+ NotFound,
+ NameTooLong,
+ BadMode,
+ BadPathName,
+ Io,
+ SystemResources,
+ OutOfMemory,
+
+ Unexpected,
+ };
+
+ pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) AccessError!bool {
const path_with_null = try std.cstr.addNullByte(allocator, path);
defer allocator.free(path_with_null);
@@ -123,7 +136,7 @@ pub const File = struct {
}
return true;
} else if (is_windows) {
- if (os.windows.PathFileExists(path_with_null.ptr) == os.windows.TRUE) {
+ if (os.windows.GetFileAttributesA(path_with_null.ptr) != os.windows.INVALID_FILE_ATTRIBUTES) {
return true;
}
@@ -334,7 +347,7 @@ pub const File = struct {
while (index < buffer.len) {
const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index));
var amt_read: windows.DWORD = undefined;
- if (windows.ReadFile(self.handle, @ptrCast([*]c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) {
+ if (windows.ReadFile(self.handle, @ptrCast(*c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.OPERATION_ABORTED => continue,
diff --git a/std/os/index.zig b/std/os/index.zig
index 6023929b04..62eeb7e43e 100644
--- a/std/os/index.zig
+++ b/std/os/index.zig
@@ -422,10 +422,10 @@ pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator:
const exe_path = argv[0];
if (mem.indexOfScalar(u8, exe_path, '/') != null) {
- return posixExecveErrnoToErr(posix.getErrno(posix.execve(??argv_buf[0], argv_buf.ptr, envp_buf.ptr)));
+ return posixExecveErrnoToErr(posix.getErrno(posix.execve(argv_buf[0].?, argv_buf.ptr, envp_buf.ptr)));
}
- const PATH = getEnvPosix("PATH") ?? "/usr/local/bin:/bin/:/usr/bin";
+ const PATH = getEnvPosix("PATH") orelse "/usr/local/bin:/bin/:/usr/bin";
// PATH.len because it is >= the largest search_path
// +1 for the / to join the search path and exe_path
// +1 for the null terminating byte
@@ -490,7 +490,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
errdefer result.deinit();
if (is_windows) {
- const ptr = windows.GetEnvironmentStringsA() ?? return error.OutOfMemory;
+ const ptr = windows.GetEnvironmentStringsA() orelse return error.OutOfMemory;
defer assert(windows.FreeEnvironmentStringsA(ptr) != 0);
var i: usize = 0;
@@ -573,7 +573,7 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 {
return allocator.shrink(u8, buf, result);
}
} else {
- const result = getEnvPosix(key) ?? return error.EnvironmentVariableNotFound;
+ const result = getEnvPosix(key) orelse return error.EnvironmentVariableNotFound;
return mem.dupe(allocator, u8, result);
}
}
@@ -714,7 +714,7 @@ pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path:
else => return err, // TODO zig should know this set does not include PathAlreadyExists
}
- const dirname = os.path.dirname(new_path);
+ const dirname = os.path.dirname(new_path) orelse ".";
var rand_buf: [12]u8 = undefined;
const tmp_path = try allocator.alloc(u8, dirname.len + 1 + base64.Base64Encoder.calcSize(rand_buf.len));
@@ -734,7 +734,23 @@ pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path:
}
}
-pub fn deleteFile(allocator: *Allocator, file_path: []const u8) !void {
+pub const DeleteFileError = error{
+ FileNotFound,
+ AccessDenied,
+ FileBusy,
+ FileSystem,
+ IsDir,
+ SymLinkLoop,
+ NameTooLong,
+ NotDir,
+ SystemResources,
+ ReadOnlyFileSystem,
+ OutOfMemory,
+
+ Unexpected,
+};
+
+pub fn deleteFile(allocator: *Allocator, file_path: []const u8) DeleteFileError!void {
if (builtin.os == Os.windows) {
return deleteFileWindows(allocator, file_path);
} else {
@@ -844,14 +860,14 @@ pub const AtomicFile = struct {
var rand_buf: [12]u8 = undefined;
- const dirname_component_len = if (dirname.len == 0) 0 else dirname.len + 1;
+ const dirname_component_len = if (dirname) |d| d.len + 1 else 0;
const tmp_path = try allocator.alloc(u8, dirname_component_len +
base64.Base64Encoder.calcSize(rand_buf.len));
errdefer allocator.free(tmp_path);
- if (dirname.len != 0) {
- mem.copy(u8, tmp_path[0..], dirname);
- tmp_path[dirname.len] = os.path.sep;
+ if (dirname) |dir| {
+ mem.copy(u8, tmp_path[0..], dir);
+ tmp_path[dir.len] = os.path.sep;
}
while (true) {
@@ -1019,37 +1035,66 @@ pub fn makePath(allocator: *Allocator, full_path: []const u8) !void {
}
}
+pub const DeleteDirError = error{
+ AccessDenied,
+ FileBusy,
+ SymLinkLoop,
+ NameTooLong,
+ FileNotFound,
+ SystemResources,
+ NotDir,
+ DirNotEmpty,
+ ReadOnlyFileSystem,
+ OutOfMemory,
+
+ Unexpected,
+};
+
/// Returns ::error.DirNotEmpty if the directory is not empty.
/// To delete a directory recursively, see ::deleteTree
-pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) !void {
+pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) DeleteDirError!void {
const path_buf = try allocator.alloc(u8, dir_path.len + 1);
defer allocator.free(path_buf);
mem.copy(u8, path_buf, dir_path);
path_buf[dir_path.len] = 0;
- const err = posix.getErrno(posix.rmdir(path_buf.ptr));
- if (err > 0) {
- return switch (err) {
- posix.EACCES, posix.EPERM => error.AccessDenied,
- posix.EBUSY => error.FileBusy,
- posix.EFAULT, posix.EINVAL => unreachable,
- posix.ELOOP => error.SymLinkLoop,
- posix.ENAMETOOLONG => error.NameTooLong,
- posix.ENOENT => error.FileNotFound,
- posix.ENOMEM => error.SystemResources,
- posix.ENOTDIR => error.NotDir,
- posix.EEXIST, posix.ENOTEMPTY => error.DirNotEmpty,
- posix.EROFS => error.ReadOnlyFileSystem,
- else => unexpectedErrorPosix(err),
- };
+ switch (builtin.os) {
+ Os.windows => {
+ if (windows.RemoveDirectoryA(path_buf.ptr) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.PATH_NOT_FOUND => error.FileNotFound,
+ windows.ERROR.DIR_NOT_EMPTY => error.DirNotEmpty,
+ else => unexpectedErrorWindows(err),
+ };
+ }
+ },
+ Os.linux, Os.macosx, Os.ios => {
+ const err = posix.getErrno(posix.rmdir(path_buf.ptr));
+ if (err > 0) {
+ return switch (err) {
+ posix.EACCES, posix.EPERM => error.AccessDenied,
+ posix.EBUSY => error.FileBusy,
+ posix.EFAULT, posix.EINVAL => unreachable,
+ posix.ELOOP => error.SymLinkLoop,
+ posix.ENAMETOOLONG => error.NameTooLong,
+ posix.ENOENT => error.FileNotFound,
+ posix.ENOMEM => error.SystemResources,
+ posix.ENOTDIR => error.NotDir,
+ posix.EEXIST, posix.ENOTEMPTY => error.DirNotEmpty,
+ posix.EROFS => error.ReadOnlyFileSystem,
+ else => unexpectedErrorPosix(err),
+ };
+ }
+ },
+ else => @compileError("unimplemented"),
}
}
/// Whether ::full_path describes a symlink, file, or directory, this function
/// removes it. If it cannot be removed because it is a non-empty directory,
/// this function recursively removes its entries and then tries again.
-/// TODO non-recursive implementation
const DeleteTreeError = error{
OutOfMemory,
AccessDenied,
@@ -1128,7 +1173,7 @@ pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!
try full_entry_buf.resize(full_path.len + entry.name.len + 1);
const full_entry_path = full_entry_buf.toSlice();
mem.copy(u8, full_entry_path, full_path);
- full_entry_path[full_path.len] = '/';
+ full_entry_path[full_path.len] = path.sep;
mem.copy(u8, full_entry_path[full_path.len + 1 ..], entry.name);
try deleteTree(allocator, full_entry_path);
@@ -1139,16 +1184,29 @@ pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!
}
pub const Dir = struct {
- fd: i32,
- darwin_seek: darwin_seek_t,
+ handle: Handle,
allocator: *Allocator,
- buf: []u8,
- index: usize,
- end_index: usize,
- const darwin_seek_t = switch (builtin.os) {
- Os.macosx, Os.ios => i64,
- else => void,
+ pub const Handle = switch (builtin.os) {
+ Os.macosx, Os.ios => struct {
+ fd: i32,
+ seek: i64,
+ buf: []u8,
+ index: usize,
+ end_index: usize,
+ },
+ Os.linux => struct {
+ fd: i32,
+ buf: []u8,
+ index: usize,
+ end_index: usize,
+ },
+ Os.windows => struct {
+ handle: windows.HANDLE,
+ find_file_data: windows.WIN32_FIND_DATAA,
+ first: bool,
+ },
+ else => @compileError("unimplemented"),
};
pub const Entry = struct {
@@ -1168,81 +1226,122 @@ pub const Dir = struct {
};
};
- pub fn open(allocator: *Allocator, dir_path: []const u8) !Dir {
- const fd = switch (builtin.os) {
- Os.windows => @compileError("TODO support Dir.open for windows"),
- Os.linux => try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0),
- Os.macosx, Os.ios => try posixOpen(
- allocator,
- dir_path,
- posix.O_RDONLY | posix.O_NONBLOCK | posix.O_DIRECTORY | posix.O_CLOEXEC,
- 0,
- ),
- else => @compileError("Dir.open is not supported for this platform"),
- };
- const darwin_seek_init = switch (builtin.os) {
- Os.macosx, Os.ios => 0,
- else => {},
- };
+ pub const OpenError = error{
+ PathNotFound,
+ NotDir,
+ AccessDenied,
+ FileTooBig,
+ IsDir,
+ SymLinkLoop,
+ ProcessFdQuotaExceeded,
+ NameTooLong,
+ SystemFdQuotaExceeded,
+ NoDevice,
+ SystemResources,
+ NoSpaceLeft,
+ PathAlreadyExists,
+ OutOfMemory,
+
+ Unexpected,
+ };
+
+ pub fn open(allocator: *Allocator, dir_path: []const u8) OpenError!Dir {
return Dir{
.allocator = allocator,
- .fd = fd,
- .darwin_seek = darwin_seek_init,
- .index = 0,
- .end_index = 0,
- .buf = []u8{},
+ .handle = switch (builtin.os) {
+ Os.windows => blk: {
+ var find_file_data: windows.WIN32_FIND_DATAA = undefined;
+ const handle = try windows_util.windowsFindFirstFile(allocator, dir_path, &find_file_data);
+ break :blk Handle{
+ .handle = handle,
+ .find_file_data = find_file_data, // TODO guaranteed copy elision
+ .first = true,
+ };
+ },
+ Os.macosx, Os.ios => Handle{
+ .fd = try posixOpen(
+ allocator,
+ dir_path,
+ posix.O_RDONLY | posix.O_NONBLOCK | posix.O_DIRECTORY | posix.O_CLOEXEC,
+ 0,
+ ),
+ .seek = 0,
+ .index = 0,
+ .end_index = 0,
+ .buf = []u8{},
+ },
+ Os.linux => Handle{
+ .fd = try posixOpen(
+ allocator,
+ dir_path,
+ posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC,
+ 0,
+ ),
+ .index = 0,
+ .end_index = 0,
+ .buf = []u8{},
+ },
+ else => @compileError("unimplemented"),
+ },
};
}
pub fn close(self: *Dir) void {
- self.allocator.free(self.buf);
- os.close(self.fd);
+ switch (builtin.os) {
+ Os.windows => {
+ _ = windows.FindClose(self.handle.handle);
+ },
+ Os.macosx, Os.ios, Os.linux => {
+ self.allocator.free(self.handle.buf);
+ os.close(self.handle.fd);
+ },
+ else => @compileError("unimplemented"),
+ }
}
/// Memory such as file names referenced in this returned entry becomes invalid
- /// with subsequent calls to next, as well as when this ::Dir is deinitialized.
+ /// with subsequent calls to next, as well as when this `Dir` is deinitialized.
pub fn next(self: *Dir) !?Entry {
switch (builtin.os) {
Os.linux => return self.nextLinux(),
Os.macosx, Os.ios => return self.nextDarwin(),
Os.windows => return self.nextWindows(),
- else => @compileError("Dir.next not supported on " ++ @tagName(builtin.os)),
+ else => @compileError("unimplemented"),
}
}
fn nextDarwin(self: *Dir) !?Entry {
start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.buf.len == 0) {
- self.buf = try self.allocator.alloc(u8, page_size);
+ if (self.handle.index >= self.handle.end_index) {
+ if (self.handle.buf.len == 0) {
+ self.handle.buf = try self.allocator.alloc(u8, page_size);
}
while (true) {
- const result = posix.getdirentries64(self.fd, self.buf.ptr, self.buf.len, &self.darwin_seek);
+ const result = posix.getdirentries64(self.handle.fd, self.handle.buf.ptr, self.handle.buf.len, &self.handle.seek);
const err = posix.getErrno(result);
if (err > 0) {
switch (err) {
posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
posix.EINVAL => {
- self.buf = try self.allocator.realloc(u8, self.buf, self.buf.len * 2);
+ self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => return unexpectedErrorPosix(err),
}
}
if (result == 0) return null;
- self.index = 0;
- self.end_index = result;
+ self.handle.index = 0;
+ self.handle.end_index = result;
break;
}
}
- const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
- const next_index = self.index + darwin_entry.d_reclen;
- self.index = next_index;
+ const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.handle.buf[self.handle.index]);
+ const next_index = self.handle.index + darwin_entry.d_reclen;
+ self.handle.index = next_index;
const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen];
- // skip . and .. entries
if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) {
continue :start_over;
}
@@ -1266,38 +1365,59 @@ pub const Dir = struct {
}
fn nextWindows(self: *Dir) !?Entry {
- @compileError("TODO support Dir.next for windows");
+ while (true) {
+ if (self.handle.first) {
+ self.handle.first = false;
+ } else {
+ if (!try windows_util.windowsFindNextFile(self.handle.handle, &self.handle.find_file_data))
+ return null;
+ }
+ const name = std.cstr.toSlice(self.handle.find_file_data.cFileName[0..].ptr);
+ if (mem.eql(u8, name, ".") or mem.eql(u8, name, ".."))
+ continue;
+ const kind = blk: {
+ const attrs = self.handle.find_file_data.dwFileAttributes;
+ if (attrs & windows.FILE_ATTRIBUTE_DIRECTORY != 0) break :blk Entry.Kind.Directory;
+ if (attrs & windows.FILE_ATTRIBUTE_REPARSE_POINT != 0) break :blk Entry.Kind.SymLink;
+ if (attrs & windows.FILE_ATTRIBUTE_NORMAL != 0) break :blk Entry.Kind.File;
+ break :blk Entry.Kind.Unknown;
+ };
+ return Entry{
+ .name = name,
+ .kind = kind,
+ };
+ }
}
fn nextLinux(self: *Dir) !?Entry {
start_over: while (true) {
- if (self.index >= self.end_index) {
- if (self.buf.len == 0) {
- self.buf = try self.allocator.alloc(u8, page_size);
+ if (self.handle.index >= self.handle.end_index) {
+ if (self.handle.buf.len == 0) {
+ self.handle.buf = try self.allocator.alloc(u8, page_size);
}
while (true) {
- const result = posix.getdents(self.fd, self.buf.ptr, self.buf.len);
+ const result = posix.getdents(self.handle.fd, self.handle.buf.ptr, self.handle.buf.len);
const err = posix.getErrno(result);
if (err > 0) {
switch (err) {
posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
posix.EINVAL => {
- self.buf = try self.allocator.realloc(u8, self.buf, self.buf.len * 2);
+ self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
continue;
},
else => return unexpectedErrorPosix(err),
}
}
if (result == 0) return null;
- self.index = 0;
- self.end_index = result;
+ self.handle.index = 0;
+ self.handle.end_index = result;
break;
}
}
- const linux_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]);
- const next_index = self.index + linux_entry.d_reclen;
- self.index = next_index;
+ const linux_entry = @ptrCast(*align(1) posix.dirent, &self.handle.buf[self.handle.index]);
+ const next_index = self.handle.index + linux_entry.d_reclen;
+ self.handle.index = next_index;
const name = cstr.toSlice(@ptrCast([*]u8, &linux_entry.d_name));
@@ -1306,7 +1426,7 @@ pub const Dir = struct {
continue :start_over;
}
- const type_char = self.buf[next_index - 1];
+ const type_char = self.handle.buf[next_index - 1];
const entry_kind = switch (type_char) {
posix.DT_BLK => Entry.Kind.BlockDevice,
posix.DT_CHR => Entry.Kind.CharacterDevice,
@@ -1641,7 +1761,7 @@ pub const ArgIterator = struct {
if (builtin.os == Os.windows) {
return self.inner.next(allocator);
} else {
- return mem.dupe(allocator, u8, self.inner.next() ?? return null);
+ return mem.dupe(allocator, u8, self.inner.next() orelse return null);
}
}
@@ -1729,7 +1849,7 @@ test "windows arg parsing" {
fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []const u8) void {
var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line);
for (expected_args) |expected_arg| {
- const arg = ??it.next(debug.global_allocator) catch unreachable;
+ const arg = it.next(debug.global_allocator).? catch unreachable;
assert(mem.eql(u8, arg, expected_arg));
}
assert(it.next(debug.global_allocator) == null);
@@ -1845,13 +1965,13 @@ pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 {
// the executable was in when it was run.
const full_exe_path = try readLink(allocator, "/proc/self/exe");
errdefer allocator.free(full_exe_path);
- const dir = path.dirname(full_exe_path);
+ const dir = path.dirname(full_exe_path) orelse ".";
return allocator.shrink(u8, full_exe_path, dir.len);
},
Os.windows, Os.macosx, Os.ios => {
const self_exe_path = try selfExePath(allocator);
errdefer allocator.free(self_exe_path);
- const dirname = os.path.dirname(self_exe_path);
+ const dirname = os.path.dirname(self_exe_path) orelse ".";
return allocator.shrink(u8, self_exe_path, dirname.len);
},
else => @compileError("unimplemented: std.os.selfExeDirPath for " ++ @tagName(builtin.os)),
@@ -2362,7 +2482,7 @@ pub const Thread = struct {
},
builtin.Os.windows => struct {
handle: windows.HANDLE,
- alloc_start: [*]c_void,
+ alloc_start: *c_void,
heap_handle: windows.HANDLE,
},
else => @compileError("Unsupported OS"),
@@ -2457,9 +2577,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
}
};
- const heap_handle = windows.GetProcessHeap() ?? return SpawnThreadError.OutOfMemory;
+ const heap_handle = windows.GetProcessHeap() orelse return SpawnThreadError.OutOfMemory;
const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext);
- const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory;
+ const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory;
errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0);
const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count];
const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable;
@@ -2468,7 +2588,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
outer_context.thread.data.alloc_start = bytes_ptr;
const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner);
- outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? {
+ outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse {
const err = windows.GetLastError();
return switch (err) {
else => os.unexpectedErrorWindows(err),
@@ -2533,7 +2653,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread
// align to page
stack_end -= stack_end % os.page_size;
- assert(c.pthread_attr_setstack(&attr, @intToPtr([*]c_void, stack_addr), stack_end - stack_addr) == 0);
+ assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0);
const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg));
switch (err) {
diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig
index 2ab4d0cbc1..cbd0cd1df5 100644
--- a/std/os/linux/vdso.zig
+++ b/std/os/linux/vdso.zig
@@ -28,7 +28,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
}
}
}
- const dynv = maybe_dynv ?? return 0;
+ const dynv = maybe_dynv orelse return 0;
if (base == @maxValue(usize)) return 0;
var maybe_strings: ?[*]u8 = null;
@@ -52,9 +52,9 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
}
}
- const strings = maybe_strings ?? return 0;
- const syms = maybe_syms ?? return 0;
- const hashtab = maybe_hashtab ?? return 0;
+ const strings = maybe_strings orelse return 0;
+ const syms = maybe_syms orelse return 0;
+ const hashtab = maybe_hashtab orelse return 0;
if (maybe_verdef == null) maybe_versym = null;
const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON);
@@ -67,7 +67,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize {
if (0 == syms[i].st_shndx) continue;
if (!mem.eql(u8, name, cstr.toSliceConst(strings + syms[i].st_name))) continue;
if (maybe_versym) |versym| {
- if (!checkver(??maybe_verdef, versym[i], vername, strings))
+ if (!checkver(maybe_verdef.?, versym[i], vername, strings))
continue;
}
return base + syms[i].st_value;
diff --git a/std/os/path.zig b/std/os/path.zig
index 4df6179bf5..d3ab0c519f 100644
--- a/std/os/path.zig
+++ b/std/os/path.zig
@@ -182,8 +182,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
@@ -200,8 +200,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath {
}
var it = mem.split(path, []u8{this_sep});
- _ = (it.next() ?? return relative_path);
- _ = (it.next() ?? return relative_path);
+ _ = (it.next() orelse return relative_path);
+ _ = (it.next() orelse return relative_path);
return WindowsPath{
.is_abs = isAbsoluteWindows(path),
.kind = WindowsPath.Kind.NetworkShare,
@@ -265,7 +265,7 @@ fn networkShareServersEql(ns1: []const u8, ns2: []const u8) bool {
var it2 = mem.split(ns2, []u8{sep2});
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
- return asciiEqlIgnoreCase(??it1.next(), ??it2.next());
+ return asciiEqlIgnoreCase(it1.next().?, it2.next().?);
}
fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8) bool {
@@ -286,7 +286,7 @@ fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8
var it2 = mem.split(p2, []u8{sep2});
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
- return asciiEqlIgnoreCase(??it1.next(), ??it2.next()) and asciiEqlIgnoreCase(??it1.next(), ??it2.next());
+ return asciiEqlIgnoreCase(it1.next().?, it2.next().?) and asciiEqlIgnoreCase(it1.next().?, it2.next().?);
},
}
}
@@ -414,8 +414,8 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
WindowsPath.Kind.NetworkShare => {
result = try allocator.alloc(u8, max_size);
var it = mem.split(paths[first_index], "/\\");
- const server_name = ??it.next();
- const other_name = ??it.next();
+ const server_name = it.next().?;
+ const other_name = it.next().?;
result[result_index] = '\\';
result_index += 1;
@@ -648,8 +648,8 @@ fn testResolvePosix(paths: []const []const u8) []u8 {
}
/// If the path is a file in the current directory (no directory component)
-/// then the returned slice has .len = 0.
-pub fn dirname(path: []const u8) []const u8 {
+/// then returns null
+pub fn dirname(path: []const u8) ?[]const u8 {
if (is_windows) {
return dirnameWindows(path);
} else {
@@ -657,9 +657,9 @@ pub fn dirname(path: []const u8) []const u8 {
}
}
-pub fn dirnameWindows(path: []const u8) []const u8 {
+pub fn dirnameWindows(path: []const u8) ?[]const u8 {
if (path.len == 0)
- return path[0..0];
+ return null;
const root_slice = diskDesignatorWindows(path);
if (path.len == root_slice.len)
@@ -671,13 +671,13 @@ pub fn dirnameWindows(path: []const u8) []const u8 {
while ((path[end_index] == '/' or path[end_index] == '\\') and end_index > root_slice.len) {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
while (path[end_index] != '/' and path[end_index] != '\\' and end_index > root_slice.len) {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
@@ -685,12 +685,15 @@ pub fn dirnameWindows(path: []const u8) []const u8 {
end_index += 1;
}
+ if (end_index == 0)
+ return null;
+
return path[0..end_index];
}
-pub fn dirnamePosix(path: []const u8) []const u8 {
+pub fn dirnamePosix(path: []const u8) ?[]const u8 {
if (path.len == 0)
- return path[0..0];
+ return null;
var end_index: usize = path.len - 1;
while (path[end_index] == '/') {
@@ -701,13 +704,16 @@ pub fn dirnamePosix(path: []const u8) []const u8 {
while (path[end_index] != '/') {
if (end_index == 0)
- return path[0..0];
+ return null;
end_index -= 1;
}
if (end_index == 0 and path[end_index] == '/')
return path[0..1];
+ if (end_index == 0)
+ return null;
+
return path[0..end_index];
}
@@ -717,10 +723,10 @@ test "os.path.dirnamePosix" {
testDirnamePosix("/a", "/");
testDirnamePosix("/", "/");
testDirnamePosix("////", "/");
- testDirnamePosix("", "");
- testDirnamePosix("a", "");
- testDirnamePosix("a/", "");
- testDirnamePosix("a//", "");
+ testDirnamePosix("", null);
+ testDirnamePosix("a", null);
+ testDirnamePosix("a/", null);
+ testDirnamePosix("a//", null);
}
test "os.path.dirnameWindows" {
@@ -742,7 +748,7 @@ test "os.path.dirnameWindows" {
testDirnameWindows("c:foo\\bar", "c:foo");
testDirnameWindows("c:foo\\bar\\", "c:foo");
testDirnameWindows("c:foo\\bar\\baz", "c:foo\\bar");
- testDirnameWindows("file:stream", "");
+ testDirnameWindows("file:stream", null);
testDirnameWindows("dir\\file:stream", "dir");
testDirnameWindows("\\\\unc\\share", "\\\\unc\\share");
testDirnameWindows("\\\\unc\\share\\foo", "\\\\unc\\share\\");
@@ -753,18 +759,26 @@ test "os.path.dirnameWindows" {
testDirnameWindows("/a/b/", "/a");
testDirnameWindows("/a/b", "/a");
testDirnameWindows("/a", "/");
- testDirnameWindows("", "");
+ testDirnameWindows("", null);
testDirnameWindows("/", "/");
testDirnameWindows("////", "/");
- testDirnameWindows("foo", "");
+ testDirnameWindows("foo", null);
}
-fn testDirnamePosix(input: []const u8, expected_output: []const u8) void {
- assert(mem.eql(u8, dirnamePosix(input), expected_output));
+fn testDirnamePosix(input: []const u8, expected_output: ?[]const u8) void {
+ if (dirnamePosix(input)) |output| {
+ assert(mem.eql(u8, output, expected_output.?));
+ } else {
+ assert(expected_output == null);
+ }
}
-fn testDirnameWindows(input: []const u8, expected_output: []const u8) void {
- assert(mem.eql(u8, dirnameWindows(input), expected_output));
+fn testDirnameWindows(input: []const u8, expected_output: ?[]const u8) void {
+ if (dirnameWindows(input)) |output| {
+ assert(mem.eql(u8, output, expected_output.?));
+ } else {
+ assert(expected_output == null);
+ }
}
pub fn basename(path: []const u8) []const u8 {
@@ -923,7 +937,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8)
var from_it = mem.split(resolved_from, "/\\");
var to_it = mem.split(resolved_to, "/\\");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
// TODO ASCII is wrong, we actually need full unicode support to compare paths.
@@ -974,7 +988,7 @@ pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![
var from_it = mem.split(resolved_from, "/");
var to_it = mem.split(resolved_to, "/");
while (true) {
- const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest());
+ const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest());
const to_rest = to_it.rest();
if (to_it.next()) |to_component| {
if (mem.eql(u8, from_component, to_component))
diff --git a/std/os/test.zig b/std/os/test.zig
index 4aa3535829..5a977a569a 100644
--- a/std/os/test.zig
+++ b/std/os/test.zig
@@ -10,11 +10,6 @@ const AtomicRmwOp = builtin.AtomicRmwOp;
const AtomicOrder = builtin.AtomicOrder;
test "makePath, put some files in it, deleteTree" {
- if (builtin.os == builtin.Os.windows) {
- // TODO implement os.Dir for windows
- // https://github.com/ziglang/zig/issues/709
- return;
- }
try os.makePath(a, "os_test_tmp/b/c");
try io.writeFile(a, "os_test_tmp/b/c/file.txt", "nonsense");
try io.writeFile(a, "os_test_tmp/b/file2.txt", "blah");
@@ -27,10 +22,6 @@ test "makePath, put some files in it, deleteTree" {
}
test "access file" {
- if (builtin.os == builtin.Os.windows) {
- return;
- }
-
try os.makePath(a, "os_test_tmp");
if (os.File.access(a, "os_test_tmp/file.txt", os.default_file_mode)) |ok| {
unreachable;
diff --git a/std/os/time.zig b/std/os/time.zig
index dd64df2156..ffb506cd7d 100644
--- a/std/os/time.zig
+++ b/std/os/time.zig
@@ -68,11 +68,13 @@ pub const milliTimestamp = switch (builtin.os) {
fn milliTimestampWindows() u64 {
//FileTime has a granularity of 100 nanoseconds
// and uses the NTFS/Windows epoch
- var ft: i64 = undefined;
+ var ft: windows.FILETIME = undefined;
windows.GetSystemTimeAsFileTime(&ft);
const hns_per_ms = (ns_per_s / 100) / ms_per_s;
const epoch_adj = epoch.windows * ms_per_s;
- return u64(@divFloor(ft, hns_per_ms) + epoch_adj);
+
+ const ft64 = (u64(ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+ return @divFloor(ft64, hns_per_ms) - -epoch_adj;
}
fn milliTimestampDarwin() u64 {
diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig
index c491ae6538..d631c6adbf 100644
--- a/std/os/windows/index.zig
+++ b/std/os/windows/index.zig
@@ -1,3 +1,7 @@
+test "import" {
+ _ = @import("util.zig");
+}
+
pub const ERROR = @import("error.zig");
pub extern "advapi32" stdcallcc fn CryptAcquireContextA(
@@ -61,6 +65,10 @@ pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL;
pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn;
+pub extern "kernel32" stdcallcc fn FindFirstFileA(lpFileName: LPCSTR, lpFindFileData: *WIN32_FIND_DATAA) HANDLE;
+pub extern "kernel32" stdcallcc fn FindClose(hFindFile: HANDLE) BOOL;
+pub extern "kernel32" stdcallcc fn FindNextFileA(hFindFile: HANDLE, lpFindFileData: *WIN32_FIND_DATAA) BOOL;
+
pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: [*]u8) BOOL;
pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR;
@@ -77,6 +85,8 @@ pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCo
pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL;
+pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: LPCSTR) DWORD;
+
pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD;
pub extern "kernel32" stdcallcc fn GetLastError() DWORD;
@@ -97,21 +107,21 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
-pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void;
+pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(*FILETIME) void;
pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
-pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void, dwBytes: SIZE_T) ?[*]c_void;
-pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) SIZE_T;
-pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void;
+pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T;
+pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
-pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?[*]c_void;
+pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void;
-pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void) BOOL;
+pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA(
lpExistingFileName: LPCSTR,
@@ -123,16 +133,16 @@ pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *
pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL;
-pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL;
-
pub extern "kernel32" stdcallcc fn ReadFile(
in_hFile: HANDLE,
- out_lpBuffer: [*]c_void,
+ out_lpBuffer: *c_void,
in_nNumberOfBytesToRead: DWORD,
out_lpNumberOfBytesRead: *DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
) BOOL;
+pub extern "kernel32" stdcallcc fn RemoveDirectoryA(lpPathName: LPCSTR) BOOL;
+
pub extern "kernel32" stdcallcc fn SetFilePointerEx(
in_fFile: HANDLE,
in_liDistanceToMove: LARGE_INTEGER,
@@ -150,7 +160,7 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis
pub extern "kernel32" stdcallcc fn WriteFile(
in_hFile: HANDLE,
- in_lpBuffer: [*]const c_void,
+ in_lpBuffer: *const c_void,
in_nNumberOfBytesToWrite: DWORD,
out_lpNumberOfBytesWritten: ?*DWORD,
in_out_lpOverlapped: ?*OVERLAPPED,
@@ -163,6 +173,8 @@ pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL;
pub extern "user32" stdcallcc fn MessageBoxA(hWnd: ?HANDLE, lpText: ?LPCTSTR, lpCaption: ?LPCTSTR, uType: UINT) c_int;
+pub extern "shlwapi" stdcallcc fn PathFileExistsA(pszPath: ?LPCTSTR) BOOL;
+
pub const PROV_RSA_FULL = 1;
pub const BOOL = c_int;
@@ -196,7 +208,6 @@ pub const UNICODE = false;
pub const WCHAR = u16;
pub const WORD = u16;
pub const LARGE_INTEGER = i64;
-pub const FILETIME = i64;
pub const TRUE = 1;
pub const FALSE = 0;
@@ -212,6 +223,8 @@ pub const STD_ERROR_HANDLE = @maxValue(DWORD) - 12 + 1;
pub const INVALID_HANDLE_VALUE = @intToPtr(HANDLE, @maxValue(usize));
+pub const INVALID_FILE_ATTRIBUTES = DWORD(@maxValue(DWORD));
+
pub const OVERLAPPED = extern struct {
Internal: ULONG_PTR,
InternalHigh: ULONG_PTR,
@@ -293,13 +306,24 @@ pub const OPEN_EXISTING = 3;
pub const TRUNCATE_EXISTING = 5;
pub const FILE_ATTRIBUTE_ARCHIVE = 0x20;
+pub const FILE_ATTRIBUTE_COMPRESSED = 0x800;
+pub const FILE_ATTRIBUTE_DEVICE = 0x40;
+pub const FILE_ATTRIBUTE_DIRECTORY = 0x10;
pub const FILE_ATTRIBUTE_ENCRYPTED = 0x4000;
pub const FILE_ATTRIBUTE_HIDDEN = 0x2;
+pub const FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x8000;
pub const FILE_ATTRIBUTE_NORMAL = 0x80;
+pub const FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000;
+pub const FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x20000;
pub const FILE_ATTRIBUTE_OFFLINE = 0x1000;
pub const FILE_ATTRIBUTE_READONLY = 0x1;
+pub const FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x400000;
+pub const FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x40000;
+pub const FILE_ATTRIBUTE_REPARSE_POINT = 0x400;
+pub const FILE_ATTRIBUTE_SPARSE_FILE = 0x200;
pub const FILE_ATTRIBUTE_SYSTEM = 0x4;
pub const FILE_ATTRIBUTE_TEMPORARY = 0x100;
+pub const FILE_ATTRIBUTE_VIRTUAL = 0x10000;
pub const PROCESS_INFORMATION = extern struct {
hProcess: HANDLE,
@@ -372,6 +396,20 @@ pub const HEAP_NO_SERIALIZE = 0x00000001;
pub const PTHREAD_START_ROUTINE = extern fn (LPVOID) DWORD;
pub const LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE;
-test "import" {
- _ = @import("util.zig");
-}
+pub const WIN32_FIND_DATAA = extern struct {
+ dwFileAttributes: DWORD,
+ ftCreationTime: FILETIME,
+ ftLastAccessTime: FILETIME,
+ ftLastWriteTime: FILETIME,
+ nFileSizeHigh: DWORD,
+ nFileSizeLow: DWORD,
+ dwReserved0: DWORD,
+ dwReserved1: DWORD,
+ cFileName: [260]CHAR,
+ cAlternateFileName: [14]CHAR,
+};
+
+pub const FILETIME = extern struct {
+ dwLowDateTime: DWORD,
+ dwHighDateTime: DWORD,
+};
diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig
index 5a40567310..88a9e7952e 100644
--- a/std/os/windows/util.zig
+++ b/std/os/windows/util.zig
@@ -42,7 +42,7 @@ pub const WriteError = error{
};
pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void {
- if (windows.WriteFile(handle, @ptrCast([*]const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
+ if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) {
const err = windows.GetLastError();
return switch (err) {
windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources,
@@ -153,7 +153,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap)
pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE {
const padded_buff = try cstr.addNullByte(allocator, dll_path);
defer allocator.free(padded_buff);
- return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound;
+ return windows.LoadLibraryA(padded_buff.ptr) orelse error.DllNotFound;
}
pub fn windowsUnloadDll(hModule: windows.HMODULE) void {
@@ -170,3 +170,42 @@ test "InvalidDll" {
return;
};
}
+
+pub fn windowsFindFirstFile(
+ allocator: *mem.Allocator,
+ dir_path: []const u8,
+ find_file_data: *windows.WIN32_FIND_DATAA,
+) !windows.HANDLE {
+ const wild_and_null = []u8{ '\\', '*', 0 };
+ const path_with_wild_and_null = try allocator.alloc(u8, dir_path.len + wild_and_null.len);
+ defer allocator.free(path_with_wild_and_null);
+
+ mem.copy(u8, path_with_wild_and_null, dir_path);
+ mem.copy(u8, path_with_wild_and_null[dir_path.len..], wild_and_null);
+
+ const handle = windows.FindFirstFileA(path_with_wild_and_null.ptr, find_file_data);
+
+ if (handle == windows.INVALID_HANDLE_VALUE) {
+ const err = windows.GetLastError();
+ switch (err) {
+ windows.ERROR.FILE_NOT_FOUND,
+ windows.ERROR.PATH_NOT_FOUND,
+ => return error.PathNotFound,
+ else => return os.unexpectedErrorWindows(err),
+ }
+ }
+
+ return handle;
+}
+
+/// Returns `true` if there was another file, `false` otherwise.
+pub fn windowsFindNextFile(handle: windows.HANDLE, find_file_data: *windows.WIN32_FIND_DATAA) !bool {
+ if (windows.FindNextFileA(handle, find_file_data) == 0) {
+ const err = windows.GetLastError();
+ return switch (err) {
+ windows.ERROR.NO_MORE_FILES => false,
+ else => os.unexpectedErrorWindows(err),
+ };
+ }
+ return true;
+}
diff --git a/std/segmented_list.zig b/std/segmented_list.zig
index a2f3607ad8..9f10f4d44a 100644
--- a/std/segmented_list.zig
+++ b/std/segmented_list.zig
@@ -364,7 +364,7 @@ fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
assert(x == 0);
}
- assert(??list.pop() == 100);
+ assert(list.pop().? == 100);
assert(list.len == 99);
try list.pushMany([]i32{
@@ -373,9 +373,9 @@ fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void {
3,
});
assert(list.len == 102);
- assert(??list.pop() == 3);
- assert(??list.pop() == 2);
- assert(??list.pop() == 1);
+ assert(list.pop().? == 3);
+ assert(list.pop().? == 2);
+ assert(list.pop().? == 1);
assert(list.len == 99);
try list.pushMany([]const i32{});
diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig
index 64eae79ce4..dd37f1edb6 100644
--- a/std/special/bootstrap.zig
+++ b/std/special/bootstrap.zig
@@ -51,13 +51,13 @@ extern fn WinMainCRTStartup() noreturn {
// TODO https://github.com/ziglang/zig/issues/265
fn posixCallMainAndExit() noreturn {
- const argc = argc_ptr.*;
+ const argc = argc_ptr[0];
const argv = @ptrCast([*][*]u8, argc_ptr + 1);
- const envp_nullable = @ptrCast([*]?[*]u8, argv + argc + 1);
+ const envp_optional = @ptrCast([*]?[*]u8, argv + argc + 1);
var envp_count: usize = 0;
- while (envp_nullable[envp_count]) |_| : (envp_count += 1) {}
- const envp = @ptrCast([*][*]u8, envp_nullable)[0..envp_count];
+ while (envp_optional[envp_count]) |_| : (envp_count += 1) {}
+ const envp = @ptrCast([*][*]u8, envp_optional)[0..envp_count];
if (builtin.os == builtin.Os.linux) {
const auxv = @ptrCast([*]usize, envp.ptr + envp_count + 1);
var i: usize = 0;
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index 3471d6ed21..e4f04df6d0 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -27,15 +27,15 @@ pub fn main() !void {
// skip my own exe name
_ = arg_it.skip();
- const zig_exe = try unwrapArg(arg_it.next(allocator) ?? {
+ const zig_exe = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected first argument to be path to zig compiler\n");
return error.InvalidArgs;
});
- const build_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const build_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected second argument to be build root directory path\n");
return error.InvalidArgs;
});
- const cache_root = try unwrapArg(arg_it.next(allocator) ?? {
+ const cache_root = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected third argument to be cache root directory path\n");
return error.InvalidArgs;
});
@@ -84,12 +84,12 @@ pub fn main() !void {
} else if (mem.eql(u8, arg, "--help")) {
return usage(&builder, false, try stdout_stream);
} else if (mem.eql(u8, arg, "--prefix")) {
- prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
} else if (mem.eql(u8, arg, "--search-prefix")) {
- const search_prefix = try unwrapArg(arg_it.next(allocator) ?? {
+ const search_prefix = try unwrapArg(arg_it.next(allocator) orelse {
warn("Expected argument after --search-prefix\n\n");
return usageAndErr(&builder, false, try stderr_stream);
});
diff --git a/std/special/builtin.zig b/std/special/builtin.zig
index c504dbb6fb..903556f696 100644
--- a/std/special/builtin.zig
+++ b/std/special/builtin.zig
@@ -19,7 +19,7 @@ export fn memset(dest: ?[*]u8, c: u8, n: usize) ?[*]u8 {
var index: usize = 0;
while (index != n) : (index += 1)
- (??dest)[index] = c;
+ dest.?[index] = c;
return dest;
}
@@ -29,7 +29,7 @@ export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]
var index: usize = 0;
while (index != n) : (index += 1)
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
return dest;
}
@@ -40,13 +40,13 @@ export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8 {
if (@ptrToInt(dest) < @ptrToInt(src)) {
var index: usize = 0;
while (index != n) : (index += 1) {
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
}
} else {
var index = n;
while (index != 0) {
index -= 1;
- (??dest)[index] = (??src)[index];
+ dest.?[index] = src.?[index];
}
}
diff --git a/std/special/compiler_rt/divti3.zig b/std/special/compiler_rt/divti3.zig
new file mode 100644
index 0000000000..60460ea62d
--- /dev/null
+++ b/std/special/compiler_rt/divti3.zig
@@ -0,0 +1,26 @@
+const udivmod = @import("udivmod.zig").udivmod;
+const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
+
+pub extern fn __divti3(a: i128, b: i128) i128 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const s_a = a >> (i128.bit_count - 1);
+ const s_b = b >> (i128.bit_count - 1);
+
+ const an = (a ^ s_a) -% s_a;
+ const bn = (b ^ s_b) -% s_b;
+
+ const r = udivmod(u128, @bitCast(u128, an), @bitCast(u128, bn), null);
+ const s = s_a ^ s_b;
+ return (i128(r) ^ s) -% s;
+}
+
+pub extern fn __divti3_windows_x86_64(a: *const i128, b: *const i128) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(i128, __divti3(a.*, b.*));
+}
+
+test "import divti3" {
+ _ = @import("divti3_test.zig");
+}
diff --git a/std/special/compiler_rt/divti3_test.zig b/std/special/compiler_rt/divti3_test.zig
new file mode 100644
index 0000000000..eef5a9b812
--- /dev/null
+++ b/std/special/compiler_rt/divti3_test.zig
@@ -0,0 +1,21 @@
+const __divti3 = @import("divti3.zig").__divti3;
+const assert = @import("std").debug.assert;
+
+fn test__divti3(a: i128, b: i128, expected: i128) void {
+ const x = __divti3(a, b);
+ assert(x == expected);
+}
+
+test "divti3" {
+ test__divti3(0, 1, 0);
+ test__divti3(0, -1, 0);
+ test__divti3(2, 1, 2);
+ test__divti3(2, -1, -2);
+ test__divti3(-2, 1, -2);
+ test__divti3(-2, -1, 2);
+
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), 1, @bitCast(i128, u128(0x8 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), -1, @bitCast(i128, u128(0x8 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), -2, @bitCast(i128, u128(0x4 << 124)));
+ test__divti3(@bitCast(i128, u128(0x8 << 124)), 2, @bitCast(i128, u128(0xc << 124)));
+}
diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig
index d328324320..f952730353 100644
--- a/std/special/compiler_rt/index.zig
+++ b/std/special/compiler_rt/index.zig
@@ -58,6 +58,8 @@ comptime {
@export("__chkstk", __chkstk, strong_linkage);
@export("___chkstk_ms", ___chkstk_ms, linkage);
}
+ @export("__divti3", @import("divti3.zig").__divti3_windows_x86_64, linkage);
+ @export("__muloti4", @import("muloti4.zig").__muloti4_windows_x86_64, linkage);
@export("__udivti3", @import("udivti3.zig").__udivti3_windows_x86_64, linkage);
@export("__udivmodti4", @import("udivmodti4.zig").__udivmodti4_windows_x86_64, linkage);
@export("__umodti3", @import("umodti3.zig").__umodti3_windows_x86_64, linkage);
@@ -65,6 +67,8 @@ comptime {
else => {},
}
} else {
+ @export("__divti3", @import("divti3.zig").__divti3, linkage);
+ @export("__muloti4", @import("muloti4.zig").__muloti4, linkage);
@export("__udivti3", @import("udivti3.zig").__udivti3, linkage);
@export("__udivmodti4", @import("udivmodti4.zig").__udivmodti4, linkage);
@export("__umodti3", @import("umodti3.zig").__umodti3, linkage);
diff --git a/std/special/compiler_rt/muloti4.zig b/std/special/compiler_rt/muloti4.zig
new file mode 100644
index 0000000000..866077c80c
--- /dev/null
+++ b/std/special/compiler_rt/muloti4.zig
@@ -0,0 +1,55 @@
+const udivmod = @import("udivmod.zig").udivmod;
+const builtin = @import("builtin");
+const compiler_rt = @import("index.zig");
+
+pub extern fn __muloti4(a: i128, b: i128, overflow: *c_int) i128 {
+ @setRuntimeSafety(builtin.is_test);
+
+ const min = @bitCast(i128, u128(1 << (i128.bit_count - 1)));
+ const max = ~min;
+ overflow.* = 0;
+
+ const r = a *% b;
+ if (a == min) {
+ if (b != 0 and b != 1) {
+ overflow.* = 1;
+ }
+ return r;
+ }
+ if (b == min) {
+ if (a != 0 and a != 1) {
+ overflow.* = 1;
+ }
+ return r;
+ }
+
+ const sa = a >> (i128.bit_count - 1);
+ const abs_a = (a ^ sa) -% sa;
+ const sb = b >> (i128.bit_count - 1);
+ const abs_b = (b ^ sb) -% sb;
+
+ if (abs_a < 2 or abs_b < 2) {
+ return r;
+ }
+
+ if (sa == sb) {
+ if (abs_a > @divFloor(max, abs_b)) {
+ overflow.* = 1;
+ }
+ } else {
+ if (abs_a > @divFloor(min, -abs_b)) {
+ overflow.* = 1;
+ }
+ }
+
+ return r;
+}
+
+pub extern fn __muloti4_windows_x86_64(a: *const i128, b: *const i128, overflow: *c_int) void {
+ @setRuntimeSafety(builtin.is_test);
+ compiler_rt.setXmm0(i128, __muloti4(a.*, b.*, overflow));
+}
+
+test "import muloti4" {
+ _ = @import("muloti4_test.zig");
+}
diff --git a/std/special/compiler_rt/muloti4_test.zig b/std/special/compiler_rt/muloti4_test.zig
new file mode 100644
index 0000000000..6b3671323f
--- /dev/null
+++ b/std/special/compiler_rt/muloti4_test.zig
@@ -0,0 +1,76 @@
+const __muloti4 = @import("muloti4.zig").__muloti4;
+const assert = @import("std").debug.assert;
+
+fn test__muloti4(a: i128, b: i128, expected: i128, expected_overflow: c_int) void {
+ var overflow: c_int = undefined;
+ const x = __muloti4(a, b, &overflow);
+ assert(overflow == expected_overflow and (expected_overflow != 0 or x == expected));
+}
+
+test "muloti4" {
+ test__muloti4(0, 0, 0, 0);
+ test__muloti4(0, 1, 0, 0);
+ test__muloti4(1, 0, 0, 0);
+ test__muloti4(0, 10, 0, 0);
+ test__muloti4(10, 0, 0, 0);
+
+ test__muloti4(0, 81985529216486895, 0, 0);
+ test__muloti4(81985529216486895, 0, 0, 0);
+
+ test__muloti4(0, -1, 0, 0);
+ test__muloti4(-1, 0, 0, 0);
+ test__muloti4(0, -10, 0, 0);
+ test__muloti4(-10, 0, 0, 0);
+ test__muloti4(0, -81985529216486895, 0, 0);
+ test__muloti4(-81985529216486895, 0, 0, 0);
+
+ test__muloti4(3037000499, 3037000499, 9223372030926249001, 0);
+ test__muloti4(-3037000499, 3037000499, -9223372030926249001, 0);
+ test__muloti4(3037000499, -3037000499, -9223372030926249001, 0);
+ test__muloti4(-3037000499, -3037000499, 9223372030926249001, 0);
+
+ test__muloti4(4398046511103, 2097152, 9223372036852678656, 0);
+ test__muloti4(-4398046511103, 2097152, -9223372036852678656, 0);
+ test__muloti4(4398046511103, -2097152, -9223372036852678656, 0);
+ test__muloti4(-4398046511103, -2097152, 9223372036852678656, 0);
+
+ test__muloti4(2097152, 4398046511103, 9223372036852678656, 0);
+ test__muloti4(-2097152, 4398046511103, -9223372036852678656, 0);
+ test__muloti4(2097152, -4398046511103, -9223372036852678656, 0);
+ test__muloti4(-2097152, -4398046511103, 9223372036852678656, 0);
+
+ test__muloti4(@bitCast(i128, u128(0x00000000000000B504F333F9DE5BE000)), @bitCast(i128, u128(0x000000000000000000B504F333F9DE5B)), @bitCast(i128, u128(0x7FFFFFFFFFFFF328DF915DA296E8A000)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), -1, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(-1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(@bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), -2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), -1, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(-1, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x80000000000000000000000000000000)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 1, @bitCast(i128, u128(0x80000000000000000000000000000000)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000000)), 2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x80000000000000000000000000000000)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), -2, @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(-2, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 1);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), -1, @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(-1, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 0, 0, 0);
+ test__muloti4(0, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0, 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 1, @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(1, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000001)), 0);
+ test__muloti4(@bitCast(i128, u128(0x80000000000000000000000000000001)), 2, @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+ test__muloti4(2, @bitCast(i128, u128(0x80000000000000000000000000000001)), @bitCast(i128, u128(0x80000000000000000000000000000000)), 1);
+}
diff --git a/std/unicode.zig b/std/unicode.zig
index 3d1bebdb55..ec808ca4fe 100644
--- a/std/unicode.zig
+++ b/std/unicode.zig
@@ -220,7 +220,7 @@ const Utf8Iterator = struct {
}
pub fn nextCodepoint(it: *Utf8Iterator) ?u32 {
- const slice = it.nextCodepointSlice() ?? return null;
+ const slice = it.nextCodepointSlice() orelse return null;
switch (slice.len) {
1 => return u32(slice[0]),
@@ -286,15 +286,15 @@ fn testUtf8IteratorOnAscii() void {
const s = Utf8View.initComptime("abc");
var it1 = s.iterator();
- debug.assert(std.mem.eql(u8, "a", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "b", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "c", ??it1.nextCodepointSlice()));
+ debug.assert(std.mem.eql(u8, "a", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "b", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "c", it1.nextCodepointSlice().?));
debug.assert(it1.nextCodepointSlice() == null);
var it2 = s.iterator();
- debug.assert(??it2.nextCodepoint() == 'a');
- debug.assert(??it2.nextCodepoint() == 'b');
- debug.assert(??it2.nextCodepoint() == 'c');
+ debug.assert(it2.nextCodepoint().? == 'a');
+ debug.assert(it2.nextCodepoint().? == 'b');
+ debug.assert(it2.nextCodepoint().? == 'c');
debug.assert(it2.nextCodepoint() == null);
}
@@ -321,15 +321,15 @@ fn testUtf8ViewOk() void {
const s = Utf8View.initComptime("東京市");
var it1 = s.iterator();
- debug.assert(std.mem.eql(u8, "東", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "京", ??it1.nextCodepointSlice()));
- debug.assert(std.mem.eql(u8, "市", ??it1.nextCodepointSlice()));
+ debug.assert(std.mem.eql(u8, "東", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "京", it1.nextCodepointSlice().?));
+ debug.assert(std.mem.eql(u8, "市", it1.nextCodepointSlice().?));
debug.assert(it1.nextCodepointSlice() == null);
var it2 = s.iterator();
- debug.assert(??it2.nextCodepoint() == 0x6771);
- debug.assert(??it2.nextCodepoint() == 0x4eac);
- debug.assert(??it2.nextCodepoint() == 0x5e02);
+ debug.assert(it2.nextCodepoint().? == 0x6771);
+ debug.assert(it2.nextCodepoint().? == 0x4eac);
+ debug.assert(it2.nextCodepoint().? == 0x5e02);
debug.assert(it2.nextCodepoint() == null);
}
diff --git a/std/zig/ast.zig b/std/zig/ast.zig
index a4b64d5db2..4246a50861 100644
--- a/std/zig/ast.zig
+++ b/std/zig/ast.zig
@@ -734,7 +734,7 @@ pub const Node = struct {
var i = index;
if (self.doc_comments) |comments| {
- if (i < 1) return *comments.base;
+ if (i < 1) return &comments.base;
i -= 1;
}
@@ -1243,7 +1243,7 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return *@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
@@ -1296,7 +1296,7 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return *@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
@@ -1347,7 +1347,7 @@ pub const Node = struct {
i -= 1;
if (self.@"else") |@"else"| {
- if (i < 1) return *@"else".base;
+ if (i < 1) return &@"else".base;
i -= 1;
}
@@ -1417,7 +1417,7 @@ pub const Node = struct {
Range,
Sub,
SubWrap,
- UnwrapMaybe,
+ UnwrapOptional,
};
pub fn iterate(self: *InfixOp, index: usize) ?*Node {
@@ -1475,7 +1475,7 @@ pub const Node = struct {
Op.Range,
Op.Sub,
Op.SubWrap,
- Op.UnwrapMaybe,
+ Op.UnwrapOptional,
=> {},
}
@@ -1507,14 +1507,13 @@ pub const Node = struct {
BitNot,
BoolNot,
Cancel,
- MaybeType,
+ OptionalType,
Negation,
NegationWrap,
Resume,
PtrType: PtrInfo,
SliceType: PtrInfo,
Try,
- UnwrapMaybe,
};
pub const PtrInfo = struct {
@@ -1537,33 +1536,36 @@ pub const Node = struct {
var i = index;
switch (self.op) {
+ // TODO https://github.com/ziglang/zig/issues/1107
Op.SliceType => |addr_of_info| {
if (addr_of_info.align_info) |align_info| {
if (i < 1) return align_info.node;
i -= 1;
}
},
- Op.AddrOf => |addr_of_info| {
+
+ Op.PtrType => |addr_of_info| {
if (addr_of_info.align_info) |align_info| {
if (i < 1) return align_info.node;
i -= 1;
}
},
+
Op.ArrayType => |size_expr| {
if (i < 1) return size_expr;
i -= 1;
},
+
+ Op.AddressOf,
Op.Await,
Op.BitNot,
Op.BoolNot,
Op.Cancel,
- Op.MaybeType,
+ Op.OptionalType,
Op.Negation,
Op.NegationWrap,
Op.Try,
Op.Resume,
- Op.UnwrapMaybe,
- Op.PointerType,
=> {},
}
@@ -1619,6 +1621,7 @@ pub const Node = struct {
ArrayInitializer: InitList,
StructInitializer: InitList,
Deref,
+ UnwrapOptional,
pub const InitList = SegmentedList(*Node, 2);
@@ -1667,7 +1670,9 @@ pub const Node = struct {
if (i < fields.len) return fields.at(i).*;
i -= fields.len;
},
- Op.Deref => {},
+ Op.UnwrapOptional,
+ Op.Deref,
+ => {},
}
return null;
@@ -2022,7 +2027,7 @@ pub const Node = struct {
switch (self.kind) {
Kind.Variable => |variable_name| {
- if (i < 1) return *variable_name.base;
+ if (i < 1) return &variable_name.base;
i -= 1;
},
Kind.Return => |return_type| {
@@ -2092,10 +2097,10 @@ pub const Node = struct {
pub fn iterate(self: *Asm, index: usize) ?*Node {
var i = index;
- if (i < self.outputs.len) return *(self.outputs.at(index).*).base;
+ if (i < self.outputs.len) return &self.outputs.at(index).*.base;
i -= self.outputs.len;
- if (i < self.inputs.len) return *(self.inputs.at(index).*).base;
+ if (i < self.inputs.len) return &self.inputs.at(index).*.base;
i -= self.inputs.len;
return null;
@@ -2205,3 +2210,14 @@ pub const Node = struct {
}
};
};
+
+test "iterate" {
+ var root = Node.Root{
+ .base = Node{ .id = Node.Id.Root },
+ .doc_comments = null,
+ .decls = Node.Root.DeclList.init(std.debug.global_allocator),
+ .eof_token = 0,
+ };
+ var base = &root.base;
+ assert(base.iterate(0) == null);
+}
diff --git a/std/zig/parse.zig b/std/zig/parse.zig
index 7faca8e11b..877b81c527 100644
--- a/std/zig/parse.zig
+++ b/std/zig/parse.zig
@@ -43,7 +43,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
// skip over line comments at the top of the file
while (true) {
- const next_tok = tok_it.peek() ?? break;
+ const next_tok = tok_it.peek() orelse break;
if (next_tok.id != Token.Id.LineComment) break;
_ = tok_it.next();
}
@@ -197,7 +197,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const lib_name_token = nextToken(&tok_it, &tree);
const lib_name_token_index = lib_name_token.index;
const lib_name_token_ptr = lib_name_token.ptr;
- break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) ?? {
+ break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) orelse {
prevToken(&tok_it, &tree);
break :blk null;
};
@@ -711,7 +711,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
else => {
// TODO: this is a special case. Remove this when #760 is fixed
if (token_ptr.id == Token.Id.Keyword_error) {
- if ((??tok_it.peek()).id == Token.Id.LBrace) {
+ if (tok_it.peek().?.id == Token.Id.LBrace) {
const error_type_node = try arena.construct(ast.Node.ErrorType{
.base = ast.Node{ .id = ast.Node.Id.ErrorType },
.token = token_index,
@@ -1434,14 +1434,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
try stack.append(State{
.ExpectTokenSave = ExpectTokenSave{
.id = Token.Id.AngleBracketRight,
- .ptr = &??async_node.rangle_bracket,
+ .ptr = &async_node.rangle_bracket.?,
},
});
try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &async_node.allocator_type } });
continue;
},
State.AsyncEnd => |ctx| {
- const node = ctx.ctx.get() ?? continue;
+ const node = ctx.ctx.get() orelse continue;
switch (node.id) {
ast.Node.Id.FnProto => {
@@ -1567,7 +1567,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
.bit_range = null,
};
// TODO https://github.com/ziglang/zig/issues/1022
- const align_info = &??addr_of_info.align_info;
+ const align_info = &addr_of_info.align_info.?;
try stack.append(State{ .AlignBitRange = align_info });
try stack.append(State{ .Expression = OptionalCtx{ .Required = &align_info.node } });
@@ -1604,7 +1604,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
switch (token.ptr.id) {
Token.Id.Colon => {
align_info.bit_range = ast.Node.PrefixOp.PtrInfo.Align.BitRange(undefined);
- const bit_range = &??align_info.bit_range;
+ const bit_range = &align_info.bit_range.?;
try stack.append(State{ .ExpectToken = Token.Id.RParen });
try stack.append(State{ .Expression = OptionalCtx{ .Required = &bit_range.end } });
@@ -1814,7 +1814,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
continue;
},
State.RangeExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1836,7 +1836,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.AssignmentExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1866,7 +1866,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.UnwrapExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1901,7 +1901,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BoolOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1925,7 +1925,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BoolAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -1949,7 +1949,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.ComparisonExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -1979,7 +1979,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryOrExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2003,7 +2003,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryXorExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2027,7 +2027,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BinaryAndExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2051,7 +2051,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.BitShiftExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2081,7 +2081,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.AdditionExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2111,7 +2111,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.MultiplyExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2142,9 +2142,9 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.CurlySuffixExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
- if ((??tok_it.peek()).id == Token.Id.Period) {
+ if (tok_it.peek().?.id == Token.Id.Period) {
const node = try arena.construct(ast.Node.SuffixOp{
.base = ast.Node{ .id = ast.Node.Id.SuffixOp },
.lhs = lhs,
@@ -2190,7 +2190,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.TypeExprEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| {
const node = try arena.construct(ast.Node.InfixOp{
@@ -2270,7 +2270,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
},
State.SuffixOpExpressionEnd => |opt_ctx| {
- const lhs = opt_ctx.get() ?? continue;
+ const lhs = opt_ctx.get() orelse continue;
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
@@ -2326,6 +2326,17 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
continue;
}
+ if (eatToken(&tok_it, &tree, Token.Id.QuestionMark)) |question_token| {
+ const node = try arena.construct(ast.Node.SuffixOp{
+ .base = ast.Node{ .id = ast.Node.Id.SuffixOp },
+ .lhs = lhs,
+ .op = ast.Node.SuffixOp.Op.UnwrapOptional,
+ .rtoken = question_token,
+ });
+ opt_ctx.store(&node.base);
+ stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable;
+ continue;
+ }
const node = try arena.construct(ast.Node.InfixOp{
.base = ast.Node{ .id = ast.Node.Id.InfixOp },
.lhs = lhs,
@@ -2403,12 +2414,12 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
.arrow_token = next_token_index,
.return_type = undefined,
};
- const return_type_ptr = &((??node.result).return_type);
+ const return_type_ptr = &node.result.?.return_type;
try stack.append(State{ .Expression = OptionalCtx{ .Required = return_type_ptr } });
continue;
},
Token.Id.StringLiteral, Token.Id.MultilineStringLiteralLine => {
- opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) ?? unreachable);
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) orelse unreachable);
continue;
},
Token.Id.LParen => {
@@ -2638,7 +2649,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree {
const token = nextToken(&tok_it, &tree);
const token_index = token.index;
const token_ptr = token.ptr;
- opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) ?? {
+ opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) orelse {
prevToken(&tok_it, &tree);
if (opt_ctx != OptionalCtx.Optional) {
((try tree.errors.addOne())).* = Error{ .ExpectedPrimaryExpr = Error.ExpectedPrimaryExpr{ .token = token_index } };
@@ -2875,7 +2886,7 @@ const OptionalCtx = union(enum) {
pub fn get(self: *const OptionalCtx) ?*ast.Node {
switch (self.*) {
OptionalCtx.Optional => |ptr| return ptr.*,
- OptionalCtx.RequiredNull => |ptr| return ??ptr.*,
+ OptionalCtx.RequiredNull => |ptr| return ptr.*.?,
OptionalCtx.Required => |ptr| return ptr.*,
}
}
@@ -3237,7 +3248,7 @@ fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op {
fn tokenIdToUnwrapExpr(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op {
return switch (id) {
Token.Id.Keyword_catch => ast.Node.InfixOp.Op{ .Catch = null },
- Token.Id.QuestionMarkQuestionMark => ast.Node.InfixOp.Op{ .UnwrapMaybe = void{} },
+ Token.Id.Keyword_orelse => ast.Node.InfixOp.Op{ .UnwrapOptional = void{} },
else => null,
};
}
@@ -3299,8 +3310,7 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op {
.volatile_token = null,
},
},
- Token.Id.QuestionMark => ast.Node.PrefixOp.Op{ .MaybeType = void{} },
- Token.Id.QuestionMarkQuestionMark => ast.Node.PrefixOp.Op{ .UnwrapMaybe = void{} },
+ Token.Id.QuestionMark => ast.Node.PrefixOp.Op{ .OptionalType = void{} },
Token.Id.Keyword_await => ast.Node.PrefixOp.Op{ .Await = void{} },
Token.Id.Keyword_try => ast.Node.PrefixOp.Op{ .Try = void{} },
else => null,
@@ -3322,7 +3332,7 @@ fn createToCtxLiteral(arena: *mem.Allocator, opt_ctx: *const OptionalCtx, compti
}
fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(Token.Id)) ?TokenIndex {
- const token = ??tok_it.peek();
+ const token = tok_it.peek().?;
if (token.id == id) {
return nextToken(tok_it, tree).index;
@@ -3334,12 +3344,12 @@ fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(
fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedToken {
const result = AnnotatedToken{
.index = tok_it.index,
- .ptr = ??tok_it.next(),
+ .ptr = tok_it.next().?,
};
assert(result.ptr.id != Token.Id.LineComment);
while (true) {
- const next_tok = tok_it.peek() ?? return result;
+ const next_tok = tok_it.peek() orelse return result;
if (next_tok.id != Token.Id.LineComment) return result;
_ = tok_it.next();
}
@@ -3347,7 +3357,7 @@ fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedTok
fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void {
while (true) {
- const prev_tok = tok_it.prev() ?? return;
+ const prev_tok = tok_it.prev() orelse return;
if (prev_tok.id == Token.Id.LineComment) continue;
return;
}
diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig
index 91a56de827..09ea8aa1a1 100644
--- a/std/zig/parser_test.zig
+++ b/std/zig/parser_test.zig
@@ -650,9 +650,10 @@ test "zig fmt: statements with empty line between" {
);
}
-test "zig fmt: ptr deref operator" {
+test "zig fmt: ptr deref operator and unwrap optional operator" {
try testCanonical(
\\const a = b.*;
+ \\const a = b.?;
\\
);
}
@@ -1150,7 +1151,7 @@ test "zig fmt: infix operators" {
\\ _ = i!i;
\\ _ = i ** i;
\\ _ = i ++ i;
- \\ _ = i ?? i;
+ \\ _ = i orelse i;
\\ _ = i % i;
\\ _ = i / i;
\\ _ = i *% i;
@@ -1209,7 +1210,7 @@ test "zig fmt: precedence" {
test "zig fmt: prefix operators" {
try testCanonical(
\\test "prefix operators" {
- \\ try return --%~??!*&0;
+ \\ try return --%~!*&0;
\\}
\\
);
diff --git a/std/zig/render.zig b/std/zig/render.zig
index 7c9b53b77a..bc45768fa3 100644
--- a/std/zig/render.zig
+++ b/std/zig/render.zig
@@ -83,7 +83,7 @@ fn renderRoot(
var start_col: usize = 0;
var it = tree.root_node.decls.iterator(0);
while (true) {
- var decl = (it.next() ?? return).*;
+ var decl = (it.next() orelse return).*;
// look for zig fmt: off comment
var start_token_index = decl.firstToken();
zig_fmt_loop: while (start_token_index != 0) {
@@ -112,7 +112,7 @@ fn renderRoot(
const start = tree.tokens.at(start_token_index + 1).start;
try stream.print("{}\n", tree.source[start..end_token.end]);
while (tree.tokens.at(decl.firstToken()).start < end_token.end) {
- decl = (it.next() ?? return).*;
+ decl = (it.next() orelse return).*;
}
break :zig_fmt_loop;
}
@@ -222,7 +222,7 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
}
}
- const value_expr = ??tag.value_expr;
+ const value_expr = tag.value_expr.?;
try renderToken(tree, stream, tree.prevToken(value_expr.firstToken()), indent, start_col, Space.Space); // =
try renderExpression(allocator, stream, tree, indent, start_col, value_expr, Space.Comma); // value,
},
@@ -465,8 +465,7 @@ fn renderExpression(
ast.Node.PrefixOp.Op.BoolNot,
ast.Node.PrefixOp.Op.Negation,
ast.Node.PrefixOp.Op.NegationWrap,
- ast.Node.PrefixOp.Op.UnwrapMaybe,
- ast.Node.PrefixOp.Op.MaybeType,
+ ast.Node.PrefixOp.Op.OptionalType,
ast.Node.PrefixOp.Op.AddressOf,
=> {
try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None);
@@ -513,7 +512,7 @@ fn renderExpression(
var it = call_info.params.iterator(0);
while (true) {
- const param_node = ??it.next();
+ const param_node = it.next().?;
const param_node_new_indent = if (param_node.*.id == ast.Node.Id.MultilineStringLiteral) blk: {
break :blk indent;
@@ -559,10 +558,10 @@ fn renderExpression(
return renderToken(tree, stream, rbracket, indent, start_col, space); // ]
},
- ast.Node.SuffixOp.Op.Deref => {
+ ast.Node.SuffixOp.Op.Deref, ast.Node.SuffixOp.Op.UnwrapOptional => {
try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None);
try renderToken(tree, stream, tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // .
- return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // *
+ return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // * or ?
},
@TagType(ast.Node.SuffixOp.Op).Slice => |range| {
@@ -595,7 +594,7 @@ fn renderExpression(
}
if (field_inits.len == 1) blk: {
- const field_init = ??field_inits.at(0).*.cast(ast.Node.FieldInitializer);
+ const field_init = field_inits.at(0).*.cast(ast.Node.FieldInitializer).?;
if (field_init.expr.cast(ast.Node.SuffixOp)) |nested_suffix_op| {
if (nested_suffix_op.op == ast.Node.SuffixOp.Op.StructInitializer) {
@@ -688,7 +687,7 @@ fn renderExpression(
var count: usize = 1;
var it = exprs.iterator(0);
while (true) {
- const expr = (??it.next()).*;
+ const expr = it.next().?.*;
if (it.peek()) |next_expr| {
const expr_last_token = expr.*.lastToken() + 1;
const loc = tree.tokenLocation(tree.tokens.at(expr_last_token).end, next_expr.*.firstToken());
@@ -806,7 +805,7 @@ fn renderExpression(
},
}
- return renderExpression(allocator, stream, tree, indent, start_col, ??flow_expr.rhs, space);
+ return renderExpression(allocator, stream, tree, indent, start_col, flow_expr.rhs.?, space);
},
ast.Node.Id.Payload => {
@@ -1245,7 +1244,7 @@ fn renderExpression(
} else {
var it = switch_case.items.iterator(0);
while (true) {
- const node = ??it.next();
+ const node = it.next().?;
if (it.peek()) |next_node| {
try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.None);
@@ -1550,7 +1549,7 @@ fn renderExpression(
var it = asm_node.outputs.iterator(0);
while (true) {
- const asm_output = ??it.next();
+ const asm_output = it.next().?;
const node = &(asm_output.*).base;
if (it.peek()) |next_asm_output| {
@@ -1588,7 +1587,7 @@ fn renderExpression(
var it = asm_node.inputs.iterator(0);
while (true) {
- const asm_input = ??it.next();
+ const asm_input = it.next().?;
const node = &(asm_input.*).base;
if (it.peek()) |next_asm_input| {
@@ -1620,7 +1619,7 @@ fn renderExpression(
var it = asm_node.clobbers.iterator(0);
while (true) {
- const clobber_token = ??it.next();
+ const clobber_token = it.next().?;
if (it.peek() == null) {
try renderToken(tree, stream, clobber_token.*, indent_once, start_col, Space.Newline);
@@ -1994,7 +1993,7 @@ fn renderDocComments(
indent: usize,
start_col: *usize,
) (@typeOf(stream).Child.Error || Error)!void {
- const comment = node.doc_comments ?? return;
+ const comment = node.doc_comments orelse return;
var it = comment.lines.iterator(0);
const first_token = node.firstToken();
while (it.next()) |line_token_index| {
@@ -2022,7 +2021,7 @@ fn nodeIsBlock(base: *const ast.Node) bool {
}
fn nodeCausesSliceOpSpace(base: *ast.Node) bool {
- const infix_op = base.cast(ast.Node.InfixOp) ?? return false;
+ const infix_op = base.cast(ast.Node.InfixOp) orelse return false;
return switch (infix_op.op) {
ast.Node.InfixOp.Op.Period => false,
else => true,
diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig
index b288a3adb7..4534529f36 100644
--- a/std/zig/tokenizer.zig
+++ b/std/zig/tokenizer.zig
@@ -39,6 +39,7 @@ pub const Token = struct {
Keyword{ .bytes = "noalias", .id = Id.Keyword_noalias },
Keyword{ .bytes = "null", .id = Id.Keyword_null },
Keyword{ .bytes = "or", .id = Id.Keyword_or },
+ Keyword{ .bytes = "orelse", .id = Id.Keyword_orelse },
Keyword{ .bytes = "packed", .id = Id.Keyword_packed },
Keyword{ .bytes = "promise", .id = Id.Keyword_promise },
Keyword{ .bytes = "pub", .id = Id.Keyword_pub },
@@ -129,7 +130,6 @@ pub const Token = struct {
Ampersand,
AmpersandEqual,
QuestionMark,
- QuestionMarkQuestionMark,
AngleBracketLeft,
AngleBracketLeftEqual,
AngleBracketAngleBracketLeft,
@@ -171,6 +171,7 @@ pub const Token = struct {
Keyword_noalias,
Keyword_null,
Keyword_or,
+ Keyword_orelse,
Keyword_packed,
Keyword_promise,
Keyword_pub,
@@ -254,7 +255,6 @@ pub const Tokenizer = struct {
Ampersand,
Caret,
Percent,
- QuestionMark,
Plus,
PlusPercent,
AngleBracketLeft,
@@ -345,6 +345,11 @@ pub const Tokenizer = struct {
self.index += 1;
break;
},
+ '?' => {
+ result.id = Token.Id.QuestionMark;
+ self.index += 1;
+ break;
+ },
':' => {
result.id = Token.Id.Colon;
self.index += 1;
@@ -359,9 +364,6 @@ pub const Tokenizer = struct {
'+' => {
state = State.Plus;
},
- '?' => {
- state = State.QuestionMark;
- },
'<' => {
state = State.AngleBracketLeft;
},
@@ -496,18 +498,6 @@ pub const Tokenizer = struct {
},
},
- State.QuestionMark => switch (c) {
- '?' => {
- result.id = Token.Id.QuestionMarkQuestionMark;
- self.index += 1;
- break;
- },
- else => {
- result.id = Token.Id.QuestionMark;
- break;
- },
- },
-
State.Percent => switch (c) {
'=' => {
result.id = Token.Id.PercentEqual;
@@ -1084,9 +1074,6 @@ pub const Tokenizer = struct {
State.Plus => {
result.id = Token.Id.Plus;
},
- State.QuestionMark => {
- result.id = Token.Id.QuestionMark;
- },
State.Percent => {
result.id = Token.Id.Percent;
},
diff --git a/test/behavior.zig b/test/behavior.zig
index 3341fe717d..eb8b643bb7 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -31,6 +31,7 @@ comptime {
_ = @import("cases/incomplete_struct_param_tld.zig");
_ = @import("cases/ir_block_deps.zig");
_ = @import("cases/math.zig");
+ _ = @import("cases/merge_error_sets.zig");
_ = @import("cases/misc.zig");
_ = @import("cases/namespace_depends_on_compile_var/index.zig");
_ = @import("cases/new_stack_call.zig");
diff --git a/test/cases/array.zig b/test/cases/array.zig
index ef919b27bd..b481261b4f 100644
--- a/test/cases/array.zig
+++ b/test/cases/array.zig
@@ -116,6 +116,15 @@ test "array len property" {
assert(@typeOf(x).len == 5);
}
+test "array len field" {
+ var arr = [4]u8{ 0, 0, 0, 0 };
+ var ptr = &arr;
+ assert(arr.len == 4);
+ comptime assert(arr.len == 4);
+ assert(ptr.len == 4);
+ comptime assert(ptr.len == 4);
+}
+
test "single-item pointer to array indexing and slicing" {
testSingleItemPtrArrayIndexSlice();
comptime testSingleItemPtrArrayIndexSlice();
@@ -143,4 +152,3 @@ fn testImplicitCastSingleItemPtr() void {
slice[0] += 1;
assert(byte == 101);
}
-
diff --git a/test/cases/bugs/656.zig b/test/cases/bugs/656.zig
index a6035d51bb..f93f0ac4d5 100644
--- a/test/cases/bugs/656.zig
+++ b/test/cases/bugs/656.zig
@@ -9,7 +9,7 @@ const Value = struct {
align_expr: ?u32,
};
-test "nullable if after an if in a switch prong of a switch with 2 prongs in an else" {
+test "optional if after an if in a switch prong of a switch with 2 prongs in an else" {
foo(false, true);
}
diff --git a/test/cases/cast.zig b/test/cases/cast.zig
index 7358a4ffd8..ade1cf78aa 100644
--- a/test/cases/cast.zig
+++ b/test/cases/cast.zig
@@ -1,5 +1,6 @@
-const assert = @import("std").debug.assert;
-const mem = @import("std").mem;
+const std = @import("std");
+const assert = std.debug.assert;
+const mem = std.mem;
test "int to ptr cast" {
const x = usize(13);
@@ -72,7 +73,7 @@ fn Struct(comptime T: type) type {
fn maybePointer(self: ?*const Self) Self {
const none = Self{ .x = if (T == void) void{} else 0 };
- return (self ?? &none).*;
+ return (self orelse &none).*;
}
};
}
@@ -86,7 +87,7 @@ const Union = union {
fn maybePointer(self: ?*const Union) Union {
const none = Union{ .x = 0 };
- return (self ?? &none).*;
+ return (self orelse &none).*;
}
};
@@ -99,7 +100,7 @@ const Enum = enum {
}
fn maybePointer(self: ?*const Enum) Enum {
- return (self ?? &Enum.None).*;
+ return (self orelse &Enum.None).*;
}
};
@@ -108,16 +109,16 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" {
const Self = this;
x: u8,
fn constConst(p: *const *const Self) u8 {
- return (p.*).x;
+ return p.*.x;
}
fn maybeConstConst(p: ?*const *const Self) u8 {
- return ((??p).*).x;
+ return p.?.*.x;
}
fn constConstConst(p: *const *const *const Self) u8 {
- return (p.*.*).x;
+ return p.*.*.x;
}
fn maybeConstConstConst(p: ?*const *const *const Self) u8 {
- return ((??p).*.*).x;
+ return p.?.*.*.x;
}
};
const s = S{ .x = 42 };
@@ -176,56 +177,56 @@ test "string literal to &const []const u8" {
}
test "implicitly cast from T to error!?T" {
- castToMaybeTypeError(1);
- comptime castToMaybeTypeError(1);
+ castToOptionalTypeError(1);
+ comptime castToOptionalTypeError(1);
}
const A = struct {
a: i32,
};
-fn castToMaybeTypeError(z: i32) void {
+fn castToOptionalTypeError(z: i32) void {
const x = i32(1);
const y: error!?i32 = x;
- assert(??(try y) == 1);
+ assert((try y).? == 1);
const f = z;
const g: error!?i32 = f;
const a = A{ .a = z };
const b: error!?A = a;
- assert((??(b catch unreachable)).a == 1);
+ assert((b catch unreachable).?.a == 1);
}
test "implicitly cast from int to error!?T" {
- implicitIntLitToMaybe();
- comptime implicitIntLitToMaybe();
+ implicitIntLitToOptional();
+ comptime implicitIntLitToOptional();
}
-fn implicitIntLitToMaybe() void {
+fn implicitIntLitToOptional() void {
const f: ?i32 = 1;
const g: error!?i32 = 1;
}
test "return null from fn() error!?&T" {
- const a = returnNullFromMaybeTypeErrorRef();
- const b = returnNullLitFromMaybeTypeErrorRef();
+ const a = returnNullFromOptionalTypeErrorRef();
+ const b = returnNullLitFromOptionalTypeErrorRef();
assert((try a) == null and (try b) == null);
}
-fn returnNullFromMaybeTypeErrorRef() error!?*A {
+fn returnNullFromOptionalTypeErrorRef() error!?*A {
const a: ?*A = null;
return a;
}
-fn returnNullLitFromMaybeTypeErrorRef() error!?*A {
+fn returnNullLitFromOptionalTypeErrorRef() error!?*A {
return null;
}
test "peer type resolution: ?T and T" {
- assert(??peerTypeTAndMaybeT(true, false) == 0);
- assert(??peerTypeTAndMaybeT(false, false) == 3);
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
comptime {
- assert(??peerTypeTAndMaybeT(true, false) == 0);
- assert(??peerTypeTAndMaybeT(false, false) == 3);
+ assert(peerTypeTAndOptionalT(true, false).? == 0);
+ assert(peerTypeTAndOptionalT(false, false).? == 3);
}
}
-fn peerTypeTAndMaybeT(c: bool, b: bool) ?usize {
+fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize {
if (c) {
return if (b) null else usize(0);
}
@@ -250,11 +251,11 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
- assert(mem.eql(u8, ??castToMaybeSlice(), "hi"));
- comptime assert(mem.eql(u8, ??castToMaybeSlice(), "hi"));
+ assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
+ comptime assert(mem.eql(u8, castToOptionalSlice().?, "hi"));
}
-fn castToMaybeSlice() ?[]const u8 {
+fn castToOptionalSlice() ?[]const u8 {
return "hi";
}
@@ -384,3 +385,24 @@ test "const slice widen cast" {
assert(@bitCast(u32, bytes) == 0x12121212);
}
+
+test "single-item pointer of array to slice and to unknown length pointer" {
+ testCastPtrOfArrayToSliceAndPtr();
+ comptime testCastPtrOfArrayToSliceAndPtr();
+}
+
+fn testCastPtrOfArrayToSliceAndPtr() void {
+ var array = "ao" ++ "eu"; // TODO https://github.com/ziglang/zig/issues/1076
+ const x: [*]u8 = &array;
+ x[0] += 1;
+ assert(mem.eql(u8, array[0..], "boeu"));
+ const y: []u8 = &array;
+ y[0] += 1;
+ assert(mem.eql(u8, array[0..], "coeu"));
+}
+
+test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
+ const window_name = [1][*]const u8{c"window name"};
+ const x: [*]const ?[*]const u8 = &window_name;
+ assert(mem.eql(u8, std.cstr.toSliceConst(x[0].?), "window name"));
+}
diff --git a/test/cases/enum.zig b/test/cases/enum.zig
index ae9f04869b..5c78d73092 100644
--- a/test/cases/enum.zig
+++ b/test/cases/enum.zig
@@ -883,3 +883,12 @@ test "empty extern enum with members" {
};
assert(@sizeOf(E) == @sizeOf(c_int));
}
+
+test "aoeu" {
+ const LocalFoo = enum {
+ A = 1,
+ B = 0,
+ };
+ var b = LocalFoo.B;
+ assert(mem.eql(u8, @tagName(b), "B"));
+}
diff --git a/test/cases/error.zig b/test/cases/error.zig
index ced49419d5..693631fe2d 100644
--- a/test/cases/error.zig
+++ b/test/cases/error.zig
@@ -140,7 +140,7 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) void {
if (x) |v| assert(v == 1234) else |err| @compileError("bad");
}
-test "syntax: nullable operator in front of error union operator" {
+test "syntax: optional operator in front of error union operator" {
comptime {
assert(?error!i32 == ?(error!i32));
}
diff --git a/test/cases/eval.zig b/test/cases/eval.zig
index 461408afea..08d3f3a841 100644
--- a/test/cases/eval.zig
+++ b/test/cases/eval.zig
@@ -12,7 +12,7 @@ fn fibonacci(x: i32) i32 {
}
fn unwrapAndAddOne(blah: ?i32) i32 {
- return ??blah + 1;
+ return blah.? + 1;
}
const should_be_1235 = unwrapAndAddOne(1234);
test "static add one" {
@@ -610,3 +610,16 @@ test "slice of type" {
}
}
}
+
+const Wrapper = struct {
+ T: type,
+};
+
+fn wrap(comptime T: type) Wrapper {
+ return Wrapper{ .T = T };
+}
+
+test "function which returns struct with type field causes implicit comptime" {
+ const ty = wrap(i32).T;
+ assert(ty == i32);
+}
diff --git a/test/cases/generics.zig b/test/cases/generics.zig
index a76990e2a1..52aa013989 100644
--- a/test/cases/generics.zig
+++ b/test/cases/generics.zig
@@ -127,7 +127,7 @@ test "generic fn with implicit cast" {
}) == 0);
}
fn getByte(ptr: ?*const u8) u8 {
- return (??ptr).*;
+ return ptr.?.*;
}
fn getFirstByte(comptime T: type, mem: []const T) u8 {
return getByte(@ptrCast(*const u8, &mem[0]));
diff --git a/test/cases/merge_error_sets.zig b/test/cases/merge_error_sets.zig
new file mode 100644
index 0000000000..189bd16a4d
--- /dev/null
+++ b/test/cases/merge_error_sets.zig
@@ -0,0 +1,21 @@
+const A = error{
+ PathNotFound,
+ NotDir,
+};
+const B = error{OutOfMemory};
+
+const C = A || B;
+
+fn foo() C!void {
+ return error.NotDir;
+}
+
+test "merge error sets" {
+ if (foo()) {
+ @panic("unexpected");
+ } else |err| switch (err) {
+ error.OutOfMemory => @panic("unexpected"),
+ error.PathNotFound => @panic("unexpected"),
+ error.NotDir => {},
+ }
+}
diff --git a/test/cases/misc.zig b/test/cases/misc.zig
index 9450cf5e6e..beb0d6d456 100644
--- a/test/cases/misc.zig
+++ b/test/cases/misc.zig
@@ -505,7 +505,7 @@ test "@typeId" {
assert(@typeId(@typeOf(1.0)) == Tid.ComptimeFloat);
assert(@typeId(@typeOf(undefined)) == Tid.Undefined);
assert(@typeId(@typeOf(null)) == Tid.Null);
- assert(@typeId(?i32) == Tid.Nullable);
+ assert(@typeId(?i32) == Tid.Optional);
assert(@typeId(error!i32) == Tid.ErrorUnion);
assert(@typeId(error) == Tid.ErrorSet);
assert(@typeId(AnEnum) == Tid.Enum);
@@ -523,14 +523,6 @@ test "@typeId" {
}
}
-test "@canImplicitCast" {
- comptime {
- assert(@canImplicitCast(i64, i32(3)));
- assert(!@canImplicitCast(i32, f32(1.234)));
- assert(@canImplicitCast([]const u8, "aoeu"));
- }
-}
-
test "@typeName" {
const Struct = struct {};
const Union = union {
diff --git a/test/cases/null.zig b/test/cases/null.zig
index bd78990ff4..d2a9aaed55 100644
--- a/test/cases/null.zig
+++ b/test/cases/null.zig
@@ -1,6 +1,6 @@
const assert = @import("std").debug.assert;
-test "nullable type" {
+test "optional type" {
const x: ?bool = true;
if (x) |y| {
@@ -15,13 +15,13 @@ test "nullable type" {
const next_x: ?i32 = null;
- const z = next_x ?? 1234;
+ const z = next_x orelse 1234;
assert(z == 1234);
const final_x: ?i32 = 13;
- const num = final_x ?? unreachable;
+ const num = final_x orelse unreachable;
assert(num == 13);
}
@@ -33,12 +33,12 @@ test "test maybe object and get a pointer to the inner value" {
b.* = false;
}
- assert(??maybe_bool == false);
+ assert(maybe_bool.? == false);
}
test "rhs maybe unwrap return" {
const x: ?bool = true;
- const y = x ?? return;
+ const y = x orelse return;
}
test "maybe return" {
@@ -47,13 +47,13 @@ test "maybe return" {
}
fn maybeReturnImpl() void {
- assert(??foo(1235));
+ assert(foo(1235).?);
if (foo(null) != null) unreachable;
- assert(!??foo(1234));
+ assert(!foo(1234).?);
}
fn foo(x: ?i32) ?bool {
- const value = x ?? return null;
+ const value = x orelse return null;
return value > 1234;
}
@@ -102,12 +102,12 @@ fn testTestNullRuntime(x: ?i32) void {
assert(!(x != null));
}
-test "nullable void" {
- nullableVoidImpl();
- comptime nullableVoidImpl();
+test "optional void" {
+ optionalVoidImpl();
+ comptime optionalVoidImpl();
}
-fn nullableVoidImpl() void {
+fn optionalVoidImpl() void {
assert(bar(null) == null);
assert(bar({}) != null);
}
@@ -120,19 +120,19 @@ fn bar(x: ?void) ?void {
}
}
-const StructWithNullable = struct {
+const StructWithOptional = struct {
field: ?i32,
};
-var struct_with_nullable: StructWithNullable = undefined;
+var struct_with_optional: StructWithOptional = undefined;
-test "unwrap nullable which is field of global var" {
- struct_with_nullable.field = null;
- if (struct_with_nullable.field) |payload| {
+test "unwrap optional which is field of global var" {
+ struct_with_optional.field = null;
+ if (struct_with_optional.field) |payload| {
unreachable;
}
- struct_with_nullable.field = 1234;
- if (struct_with_nullable.field) |payload| {
+ struct_with_optional.field = 1234;
+ if (struct_with_optional.field) |payload| {
assert(payload == 1234);
} else {
unreachable;
@@ -140,6 +140,17 @@ test "unwrap nullable which is field of global var" {
}
test "null with default unwrap" {
- const x: i32 = null ?? 1;
+ const x: i32 = null orelse 1;
assert(x == 1);
}
+
+test "optional types" {
+ comptime {
+ const opt_type_struct = StructWithOptionalType { .t=u8, };
+ assert(opt_type_struct.t != null and opt_type_struct.t.? == u8);
+ }
+}
+
+const StructWithOptionalType = struct {
+ t: ?type,
+};
diff --git a/test/cases/reflection.zig b/test/cases/reflection.zig
index 48fcc9ef03..3d3af3c889 100644
--- a/test/cases/reflection.zig
+++ b/test/cases/reflection.zig
@@ -2,7 +2,7 @@ const assert = @import("std").debug.assert;
const mem = @import("std").mem;
const reflection = this;
-test "reflection: array, pointer, nullable, error union type child" {
+test "reflection: array, pointer, optional, error union type child" {
comptime {
assert(([10]u8).Child == u8);
assert((*u8).Child == u8);
diff --git a/test/cases/struct.zig b/test/cases/struct.zig
index 6f7d44e09b..6952611a8c 100644
--- a/test/cases/struct.zig
+++ b/test/cases/struct.zig
@@ -421,3 +421,20 @@ const Expr = union(enum) {
fn alloc(comptime T: type) []T {
return []T{};
}
+
+test "call method with mutable reference to struct with no fields" {
+ const S = struct {
+ fn doC(s: *const this) bool {
+ return true;
+ }
+ fn do(s: *this) bool {
+ return true;
+ }
+ };
+
+ var s = S{};
+ assert(S.doC(&s));
+ assert(s.doC());
+ assert(S.do(&s));
+ assert(s.do());
+}
diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig
index 921ff785a7..1bc58b14e1 100644
--- a/test/cases/type_info.zig
+++ b/test/cases/type_info.zig
@@ -39,12 +39,28 @@ test "type info: pointer type info" {
fn testPointer() void {
const u32_ptr_info = @typeInfo(*u32);
assert(TypeId(u32_ptr_info) == TypeId.Pointer);
+ assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.One);
assert(u32_ptr_info.Pointer.is_const == false);
assert(u32_ptr_info.Pointer.is_volatile == false);
- assert(u32_ptr_info.Pointer.alignment == 4);
+ assert(u32_ptr_info.Pointer.alignment == @alignOf(u32));
assert(u32_ptr_info.Pointer.child == u32);
}
+test "type info: unknown length pointer type info" {
+ testUnknownLenPtr();
+ comptime testUnknownLenPtr();
+}
+
+fn testUnknownLenPtr() void {
+ const u32_ptr_info = @typeInfo([*]const volatile f64);
+ assert(TypeId(u32_ptr_info) == TypeId.Pointer);
+ assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.Many);
+ assert(u32_ptr_info.Pointer.is_const == true);
+ assert(u32_ptr_info.Pointer.is_volatile == true);
+ assert(u32_ptr_info.Pointer.alignment == @alignOf(f64));
+ assert(u32_ptr_info.Pointer.child == f64);
+}
+
test "type info: slice type info" {
testSlice();
comptime testSlice();
@@ -52,11 +68,12 @@ test "type info: slice type info" {
fn testSlice() void {
const u32_slice_info = @typeInfo([]u32);
- assert(TypeId(u32_slice_info) == TypeId.Slice);
- assert(u32_slice_info.Slice.is_const == false);
- assert(u32_slice_info.Slice.is_volatile == false);
- assert(u32_slice_info.Slice.alignment == 4);
- assert(u32_slice_info.Slice.child == u32);
+ assert(TypeId(u32_slice_info) == TypeId.Pointer);
+ assert(u32_slice_info.Pointer.size == TypeInfo.Pointer.Size.Slice);
+ assert(u32_slice_info.Pointer.is_const == false);
+ assert(u32_slice_info.Pointer.is_volatile == false);
+ assert(u32_slice_info.Pointer.alignment == 4);
+ assert(u32_slice_info.Pointer.child == u32);
}
test "type info: array type info" {
@@ -71,15 +88,15 @@ fn testArray() void {
assert(arr_info.Array.child == bool);
}
-test "type info: nullable type info" {
- testNullable();
- comptime testNullable();
+test "type info: optional type info" {
+ testOptional();
+ comptime testOptional();
}
-fn testNullable() void {
+fn testOptional() void {
const null_info = @typeInfo(?void);
- assert(TypeId(null_info) == TypeId.Nullable);
- assert(null_info.Nullable.child == void);
+ assert(TypeId(null_info) == TypeId.Optional);
+ assert(null_info.Optional.child == void);
}
test "type info: promise info" {
@@ -149,11 +166,11 @@ fn testUnion() void {
assert(TypeId(typeinfo_info) == TypeId.Union);
assert(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto);
assert(typeinfo_info.Union.tag_type == TypeId);
- assert(typeinfo_info.Union.fields.len == 26);
+ assert(typeinfo_info.Union.fields.len == 25);
assert(typeinfo_info.Union.fields[4].enum_field != null);
- assert((??typeinfo_info.Union.fields[4].enum_field).value == 4);
+ assert(typeinfo_info.Union.fields[4].enum_field.?.value == 4);
assert(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int));
- assert(typeinfo_info.Union.defs.len == 21);
+ assert(typeinfo_info.Union.defs.len == 20);
const TestNoTagUnion = union {
Foo: void,
diff --git a/test/cases/while.zig b/test/cases/while.zig
index a95481668d..fe53522ea6 100644
--- a/test/cases/while.zig
+++ b/test/cases/while.zig
@@ -81,7 +81,7 @@ test "while with else" {
assert(got_else == 1);
}
-test "while with nullable as condition" {
+test "while with optional as condition" {
numbers_left = 10;
var sum: i32 = 0;
while (getNumberOrNull()) |value| {
@@ -90,7 +90,7 @@ test "while with nullable as condition" {
assert(sum == 45);
}
-test "while with nullable as condition with else" {
+test "while with optional as condition with else" {
numbers_left = 10;
var sum: i32 = 0;
var got_else: i32 = 0;
@@ -132,7 +132,7 @@ fn getNumberOrNull() ?i32 {
};
}
-test "while on nullable with else result follow else prong" {
+test "while on optional with else result follow else prong" {
const result = while (returnNull()) |value| {
break value;
} else
@@ -140,8 +140,8 @@ test "while on nullable with else result follow else prong" {
assert(result == 2);
}
-test "while on nullable with else result follow break prong" {
- const result = while (returnMaybe(10)) |value| {
+test "while on optional with else result follow break prong" {
+ const result = while (returnOptional(10)) |value| {
break value;
} else
i32(2);
@@ -210,7 +210,7 @@ fn testContinueOuter() void {
fn returnNull() ?i32 {
return null;
}
-fn returnMaybe(x: i32) ?i32 {
+fn returnOptional(x: i32) ?i32 {
return x;
}
fn returnError() error!i32 {
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 8d5dc68d45..eec077ef85 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -284,7 +284,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.addC("expose function pointer to C land",
\\const c = @cImport(@cInclude("stdlib.h"));
\\
- \\export fn compare_fn(a: ?[*]const c_void, b: ?[*]const c_void) c_int {
+ \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int {
\\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a));
\\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b));
\\ if (a_int.* < b_int.*) {
@@ -299,7 +299,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\export fn main() c_int {
\\ var array = []u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 };
\\
- \\ c.qsort(@ptrCast(?[*]c_void, array[0..].ptr), c_ulong(array.len), @sizeOf(i32), compare_fn);
+ \\ c.qsort(@ptrCast(?*c_void, array[0..].ptr), c_ulong(array.len), @sizeOf(i32), compare_fn);
\\
\\ for (array) |item, i| {
\\ if (item != i) {
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index 4bd6e9bc24..06f17a37ee 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -1,6 +1,57 @@
const tests = @import("tests.zig");
pub fn addCases(cases: *tests.CompileErrorContext) void {
+ cases.add(
+ "use implicit casts to assign null to non-nullable pointer",
+ \\export fn entry() void {
+ \\ var x: i32 = 1234;
+ \\ var p: *i32 = &x;
+ \\ var pp: *?*i32 = &p;
+ \\ pp.* = null;
+ \\ var y = p.*;
+ \\}
+ ,
+ ".tmp_source.zig:4:23: error: expected type '*?*i32', found '**i32'",
+ );
+
+ cases.add(
+ "attempted implicit cast from T to [*]const T",
+ \\export fn entry() void {
+ \\ const x: [*]const bool = true;
+ \\}
+ ,
+ ".tmp_source.zig:2:30: error: expected type '[*]const bool', found 'bool'",
+ );
+
+ cases.add(
+ "dereference unknown length pointer",
+ \\export fn entry(x: [*]i32) i32 {
+ \\ return x.*;
+ \\}
+ ,
+ ".tmp_source.zig:2:13: error: index syntax required for unknown-length pointer type '[*]i32'",
+ );
+
+ cases.add(
+ "field access of unknown length pointer",
+ \\const Foo = extern struct {
+ \\ a: i32,
+ \\};
+ \\
+ \\export fn entry(foo: [*]Foo) void {
+ \\ foo.a += 1;
+ \\}
+ ,
+ ".tmp_source.zig:6:8: error: type '[*]Foo' does not support field access",
+ );
+
+ cases.add(
+ "unknown length pointer to opaque",
+ \\export const T = [*]@OpaqueType();
+ ,
+ ".tmp_source.zig:1:18: error: unknown-length pointer to opaque",
+ );
+
cases.add(
"error when evaluating return type",
\\const Foo = struct {
@@ -1303,7 +1354,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\ if (true) |x| { }
\\}
,
- ".tmp_source.zig:2:9: error: expected nullable type, found 'bool'",
+ ".tmp_source.zig:2:9: error: expected optional type, found 'bool'",
);
cases.add(
@@ -1742,7 +1793,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "assign null to non-nullable pointer",
+ "assign null to non-optional pointer",
\\const a: *u8 = null;
\\
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
@@ -2258,7 +2309,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\ defer try canFail();
\\
- \\ const a = maybeInt() ?? return;
+ \\ const a = maybeInt() orelse return;
\\}
\\
\\fn canFail() error!void { }
@@ -2779,7 +2830,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "while expected bool, got nullable",
+ "while expected bool, got optional",
\\export fn foo() void {
\\ while (bar()) {}
\\}
@@ -2799,23 +2850,23 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "while expected nullable, got bool",
+ "while expected optional, got bool",
\\export fn foo() void {
\\ while (bar()) |x| {}
\\}
\\fn bar() bool { return true; }
,
- ".tmp_source.zig:2:15: error: expected nullable type, found 'bool'",
+ ".tmp_source.zig:2:15: error: expected optional type, found 'bool'",
);
cases.add(
- "while expected nullable, got error union",
+ "while expected optional, got error union",
\\export fn foo() void {
\\ while (bar()) |x| {}
\\}
\\fn bar() error!i32 { return 1; }
,
- ".tmp_source.zig:2:15: error: expected nullable type, found 'error!i32'",
+ ".tmp_source.zig:2:15: error: expected optional type, found 'error!i32'",
);
cases.add(
@@ -2829,7 +2880,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
);
cases.add(
- "while expected error union, got nullable",
+ "while expected error union, got optional",
\\export fn foo() void {
\\ while (bar()) |x| {} else |err| {}
\\}
@@ -3291,7 +3342,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
".tmp_source.zig:9:4: error: variable of type 'comptime_float' must be const or comptime",
".tmp_source.zig:10:4: error: variable of type '(block)' must be const or comptime",
".tmp_source.zig:11:4: error: variable of type '(null)' must be const or comptime",
- ".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime",
+ ".tmp_source.zig:12:4: error: variable of type 'Opaque' not allowed",
".tmp_source.zig:13:4: error: variable of type 'type' must be const or comptime",
".tmp_source.zig:14:4: error: variable of type '(namespace)' must be const or comptime",
".tmp_source.zig:15:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime",
diff --git a/test/tests.zig b/test/tests.zig
index cc562331fe..b66441f628 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -282,8 +282,8 @@ pub const CompareOutputContext = struct {
var stdout = Buffer.initNull(b.allocator);
var stderr = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr, max_stdout_size) catch unreachable;
@@ -601,8 +601,8 @@ pub const CompileErrorContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
@@ -872,8 +872,8 @@ pub const TranslateCContext = struct {
var stdout_buf = Buffer.initNull(b.allocator);
var stderr_buf = Buffer.initNull(b.allocator);
- var stdout_file_in_stream = io.FileInStream.init(&??child.stdout);
- var stderr_file_in_stream = io.FileInStream.init(&??child.stderr);
+ var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?);
+ var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?);
stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable;
stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable;
diff --git a/test/translate_c.zig b/test/translate_c.zig
index ac0a98e6cc..417171d2c2 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -99,7 +99,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("restrict -> noalias",
\\void foo(void *restrict bar, void *restrict);
,
- \\pub extern fn foo(noalias bar: ?[*]c_void, noalias arg1: ?[*]c_void) void;
+ \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void;
);
cases.add("simple struct",
@@ -172,7 +172,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
,
\\pub const struct_Foo = @OpaqueType();
,
- \\pub extern fn some_func(foo: ?[*]struct_Foo, x: c_int) ?[*]struct_Foo;
+ \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo;
,
\\pub const Foo = struct_Foo;
);
@@ -233,7 +233,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
,
\\pub const Foo = c_void;
,
- \\pub extern fn fun(a: ?[*]Foo) Foo;
+ \\pub extern fn fun(a: ?*Foo) Foo;
);
cases.add("generate inline func for #define global extern fn",
@@ -246,13 +246,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub extern var fn_ptr: ?extern fn() void;
,
\\pub inline fn foo() void {
- \\ return (??fn_ptr)();
+ \\ return fn_ptr.?();
\\}
,
\\pub extern var fn_ptr2: ?extern fn(c_int, f32) u8;
,
\\pub inline fn bar(arg0: c_int, arg1: f32) u8 {
- \\ return (??fn_ptr2)(arg0, arg1);
+ \\ return fn_ptr2.?(arg0, arg1);
\\}
);
@@ -505,7 +505,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return 6;
\\}
,
- \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ if ((a != 0) and (b != 0)) return 0;
\\ if ((b != 0) and (c != null)) return 1;
\\ if ((a != 0) and (c != null)) return 2;
@@ -608,7 +608,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ field: c_int,
\\};
\\pub export fn read_field(foo: ?[*]struct_Foo) c_int {
- \\ return (??foo).field;
+ \\ return foo.?.field;
\\}
);
@@ -653,8 +653,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return x;
\\}
,
- \\pub export fn foo(x: ?[*]c_ushort) ?[*]c_void {
- \\ return @ptrCast(?[*]c_void, x);
+ \\pub export fn foo(x: ?[*]c_ushort) ?*c_void {
+ \\ return @ptrCast(?*c_void, x);
\\}
);
@@ -969,11 +969,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub export fn bar() void {
\\ var f: ?extern fn() void = foo;
\\ var b: ?extern fn() c_int = baz;
- \\ (??f)();
- \\ (??f)();
+ \\ f.?();
+ \\ f.?();
\\ foo();
- \\ _ = (??b)();
- \\ _ = (??b)();
+ \\ _ = b.?();
+ \\ _ = b.?();
\\ _ = baz();
\\}
);
@@ -984,7 +984,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
,
\\pub export fn foo(x: ?[*]c_int) void {
- \\ (??x).* = 1;
+ \\ x.?.* = 1;
\\}
);
@@ -1012,7 +1012,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub fn foo() c_int {
\\ var x: c_int = 1234;
\\ var ptr: ?[*]c_int = &x;
- \\ return (??ptr).*;
+ \\ return ptr.?.*;
\\}
);
@@ -1119,7 +1119,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub const glClearPFN = PFNGLCLEARPROC;
,
\\pub inline fn glClearUnion(arg0: GLbitfield) void {
- \\ return (??glProcs.gl.Clear)(arg0);
+ \\ return glProcs.gl.Clear.?(arg0);
\\}
,
\\pub const OpenGLProcs = union_OpenGLProcs;
@@ -1173,7 +1173,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return !c;
\\}
,
- \\pub fn foo(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub fn foo(a: c_int, b: f32, c: ?*c_void) c_int {
\\ return !(a == 0);
\\ return !(a != 0);
\\ return !(b != 0);
@@ -1231,7 +1231,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ B,
\\ C,
\\};
- \\pub fn if_none_bool(a: c_int, b: f32, c: ?[*]c_void, d: enum_SomeEnum) c_int {
+ \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int {
\\ if (a != 0) return 0;
\\ if (b != 0) return 1;
\\ if (c != null) return 2;
@@ -1248,7 +1248,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn while_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;
@@ -1264,7 +1264,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return 3;
\\}
,
- \\pub fn for_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int {
+ \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int {
\\ while (a != 0) return 0;
\\ while (b != 0) return 1;
\\ while (c != null) return 2;