diff --git a/CMakeLists.txt b/CMakeLists.txt index a6339290f9..7d34a253a5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -389,6 +389,8 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_subMagsF32.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_subMagsF64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_tryPropagateNaNF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_mulAdd.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_mulAdd.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/softfloat_state.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui32_to_f128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/ui64_to_f128M.c" @@ -522,6 +524,9 @@ set(ZIG_STD_FILES "hash/siphash.zig" "hash_map.zig" "heap.zig" + "heap/logging_allocator.zig" + "http.zig" + "http/headers.zig" "io.zig" "io/c_out_stream.zig" "io/seekable_stream.zig" @@ -6653,15 +6658,18 @@ set(OPTIMIZED_C_FLAGS "-std=c99 -O3") set(EXE_LDFLAGS " ") if(MSVC) set(EXE_LDFLAGS "/STACK:16777216") -elseif(ZIG_STATIC) +elseif(MINGW) + set(EXE_LDFLAGS "${EXE_LDFLAGS} -Wl,--stack,16777216") +endif() + +if(ZIG_STATIC) if(APPLE) set(EXE_LDFLAGS "-static-libgcc -static-libstdc++") else() set(EXE_LDFLAGS "-static") endif() -else() - set(EXE_LDFLAGS " ") endif() + if(ZIG_TEST_COVERAGE) set(EXE_CFLAGS "${EXE_CFLAGS} -fprofile-arcs -ftest-coverage") set(EXE_LDFLAGS "${EXE_LDFLAGS} -fprofile-arcs -ftest-coverage") @@ -6729,6 +6737,7 @@ add_custom_command( "-Doutput-dir=${CMAKE_BINARY_DIR}" WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}" DEPENDS + zig0 "${CMAKE_SOURCE_DIR}/src-self-hosted/dep_tokenizer.zig" "${CMAKE_SOURCE_DIR}/src-self-hosted/stage1.zig" "${CMAKE_SOURCE_DIR}/src-self-hosted/translate_c.zig" diff --git a/build.zig b/build.zig index 7ee30b8477..d233e76a59 100644 --- a/build.zig +++ b/build.zig @@ -74,7 +74,8 @@ pub fn build(b: *Builder) !void { const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse false; const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false; if (!skip_self_hosted) { - test_step.dependOn(&exe.step); + // TODO re-enable this after https://github.com/ziglang/zig/issues/2377 + //test_step.dependOn(&exe.step); } const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") orelse false; exe.setVerboseLink(verbose_link_exe); diff --git a/doc/langref.html.in b/doc/langref.html.in index a8673a3d77..65e27821ee 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5096,7 +5096,7 @@ fn gimmeTheBiggerInteger(a: u64, b: u64) u64 {

For example, if we were to introduce another function to the above snippet:

- {#code_begin|test_err|values of type 'type' must be comptime known#} + {#code_begin|test_err|cannot store runtime value in type 'type'#} fn max(comptime T: type, a: T, b: T) T { return if (a > b) a else b; } @@ -6259,6 +6259,13 @@ comptime { This function is only valid within function scope.

+ {#header_close#} + {#header_open|@mulAdd#} +
{#syntax#}@mulAdd(comptime T: type, a: T, b: T, c: T) T{#endsyntax#}
+

+ Fused multiply-add (for floats): computes {#syntax#}(a * b) + c{#endsyntax#}, but + rounds only once, and is thus more accurate. +

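A minimal usage sketch, assuming the signature documented above; the operands are chosen so the expected result is exact:

    const assert = @import("std").debug.assert;

    test "@mulAdd" {
        // (2.5 * 2.0) + 1.0, computed with a single rounding step
        assert(@mulAdd(f32, 2.5, 2.0, 1.0) == 6.0);
    }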
{#header_close#} {#header_open|@byteSwap#} @@ -7347,10 +7354,91 @@ test "@setRuntimeSafety" {
{#syntax#}@sqrt(comptime T: type, value: T) T{#endsyntax#}

Performs the square root of a floating point number. Uses a dedicated hardware instruction - when available. Currently only supports f32 and f64 at runtime. f128 at runtime is TODO. + when available. Supports f16, f32, f64, and f128, as well as vectors.

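A minimal usage sketch, assuming the signature above (the first argument is the float type):

    const assert = @import("std").debug.assert;

    test "@sqrt" {
        assert(@sqrt(f32, 4.0) == 2.0);
        assert(@sqrt(f64, 9.0) == 3.0);
    }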
+ {#header_close#} + {#header_open|@sin#} +
{#syntax#}@sin(comptime T: type, value: T) T{#endsyntax#}

- This is a low-level intrinsic. Most code can use {#syntax#}std.math.sqrt{#endsyntax#} instead. + Sine trigonometric function on a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@cos#} +
{#syntax#}@cos(comptime T: type, value: T) T{#endsyntax#}
+

+ Cosine trigonometric function on a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

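A minimal sketch covering both trigonometric builtins above, assuming the documented signatures; the inputs are chosen so the results are exact:

    const assert = @import("std").debug.assert;

    test "@sin and @cos" {
        assert(@sin(f32, 0.0) == 0.0);
        assert(@cos(f32, 0.0) == 1.0);
    }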
+ {#header_close#} + {#header_open|@exp#} +
{#syntax#}@exp(comptime T: type, value: T) T{#endsyntax#}
+

+ Base-e exponential function on a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@exp2#} +
{#syntax#}@exp2(comptime T: type, value: T) T{#endsyntax#}
+

+ Base-2 exponential function on a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@ln#} +
{#syntax#}@ln(comptime T: type, value: T) T{#endsyntax#}
+

+ Returns the natural logarithm of a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@log2#} +
{#syntax#}@log2(comptime T: type, value: T) T{#endsyntax#}
+

+ Returns the logarithm to the base 2 of a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@log10#} +
{#syntax#}@log10(comptime T: type, value: T) T{#endsyntax#}
+

+ Returns the logarithm to the base 10 of a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

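A minimal sketch covering the exponential and logarithm builtins above (@exp, @exp2, @ln, @log2, @log10), assuming the documented signatures; the inputs are picked so that, assuming correctly rounded intrinsics, the results are exact:

    const assert = @import("std").debug.assert;

    test "exponential and logarithm builtins" {
        assert(@exp(f32, 0.0) == 1.0);
        assert(@exp2(f32, 3.0) == 8.0);
        assert(@ln(f32, 1.0) == 0.0);
        assert(@log2(f32, 8.0) == 3.0);
        assert(@log10(f32, 1.0) == 0.0);
    }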
+ {#header_close#} + {#header_open|@fabs#} +
{#syntax#}@fabs(comptime T: type, value: T) T{#endsyntax#}
+

+ Returns the absolute value of a floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@floor#} +
{#syntax#}@floor(comptime T: type, value: T) T{#endsyntax#}
+

+ Returns the largest integral value not greater than the given floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@ceil#} +
{#syntax#}@ceil(comptime T: type, value: T) T{#endsyntax#}
+

+ Returns the smallest integral value not less than the given floating point number. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@trunc#} +
{#syntax#}@trunc(comptime T: type, value: T) T{#endsyntax#}
+

+ Rounds the given floating point number to an integer, towards zero. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64. +

+ {#header_close#} + {#header_open|@round#} +
{#syntax#}@round(comptime T: type, value: T) T{#endsyntax#}
+

+ Rounds the given floating point number to the nearest integer; if two integers are equally close, rounds away from zero. Uses a dedicated hardware instruction + when available. Currently supports f32 and f64.

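A minimal sketch covering the remaining float builtins above (@fabs, @floor, @ceil, @trunc, @round), assuming the documented signatures:

    const assert = @import("std").debug.assert;

    test "float rounding builtins" {
        assert(@fabs(f32, -2.5) == 2.5);
        assert(@floor(f32, 2.7) == 2.0);
        assert(@ceil(f32, 2.1) == 3.0);
        assert(@trunc(f32, -2.7) == -2.0);
        assert(@round(f32, 2.5) == 3.0);
    }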
{#header_close#} diff --git a/src-self-hosted/dep_tokenizer.zig b/src-self-hosted/dep_tokenizer.zig index 2721944451..f802765bc0 100644 --- a/src-self-hosted/dep_tokenizer.zig +++ b/src-self-hosted/dep_tokenizer.zig @@ -998,7 +998,7 @@ fn printCharValues(out: var, bytes: []const u8) !void { fn printUnderstandableChar(out: var, char: u8) !void { if (!std.ascii.isPrint(char) or char == ' ') { - std.fmt.format(out.context, anyerror, out.output, "\\x{X2}", char) catch {}; + std.fmt.format(out.context, anyerror, out.output, "\\x{X:2}", char) catch {}; } else { try out.write("'"); try out.write([_]u8{printable_char_tab[char]}); diff --git a/src/all_types.hpp b/src/all_types.hpp index 5aa1c78ea1..bb5bef04bb 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -34,12 +34,17 @@ struct CodeGen; struct ConstExprValue; struct IrInstruction; struct IrInstructionCast; +struct IrInstructionAllocaGen; struct IrBasicBlock; struct ScopeDecls; struct ZigWindowsSDK; struct Tld; struct TldExport; struct IrAnalyze; +struct ResultLoc; +struct ResultLocPeer; +struct ResultLocPeerParent; +struct ResultLocBitCast; enum X64CABIClass { X64CABIClass_Unknown, @@ -198,6 +203,9 @@ enum ConstPtrMut { // The pointer points to memory that is known only at runtime. // For example it may point to the initializer value of a variable. ConstPtrMutRuntimeVar, + // The pointer points to memory for which it must be inferred whether the + // value is comptime known or not. + ConstPtrMutInfer, }; struct ConstPtrValue { @@ -289,6 +297,7 @@ struct RuntimeHintSlice { struct ConstGlobalRefs { LLVMValueRef llvm_value; LLVMValueRef llvm_global; + uint32_t align; }; struct ConstExprValue { @@ -325,6 +334,10 @@ struct ConstExprValue { RuntimeHintPtr rh_ptr; RuntimeHintSlice rh_slice; } data; + + // uncomment these to find bugs. 
can't leave them uncommented because of a gcc-9 warning + //ConstExprValue(const ConstExprValue &other) = delete; // plz zero initialize with {} + //ConstExprValue& operator= (const ConstExprValue &other) = delete; // use copy_const_val }; enum ReturnKnowledge { @@ -426,7 +439,7 @@ enum NodeType { NodeTypeVariableDeclaration, NodeTypeTestDecl, NodeTypeBinOpExpr, - NodeTypeUnwrapErrorExpr, + NodeTypeCatchExpr, NodeTypeFloatLiteral, NodeTypeIntLiteral, NodeTypeStringLiteral, @@ -1097,6 +1110,8 @@ struct ZigPackage { // reminder: hash tables must be initialized before use HashMap package_table; + + bool added_to_cache; }; // Stuff that only applies to a struct which is the implicit root struct of a file @@ -1364,7 +1379,7 @@ struct ZigFn { AstNode *fn_no_inline_set_node; AstNode *fn_static_eval_set_node; - ZigList alloca_list; + ZigList alloca_gen_list; ZigList variable_list; Buf *section_name; @@ -1406,6 +1421,7 @@ enum BuiltinFnId { BuiltinFnIdSubWithOverflow, BuiltinFnIdMulWithOverflow, BuiltinFnIdShlWithOverflow, + BuiltinFnIdMulAdd, BuiltinFnIdCInclude, BuiltinFnIdCDefine, BuiltinFnIdCUndef, @@ -1433,6 +1449,19 @@ enum BuiltinFnId { BuiltinFnIdRem, BuiltinFnIdMod, BuiltinFnIdSqrt, + BuiltinFnIdSin, + BuiltinFnIdCos, + BuiltinFnIdExp, + BuiltinFnIdExp2, + BuiltinFnIdLn, + BuiltinFnIdLog2, + BuiltinFnIdLog10, + BuiltinFnIdFabs, + BuiltinFnIdFloor, + BuiltinFnIdCeil, + BuiltinFnIdTrunc, + BuiltinFnIdNearbyInt, + BuiltinFnIdRound, BuiltinFnIdTruncate, BuiltinFnIdIntCast, BuiltinFnIdFloatCast, @@ -1554,9 +1583,8 @@ enum ZigLLVMFnId { ZigLLVMFnIdClz, ZigLLVMFnIdPopCount, ZigLLVMFnIdOverflowArithmetic, - ZigLLVMFnIdFloor, - ZigLLVMFnIdCeil, - ZigLLVMFnIdSqrt, + ZigLLVMFnIdFMA, + ZigLLVMFnIdFloatOp, ZigLLVMFnIdBswap, ZigLLVMFnIdBitReverse, }; @@ -1583,7 +1611,9 @@ struct ZigLLVMFnKey { uint32_t bit_count; } pop_count; struct { + BuiltinFnId op; uint32_t bit_count; + uint32_t vector_len; // 0 means not a vector } floating; struct { AddSubMul add_sub_mul; @@ -1984,6 +2014,11 @@ struct ScopeDecls { bool any_imports_failed; }; +enum LVal { + LValNone, + LValPtr, +}; + // This scope comes from a block expression in user code. // NodeTypeBlock struct ScopeBlock { @@ -1992,12 +2027,14 @@ struct ScopeBlock { Buf *name; IrBasicBlock *end_block; IrInstruction *is_comptime; + ResultLocPeerParent *peer_parent; ZigList *incoming_values; ZigList *incoming_blocks; AstNode *safety_set_node; AstNode *fast_math_set_node; + LVal lval; bool safety_off; bool fast_math_on; }; @@ -2041,12 +2078,14 @@ struct ScopeCImport { struct ScopeLoop { Scope base; + LVal lval; Buf *name; IrBasicBlock *break_block; IrBasicBlock *continue_block; IrInstruction *is_comptime; ZigList *incoming_values; ZigList *incoming_blocks; + ResultLocPeerParent *peer_parent; }; // This scope blocks certain things from working such as comptime continue @@ -2123,6 +2162,8 @@ struct IrBasicBlock { const char *name_hint; size_t debug_id; size_t ref_count; + // index into the basic block list + size_t index; LLVMBasicBlockRef llvm_block; LLVMBasicBlockRef llvm_exit_block; // The instruction that referenced this basic block and caused us to @@ -2133,11 +2174,10 @@ struct IrBasicBlock { // if the branch is comptime. The instruction points to the reason // the basic block must be comptime. 
IrInstruction *must_be_comptime_source_instr; -}; - -enum LVal { - LValNone, - LValPtr, + IrInstruction *suspend_instruction_ref; + bool already_appended; + bool suspended; + bool in_resume_stack; }; // These instructions are in transition to having "pass 1" instructions @@ -2170,19 +2210,17 @@ enum IrInstructionId { IrInstructionIdUnionFieldPtr, IrInstructionIdElemPtr, IrInstructionIdVarPtr, - IrInstructionIdCall, + IrInstructionIdReturnPtr, + IrInstructionIdCallSrc, + IrInstructionIdCallGen, IrInstructionIdConst, IrInstructionIdReturn, IrInstructionIdCast, IrInstructionIdResizeSlice, IrInstructionIdContainerInitList, IrInstructionIdContainerInitFields, - IrInstructionIdStructInit, - IrInstructionIdUnionInit, IrInstructionIdUnreachable, IrInstructionIdTypeOf, - IrInstructionIdToPtrType, - IrInstructionIdPtrTypeChild, IrInstructionIdSetCold, IrInstructionIdSetRuntimeSafety, IrInstructionIdSetFloatMode, @@ -2207,6 +2245,7 @@ enum IrInstructionId { IrInstructionIdCDefine, IrInstructionIdCUndef, IrInstructionIdRef, + IrInstructionIdRefGen, IrInstructionIdCompileErr, IrInstructionIdCompileLog, IrInstructionIdErrName, @@ -2225,7 +2264,8 @@ enum IrInstructionId { IrInstructionIdBoolNot, IrInstructionIdMemset, IrInstructionIdMemcpy, - IrInstructionIdSlice, + IrInstructionIdSliceSrc, + IrInstructionIdSliceGen, IrInstructionIdMemberCount, IrInstructionIdMemberType, IrInstructionIdMemberName, @@ -2235,7 +2275,10 @@ enum IrInstructionId { IrInstructionIdHandle, IrInstructionIdAlignOf, IrInstructionIdOverflowOp, - IrInstructionIdTestErr, + IrInstructionIdTestErrSrc, + IrInstructionIdTestErrGen, + IrInstructionIdMulAdd, + IrInstructionIdFloatOp, IrInstructionIdUnwrapErrCode, IrInstructionIdUnwrapErrPayload, IrInstructionIdErrWrapCode, @@ -2244,7 +2287,7 @@ enum IrInstructionId { IrInstructionIdTestComptime, IrInstructionIdPtrCastSrc, IrInstructionIdPtrCastGen, - IrInstructionIdBitCast, + IrInstructionIdBitCastSrc, IrInstructionIdBitCastGen, IrInstructionIdWidenOrShorten, IrInstructionIdIntToPtr, @@ -2268,6 +2311,10 @@ enum IrInstructionId { IrInstructionIdSetEvalBranchQuota, IrInstructionIdPtrType, IrInstructionIdAlignCast, + IrInstructionIdImplicitCast, + IrInstructionIdResolveResult, + IrInstructionIdResetResult, + IrInstructionIdResultPtr, IrInstructionIdOpaqueType, IrInstructionIdSetAlignStack, IrInstructionIdArgType, @@ -2296,7 +2343,6 @@ enum IrInstructionId { IrInstructionIdAddImplicitReturnType, IrInstructionIdMergeErrRetTraces, IrInstructionIdMarkErrRetTracePtr, - IrInstructionIdSqrt, IrInstructionIdErrSetCast, IrInstructionIdToBytes, IrInstructionIdFromBytes, @@ -2307,10 +2353,13 @@ enum IrInstructionId { IrInstructionIdAssertNonNull, IrInstructionIdHasDecl, IrInstructionIdUndeclaredIdent, + IrInstructionIdAllocaSrc, + IrInstructionIdAllocaGen, + IrInstructionIdEndExpr, + IrInstructionIdPtrOfArrayToSlice, }; struct IrInstruction { - IrInstructionId id; Scope *scope; AstNode *source_node; ConstExprValue value; @@ -2324,6 +2373,7 @@ struct IrInstruction { // with this child field. 
IrInstruction *child; IrBasicBlock *owner_bb; + IrInstructionId id; // true if this instruction was generated by zig and not from user code bool is_gen; }; @@ -2334,14 +2384,14 @@ struct IrInstructionDeclVarSrc { ZigVar *var; IrInstruction *var_type; IrInstruction *align_value; - IrInstruction *init_value; + IrInstruction *ptr; }; struct IrInstructionDeclVarGen { IrInstruction base; ZigVar *var; - IrInstruction *init_value; + IrInstruction *var_ptr; }; struct IrInstructionCondBr { @@ -2351,6 +2401,7 @@ struct IrInstructionCondBr { IrBasicBlock *then_block; IrBasicBlock *else_block; IrInstruction *is_comptime; + ResultLoc *result_loc; }; struct IrInstructionBr { @@ -2403,6 +2454,7 @@ struct IrInstructionPhi { size_t incoming_count; IrBasicBlock **incoming_blocks; IrInstruction **incoming_values; + ResultLocPeerParent *peer_parent; }; enum IrUnOp { @@ -2418,8 +2470,9 @@ struct IrInstructionUnOp { IrInstruction base; IrUnOp op_id; - IrInstruction *value; LVal lval; + IrInstruction *value; + ResultLoc *result_loc; }; enum IrBinOp { @@ -2476,7 +2529,7 @@ struct IrInstructionLoadPtrGen { IrInstruction base; IrInstruction *ptr; - LLVMValueRef tmp_ptr; + IrInstruction *result_loc; }; struct IrInstructionStorePtr { @@ -2489,6 +2542,7 @@ struct IrInstructionStorePtr { struct IrInstructionFieldPtr { IrInstruction base; + bool initializing; IrInstruction *container_ptr; Buf *field_name_buffer; IrInstruction *field_name_expr; @@ -2505,9 +2559,10 @@ struct IrInstructionStructFieldPtr { struct IrInstructionUnionFieldPtr { IrInstruction base; + bool safety_check_on; + bool initializing; IrInstruction *union_ptr; TypeUnionField *field; - bool is_const; }; struct IrInstructionElemPtr { @@ -2515,8 +2570,8 @@ struct IrInstructionElemPtr { IrInstruction *array_ptr; IrInstruction *elem_index; + IrInstruction *init_array_type; PtrLen ptr_len; - bool is_const; bool safety_check_on; }; @@ -2527,14 +2582,21 @@ struct IrInstructionVarPtr { ScopeFnDef *crossed_fndef_scope; }; -struct IrInstructionCall { +// For functions that have a return type for which handle_is_ptr is true, a +// result location pointer is the secret first parameter ("sret"). This +// instruction returns that pointer. 
+struct IrInstructionReturnPtr { + IrInstruction base; +}; + +struct IrInstructionCallSrc { IrInstruction base; IrInstruction *fn_ref; ZigFn *fn_entry; size_t arg_count; IrInstruction **args; - LLVMValueRef tmp_ptr; + ResultLoc *result_loc; IrInstruction *async_allocator; IrInstruction *new_stack; @@ -2543,6 +2605,21 @@ struct IrInstructionCall { bool is_comptime; }; +struct IrInstructionCallGen { + IrInstruction base; + + IrInstruction *fn_ref; + ZigFn *fn_entry; + size_t arg_count; + IrInstruction **args; + IrInstruction *result_loc; + + IrInstruction *async_allocator; + IrInstruction *new_stack; + FnInline fn_inline; + bool is_async; +}; + struct IrInstructionConst { IrInstruction base; }; @@ -2565,7 +2642,6 @@ enum CastOp { CastOpNumLitToConcrete, CastOpErrSet, CastOpBitCast, - CastOpPtrOfArrayToSlice, }; // TODO get rid of this instruction, replace with instructions for each op code @@ -2575,14 +2651,13 @@ struct IrInstructionCast { IrInstruction *value; ZigType *dest_type; CastOp cast_op; - LLVMValueRef tmp_ptr; }; struct IrInstructionResizeSlice { IrInstruction base; IrInstruction *operand; - LLVMValueRef tmp_ptr; + IrInstruction *result_loc; }; struct IrInstructionContainerInitList { @@ -2591,15 +2666,15 @@ struct IrInstructionContainerInitList { IrInstruction *container_type; IrInstruction *elem_type; size_t item_count; - IrInstruction **items; - LLVMValueRef tmp_ptr; + IrInstruction **elem_result_loc_list; + IrInstruction *result_loc; }; struct IrInstructionContainerInitFieldsField { Buf *name; - IrInstruction *value; AstNode *source_node; TypeStructField *type_struct_field; + IrInstruction *result_loc; }; struct IrInstructionContainerInitFields { @@ -2608,29 +2683,7 @@ struct IrInstructionContainerInitFields { IrInstruction *container_type; size_t field_count; IrInstructionContainerInitFieldsField *fields; -}; - -struct IrInstructionStructInitField { - IrInstruction *value; - TypeStructField *type_struct_field; -}; - -struct IrInstructionStructInit { - IrInstruction base; - - ZigType *struct_type; - size_t field_count; - IrInstructionStructInitField *fields; - LLVMValueRef tmp_ptr; -}; - -struct IrInstructionUnionInit { - IrInstruction base; - - ZigType *union_type; - TypeUnionField *field; - IrInstruction *init_value; - LLVMValueRef tmp_ptr; + IrInstruction *result_loc; }; struct IrInstructionUnreachable { @@ -2643,18 +2696,6 @@ struct IrInstructionTypeOf { IrInstruction *value; }; -struct IrInstructionToPtrType { - IrInstruction base; - - IrInstruction *ptr; -}; - -struct IrInstructionPtrTypeChild { - IrInstruction base; - - IrInstruction *value; -}; - struct IrInstructionSetCold { IrInstruction base; @@ -2748,8 +2789,9 @@ struct IrInstructionTestNonNull { struct IrInstructionOptionalUnwrapPtr { IrInstruction base; - IrInstruction *base_ptr; bool safety_check_on; + bool initializing; + IrInstruction *base_ptr; }; struct IrInstructionCtz { @@ -2789,11 +2831,17 @@ struct IrInstructionRef { IrInstruction base; IrInstruction *value; - LLVMValueRef tmp_ptr; bool is_const; bool is_volatile; }; +struct IrInstructionRefGen { + IrInstruction base; + + IrInstruction *operand; + IrInstruction *result_loc; +}; + struct IrInstructionCompileErr { IrInstruction base; @@ -2845,26 +2893,26 @@ struct IrInstructionEmbedFile { struct IrInstructionCmpxchgSrc { IrInstruction base; + bool is_weak; IrInstruction *type_value; IrInstruction *ptr; IrInstruction *cmp_value; IrInstruction *new_value; IrInstruction *success_order_value; IrInstruction *failure_order_value; - - bool is_weak; + ResultLoc 
*result_loc; }; struct IrInstructionCmpxchgGen { IrInstruction base; + bool is_weak; + AtomicOrder success_order; + AtomicOrder failure_order; IrInstruction *ptr; IrInstruction *cmp_value; IrInstruction *new_value; - LLVMValueRef tmp_ptr; - AtomicOrder success_order; - AtomicOrder failure_order; - bool is_weak; + IrInstruction *result_loc; }; struct IrInstructionFence { @@ -2908,6 +2956,7 @@ struct IrInstructionToBytes { IrInstruction base; IrInstruction *target; + ResultLoc *result_loc; }; struct IrInstructionFromBytes { @@ -2915,6 +2964,7 @@ struct IrInstructionFromBytes { IrInstruction *dest_child_type; IrInstruction *target; + ResultLoc *result_loc; }; struct IrInstructionIntToFloat { @@ -2973,14 +3023,24 @@ struct IrInstructionMemcpy { IrInstruction *count; }; -struct IrInstructionSlice { +struct IrInstructionSliceSrc { IrInstruction base; + bool safety_check_on; IrInstruction *ptr; IrInstruction *start; IrInstruction *end; + ResultLoc *result_loc; +}; + +struct IrInstructionSliceGen { + IrInstruction base; + bool safety_check_on; - LLVMValueRef tmp_ptr; + IrInstruction *ptr; + IrInstruction *start; + IrInstruction *end; + IrInstruction *result_loc; }; struct IrInstructionMemberCount { @@ -3038,6 +3098,15 @@ struct IrInstructionOverflowOp { ZigType *result_ptr_type; }; +struct IrInstructionMulAdd { + IrInstruction base; + + IrInstruction *type_value; + IrInstruction *op1; + IrInstruction *op2; + IrInstruction *op3; +}; + struct IrInstructionAlignOf { IrInstruction base; @@ -3045,44 +3114,54 @@ struct IrInstructionAlignOf { }; // returns true if error, returns false if not error -struct IrInstructionTestErr { +struct IrInstructionTestErrSrc { IrInstruction base; - IrInstruction *value; + bool resolve_err_set; + IrInstruction *base_ptr; }; -struct IrInstructionUnwrapErrCode { +struct IrInstructionTestErrGen { IrInstruction base; IrInstruction *err_union; }; +// Takes an error union pointer, returns a pointer to the error code. 
+struct IrInstructionUnwrapErrCode { + IrInstruction base; + + bool initializing; + IrInstruction *err_union_ptr; +}; + struct IrInstructionUnwrapErrPayload { IrInstruction base; - IrInstruction *value; bool safety_check_on; + bool initializing; + IrInstruction *value; }; struct IrInstructionOptionalWrap { IrInstruction base; - IrInstruction *value; - LLVMValueRef tmp_ptr; + IrInstruction *operand; + IrInstruction *result_loc; }; struct IrInstructionErrWrapPayload { IrInstruction base; - IrInstruction *value; - LLVMValueRef tmp_ptr; + IrInstruction *operand; + IrInstruction *result_loc; }; struct IrInstructionErrWrapCode { IrInstruction base; - IrInstruction *value; - LLVMValueRef tmp_ptr; + IrInstruction *operand; + IrInstruction *result_loc; }; struct IrInstructionFnProto { @@ -3117,18 +3196,17 @@ struct IrInstructionPtrCastGen { bool safety_check_on; }; -struct IrInstructionBitCast { +struct IrInstructionBitCastSrc { IrInstruction base; - IrInstruction *dest_type; - IrInstruction *value; + IrInstruction *operand; + ResultLocBitCast *result_loc_bit_cast; }; struct IrInstructionBitCastGen { IrInstruction base; IrInstruction *operand; - LLVMValueRef tmp_ptr; }; struct IrInstructionWidenOrShorten { @@ -3204,8 +3282,8 @@ struct IrInstructionTypeName { struct IrInstructionDeclRef { IrInstruction base; - Tld *tld; LVal lval; + Tld *tld; }; struct IrInstructionPanic { @@ -3461,11 +3539,13 @@ struct IrInstructionMarkErrRetTracePtr { IrInstruction *err_ret_trace_ptr; }; -struct IrInstructionSqrt { +// For float ops which take a single argument +struct IrInstructionFloatOp { IrInstruction base; + BuiltinFnId op; IrInstruction *type; - IrInstruction *op; + IrInstruction *op1; }; struct IrInstructionCheckRuntimeScope { @@ -3499,7 +3579,7 @@ struct IrInstructionVectorToArray { IrInstruction base; IrInstruction *vector; - LLVMValueRef tmp_ptr; + IrInstruction *result_loc; }; struct IrInstructionAssertZero { @@ -3527,6 +3607,139 @@ struct IrInstructionUndeclaredIdent { Buf *name; }; +struct IrInstructionAllocaSrc { + IrInstruction base; + + IrInstruction *align; + IrInstruction *is_comptime; + const char *name_hint; +}; + +struct IrInstructionAllocaGen { + IrInstruction base; + + uint32_t align; + const char *name_hint; +}; + +struct IrInstructionEndExpr { + IrInstruction base; + + IrInstruction *value; + ResultLoc *result_loc; +}; + +struct IrInstructionImplicitCast { + IrInstruction base; + + IrInstruction *dest_type; + IrInstruction *target; + ResultLoc *result_loc; +}; + +// This one is for writing through the result pointer. +struct IrInstructionResolveResult { + IrInstruction base; + + ResultLoc *result_loc; + IrInstruction *ty; +}; + +// This one is when you want to read the value of the result. +// You have to give the value in case it is comptime. 
+struct IrInstructionResultPtr { + IrInstruction base; + + ResultLoc *result_loc; + IrInstruction *result; +}; + +struct IrInstructionResetResult { + IrInstruction base; + + ResultLoc *result_loc; +}; + +struct IrInstructionPtrOfArrayToSlice { + IrInstruction base; + + IrInstruction *operand; + IrInstruction *result_loc; +}; + +enum ResultLocId { + ResultLocIdInvalid, + ResultLocIdNone, + ResultLocIdVar, + ResultLocIdReturn, + ResultLocIdPeer, + ResultLocIdPeerParent, + ResultLocIdInstruction, + ResultLocIdBitCast, +}; + +// Additions to this struct may need to be handled in +// ir_reset_result +struct ResultLoc { + ResultLocId id; + bool written; + IrInstruction *resolved_loc; // result ptr + IrInstruction *source_instruction; + IrInstruction *gen_instruction; // value to store to the result loc + ZigType *implicit_elem_type; +}; + +struct ResultLocNone { + ResultLoc base; +}; + +struct ResultLocVar { + ResultLoc base; + + ZigVar *var; +}; + +struct ResultLocReturn { + ResultLoc base; +}; + +struct IrSuspendPosition { + size_t basic_block_index; + size_t instruction_index; +}; + +struct ResultLocPeerParent { + ResultLoc base; + + bool skipped; + bool done_resuming; + IrBasicBlock *end_bb; + ResultLoc *parent; + ZigList peers; + ZigType *resolved_type; + IrInstruction *is_comptime; +}; + +struct ResultLocPeer { + ResultLoc base; + + ResultLocPeerParent *parent; + IrBasicBlock *next_bb; + IrSuspendPosition suspend_pos; +}; + +// The result location is the source instruction +struct ResultLocInstruction { + ResultLoc base; +}; + +// The source_instruction is the destination type +struct ResultLocBitCast { + ResultLoc base; + + ResultLoc *parent; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; @@ -3574,7 +3787,7 @@ struct FnWalkAttrs { struct FnWalkCall { ZigList *gen_param_values; - IrInstructionCall *inst; + IrInstructionCallGen *inst; bool is_var_args; }; diff --git a/src/analyze.cpp b/src/analyze.cpp index c7e35367c3..935ced9491 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -2995,7 +2995,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeBlock: case NodeTypeGroupedExpr: case NodeTypeBinOpExpr: - case NodeTypeUnwrapErrorExpr: + case NodeTypeCatchExpr: case NodeTypeFnCallExpr: case NodeTypeArrayAccessExpr: case NodeTypeSliceExpr: @@ -4181,6 +4181,7 @@ static uint32_t hash_const_val_ptr(ConstExprValue *const_val) { case ConstPtrMutComptimeConst: hash_val += (uint32_t)4214318515; break; + case ConstPtrMutInfer: case ConstPtrMutComptimeVar: hash_val += (uint32_t)1103195694; break; @@ -4511,6 +4512,8 @@ bool fn_eval_cacheable(Scope *scope, ZigType *return_type) { ScopeVarDecl *var_scope = (ScopeVarDecl *)scope; if (type_is_invalid(var_scope->var->var_type)) return false; + if (var_scope->var->const_value->special == ConstValSpecialUndef) + return false; if (can_mutate_comptime_var_state(var_scope->var->const_value)) return false; } else if (scope->id == ScopeIdFnDef) { @@ -4710,7 +4713,7 @@ ReqCompTime type_requires_comptime(CodeGen *g, ZigType *type_entry) { void init_const_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str) { auto entry = g->string_literals_table.maybe_get(str); if (entry != nullptr) { - *const_val = *entry->value; + memcpy(const_val, entry->value, sizeof(ConstExprValue)); return; } @@ -4998,12 +5001,9 @@ void init_const_undefined(CodeGen *g, ConstExprValue *const_val) { field_val->type = wanted_type->data.structure.fields[i].type_entry; assert(field_val->type); init_const_undefined(g, 
field_val); - ConstParent *parent = get_const_val_parent(g, field_val); - if (parent != nullptr) { - parent->id = ConstParentIdStruct; - parent->data.p_struct.struct_val = const_val; - parent->data.p_struct.field_index = i; - } + field_val->parent.id = ConstParentIdStruct; + field_val->parent.data.p_struct.struct_val = const_val; + field_val->parent.data.p_struct.field_index = i; } } else { const_val->special = ConstValSpecialUndef; @@ -5736,12 +5736,13 @@ uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey x) { return (uint32_t)(x.data.clz.bit_count) * (uint32_t)2428952817; case ZigLLVMFnIdPopCount: return (uint32_t)(x.data.clz.bit_count) * (uint32_t)101195049; - case ZigLLVMFnIdFloor: - return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1899859168; - case ZigLLVMFnIdCeil: - return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1953839089; - case ZigLLVMFnIdSqrt: - return (uint32_t)(x.data.floating.bit_count) * (uint32_t)2225366385; + case ZigLLVMFnIdFloatOp: + return (uint32_t)(x.data.floating.bit_count) * ((uint32_t)x.id + 1025) + + (uint32_t)(x.data.floating.vector_len) * (((uint32_t)x.id << 5) + 1025) + + (uint32_t)(x.data.floating.op) * (uint32_t)43789879; + case ZigLLVMFnIdFMA: + return (uint32_t)(x.data.floating.bit_count) * ((uint32_t)x.id + 1025) + + (uint32_t)(x.data.floating.vector_len) * (((uint32_t)x.id << 5) + 1025); case ZigLLVMFnIdBswap: return (uint32_t)(x.data.bswap.bit_count) * (uint32_t)3661994335; case ZigLLVMFnIdBitReverse: @@ -5769,10 +5770,13 @@ bool zig_llvm_fn_key_eql(ZigLLVMFnKey a, ZigLLVMFnKey b) { return a.data.bswap.bit_count == b.data.bswap.bit_count; case ZigLLVMFnIdBitReverse: return a.data.bit_reverse.bit_count == b.data.bit_reverse.bit_count; - case ZigLLVMFnIdFloor: - case ZigLLVMFnIdCeil: - case ZigLLVMFnIdSqrt: - return a.data.floating.bit_count == b.data.floating.bit_count; + case ZigLLVMFnIdFloatOp: + return a.data.floating.bit_count == b.data.floating.bit_count && + a.data.floating.vector_len == b.data.floating.vector_len && + a.data.floating.op == b.data.floating.op; + case ZigLLVMFnIdFMA: + return a.data.floating.bit_count == b.data.floating.bit_count && + a.data.floating.vector_len == b.data.floating.vector_len; case ZigLLVMFnIdOverflowArithmetic: return (a.data.overflow_arithmetic.bit_count == b.data.overflow_arithmetic.bit_count) && (a.data.overflow_arithmetic.add_sub_mul == b.data.overflow_arithmetic.add_sub_mul) && @@ -5839,11 +5843,6 @@ void expand_undef_array(CodeGen *g, ConstExprValue *const_val) { zig_unreachable(); } -// Deprecated. Reference the parent field directly. -ConstParent *get_const_val_parent(CodeGen *g, ConstExprValue *value) { - return &value->parent; -} - static const ZigTypeId all_type_ids[] = { ZigTypeIdMetaType, ZigTypeIdVoid, @@ -7277,6 +7276,6 @@ void src_assert(bool ok, AstNode *source_node) { buf_ptr(source_node->owner->data.structure.root_struct->path), (unsigned)source_node->line + 1, (unsigned)source_node->column + 1); } - const char *msg = "assertion failed"; + const char *msg = "assertion failed. 
This is a bug in the Zig compiler."; stage2_panic(msg, strlen(msg)); } diff --git a/src/analyze.hpp b/src/analyze.hpp index 8d78ef86e2..a6ad92110e 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -180,7 +180,6 @@ void init_const_undefined(CodeGen *g, ConstExprValue *const_val); ConstExprValue *create_const_vals(size_t count); ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits); -ConstParent *get_const_val_parent(CodeGen *g, ConstExprValue *value); void expand_undef_array(CodeGen *g, ConstExprValue *const_val); void update_compile_var(CodeGen *g, Buf *name, ConstExprValue *value); diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 92508c2205..154803f884 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -165,8 +165,8 @@ static const char *node_type_str(NodeType node_type) { return "Parens"; case NodeTypeBinOpExpr: return "BinOpExpr"; - case NodeTypeUnwrapErrorExpr: - return "UnwrapErrorExpr"; + case NodeTypeCatchExpr: + return "CatchExpr"; case NodeTypeFnCallExpr: return "FnCallExpr"; case NodeTypeArrayAccessExpr: @@ -1107,7 +1107,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, "]"); break; } - case NodeTypeUnwrapErrorExpr: + case NodeTypeCatchExpr: { render_node_ungrouped(ar, node->data.unwrap_err_expr.op1); fprintf(ar->f, " catch "); diff --git a/src/codegen.cpp b/src/codegen.cpp index 3dd6995c61..6ad779fd24 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -180,6 +180,8 @@ CodeGen *codegen_create(Buf *main_pkg_path, Buf *root_src_path, const ZigTarget g->root_package = new_package(".", "", ""); } + g->root_package->package_table.put(buf_create_from_str("root"), g->root_package); + g->zig_std_special_dir = buf_alloc(); os_path_join(g->zig_std_dir, buf_sprintf("special"), g->zig_std_special_dir); @@ -691,7 +693,9 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) { is_definition, scope_line, flags, is_optimized, nullptr); scope->di_scope = ZigLLVMSubprogramToScope(subprogram); - ZigLLVMFnSetSubprogram(fn_llvm_value(g, fn_table_entry), subprogram); + if (!g->strip_debug_symbols) { + ZigLLVMFnSetSubprogram(fn_llvm_value(g, fn_table_entry), subprogram); + } return scope->di_scope; } case ScopeIdDecls: @@ -806,32 +810,47 @@ static LLVMValueRef get_int_overflow_fn(CodeGen *g, ZigType *operand_type, AddSu return fn_val; } -static LLVMValueRef get_float_fn(CodeGen *g, ZigType *type_entry, ZigLLVMFnId fn_id) { - assert(type_entry->id == ZigTypeIdFloat); +static LLVMValueRef get_float_fn(CodeGen *g, ZigType *type_entry, ZigLLVMFnId fn_id, BuiltinFnId op) { + assert(type_entry->id == ZigTypeIdFloat || + type_entry->id == ZigTypeIdVector); + + bool is_vector = (type_entry->id == ZigTypeIdVector); + ZigType *float_type = is_vector ? type_entry->data.vector.elem_type : type_entry; ZigLLVMFnKey key = {}; key.id = fn_id; - key.data.floating.bit_count = (uint32_t)type_entry->data.floating.bit_count; + key.data.floating.bit_count = (uint32_t)float_type->data.floating.bit_count; + key.data.floating.vector_len = is_vector ? 
(uint32_t)type_entry->data.vector.len : 0; + key.data.floating.op = op; auto existing_entry = g->llvm_fn_table.maybe_get(key); if (existing_entry) return existing_entry->value; const char *name; - if (fn_id == ZigLLVMFnIdFloor) { - name = "floor"; - } else if (fn_id == ZigLLVMFnIdCeil) { - name = "ceil"; - } else if (fn_id == ZigLLVMFnIdSqrt) { - name = "sqrt"; + uint32_t num_args; + if (fn_id == ZigLLVMFnIdFMA) { + name = "fma"; + num_args = 3; + } else if (fn_id == ZigLLVMFnIdFloatOp) { + name = float_op_to_name(op, true); + num_args = 1; } else { zig_unreachable(); } char fn_name[64]; - sprintf(fn_name, "llvm.%s.f%" ZIG_PRI_usize "", name, type_entry->data.floating.bit_count); + if (is_vector) + sprintf(fn_name, "llvm.%s.v%" PRIu32 "f%" PRIu32, name, key.data.floating.vector_len, key.data.floating.bit_count); + else + sprintf(fn_name, "llvm.%s.f%" PRIu32, name, key.data.floating.bit_count); LLVMTypeRef float_type_ref = get_llvm_type(g, type_entry); - LLVMTypeRef fn_type = LLVMFunctionType(float_type_ref, &float_type_ref, 1, false); + LLVMTypeRef return_elem_types[3] = { + float_type_ref, + float_type_ref, + float_type_ref, + }; + LLVMTypeRef fn_type = LLVMFunctionType(float_type_ref, return_elem_types, num_args, false); LLVMValueRef fn_val = LLVMAddFunction(g->module, fn_name, fn_type); assert(LLVMGetIntrinsicID(fn_val)); @@ -844,9 +863,7 @@ static LLVMValueRef gen_store_untyped(CodeGen *g, LLVMValueRef value, LLVMValueR { LLVMValueRef instruction = LLVMBuildStore(g->builder, value, ptr); if (is_volatile) LLVMSetVolatile(instruction, true); - if (alignment == 0) { - LLVMSetAlignment(instruction, LLVMABIAlignmentOfType(g->target_data_ref, LLVMTypeOf(value))); - } else { + if (alignment != 0) { LLVMSetAlignment(instruction, alignment); } return instruction; @@ -1324,7 +1341,9 @@ static LLVMValueRef get_add_error_return_trace_addr_fn(CodeGen *g) { LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, prev_block); - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + if (!g->strip_debug_symbols) { + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + } g->add_error_return_trace_addr_fn_val = fn_val; return fn_val; @@ -1455,7 +1474,9 @@ static LLVMValueRef get_merge_err_ret_traces_fn_val(CodeGen *g) { LLVMBuildBr(g->builder, loop_block); LLVMPositionBuilderAtEnd(g->builder, prev_block); - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + if (!g->strip_debug_symbols) { + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + } g->merge_err_ret_traces_fn_val = fn_val; return fn_val; @@ -1511,7 +1532,9 @@ static LLVMValueRef get_return_err_fn(CodeGen *g) { LLVMBuildRetVoid(g->builder); LLVMPositionBuilderAtEnd(g->builder, prev_block); - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + if (!g->strip_debug_symbols) { + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + } g->return_err_fn = fn_val; return fn_val; @@ -1639,7 +1662,9 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { gen_panic(g, msg_slice, err_ret_trace_arg); LLVMPositionBuilderAtEnd(g->builder, prev_block); - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + if (!g->strip_debug_symbols) { + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + } g->safety_crash_err_fn = fn_val; return fn_val; @@ -1989,6 +2014,7 @@ static LLVMValueRef gen_assign_raw(CodeGen *g, LLVMValueRef ptr, ZigType *ptr_ty } static void gen_var_debug_decl(CodeGen *g, ZigVar *var) { + if (g->strip_debug_symbols) return; 
assert(var->di_loc_var != nullptr); AstNode *source_node = var->decl_node; ZigLLVMDILocation *debug_loc = ZigLLVMGetDebugLoc((unsigned)source_node->line + 1, @@ -2001,7 +2027,7 @@ static LLVMValueRef ir_llvm_value(CodeGen *g, IrInstruction *instruction) { if (!type_has_bits(instruction->value.type)) return nullptr; if (!instruction->llvm_value) { - assert(instruction->value.special != ConstValSpecialRuntime); + src_assert(instruction->value.special != ConstValSpecialRuntime, instruction->source_node); assert(instruction->value.type); render_const_val(g, &instruction->value, ""); // we might have to do some pointer casting here due to the way union @@ -2010,11 +2036,9 @@ static LLVMValueRef ir_llvm_value(CodeGen *g, IrInstruction *instruction) { render_const_val_global(g, &instruction->value, ""); ZigType *ptr_type = get_pointer_to_type(g, instruction->value.type, true); instruction->llvm_value = LLVMBuildBitCast(g->builder, instruction->value.global_refs->llvm_global, get_llvm_type(g, ptr_type), ""); - } else if (get_codegen_ptr_type(instruction->value.type) != nullptr) { + } else { instruction->llvm_value = LLVMBuildBitCast(g->builder, instruction->value.global_refs->llvm_value, get_llvm_type(g, instruction->value.type), ""); - } else { - instruction->llvm_value = instruction->value.global_refs->llvm_value; } assert(instruction->llvm_value); } @@ -2295,7 +2319,7 @@ void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk) { return; } if (fn_walk->id == FnWalkIdCall) { - IrInstructionCall *instruction = fn_walk->data.call.inst; + IrInstructionCallGen *instruction = fn_walk->data.call.inst; bool is_var_args = fn_walk->data.call.is_var_args; for (size_t call_i = 0; call_i < instruction->arg_count; call_i += 1) { IrInstruction *param_instruction = instruction->args[call_i]; @@ -2389,17 +2413,33 @@ static LLVMValueRef ir_render_save_err_ret_addr(CodeGen *g, IrExecutable *execut } static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) { - LLVMValueRef value = ir_llvm_value(g, return_instruction->value); - ZigType *return_type = return_instruction->value->value.type; - if (want_first_arg_sret(g, &g->cur_fn->type_entry->data.fn.fn_type_id)) { + if (return_instruction->value == nullptr) { + LLVMBuildRetVoid(g->builder); + return nullptr; + } assert(g->cur_ret_ptr); + src_assert(return_instruction->value->value.special != ConstValSpecialRuntime, + return_instruction->base.source_node); + LLVMValueRef value = ir_llvm_value(g, return_instruction->value); + ZigType *return_type = return_instruction->value->value.type; gen_assign_raw(g, g->cur_ret_ptr, get_pointer_to_type(g, return_type, false), value); LLVMBuildRetVoid(g->builder); - } else if (handle_is_ptr(return_type)) { - LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, ""); - LLVMBuildRet(g->builder, by_val_value); + } else if (g->cur_fn->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync && + handle_is_ptr(g->cur_fn->type_entry->data.fn.fn_type_id.return_type)) + { + if (return_instruction->value == nullptr) { + LLVMValueRef by_val_value = gen_load_untyped(g, g->cur_ret_ptr, 0, false, ""); + LLVMBuildRet(g->builder, by_val_value); + } else { + LLVMValueRef value = ir_llvm_value(g, return_instruction->value); + LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, ""); + LLVMBuildRet(g->builder, by_val_value); + } + } else if (return_instruction->value == nullptr) { + LLVMBuildRetVoid(g->builder); } else { + LLVMValueRef value = 
ir_llvm_value(g, return_instruction->value); LLVMBuildRet(g->builder, value); } return nullptr; @@ -2460,22 +2500,17 @@ static LLVMValueRef gen_overflow_shr_op(CodeGen *g, ZigType *type_entry, return result; } -static LLVMValueRef gen_floor(CodeGen *g, LLVMValueRef val, ZigType *type_entry) { - if (type_entry->id == ZigTypeIdInt) +static LLVMValueRef gen_float_op(CodeGen *g, LLVMValueRef val, ZigType *type_entry, BuiltinFnId op) { + if ((op == BuiltinFnIdCeil || + op == BuiltinFnIdFloor) && + type_entry->id == ZigTypeIdInt) return val; + assert(type_entry->id == ZigTypeIdFloat); - LLVMValueRef floor_fn = get_float_fn(g, type_entry, ZigLLVMFnIdFloor); + LLVMValueRef floor_fn = get_float_fn(g, type_entry, ZigLLVMFnIdFloatOp, op); return LLVMBuildCall(g->builder, floor_fn, &val, 1, ""); } -static LLVMValueRef gen_ceil(CodeGen *g, LLVMValueRef val, ZigType *type_entry) { - if (type_entry->id == ZigTypeIdInt) - return val; - - LLVMValueRef ceil_fn = get_float_fn(g, type_entry, ZigLLVMFnIdCeil); - return LLVMBuildCall(g->builder, ceil_fn, &val, 1, ""); -} - enum DivKind { DivKindFloat, DivKindTrunc, @@ -2551,7 +2586,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_runtime_safety, bool want_fast return result; case DivKindExact: if (want_runtime_safety) { - LLVMValueRef floored = gen_floor(g, result, type_entry); + LLVMValueRef floored = gen_float_op(g, result, type_entry, BuiltinFnIdFloor); LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactOk"); LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivExactFail"); LLVMValueRef ok_bit = LLVMBuildFCmp(g->builder, LLVMRealOEQ, floored, result, ""); @@ -2573,12 +2608,12 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_runtime_safety, bool want_fast LLVMBuildCondBr(g->builder, ltz, ltz_block, gez_block); LLVMPositionBuilderAtEnd(g->builder, ltz_block); - LLVMValueRef ceiled = gen_ceil(g, result, type_entry); + LLVMValueRef ceiled = gen_float_op(g, result, type_entry, BuiltinFnIdCeil); LLVMBasicBlockRef ceiled_end_block = LLVMGetInsertBlock(g->builder); LLVMBuildBr(g->builder, end_block); LLVMPositionBuilderAtEnd(g->builder, gez_block); - LLVMValueRef floored = gen_floor(g, result, type_entry); + LLVMValueRef floored = gen_float_op(g, result, type_entry, BuiltinFnIdFloor); LLVMBasicBlockRef floored_end_block = LLVMGetInsertBlock(g->builder); LLVMBuildBr(g->builder, end_block); @@ -2590,7 +2625,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_runtime_safety, bool want_fast return phi; } case DivKindFloor: - return gen_floor(g, result, type_entry); + return gen_float_op(g, result, type_entry, BuiltinFnIdFloor); } zig_unreachable(); } @@ -2942,7 +2977,7 @@ static LLVMValueRef ir_render_resize_slice(CodeGen *g, IrExecutable *executable, LLVMValueRef expr_val = ir_llvm_value(g, instruction->operand); assert(expr_val); - assert(instruction->tmp_ptr); + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); assert(wanted_type->id == ZigTypeIdStruct); assert(wanted_type->data.structure.is_slice); assert(actual_type->id == ZigTypeIdStruct); @@ -2963,7 +2998,7 @@ static LLVMValueRef ir_render_resize_slice(CodeGen *g, IrExecutable *executable, LLVMValueRef src_ptr = gen_load_untyped(g, src_ptr_ptr, 0, false, ""); LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, src_ptr, get_llvm_type(g, wanted_type->data.structure.fields[0].type_entry), ""); - LLVMValueRef dest_ptr_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, + LLVMValueRef dest_ptr_ptr = LLVMBuildStructGEP(g->builder, 
result_loc, (unsigned)wanted_ptr_index, ""); gen_store_untyped(g, src_ptr_casted, dest_ptr_ptr, 0, false); @@ -2996,12 +3031,10 @@ static LLVMValueRef ir_render_resize_slice(CodeGen *g, IrExecutable *executable, zig_unreachable(); } - LLVMValueRef dest_len_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, - (unsigned)wanted_len_index, ""); + LLVMValueRef dest_len_ptr = LLVMBuildStructGEP(g->builder, result_loc, (unsigned)wanted_len_index, ""); gen_store_untyped(g, new_len, dest_len_ptr, 0, false); - - return instruction->tmp_ptr; + return result_loc; } static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable, @@ -3071,33 +3104,39 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable, return expr_val; case CastOpBitCast: return LLVMBuildBitCast(g->builder, expr_val, get_llvm_type(g, wanted_type), ""); - case CastOpPtrOfArrayToSlice: { - assert(cast_instruction->tmp_ptr); - assert(actual_type->id == ZigTypeIdPointer); - ZigType *array_type = actual_type->data.pointer.child_type; - assert(array_type->id == ZigTypeIdArray); - - LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr, - slice_ptr_index, ""); - LLVMValueRef indices[] = { - LLVMConstNull(g->builtin_types.entry_usize->llvm_type), - LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 0, false), - }; - LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, ""); - gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false); - - LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, cast_instruction->tmp_ptr, - slice_len_index, ""); - LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, - array_type->data.array.len, false); - gen_store_untyped(g, len_value, len_field_ptr, 0, false); - - return cast_instruction->tmp_ptr; - } } zig_unreachable(); } +static LLVMValueRef ir_render_ptr_of_array_to_slice(CodeGen *g, IrExecutable *executable, + IrInstructionPtrOfArrayToSlice *instruction) +{ + ZigType *actual_type = instruction->operand->value.type; + LLVMValueRef expr_val = ir_llvm_value(g, instruction->operand); + assert(expr_val); + + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); + + assert(actual_type->id == ZigTypeIdPointer); + ZigType *array_type = actual_type->data.pointer.child_type; + assert(array_type->id == ZigTypeIdArray); + + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, slice_ptr_index, ""); + LLVMValueRef indices[] = { + LLVMConstNull(g->builtin_types.entry_usize->llvm_type), + LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 0, false), + }; + LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, ""); + gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false); + + LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, slice_len_index, ""); + LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, + array_type->data.array.len, false); + gen_store_untyped(g, len_value, len_field_ptr, 0, false); + + return result_loc; +} + static LLVMValueRef ir_render_ptr_cast(CodeGen *g, IrExecutable *executable, IrInstructionPtrCastGen *instruction) { @@ -3144,12 +3183,7 @@ static LLVMValueRef ir_render_bit_cast(CodeGen *g, IrExecutable *executable, uint32_t alignment = get_abi_alignment(g, actual_type); return gen_load_untyped(g, bitcasted_ptr, alignment, false, ""); } else { - assert(instruction->tmp_ptr != nullptr); - LLVMTypeRef wanted_ptr_type_ref = 
LLVMPointerType(get_llvm_type(g, actual_type), 0); - LLVMValueRef bitcasted_ptr = LLVMBuildBitCast(g->builder, instruction->tmp_ptr, wanted_ptr_type_ref, ""); - uint32_t alignment = get_abi_alignment(g, wanted_type); - gen_store_untyped(g, value, bitcasted_ptr, alignment, false); - return instruction->tmp_ptr; + zig_unreachable(); } } @@ -3335,31 +3369,13 @@ static LLVMValueRef ir_render_bool_not(CodeGen *g, IrExecutable *executable, IrI return LLVMBuildICmp(g->builder, LLVMIntEQ, value, zero, ""); } -static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, - IrInstructionDeclVarGen *decl_var_instruction) -{ - ZigVar *var = decl_var_instruction->var; +static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, IrInstructionDeclVarGen *instruction) { + ZigVar *var = instruction->var; if (!type_has_bits(var->var_type)) return nullptr; - if (var->ref_count == 0 && g->build_mode != BuildModeDebug) - return nullptr; - - IrInstruction *init_value = decl_var_instruction->init_value; - - bool have_init_expr = !value_is_all_undef(&init_value->value); - - if (have_init_expr) { - ZigType *var_ptr_type = get_pointer_to_type_extra(g, var->var_type, false, false, - PtrLenSingle, var->align_bytes, 0, 0, false); - LLVMValueRef llvm_init_val = ir_llvm_value(g, init_value); - gen_assign_raw(g, var->value_ref, var_ptr_type, llvm_init_val); - } else if (ir_want_runtime_safety(g, &decl_var_instruction->base)) { - uint32_t align_bytes = (var->align_bytes == 0) ? get_abi_alignment(g, var->var_type) : var->align_bytes; - gen_undef_init(g, align_bytes, var->var_type, var->value_ref); - } - + var->value_ref = ir_llvm_value(g, instruction->var_ptr); gen_var_debug_decl(g, var); return nullptr; } @@ -3391,13 +3407,13 @@ static LLVMValueRef ir_render_load_ptr(CodeGen *g, IrExecutable *executable, IrI LLVMValueRef shifted_value = LLVMBuildLShr(g->builder, containing_int, shift_amt_val, ""); if (handle_is_ptr(child_type)) { - assert(instruction->tmp_ptr != nullptr); + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); LLVMTypeRef same_size_int = LLVMIntType(size_in_bits); LLVMValueRef truncated_int = LLVMBuildTrunc(g->builder, shifted_value, same_size_int, ""); - LLVMValueRef bitcasted_ptr = LLVMBuildBitCast(g->builder, instruction->tmp_ptr, + LLVMValueRef bitcasted_ptr = LLVMBuildBitCast(g->builder, result_loc, LLVMPointerType(same_size_int, 0), ""); LLVMBuildStore(g->builder, truncated_int, bitcasted_ptr); - return instruction->tmp_ptr; + return result_loc; } if (child_type->id == ZigTypeIdFloat) { @@ -3575,6 +3591,14 @@ static LLVMValueRef ir_render_var_ptr(CodeGen *g, IrExecutable *executable, IrIn } } +static LLVMValueRef ir_render_return_ptr(CodeGen *g, IrExecutable *executable, + IrInstructionReturnPtr *instruction) +{ + src_assert(g->cur_ret_ptr != nullptr || !type_has_bits(instruction->base.value.type), + instruction->base.source_node); + return g->cur_ret_ptr; +} + static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrInstructionElemPtr *instruction) { LLVMValueRef array_ptr_ptr = ir_llvm_value(g, instruction->array_ptr); ZigType *array_ptr_type = instruction->array_ptr->value.type; @@ -3726,7 +3750,7 @@ static void set_call_instr_sret(CodeGen *g, LLVMValueRef call_instr) { LLVMAddCallSiteAttribute(call_instr, 1, sret_attr); } -static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) { +static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCallGen 
*instruction) { LLVMValueRef fn_val; ZigType *fn_type; if (instruction->fn_entry) { @@ -3749,8 +3773,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, fn_type_id); bool is_var_args = fn_type_id->is_var_args; ZigList gen_param_values = {}; + LLVMValueRef result_loc = instruction->result_loc ? ir_llvm_value(g, instruction->result_loc) : nullptr; if (first_arg_ret) { - gen_param_values.append(instruction->tmp_ptr); + gen_param_values.append(result_loc); } if (prefix_arg_err_ret_stack) { gen_param_values.append(get_cur_err_ret_trace_val(g, instruction->base.scope)); @@ -3758,7 +3783,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async) { gen_param_values.append(ir_llvm_value(g, instruction->async_allocator)); - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_err_index, ""); + LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_err_index, ""); gen_param_values.append(err_val_ptr); } FnWalk fn_walk = {}; @@ -3801,9 +3826,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr if (instruction->is_async) { - LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_payload_index, ""); + LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_payload_index, ""); LLVMBuildStore(g->builder, result, payload_ptr); - return instruction->tmp_ptr; + return result_loc; } if (src_return_type->id == ZigTypeIdUnreachable) { @@ -3812,11 +3837,11 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr return nullptr; } else if (first_arg_ret) { set_call_instr_sret(g, result); - return instruction->tmp_ptr; + return result_loc; } else if (handle_is_ptr(src_return_type)) { - auto store_instr = LLVMBuildStore(g->builder, result, instruction->tmp_ptr); - LLVMSetAlignment(store_instr, LLVMGetAlignment(instruction->tmp_ptr)); - return instruction->tmp_ptr; + LLVMValueRef store_instr = LLVMBuildStore(g->builder, result, result_loc); + LLVMSetAlignment(store_instr, LLVMGetAlignment(result_loc)); + return result_loc; } else { return result; } @@ -3825,6 +3850,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executable, IrInstructionStructFieldPtr *instruction) { + if (instruction->base.value.special != ConstValSpecialRuntime) + return nullptr; + LLVMValueRef struct_ptr = ir_llvm_value(g, instruction->struct_ptr); // not necessarily a pointer. 
could be ZigTypeIdStruct ZigType *struct_ptr_type = instruction->struct_ptr->value.type; @@ -3846,6 +3874,9 @@ static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executa static LLVMValueRef ir_render_union_field_ptr(CodeGen *g, IrExecutable *executable, IrInstructionUnionFieldPtr *instruction) { + if (instruction->base.value.special != ConstValSpecialRuntime) + return nullptr; + ZigType *union_ptr_type = instruction->union_ptr->value.type; assert(union_ptr_type->id == ZigTypeIdPointer); ZigType *union_type = union_ptr_type->data.pointer.child_type; @@ -3853,8 +3884,20 @@ static LLVMValueRef ir_render_union_field_ptr(CodeGen *g, IrExecutable *executab TypeUnionField *field = instruction->field; - if (!type_has_bits(field->type_entry)) + if (!type_has_bits(field->type_entry)) { + if (union_type->data.unionation.gen_tag_index == SIZE_MAX) { + return nullptr; + } + if (instruction->initializing) { + LLVMValueRef union_ptr = ir_llvm_value(g, instruction->union_ptr); + LLVMValueRef tag_field_ptr = LLVMBuildStructGEP(g->builder, union_ptr, + union_type->data.unionation.gen_tag_index, ""); + LLVMValueRef tag_value = bigint_to_llvm_const(get_llvm_type(g, union_type->data.unionation.tag_type), + &field->enum_field->value); + gen_store_untyped(g, tag_value, tag_field_ptr, 0, false); + } return nullptr; + } LLVMValueRef union_ptr = ir_llvm_value(g, instruction->union_ptr); LLVMTypeRef field_type_ref = LLVMPointerType(get_llvm_type(g, field->type_entry), 0); @@ -3865,7 +3908,12 @@ static LLVMValueRef ir_render_union_field_ptr(CodeGen *g, IrExecutable *executab return bitcasted_union_field_ptr; } - if (ir_want_runtime_safety(g, &instruction->base)) { + if (instruction->initializing) { + LLVMValueRef tag_field_ptr = LLVMBuildStructGEP(g->builder, union_ptr, union_type->data.unionation.gen_tag_index, ""); + LLVMValueRef tag_value = bigint_to_llvm_const(get_llvm_type(g, union_type->data.unionation.tag_type), + &field->enum_field->value); + gen_store_untyped(g, tag_value, tag_field_ptr, 0, false); + } else if (instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base)) { LLVMValueRef tag_field_ptr = LLVMBuildStructGEP(g->builder, union_ptr, union_type->data.unionation.gen_tag_index, ""); LLVMValueRef tag_value = gen_load_untyped(g, tag_field_ptr, 0, false, ""); @@ -4065,14 +4113,17 @@ static LLVMValueRef ir_render_test_non_null(CodeGen *g, IrExecutable *executable static LLVMValueRef ir_render_optional_unwrap_ptr(CodeGen *g, IrExecutable *executable, IrInstructionOptionalUnwrapPtr *instruction) { + if (instruction->base.value.special != ConstValSpecialRuntime) + return nullptr; + ZigType *ptr_type = instruction->base_ptr->value.type; assert(ptr_type->id == ZigTypeIdPointer); ZigType *maybe_type = ptr_type->data.pointer.child_type; assert(maybe_type->id == ZigTypeIdOptional); ZigType *child_type = maybe_type->data.maybe.child_type; - LLVMValueRef maybe_ptr = ir_llvm_value(g, instruction->base_ptr); - if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on) { - LLVMValueRef maybe_handle = get_handle_value(g, maybe_ptr, maybe_type, ptr_type); + LLVMValueRef base_ptr = ir_llvm_value(g, instruction->base_ptr); + if (instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base)) { + LLVMValueRef maybe_handle = get_handle_value(g, base_ptr, maybe_type, ptr_type); LLVMValueRef non_null_bit = gen_non_null_bit(g, maybe_type, maybe_handle); LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalFail"); 
LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalOk"); @@ -4088,10 +4139,16 @@ static LLVMValueRef ir_render_optional_unwrap_ptr(CodeGen *g, IrExecutable *exec } else { bool is_scalar = !handle_is_ptr(maybe_type); if (is_scalar) { - return maybe_ptr; + return base_ptr; } else { - LLVMValueRef maybe_struct_ref = get_handle_value(g, maybe_ptr, maybe_type, ptr_type); - return LLVMBuildStructGEP(g->builder, maybe_struct_ref, maybe_child_index, ""); + LLVMValueRef optional_struct_ref = get_handle_value(g, base_ptr, maybe_type, ptr_type); + if (instruction->initializing) { + LLVMValueRef non_null_bit_ptr = LLVMBuildStructGEP(g->builder, optional_struct_ref, + maybe_null_index, ""); + LLVMValueRef non_null_bit = LLVMConstInt(LLVMInt1Type(), 1, false); + gen_store_untyped(g, non_null_bit, non_null_bit_ptr, 0, false); + } + return LLVMBuildStructGEP(g->builder, optional_struct_ref, maybe_child_index, ""); } } } @@ -4214,17 +4271,17 @@ static LLVMValueRef ir_render_phi(CodeGen *g, IrExecutable *executable, IrInstru return phi; } -static LLVMValueRef ir_render_ref(CodeGen *g, IrExecutable *executable, IrInstructionRef *instruction) { +static LLVMValueRef ir_render_ref(CodeGen *g, IrExecutable *executable, IrInstructionRefGen *instruction) { if (!type_has_bits(instruction->base.value.type)) { return nullptr; } - LLVMValueRef value = ir_llvm_value(g, instruction->value); - if (handle_is_ptr(instruction->value->value.type)) { + LLVMValueRef value = ir_llvm_value(g, instruction->operand); + if (handle_is_ptr(instruction->operand->value.type)) { return value; } else { - assert(instruction->tmp_ptr); - gen_store_untyped(g, value, instruction->tmp_ptr, 0, false); - return instruction->tmp_ptr; + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); + gen_store_untyped(g, value, result_loc, 0, false); + return result_loc; } } @@ -4340,7 +4397,9 @@ static LLVMValueRef get_enum_tag_name_function(CodeGen *g, ZigType *enum_type) { g->cur_fn = prev_cur_fn; g->cur_fn_val = prev_cur_fn_val; LLVMPositionBuilderAtEnd(g->builder, prev_block); - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + if (!g->strip_debug_symbols) { + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + } enum_type->data.enumeration.name_function = fn_val; return fn_val; @@ -4516,28 +4575,28 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrIn LLVMValueRef result_val = ZigLLVMBuildCmpXchg(g->builder, ptr_val, cmp_val, new_val, success_order, failure_order, instruction->is_weak); - ZigType *maybe_type = instruction->base.value.type; - assert(maybe_type->id == ZigTypeIdOptional); - ZigType *child_type = maybe_type->data.maybe.child_type; + ZigType *optional_type = instruction->base.value.type; + assert(optional_type->id == ZigTypeIdOptional); + ZigType *child_type = optional_type->data.maybe.child_type; - if (!handle_is_ptr(maybe_type)) { + if (!handle_is_ptr(optional_type)) { LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, ""); LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, ""); return LLVMBuildSelect(g->builder, success_bit, LLVMConstNull(get_llvm_type(g, child_type)), payload_val, ""); } - assert(instruction->tmp_ptr != nullptr); + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); assert(type_has_bits(child_type)); LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, ""); - LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, 
instruction->tmp_ptr, maybe_child_index, ""); + LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_child_index, ""); gen_assign_raw(g, val_ptr, get_pointer_to_type(g, child_type, false), payload_val); LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, ""); LLVMValueRef nonnull_bit = LLVMBuildNot(g->builder, success_bit, ""); - LLVMValueRef maybe_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, maybe_null_index, ""); + LLVMValueRef maybe_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_null_index, ""); gen_store_untyped(g, nonnull_bit, maybe_ptr, 0, false); - return instruction->tmp_ptr; + return result_loc; } static LLVMValueRef ir_render_fence(CodeGen *g, IrExecutable *executable, IrInstructionFence *instruction) { @@ -4609,16 +4668,14 @@ static LLVMValueRef ir_render_memcpy(CodeGen *g, IrExecutable *executable, IrIns return nullptr; } -static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInstructionSlice *instruction) { - assert(instruction->tmp_ptr); - +static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInstructionSliceGen *instruction) { LLVMValueRef array_ptr_ptr = ir_llvm_value(g, instruction->ptr); ZigType *array_ptr_type = instruction->ptr->value.type; assert(array_ptr_type->id == ZigTypeIdPointer); ZigType *array_type = array_ptr_type->data.pointer.child_type; LLVMValueRef array_ptr = get_handle_value(g, array_ptr_ptr, array_type, array_ptr_type); - LLVMValueRef tmp_struct_ptr = instruction->tmp_ptr; + LLVMValueRef tmp_struct_ptr = ir_llvm_value(g, instruction->result_loc); bool want_runtime_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base); @@ -4636,7 +4693,9 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst end_val = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, array_type->data.array.len, false); } if (want_runtime_safety) { - add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val); + if (instruction->start->value.special == ConstValSpecialRuntime || instruction->end) { + add_bounds_check(g, start_val, LLVMIntEQ, nullptr, LLVMIntULE, end_val); + } if (instruction->end) { LLVMValueRef array_end = LLVMConstInt(g->builtin_types.entry_usize->llvm_type, array_type->data.array.len, false); @@ -4867,10 +4926,10 @@ static LLVMValueRef ir_render_overflow_op(CodeGen *g, IrExecutable *executable, return overflow_bit; } -static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrInstructionTestErr *instruction) { - ZigType *err_union_type = instruction->value->value.type; +static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrInstructionTestErrGen *instruction) { + ZigType *err_union_type = instruction->err_union->value.type; ZigType *payload_type = err_union_type->data.error_union.payload_type; - LLVMValueRef err_union_handle = ir_llvm_value(g, instruction->value); + LLVMValueRef err_union_handle = ir_llvm_value(g, instruction->err_union); LLVMValueRef err_val; if (type_has_bits(payload_type)) { @@ -4887,25 +4946,30 @@ static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrI static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executable, IrInstructionUnwrapErrCode *instruction) { - ZigType *ptr_type = instruction->err_union->value.type; + if (instruction->base.value.special != ConstValSpecialRuntime) + return nullptr; + + ZigType *ptr_type = instruction->err_union_ptr->value.type; assert(ptr_type->id 
== ZigTypeIdPointer); ZigType *err_union_type = ptr_type->data.pointer.child_type; ZigType *payload_type = err_union_type->data.error_union.payload_type; - LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->err_union); - LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); - - if (type_has_bits(payload_type)) { - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); - return gen_load_untyped(g, err_val_ptr, 0, false, ""); + LLVMValueRef err_union_ptr = ir_llvm_value(g, instruction->err_union_ptr); + if (!type_has_bits(payload_type)) { + return err_union_ptr; } else { - return err_union_handle; + // TODO assign undef to the payload + LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); + return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); } } static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *executable, IrInstructionUnwrapErrPayload *instruction) { - bool want_safety = ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on && + if (instruction->base.value.special != ConstValSpecialRuntime) + return nullptr; + + bool want_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base) && g->errors_by_index.length > 1; if (!want_safety && !type_has_bits(instruction->base.value.type)) return nullptr; @@ -4941,13 +5005,18 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu } if (type_has_bits(payload_type)) { + if (instruction->initializing) { + LLVMValueRef err_tag_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); + LLVMValueRef ok_err_val = LLVMConstNull(get_llvm_type(g, g->err_tag_type)); + gen_store_untyped(g, ok_err_val, err_tag_ptr, 0, false); + } return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_payload_index, ""); } else { return nullptr; } } -static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionOptionalWrap *instruction) { +static LLVMValueRef ir_render_optional_wrap(CodeGen *g, IrExecutable *executable, IrInstructionOptionalWrap *instruction) { ZigType *wanted_type = instruction->base.value.type; assert(wanted_type->id == ZigTypeIdOptional); @@ -4955,23 +5024,32 @@ static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, I ZigType *child_type = wanted_type->data.maybe.child_type; if (!type_has_bits(child_type)) { - return LLVMConstInt(LLVMInt1Type(), 1, false); + LLVMValueRef result = LLVMConstAllOnes(LLVMInt1Type()); + if (instruction->result_loc != nullptr) { + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); + gen_store_untyped(g, result, result_loc, 0, false); + } + return result; } - LLVMValueRef payload_val = ir_llvm_value(g, instruction->value); + LLVMValueRef payload_val = ir_llvm_value(g, instruction->operand); if (!handle_is_ptr(wanted_type)) { + if (instruction->result_loc != nullptr) { + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); + gen_store_untyped(g, payload_val, result_loc, 0, false); + } return payload_val; } - assert(instruction->tmp_ptr); + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); - LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, maybe_child_index, ""); + LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_child_index, ""); // child_type and instruction->value->value.type may 
differ by constness gen_assign_raw(g, val_ptr, get_pointer_to_type(g, child_type, false), payload_val); - LLVMValueRef maybe_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, maybe_null_index, ""); + LLVMValueRef maybe_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_null_index, ""); gen_store_untyped(g, LLVMConstAllOnes(LLVMInt1Type()), maybe_ptr, 0, false); - return instruction->tmp_ptr; + return result_loc; } static LLVMValueRef ir_render_err_wrap_code(CodeGen *g, IrExecutable *executable, IrInstructionErrWrapCode *instruction) { @@ -4979,20 +5057,19 @@ static LLVMValueRef ir_render_err_wrap_code(CodeGen *g, IrExecutable *executable assert(wanted_type->id == ZigTypeIdErrorUnion); - ZigType *payload_type = wanted_type->data.error_union.payload_type; - ZigType *err_set_type = wanted_type->data.error_union.err_set_type; + LLVMValueRef err_val = ir_llvm_value(g, instruction->operand); - LLVMValueRef err_val = ir_llvm_value(g, instruction->value); - - if (!type_has_bits(payload_type) || !type_has_bits(err_set_type)) + if (!handle_is_ptr(wanted_type)) return err_val; - assert(instruction->tmp_ptr); + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); - LLVMValueRef err_tag_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_err_index, ""); + LLVMValueRef err_tag_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_err_index, ""); gen_store_untyped(g, err_val, err_tag_ptr, 0, false); - return instruction->tmp_ptr; + // TODO store undef to the payload + + return result_loc; } static LLVMValueRef ir_render_err_wrap_payload(CodeGen *g, IrExecutable *executable, IrInstructionErrWrapPayload *instruction) { @@ -5004,7 +5081,7 @@ static LLVMValueRef ir_render_err_wrap_payload(CodeGen *g, IrExecutable *executa ZigType *err_set_type = wanted_type->data.error_union.err_set_type; if (!type_has_bits(err_set_type)) { - return ir_llvm_value(g, instruction->value); + return ir_llvm_value(g, instruction->operand); } LLVMValueRef ok_err_val = LLVMConstNull(get_llvm_type(g, g->err_tag_type)); @@ -5012,17 +5089,18 @@ static LLVMValueRef ir_render_err_wrap_payload(CodeGen *g, IrExecutable *executa if (!type_has_bits(payload_type)) return ok_err_val; - assert(instruction->tmp_ptr); - LLVMValueRef payload_val = ir_llvm_value(g, instruction->value); + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); - LLVMValueRef err_tag_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_err_index, ""); + LLVMValueRef payload_val = ir_llvm_value(g, instruction->operand); + + LLVMValueRef err_tag_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_err_index, ""); gen_store_untyped(g, ok_err_val, err_tag_ptr, 0, false); - LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_payload_index, ""); + LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, result_loc, err_union_payload_index, ""); gen_assign_raw(g, payload_ptr, get_pointer_to_type(g, payload_type, false), payload_val); - return instruction->tmp_ptr; + return result_loc; } static LLVMValueRef ir_render_union_tag(CodeGen *g, IrExecutable *executable, IrInstructionUnionTag *instruction) { @@ -5043,90 +5121,6 @@ static LLVMValueRef ir_render_union_tag(CodeGen *g, IrExecutable *executable, Ir return get_handle_value(g, tag_field_ptr, tag_type, ptr_type); } -static LLVMValueRef ir_render_struct_init(CodeGen *g, IrExecutable *executable, IrInstructionStructInit *instruction) { - for (size_t i = 0; i < instruction->field_count; i += 1) { - 
IrInstructionStructInitField *field = &instruction->fields[i]; - TypeStructField *type_struct_field = field->type_struct_field; - if (!type_has_bits(type_struct_field->type_entry)) - continue; - - LLVMValueRef field_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, - (unsigned)type_struct_field->gen_index, ""); - LLVMValueRef value = ir_llvm_value(g, field->value); - - uint32_t field_align_bytes = get_abi_alignment(g, type_struct_field->type_entry); - uint32_t host_int_bytes = get_host_int_bytes(g, instruction->struct_type, type_struct_field); - - ZigType *ptr_type = get_pointer_to_type_extra(g, type_struct_field->type_entry, - false, false, PtrLenSingle, field_align_bytes, - (uint32_t)type_struct_field->bit_offset_in_host, host_int_bytes, false); - - gen_assign_raw(g, field_ptr, ptr_type, value); - } - return instruction->tmp_ptr; -} - -static LLVMValueRef ir_render_union_init(CodeGen *g, IrExecutable *executable, IrInstructionUnionInit *instruction) { - TypeUnionField *type_union_field = instruction->field; - - if (!type_has_bits(type_union_field->type_entry)) - return nullptr; - - uint32_t field_align_bytes = get_abi_alignment(g, type_union_field->type_entry); - ZigType *ptr_type = get_pointer_to_type_extra(g, type_union_field->type_entry, - false, false, PtrLenSingle, field_align_bytes, - 0, 0, false); - - LLVMValueRef uncasted_union_ptr; - // Even if safety is off in this block, if the union type has the safety field, we have to populate it - // correctly. Otherwise safety code somewhere other than here could fail. - ZigType *union_type = instruction->union_type; - if (union_type->data.unionation.gen_tag_index != SIZE_MAX) { - LLVMValueRef tag_field_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, - union_type->data.unionation.gen_tag_index, ""); - - LLVMValueRef tag_value = bigint_to_llvm_const(get_llvm_type(g, union_type->data.unionation.tag_type), - &type_union_field->enum_field->value); - gen_store_untyped(g, tag_value, tag_field_ptr, 0, false); - - uncasted_union_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, - (unsigned)union_type->data.unionation.gen_union_index, ""); - } else { - uncasted_union_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, (unsigned)0, ""); - } - - LLVMValueRef field_ptr = LLVMBuildBitCast(g->builder, uncasted_union_ptr, get_llvm_type(g, ptr_type), ""); - LLVMValueRef value = ir_llvm_value(g, instruction->init_value); - - gen_assign_raw(g, field_ptr, ptr_type, value); - - return instruction->tmp_ptr; -} - -static LLVMValueRef ir_render_container_init_list(CodeGen *g, IrExecutable *executable, - IrInstructionContainerInitList *instruction) -{ - ZigType *array_type = instruction->base.value.type; - assert(array_type->id == ZigTypeIdArray); - LLVMValueRef tmp_array_ptr = instruction->tmp_ptr; - assert(tmp_array_ptr); - - size_t field_count = instruction->item_count; - - ZigType *child_type = array_type->data.array.child_type; - for (size_t i = 0; i < field_count; i += 1) { - LLVMValueRef elem_val = ir_llvm_value(g, instruction->items[i]); - LLVMValueRef indices[] = { - LLVMConstNull(g->builtin_types.entry_usize->llvm_type), - LLVMConstInt(g->builtin_types.entry_usize->llvm_type, i, false), - }; - LLVMValueRef elem_ptr = LLVMBuildInBoundsGEP(g->builder, tmp_array_ptr, indices, 2, ""); - gen_assign_raw(g, elem_ptr, get_pointer_to_type(g, child_type, false), elem_val); - } - - return tmp_array_ptr; -} - static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInstructionPanic *instruction) { gen_panic(g, 
ir_llvm_value(g, instruction->msg), get_cur_err_ret_trace_val(g, instruction->base.scope)); return nullptr; @@ -5343,7 +5337,9 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f g->cur_fn = prev_cur_fn; g->cur_fn_val = prev_cur_fn_val; LLVMPositionBuilderAtEnd(g->builder, prev_block); - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + if (!g->strip_debug_symbols) { + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + } g->coro_alloc_helper_fn_val = fn_val; return fn_val; @@ -5430,13 +5426,28 @@ static LLVMValueRef ir_render_mark_err_ret_trace_ptr(CodeGen *g, IrExecutable *e return nullptr; } -static LLVMValueRef ir_render_sqrt(CodeGen *g, IrExecutable *executable, IrInstructionSqrt *instruction) { - LLVMValueRef op = ir_llvm_value(g, instruction->op); +static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) { + LLVMValueRef op = ir_llvm_value(g, instruction->op1); assert(instruction->base.value.type->id == ZigTypeIdFloat); - LLVMValueRef fn_val = get_float_fn(g, instruction->base.value.type, ZigLLVMFnIdSqrt); + LLVMValueRef fn_val = get_float_fn(g, instruction->base.value.type, ZigLLVMFnIdFloatOp, instruction->op); return LLVMBuildCall(g->builder, fn_val, &op, 1, ""); } +static LLVMValueRef ir_render_mul_add(CodeGen *g, IrExecutable *executable, IrInstructionMulAdd *instruction) { + LLVMValueRef op1 = ir_llvm_value(g, instruction->op1); + LLVMValueRef op2 = ir_llvm_value(g, instruction->op2); + LLVMValueRef op3 = ir_llvm_value(g, instruction->op3); + assert(instruction->base.value.type->id == ZigTypeIdFloat || + instruction->base.value.type->id == ZigTypeIdVector); + LLVMValueRef fn_val = get_float_fn(g, instruction->base.value.type, ZigLLVMFnIdFMA, BuiltinFnIdMulAdd); + LLVMValueRef args[3] = { + op1, + op2, + op3, + }; + return LLVMBuildCall(g->builder, fn_val, args, 3, ""); +} + static LLVMValueRef ir_render_bswap(CodeGen *g, IrExecutable *executable, IrInstructionBswap *instruction) { LLVMValueRef op = ir_llvm_value(g, instruction->op); ZigType *int_type = instruction->base.value.type; @@ -5474,12 +5485,12 @@ static LLVMValueRef ir_render_vector_to_array(CodeGen *g, IrExecutable *executab ZigType *array_type = instruction->base.value.type; assert(array_type->id == ZigTypeIdArray); assert(handle_is_ptr(array_type)); - assert(instruction->tmp_ptr); + LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc); LLVMValueRef vector = ir_llvm_value(g, instruction->vector); - LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, instruction->tmp_ptr, + LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, result_loc, LLVMPointerType(get_llvm_type(g, instruction->vector->value.type), 0), ""); - gen_store_untyped(g, vector, casted_ptr, 0, false); - return instruction->tmp_ptr; + gen_store_untyped(g, vector, casted_ptr, get_ptr_align(g, instruction->result_loc->value.type), false); + return result_loc; } static LLVMValueRef ir_render_array_to_vector(CodeGen *g, IrExecutable *executable, @@ -5542,14 +5553,10 @@ static void set_debug_location(CodeGen *g, IrInstruction *instruction) { } static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, IrInstruction *instruction) { - set_debug_location(g, instruction); - switch (instruction->id) { case IrInstructionIdInvalid: case IrInstructionIdConst: case IrInstructionIdTypeOf: - case IrInstructionIdToPtrType: - case IrInstructionIdPtrTypeChild: case IrInstructionIdFieldPtr: case IrInstructionIdSetCold: case 
IrInstructionIdSetRuntimeSafety: @@ -5611,10 +5618,22 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdPtrCastSrc: case IrInstructionIdCmpxchgSrc: case IrInstructionIdLoadPtr: - case IrInstructionIdBitCast: case IrInstructionIdGlobalAsm: case IrInstructionIdHasDecl: case IrInstructionIdUndeclaredIdent: + case IrInstructionIdCallSrc: + case IrInstructionIdAllocaSrc: + case IrInstructionIdEndExpr: + case IrInstructionIdAllocaGen: + case IrInstructionIdImplicitCast: + case IrInstructionIdResolveResult: + case IrInstructionIdResetResult: + case IrInstructionIdResultPtr: + case IrInstructionIdContainerInitList: + case IrInstructionIdSliceSrc: + case IrInstructionIdRef: + case IrInstructionIdBitCastSrc: + case IrInstructionIdTestErrSrc: zig_unreachable(); case IrInstructionIdDeclVarGen: @@ -5639,10 +5658,12 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_store_ptr(g, executable, (IrInstructionStorePtr *)instruction); case IrInstructionIdVarPtr: return ir_render_var_ptr(g, executable, (IrInstructionVarPtr *)instruction); + case IrInstructionIdReturnPtr: + return ir_render_return_ptr(g, executable, (IrInstructionReturnPtr *)instruction); case IrInstructionIdElemPtr: return ir_render_elem_ptr(g, executable, (IrInstructionElemPtr *)instruction); - case IrInstructionIdCall: - return ir_render_call(g, executable, (IrInstructionCall *)instruction); + case IrInstructionIdCallGen: + return ir_render_call(g, executable, (IrInstructionCallGen *)instruction); case IrInstructionIdStructFieldPtr: return ir_render_struct_field_ptr(g, executable, (IrInstructionStructFieldPtr *)instruction); case IrInstructionIdUnionFieldPtr: @@ -5667,8 +5688,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_bit_reverse(g, executable, (IrInstructionBitReverse *)instruction); case IrInstructionIdPhi: return ir_render_phi(g, executable, (IrInstructionPhi *)instruction); - case IrInstructionIdRef: - return ir_render_ref(g, executable, (IrInstructionRef *)instruction); + case IrInstructionIdRefGen: + return ir_render_ref(g, executable, (IrInstructionRefGen *)instruction); case IrInstructionIdErrName: return ir_render_err_name(g, executable, (IrInstructionErrName *)instruction); case IrInstructionIdCmpxchgGen: @@ -5683,8 +5704,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_memset(g, executable, (IrInstructionMemset *)instruction); case IrInstructionIdMemcpy: return ir_render_memcpy(g, executable, (IrInstructionMemcpy *)instruction); - case IrInstructionIdSlice: - return ir_render_slice(g, executable, (IrInstructionSlice *)instruction); + case IrInstructionIdSliceGen: + return ir_render_slice(g, executable, (IrInstructionSliceGen *)instruction); case IrInstructionIdBreakpoint: return ir_render_breakpoint(g, executable, (IrInstructionBreakpoint *)instruction); case IrInstructionIdReturnAddress: @@ -5695,24 +5716,20 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_handle(g, executable, (IrInstructionHandle *)instruction); case IrInstructionIdOverflowOp: return ir_render_overflow_op(g, executable, (IrInstructionOverflowOp *)instruction); - case IrInstructionIdTestErr: - return ir_render_test_err(g, executable, (IrInstructionTestErr *)instruction); + case IrInstructionIdTestErrGen: + return ir_render_test_err(g, executable, (IrInstructionTestErrGen *)instruction); case 
IrInstructionIdUnwrapErrCode: return ir_render_unwrap_err_code(g, executable, (IrInstructionUnwrapErrCode *)instruction); case IrInstructionIdUnwrapErrPayload: return ir_render_unwrap_err_payload(g, executable, (IrInstructionUnwrapErrPayload *)instruction); case IrInstructionIdOptionalWrap: - return ir_render_maybe_wrap(g, executable, (IrInstructionOptionalWrap *)instruction); + return ir_render_optional_wrap(g, executable, (IrInstructionOptionalWrap *)instruction); case IrInstructionIdErrWrapCode: return ir_render_err_wrap_code(g, executable, (IrInstructionErrWrapCode *)instruction); case IrInstructionIdErrWrapPayload: return ir_render_err_wrap_payload(g, executable, (IrInstructionErrWrapPayload *)instruction); case IrInstructionIdUnionTag: return ir_render_union_tag(g, executable, (IrInstructionUnionTag *)instruction); - case IrInstructionIdStructInit: - return ir_render_struct_init(g, executable, (IrInstructionStructInit *)instruction); - case IrInstructionIdUnionInit: - return ir_render_union_init(g, executable, (IrInstructionUnionInit *)instruction); case IrInstructionIdPtrCastGen: return ir_render_ptr_cast(g, executable, (IrInstructionPtrCastGen *)instruction); case IrInstructionIdBitCastGen: @@ -5729,8 +5746,6 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_int_to_err(g, executable, (IrInstructionIntToErr *)instruction); case IrInstructionIdErrToInt: return ir_render_err_to_int(g, executable, (IrInstructionErrToInt *)instruction); - case IrInstructionIdContainerInitList: - return ir_render_container_init_list(g, executable, (IrInstructionContainerInitList *)instruction); case IrInstructionIdPanic: return ir_render_panic(g, executable, (IrInstructionPanic *)instruction); case IrInstructionIdTagName: @@ -5779,8 +5794,10 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_merge_err_ret_traces(g, executable, (IrInstructionMergeErrRetTraces *)instruction); case IrInstructionIdMarkErrRetTracePtr: return ir_render_mark_err_ret_trace_ptr(g, executable, (IrInstructionMarkErrRetTracePtr *)instruction); - case IrInstructionIdSqrt: - return ir_render_sqrt(g, executable, (IrInstructionSqrt *)instruction); + case IrInstructionIdFloatOp: + return ir_render_float_op(g, executable, (IrInstructionFloatOp *)instruction); + case IrInstructionIdMulAdd: + return ir_render_mul_add(g, executable, (IrInstructionMulAdd *)instruction); case IrInstructionIdArrayToVector: return ir_render_array_to_vector(g, executable, (IrInstructionArrayToVector *)instruction); case IrInstructionIdVectorToArray: @@ -5791,6 +5808,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_assert_non_null(g, executable, (IrInstructionAssertNonNull *)instruction); case IrInstructionIdResizeSlice: return ir_render_resize_slice(g, executable, (IrInstructionResizeSlice *)instruction); + case IrInstructionIdPtrOfArrayToSlice: + return ir_render_ptr_of_array_to_slice(g, executable, (IrInstructionPtrOfArrayToSlice *)instruction); } zig_unreachable(); } @@ -5802,7 +5821,6 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) { assert(executable->basic_block_list.length > 0); for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { IrBasicBlock *current_block = executable->basic_block_list.at(block_i); - //assert(current_block->ref_count > 0); assert(current_block->llvm_block); LLVMPositionBuilderAtEnd(g->builder, current_block->llvm_block); for (size_t instr_i = 0; 
instr_i < current_block->instruction_list.length; instr_i += 1) { @@ -5810,6 +5828,9 @@ static void ir_render(CodeGen *g, ZigFn *fn_entry) { if (instruction->ref_count == 0 && !ir_has_side_effects(instruction)) continue; + if (!g->strip_debug_symbols) { + set_debug_location(g, instruction); + } instruction->llvm_value = ir_render_instruction(g, executable, instruction); } current_block->llvm_exit_block = LLVMGetInsertBlock(g->builder); @@ -6599,7 +6620,8 @@ static void render_const_val_global(CodeGen *g, ConstExprValue *const_val, const LLVMSetLinkage(global_value, LLVMInternalLinkage); LLVMSetGlobalConstant(global_value, true); LLVMSetUnnamedAddr(global_value, true); - LLVMSetAlignment(global_value, get_abi_alignment(g, const_val->type)); + LLVMSetAlignment(global_value, (const_val->global_refs->align == 0) ? + get_abi_alignment(g, const_val->type) : const_val->global_refs->align); const_val->global_refs->llvm_global = global_value; } @@ -6724,7 +6746,7 @@ static void do_code_gen(CodeGen *g) { zig_panic("TODO debug info for var with ptr casted value"); } ZigType *var_type = g->builtin_types.entry_f128; - ConstExprValue coerced_value; + ConstExprValue coerced_value = {}; coerced_value.special = ConstValSpecialStatic; coerced_value.type = var_type; coerced_value.data.x_f128 = bigfloat_to_f128(&const_val->data.x_bigfloat); @@ -6829,20 +6851,24 @@ static void do_code_gen(CodeGen *g) { FnTypeId *fn_type_id = &fn_table_entry->type_entry->data.fn.fn_type_id; CallingConvention cc = fn_type_id->cc; bool is_c_abi = cc == CallingConventionC; + bool want_sret = want_first_arg_sret(g, fn_type_id); LLVMValueRef fn = fn_llvm_value(g, fn_table_entry); g->cur_fn = fn_table_entry; g->cur_fn_val = fn; - ZigType *return_type = fn_type_id->return_type; - if (handle_is_ptr(return_type)) { - g->cur_ret_ptr = LLVMGetParam(fn, 0); - } else { - g->cur_ret_ptr = nullptr; - } build_all_basic_blocks(g, fn_table_entry); clear_debug_source_node(g); + if (want_sret) { + g->cur_ret_ptr = LLVMGetParam(fn, 0); + } else if (handle_is_ptr(fn_type_id->return_type)) { + g->cur_ret_ptr = build_alloca(g, fn_type_id->return_type, "result", 0); + // TODO add debug info variable for this + } else { + g->cur_ret_ptr = nullptr; + } + uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); bool have_err_ret_trace_arg = err_ret_trace_arg_index != UINT32_MAX; if (have_err_ret_trace_arg) { @@ -6867,68 +6893,28 @@ static void do_code_gen(CodeGen *g) { } // allocate temporary stack data - for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_list.length; alloca_i += 1) { - IrInstruction *instruction = fn_table_entry->alloca_list.at(alloca_i); - LLVMValueRef *slot; - ZigType *slot_type = instruction->value.type; - uint32_t alignment_bytes = 0; - if (instruction->id == IrInstructionIdCast) { - IrInstructionCast *cast_instruction = (IrInstructionCast *)instruction; - slot = &cast_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdRef) { - IrInstructionRef *ref_instruction = (IrInstructionRef *)instruction; - slot = &ref_instruction->tmp_ptr; - assert(instruction->value.type->id == ZigTypeIdPointer); - slot_type = instruction->value.type->data.pointer.child_type; - } else if (instruction->id == IrInstructionIdContainerInitList) { - IrInstructionContainerInitList *container_init_list_instruction = (IrInstructionContainerInitList *)instruction; - slot = &container_init_list_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdStructInit) { - IrInstructionStructInit 
*struct_init_instruction = (IrInstructionStructInit *)instruction; - slot = &struct_init_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdUnionInit) { - IrInstructionUnionInit *union_init_instruction = (IrInstructionUnionInit *)instruction; - slot = &union_init_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdCall) { - IrInstructionCall *call_instruction = (IrInstructionCall *)instruction; - slot = &call_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdSlice) { - IrInstructionSlice *slice_instruction = (IrInstructionSlice *)instruction; - slot = &slice_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdOptionalWrap) { - IrInstructionOptionalWrap *maybe_wrap_instruction = (IrInstructionOptionalWrap *)instruction; - slot = &maybe_wrap_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdErrWrapPayload) { - IrInstructionErrWrapPayload *err_wrap_payload_instruction = (IrInstructionErrWrapPayload *)instruction; - slot = &err_wrap_payload_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdErrWrapCode) { - IrInstructionErrWrapCode *err_wrap_code_instruction = (IrInstructionErrWrapCode *)instruction; - slot = &err_wrap_code_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdCmpxchgGen) { - IrInstructionCmpxchgGen *cmpxchg_instruction = (IrInstructionCmpxchgGen *)instruction; - slot = &cmpxchg_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdResizeSlice) { - IrInstructionResizeSlice *resize_slice_instruction = (IrInstructionResizeSlice *)instruction; - slot = &resize_slice_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdLoadPtrGen) { - IrInstructionLoadPtrGen *load_ptr_inst = (IrInstructionLoadPtrGen *)instruction; - slot = &load_ptr_inst->tmp_ptr; - } else if (instruction->id == IrInstructionIdBitCastGen) { - IrInstructionBitCastGen *bit_cast_inst = (IrInstructionBitCastGen *)instruction; - slot = &bit_cast_inst->tmp_ptr; - } else if (instruction->id == IrInstructionIdVectorToArray) { - IrInstructionVectorToArray *vector_to_array_instruction = (IrInstructionVectorToArray *)instruction; - alignment_bytes = get_abi_alignment(g, vector_to_array_instruction->vector->value.type); - slot = &vector_to_array_instruction->tmp_ptr; - } else { - zig_unreachable(); + for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_gen_list.length; alloca_i += 1) { + IrInstructionAllocaGen *instruction = fn_table_entry->alloca_gen_list.at(alloca_i); + ZigType *ptr_type = instruction->base.value.type; + assert(ptr_type->id == ZigTypeIdPointer); + ZigType *child_type = ptr_type->data.pointer.child_type; + if (!type_has_bits(child_type)) + continue; + if (instruction->base.ref_count == 0) + continue; + if (instruction->base.value.special != ConstValSpecialRuntime) { + if (const_ptr_pointee(nullptr, g, &instruction->base.value, nullptr)->special != + ConstValSpecialRuntime) + { + continue; + } } - *slot = build_alloca(g, slot_type, "", alignment_bytes); + instruction->base.llvm_value = build_alloca(g, child_type, instruction->name_hint, + get_ptr_align(g, ptr_type)); } ZigType *import = get_scope_import(&fn_table_entry->fndef_scope->base); - - unsigned gen_i_init = want_first_arg_sret(g, fn_type_id) ? 1 : 0; + unsigned gen_i_init = want_sret ? 
1 : 0; // create debug variable declarations for variables and allocate all local variables FnWalk fn_walk_var = {}; @@ -6955,8 +6941,6 @@ static void do_code_gen(CodeGen *g) { } if (var->src_arg_index == SIZE_MAX) { - var->value_ref = build_alloca(g, var->var_type, buf_ptr(&var->name), var->align_bytes); - var->di_loc_var = ZigLLVMCreateAutoVariable(g->dbuilder, get_di_scope(g, var->parent_scope), buf_ptr(&var->name), import->data.structure.root_struct->di_file, (unsigned)(var->decl_node->line + 1), get_llvm_di_type(g, var->var_type), !g->strip_debug_symbols, 0); @@ -7398,6 +7382,21 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdRem, "rem", 2); create_builtin_fn(g, BuiltinFnIdMod, "mod", 2); create_builtin_fn(g, BuiltinFnIdSqrt, "sqrt", 2); + create_builtin_fn(g, BuiltinFnIdSin, "sin", 2); + create_builtin_fn(g, BuiltinFnIdCos, "cos", 2); + create_builtin_fn(g, BuiltinFnIdExp, "exp", 2); + create_builtin_fn(g, BuiltinFnIdExp2, "exp2", 2); + create_builtin_fn(g, BuiltinFnIdLn, "ln", 2); + create_builtin_fn(g, BuiltinFnIdLog2, "log2", 2); + create_builtin_fn(g, BuiltinFnIdLog10, "log10", 2); + create_builtin_fn(g, BuiltinFnIdFabs, "fabs", 2); + create_builtin_fn(g, BuiltinFnIdFloor, "floor", 2); + create_builtin_fn(g, BuiltinFnIdCeil, "ceil", 2); + create_builtin_fn(g, BuiltinFnIdTrunc, "trunc", 2); + //Needs library support on Windows + //create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2); + create_builtin_fn(g, BuiltinFnIdRound, "round", 2); + create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4); create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX); @@ -8056,6 +8055,8 @@ static Error define_builtin_compile_vars(CodeGen *g) { g->root_package->package_table.put(buf_create_from_str("builtin"), g->compile_var_package); g->std_package->package_table.put(buf_create_from_str("builtin"), g->compile_var_package); g->std_package->package_table.put(buf_create_from_str("std"), g->std_package); + g->std_package->package_table.put(buf_create_from_str("root"), + g->is_test_build ? 
g->test_runner_package : g->root_package); g->compile_var_import = add_source_file(g, g->compile_var_package, builtin_zig_path, contents, SourceKindPkgMain); @@ -8525,7 +8526,7 @@ static ZigType *add_special_code(CodeGen *g, ZigPackage *package, const char *ba static ZigPackage *create_bootstrap_pkg(CodeGen *g, ZigPackage *pkg_with_main) { ZigPackage *package = codegen_create_package(g, buf_ptr(g->zig_std_special_dir), "bootstrap.zig", "std.special"); - package->package_table.put(buf_create_from_str("@root"), pkg_with_main); + package->package_table.put(buf_create_from_str("root"), pkg_with_main); return package; } @@ -9378,6 +9379,7 @@ void codegen_add_time_event(CodeGen *g, const char *name) { static void add_cache_pkg(CodeGen *g, CacheHash *ch, ZigPackage *pkg) { if (buf_len(&pkg->root_src_path) == 0) return; + pkg->added_to_cache = true; Buf *rel_full_path = buf_alloc(); os_path_join(&pkg->root_src_dir, &pkg->root_src_path, rel_full_path); @@ -9389,9 +9391,7 @@ static void add_cache_pkg(CodeGen *g, CacheHash *ch, ZigPackage *pkg) { if (!entry) break; - // TODO: I think we need a more sophisticated detection of - // packages we have already seen - if (entry->value != pkg) { + if (!pkg->added_to_cache) { cache_buf(ch, entry->key); add_cache_pkg(g, ch, entry->value); } @@ -9648,6 +9648,10 @@ ZigPackage *codegen_create_package(CodeGen *g, const char *root_src_dir, const c if (g->std_package != nullptr) { assert(g->compile_var_package != nullptr); pkg->package_table.put(buf_create_from_str("std"), g->std_package); + + ZigPackage *main_pkg = g->is_test_build ? g->test_runner_package : g->root_package; + pkg->package_table.put(buf_create_from_str("root"), main_pkg); + pkg->package_table.put(buf_create_from_str("builtin"), g->compile_var_package); } return pkg; diff --git a/src/ir.cpp b/src/ir.cpp index b74a99b37d..abae52fcb5 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -38,6 +38,7 @@ struct IrAnalyze { ZigType *explicit_return_type; AstNode *explicit_return_type_source_node; ZigList src_implicit_return_type_list; + ZigList resume_stack; IrBasicBlock *const_predecessor_bb; }; @@ -157,16 +158,19 @@ enum UndefAllowed { }; static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope); -static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval); -static IrInstruction *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *instruction); +static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval, + ResultLoc *result_loc); static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, ZigType *expected_type); -static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr); +static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr, + ResultLoc *result_loc); static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg); static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, - IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type); + IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type, bool initializing); +static void ir_assert(bool ok, IrInstruction *source_instruction); static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, ZigVar *var); static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op); -static IrInstruction 
*ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval); +static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval, ResultLoc *result_loc); +static IrInstruction *ir_expr_wrap(IrBuilder *irb, Scope *scope, IrInstruction *inst, ResultLoc *result_loc); static ZigType *adjust_ptr_align(CodeGen *g, ZigType *ptr_type, uint32_t new_align); static ZigType *adjust_slice_align(CodeGen *g, ZigType *slice_type, uint32_t new_align); static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *source_node, uint8_t *buf, ConstExprValue *val); @@ -178,17 +182,28 @@ static IrInstruction *ir_analyze_ptr_cast(IrAnalyze *ira, IrInstruction *source_ static ConstExprValue *ir_resolve_const(IrAnalyze *ira, IrInstruction *value, UndefAllowed undef_allowed); static void copy_const_val(ConstExprValue *dest, ConstExprValue *src, bool same_global_refs); static Error resolve_ptr_align(IrAnalyze *ira, ZigType *ty, uint32_t *result_align); -static void ir_add_alloca(IrAnalyze *ira, IrInstruction *instruction, ZigType *type_entry); static IrInstruction *ir_analyze_int_to_ptr(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *target, ZigType *ptr_type); static IrInstruction *ir_analyze_bit_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, ZigType *dest_type); +static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspend_source_instr, + ResultLoc *result_loc, ZigType *value_type, IrInstruction *value, bool force_runtime, bool non_null_comptime); +static IrInstruction *ir_resolve_result(IrAnalyze *ira, IrInstruction *suspend_source_instr, + ResultLoc *result_loc, ZigType *value_type, IrInstruction *value, bool force_runtime, bool non_null_comptime); +static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *base_ptr, bool safety_check_on, bool initializing); +static IrInstruction *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *base_ptr, bool safety_check_on, bool initializing); +static IrInstruction *ir_analyze_unwrap_err_code(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *base_ptr, bool initializing); +static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *ptr, IrInstruction *uncasted_value); static ConstExprValue *const_ptr_pointee_unchecked(CodeGen *g, ConstExprValue *const_val) { assert(get_src_ptr_type(const_val->type) != nullptr); assert(const_val->special == ConstValSpecialStatic); ConstExprValue *result; - + switch (type_has_one_possible_value(g, const_val->type->data.pointer.child_type)) { case OnePossibleValueInvalid: zig_unreachable(); @@ -200,7 +215,7 @@ static ConstExprValue *const_ptr_pointee_unchecked(CodeGen *g, ConstExprValue *c case OnePossibleValueNo: break; } - + switch (const_val->data.x_ptr.special) { case ConstPtrSpecialInvalid: zig_unreachable(); @@ -246,6 +261,15 @@ static bool is_opt_err_set(ZigType *ty) { (ty->id == ZigTypeIdOptional && ty->data.maybe.child_type->id == ZigTypeIdErrorSet); } +static bool is_slice(ZigType *type) { + return type->id == ZigTypeIdStruct && type->data.structure.is_slice; +} + +static bool slice_is_const(ZigType *type) { + assert(is_slice(type)); + return type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const; +} + // This function returns true when you can change the type of a ConstExprValue and the // value remains meaningful. 
static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) { @@ -282,8 +306,9 @@ static bool types_have_same_zig_comptime_repr(ZigType *a, ZigType *b) { return a->data.floating.bit_count == b->data.floating.bit_count; case ZigTypeIdInt: return a->data.integral.is_signed == b->data.integral.is_signed; - case ZigTypeIdArray: case ZigTypeIdStruct: + return is_slice(a) && is_slice(b); + case ZigTypeIdArray: case ZigTypeIdOptional: case ZigTypeIdErrorUnion: case ZigTypeIdEnum: @@ -386,6 +411,7 @@ static IrBasicBlock *ir_create_basic_block(IrBuilder *irb, Scope *scope, const c result->scope = scope; result->name_hint = name_hint; result->debug_id = exec_next_debug_id(irb->exec); + result->index = SIZE_MAX; // set later return result; } @@ -475,8 +501,16 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionVarPtr *) { return IrInstructionIdVarPtr; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionCall *) { - return IrInstructionIdCall; +static constexpr IrInstructionId ir_instruction_id(IrInstructionReturnPtr *) { + return IrInstructionIdReturnPtr; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCallSrc *) { + return IrInstructionIdCallSrc; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCallGen *) { + return IrInstructionIdCallGen; } static constexpr IrInstructionId ir_instruction_id(IrInstructionConst *) { @@ -511,14 +545,6 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTypeOf *) { return IrInstructionIdTypeOf; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionToPtrType *) { - return IrInstructionIdToPtrType; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrTypeChild *) { - return IrInstructionIdPtrTypeChild; -} - static constexpr IrInstructionId ir_instruction_id(IrInstructionSetCold *) { return IrInstructionIdSetCold; } @@ -611,12 +637,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionRef *) { return IrInstructionIdRef; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionStructInit *) { - return IrInstructionIdStructInit; -} - -static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionInit *) { - return IrInstructionIdUnionInit; +static constexpr IrInstructionId ir_instruction_id(IrInstructionRefGen *) { + return IrInstructionIdRefGen; } static constexpr IrInstructionId ir_instruction_id(IrInstructionCompileErr *) { @@ -703,8 +725,12 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionMemcpy *) { return IrInstructionIdMemcpy; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionSlice *) { - return IrInstructionIdSlice; +static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceSrc *) { + return IrInstructionIdSliceSrc; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionSliceGen *) { + return IrInstructionIdSliceGen; } static constexpr IrInstructionId ir_instruction_id(IrInstructionMemberCount *) { @@ -743,8 +769,16 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionOverflowOp *) { return IrInstructionIdOverflowOp; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionTestErr *) { - return IrInstructionIdTestErr; +static constexpr IrInstructionId ir_instruction_id(IrInstructionTestErrSrc *) { + return IrInstructionIdTestErrSrc; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionTestErrGen *) { + return IrInstructionIdTestErrGen; +} + +static constexpr IrInstructionId 
ir_instruction_id(IrInstructionMulAdd *) { + return IrInstructionIdMulAdd; } static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapErrCode *) { @@ -783,8 +817,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrCastGen *) { return IrInstructionIdPtrCastGen; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionBitCast *) { - return IrInstructionIdBitCast; +static constexpr IrInstructionId ir_instruction_id(IrInstructionBitCastSrc *) { + return IrInstructionIdBitCastSrc; } static constexpr IrInstructionId ir_instruction_id(IrInstructionBitCastGen *) { @@ -879,6 +913,26 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignCast *) { return IrInstructionIdAlignCast; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionImplicitCast *) { + return IrInstructionIdImplicitCast; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionResolveResult *) { + return IrInstructionIdResolveResult; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionResetResult *) { + return IrInstructionIdResetResult; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionResultPtr *) { + return IrInstructionIdResultPtr; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrOfArrayToSlice *) { + return IrInstructionIdPtrOfArrayToSlice; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionOpaqueType *) { return IrInstructionIdOpaqueType; } @@ -987,8 +1041,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionMarkErrRetTraceP return IrInstructionIdMarkErrRetTracePtr; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionSqrt *) { - return IrInstructionIdSqrt; +static constexpr IrInstructionId ir_instruction_id(IrInstructionFloatOp *) { + return IrInstructionIdFloatOp; } static constexpr IrInstructionId ir_instruction_id(IrInstructionCheckRuntimeScope *) { @@ -1019,6 +1073,18 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUndeclaredIdent return IrInstructionIdUndeclaredIdent; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionAllocaSrc *) { + return IrInstructionIdAllocaSrc; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionAllocaGen *) { + return IrInstructionIdAllocaGen; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionEndExpr *) { + return IrInstructionIdEndExpr; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -1070,13 +1136,15 @@ static IrInstruction *ir_build_cond_br(IrBuilder *irb, Scope *scope, AstNode *so return &cond_br_instruction->base; } -static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *return_value) { +static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *return_value) +{ IrInstructionReturn *return_instruction = ir_build_instruction(irb, scope, source_node); return_instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable; return_instruction->base.value.special = ConstValSpecialStatic; return_instruction->value = return_value; - ir_ref_instruction(return_value, irb->current_basic_block); + if (return_value != nullptr) ir_ref_instruction(return_value, irb->current_basic_block); return &return_instruction->base; } @@ -1254,17 +1322,27 @@ static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *so return 
ir_build_var_ptr_x(irb, scope, source_node, var, nullptr); } -static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *array_ptr, - IrInstruction *elem_index, bool safety_check_on, PtrLen ptr_len) +static IrInstruction *ir_build_return_ptr(IrAnalyze *ira, IrInstruction *source_instruction, ZigType *ty) { + IrInstructionReturnPtr *instruction = ir_build_instruction(&ira->new_irb, + source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = ty; + return &instruction->base; +} + +static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *array_ptr, IrInstruction *elem_index, bool safety_check_on, PtrLen ptr_len, + IrInstruction *init_array_type) { IrInstructionElemPtr *instruction = ir_build_instruction(irb, scope, source_node); instruction->array_ptr = array_ptr; instruction->elem_index = elem_index; instruction->safety_check_on = safety_check_on; instruction->ptr_len = ptr_len; + instruction->init_array_type = init_array_type; ir_ref_instruction(array_ptr, irb->current_basic_block); ir_ref_instruction(elem_index, irb->current_basic_block); + if (init_array_type != nullptr) ir_ref_instruction(init_array_type, irb->current_basic_block); return &instruction->base; } @@ -1284,12 +1362,13 @@ static IrInstruction *ir_build_field_ptr_instruction(IrBuilder *irb, Scope *scop } static IrInstruction *ir_build_field_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *container_ptr, Buf *field_name) + IrInstruction *container_ptr, Buf *field_name, bool initializing) { IrInstructionFieldPtr *instruction = ir_build_instruction(irb, scope, source_node); instruction->container_ptr = container_ptr; instruction->field_name_buffer = field_name; instruction->field_name_expr = nullptr; + instruction->initializing = initializing; ir_ref_instruction(container_ptr, irb->current_basic_block); @@ -1309,9 +1388,11 @@ static IrInstruction *ir_build_struct_field_ptr(IrBuilder *irb, Scope *scope, As } static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *union_ptr, TypeUnionField *field) + IrInstruction *union_ptr, TypeUnionField *field, bool safety_check_on, bool initializing) { IrInstructionUnionFieldPtr *instruction = ir_build_instruction(irb, scope, source_node); + instruction->initializing = initializing; + instruction->safety_check_on = safety_check_on; instruction->union_ptr = union_ptr; instruction->field = field; @@ -1320,12 +1401,12 @@ static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, Ast return &instruction->base; } -static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node, +static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node, ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, - IrInstruction *new_stack) + IrInstruction *new_stack, ResultLoc *result_loc) { - IrInstructionCall *call_instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionCallSrc *call_instruction = ir_build_instruction(irb, scope, source_node); call_instruction->fn_entry = fn_entry; call_instruction->fn_ref = fn_ref; call_instruction->is_comptime = is_comptime; @@ -1335,6 +1416,7 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc call_instruction->is_async = is_async; 
call_instruction->async_allocator = async_allocator; call_instruction->new_stack = new_stack; + call_instruction->result_loc = result_loc; if (fn_ref != nullptr) ir_ref_instruction(fn_ref, irb->current_basic_block); for (size_t i = 0; i < arg_count; i += 1) @@ -1345,8 +1427,37 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc return &call_instruction->base; } +static IrInstruction *ir_build_call_gen(IrAnalyze *ira, IrInstruction *source_instruction, + ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, + FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *new_stack, + IrInstruction *result_loc, ZigType *return_type) +{ + IrInstructionCallGen *call_instruction = ir_build_instruction(&ira->new_irb, + source_instruction->scope, source_instruction->source_node); + call_instruction->base.value.type = return_type; + call_instruction->fn_entry = fn_entry; + call_instruction->fn_ref = fn_ref; + call_instruction->fn_inline = fn_inline; + call_instruction->args = args; + call_instruction->arg_count = arg_count; + call_instruction->is_async = is_async; + call_instruction->async_allocator = async_allocator; + call_instruction->new_stack = new_stack; + call_instruction->result_loc = result_loc; + + if (fn_ref != nullptr) ir_ref_instruction(fn_ref, ira->new_irb.current_basic_block); + for (size_t i = 0; i < arg_count; i += 1) + ir_ref_instruction(args[i], ira->new_irb.current_basic_block); + if (async_allocator != nullptr) ir_ref_instruction(async_allocator, ira->new_irb.current_basic_block); + if (new_stack != nullptr) ir_ref_instruction(new_stack, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); + + return &call_instruction->base; +} + static IrInstruction *ir_build_phi(IrBuilder *irb, Scope *scope, AstNode *source_node, - size_t incoming_count, IrBasicBlock **incoming_blocks, IrInstruction **incoming_values) + size_t incoming_count, IrBasicBlock **incoming_blocks, IrInstruction **incoming_values, + ResultLocPeerParent *peer_parent) { assert(incoming_count != 0); assert(incoming_count != SIZE_MAX); @@ -1355,6 +1466,7 @@ static IrInstruction *ir_build_phi(IrBuilder *irb, Scope *scope, AstNode *source phi_instruction->incoming_count = incoming_count; phi_instruction->incoming_blocks = incoming_blocks; phi_instruction->incoming_values = incoming_values; + phi_instruction->peer_parent = peer_parent; for (size_t i = 0; i < incoming_count; i += 1) { ir_ref_bb(incoming_blocks[i]); @@ -1408,12 +1520,13 @@ static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *s } static IrInstruction *ir_build_un_op_lval(IrBuilder *irb, Scope *scope, AstNode *source_node, IrUnOp op_id, - IrInstruction *value, LVal lval) + IrInstruction *value, LVal lval, ResultLoc *result_loc) { IrInstructionUnOp *instruction = ir_build_instruction(irb, scope, source_node); instruction->op_id = op_id; instruction->value = value; instruction->lval = lval; + instruction->result_loc = result_loc; ir_ref_instruction(value, irb->current_basic_block); @@ -1423,72 +1536,49 @@ static IrInstruction *ir_build_un_op_lval(IrBuilder *irb, Scope *scope, AstNode static IrInstruction *ir_build_un_op(IrBuilder *irb, Scope *scope, AstNode *source_node, IrUnOp op_id, IrInstruction *value) { - return ir_build_un_op_lval(irb, scope, source_node, op_id, value, LValNone); + return ir_build_un_op_lval(irb, scope, source_node, op_id, value, LValNone, nullptr); } static 
IrInstruction *ir_build_container_init_list(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *container_type, IrInstruction *elem_type, size_t item_count, IrInstruction **items) + IrInstruction *container_type, size_t item_count, IrInstruction **elem_result_loc_list, + IrInstruction *result_loc) { IrInstructionContainerInitList *container_init_list_instruction = ir_build_instruction(irb, scope, source_node); container_init_list_instruction->container_type = container_type; - container_init_list_instruction->elem_type = elem_type; container_init_list_instruction->item_count = item_count; - container_init_list_instruction->items = items; + container_init_list_instruction->elem_result_loc_list = elem_result_loc_list; + container_init_list_instruction->result_loc = result_loc; - if (container_type != nullptr) ir_ref_instruction(container_type, irb->current_basic_block); - if (elem_type != nullptr) ir_ref_instruction(elem_type, irb->current_basic_block); + ir_ref_instruction(container_type, irb->current_basic_block); for (size_t i = 0; i < item_count; i += 1) { - ir_ref_instruction(items[i], irb->current_basic_block); + ir_ref_instruction(elem_result_loc_list[i], irb->current_basic_block); } + if (result_loc != nullptr) ir_ref_instruction(result_loc, irb->current_basic_block); return &container_init_list_instruction->base; } static IrInstruction *ir_build_container_init_fields(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *container_type, size_t field_count, IrInstructionContainerInitFieldsField *fields) + IrInstruction *container_type, size_t field_count, IrInstructionContainerInitFieldsField *fields, + IrInstruction *result_loc) { IrInstructionContainerInitFields *container_init_fields_instruction = ir_build_instruction(irb, scope, source_node); container_init_fields_instruction->container_type = container_type; container_init_fields_instruction->field_count = field_count; container_init_fields_instruction->fields = fields; + container_init_fields_instruction->result_loc = result_loc; ir_ref_instruction(container_type, irb->current_basic_block); for (size_t i = 0; i < field_count; i += 1) { - ir_ref_instruction(fields[i].value, irb->current_basic_block); + ir_ref_instruction(fields[i].result_loc, irb->current_basic_block); } + if (result_loc != nullptr) ir_ref_instruction(result_loc, irb->current_basic_block); return &container_init_fields_instruction->base; } -static IrInstruction *ir_build_struct_init(IrBuilder *irb, Scope *scope, AstNode *source_node, - ZigType *struct_type, size_t field_count, IrInstructionStructInitField *fields) -{ - IrInstructionStructInit *struct_init_instruction = ir_build_instruction(irb, scope, source_node); - struct_init_instruction->struct_type = struct_type; - struct_init_instruction->field_count = field_count; - struct_init_instruction->fields = fields; - - for (size_t i = 0; i < field_count; i += 1) - ir_ref_instruction(fields[i].value, irb->current_basic_block); - - return &struct_init_instruction->base; -} - -static IrInstruction *ir_build_union_init(IrBuilder *irb, Scope *scope, AstNode *source_node, - ZigType *union_type, TypeUnionField *field, IrInstruction *init_value) -{ - IrInstructionUnionInit *union_init_instruction = ir_build_instruction(irb, scope, source_node); - union_init_instruction->union_type = union_type; - union_init_instruction->field = field; - union_init_instruction->init_value = init_value; - - ir_ref_instruction(init_value, irb->current_basic_block); - - return &union_init_instruction->base; -} 
- static IrInstruction *ir_build_unreachable(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionUnreachable *unreachable_instruction = ir_build_instruction(irb, scope, source_node); @@ -1513,47 +1603,47 @@ static IrInstruction *ir_build_store_ptr(IrBuilder *irb, Scope *scope, AstNode * } static IrInstruction *ir_build_var_decl_src(IrBuilder *irb, Scope *scope, AstNode *source_node, - ZigVar *var, IrInstruction *var_type, IrInstruction *align_value, IrInstruction *init_value) + ZigVar *var, IrInstruction *align_value, IrInstruction *ptr) { IrInstructionDeclVarSrc *decl_var_instruction = ir_build_instruction(irb, scope, source_node); decl_var_instruction->base.value.special = ConstValSpecialStatic; decl_var_instruction->base.value.type = irb->codegen->builtin_types.entry_void; decl_var_instruction->var = var; - decl_var_instruction->var_type = var_type; decl_var_instruction->align_value = align_value; - decl_var_instruction->init_value = init_value; + decl_var_instruction->ptr = ptr; - if (var_type != nullptr) ir_ref_instruction(var_type, irb->current_basic_block); if (align_value != nullptr) ir_ref_instruction(align_value, irb->current_basic_block); - ir_ref_instruction(init_value, irb->current_basic_block); + ir_ref_instruction(ptr, irb->current_basic_block); return &decl_var_instruction->base; } static IrInstruction *ir_build_var_decl_gen(IrAnalyze *ira, IrInstruction *source_instruction, - ZigVar *var, IrInstruction *init_value) + ZigVar *var, IrInstruction *var_ptr) { IrInstructionDeclVarGen *decl_var_instruction = ir_build_instruction(&ira->new_irb, source_instruction->scope, source_instruction->source_node); decl_var_instruction->base.value.special = ConstValSpecialStatic; decl_var_instruction->base.value.type = ira->codegen->builtin_types.entry_void; decl_var_instruction->var = var; - decl_var_instruction->init_value = init_value; + decl_var_instruction->var_ptr = var_ptr; - ir_ref_instruction(init_value, ira->new_irb.current_basic_block); + ir_ref_instruction(var_ptr, ira->new_irb.current_basic_block); return &decl_var_instruction->base; } static IrInstruction *ir_build_resize_slice(IrAnalyze *ira, IrInstruction *source_instruction, - IrInstruction *operand, ZigType *ty) + IrInstruction *operand, ZigType *ty, IrInstruction *result_loc) { IrInstructionResizeSlice *instruction = ir_build_instruction(&ira->new_irb, source_instruction->scope, source_instruction->source_node); instruction->base.value.type = ty; instruction->operand = operand; + instruction->result_loc = result_loc; ir_ref_instruction(operand, ira->new_irb.current_basic_block); + ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); return &instruction->base; } @@ -1594,27 +1684,6 @@ static IrInstruction *ir_build_typeof(IrBuilder *irb, Scope *scope, AstNode *sou return &instruction->base; } -static IrInstruction *ir_build_to_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *ptr) { - IrInstructionToPtrType *instruction = ir_build_instruction(irb, scope, source_node); - instruction->ptr = ptr; - - ir_ref_instruction(ptr, irb->current_basic_block); - - return &instruction->base; -} - -static IrInstruction *ir_build_ptr_type_child(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *value) -{ - IrInstructionPtrTypeChild *instruction = ir_build_instruction( - irb, scope, source_node); - instruction->value = value; - - ir_ref_instruction(value, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_set_cold(IrBuilder 
*irb, Scope *scope, AstNode *source_node, IrInstruction *is_cold) { IrInstructionSetCold *instruction = ir_build_instruction(irb, scope, source_node); instruction->is_cold = is_cold; @@ -1740,40 +1809,59 @@ static IrInstruction *ir_build_test_nonnull(IrBuilder *irb, Scope *scope, AstNod } static IrInstruction *ir_build_optional_unwrap_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *base_ptr, bool safety_check_on) + IrInstruction *base_ptr, bool safety_check_on, bool initializing) { IrInstructionOptionalUnwrapPtr *instruction = ir_build_instruction(irb, scope, source_node); instruction->base_ptr = base_ptr; instruction->safety_check_on = safety_check_on; + instruction->initializing = initializing; ir_ref_instruction(base_ptr, irb->current_basic_block); return &instruction->base; } -static IrInstruction *ir_build_maybe_wrap(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { - IrInstructionOptionalWrap *instruction = ir_build_instruction(irb, scope, source_node); - instruction->value = value; +static IrInstruction *ir_build_optional_wrap(IrAnalyze *ira, IrInstruction *source_instruction, ZigType *result_ty, + IrInstruction *operand, IrInstruction *result_loc) +{ + IrInstructionOptionalWrap *instruction = ir_build_instruction( + &ira->new_irb, source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = result_ty; + instruction->operand = operand; + instruction->result_loc = result_loc; - ir_ref_instruction(value, irb->current_basic_block); + ir_ref_instruction(operand, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); return &instruction->base; } -static IrInstruction *ir_build_err_wrap_payload(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { - IrInstructionErrWrapPayload *instruction = ir_build_instruction(irb, scope, source_node); - instruction->value = value; +static IrInstruction *ir_build_err_wrap_payload(IrAnalyze *ira, IrInstruction *source_instruction, + ZigType *result_type, IrInstruction *operand, IrInstruction *result_loc) +{ + IrInstructionErrWrapPayload *instruction = ir_build_instruction( + &ira->new_irb, source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = result_type; + instruction->operand = operand; + instruction->result_loc = result_loc; - ir_ref_instruction(value, irb->current_basic_block); + ir_ref_instruction(operand, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); return &instruction->base; } -static IrInstruction *ir_build_err_wrap_code(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { - IrInstructionErrWrapCode *instruction = ir_build_instruction(irb, scope, source_node); - instruction->value = value; +static IrInstruction *ir_build_err_wrap_code(IrAnalyze *ira, IrInstruction *source_instruction, + ZigType *result_type, IrInstruction *operand, IrInstruction *result_loc) +{ + IrInstructionErrWrapCode *instruction = ir_build_instruction( + &ira->new_irb, source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = result_type; + instruction->operand = operand; + instruction->result_loc = result_loc; - ir_ref_instruction(value, irb->current_basic_block); + ir_ref_instruction(operand, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, 
ira->new_irb.current_basic_block); return &instruction->base; } @@ -1930,6 +2018,21 @@ static IrInstruction *ir_build_ref(IrBuilder *irb, Scope *scope, AstNode *source return &instruction->base; } +static IrInstruction *ir_build_ref_gen(IrAnalyze *ira, IrInstruction *source_instruction, ZigType *result_type, + IrInstruction *operand, IrInstruction *result_loc) +{ + IrInstructionRefGen *instruction = ir_build_instruction(&ira->new_irb, + source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = result_type; + instruction->operand = operand; + instruction->result_loc = result_loc; + + ir_ref_instruction(operand, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_compile_err(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *msg) { IrInstructionCompileErr *instruction = ir_build_instruction(irb, scope, source_node); instruction->msg = msg; @@ -2007,8 +2110,7 @@ static IrInstruction *ir_build_embed_file(IrBuilder *irb, Scope *scope, AstNode static IrInstruction *ir_build_cmpxchg_src(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type_value, IrInstruction *ptr, IrInstruction *cmp_value, IrInstruction *new_value, - IrInstruction *success_order_value, IrInstruction *failure_order_value, - bool is_weak) + IrInstruction *success_order_value, IrInstruction *failure_order_value, bool is_weak, ResultLoc *result_loc) { IrInstructionCmpxchgSrc *instruction = ir_build_instruction(irb, scope, source_node); instruction->type_value = type_value; @@ -2018,6 +2120,7 @@ static IrInstruction *ir_build_cmpxchg_src(IrBuilder *irb, Scope *scope, AstNode instruction->success_order_value = success_order_value; instruction->failure_order_value = failure_order_value; instruction->is_weak = is_weak; + instruction->result_loc = result_loc; ir_ref_instruction(type_value, irb->current_basic_block); ir_ref_instruction(ptr, irb->current_basic_block); @@ -2029,22 +2132,25 @@ static IrInstruction *ir_build_cmpxchg_src(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } -static IrInstruction *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInstruction *source_instruction, +static IrInstruction *ir_build_cmpxchg_gen(IrAnalyze *ira, IrInstruction *source_instruction, ZigType *result_type, IrInstruction *ptr, IrInstruction *cmp_value, IrInstruction *new_value, - AtomicOrder success_order, AtomicOrder failure_order, bool is_weak) + AtomicOrder success_order, AtomicOrder failure_order, bool is_weak, IrInstruction *result_loc) { IrInstructionCmpxchgGen *instruction = ir_build_instruction(&ira->new_irb, source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = result_type; instruction->ptr = ptr; instruction->cmp_value = cmp_value; instruction->new_value = new_value; instruction->success_order = success_order; instruction->failure_order = failure_order; instruction->is_weak = is_weak; + instruction->result_loc = result_loc; ir_ref_instruction(ptr, ira->new_irb.current_basic_block); ir_ref_instruction(cmp_value, ira->new_irb.current_basic_block); ir_ref_instruction(new_value, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); return &instruction->base; } @@ -2103,19 +2209,25 @@ static IrInstruction *ir_build_err_set_cast(IrBuilder *irb, Scope *scope, AstNod return &instruction->base; } -static 
IrInstruction *ir_build_to_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target) { +static IrInstruction *ir_build_to_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target, + ResultLoc *result_loc) +{ IrInstructionToBytes *instruction = ir_build_instruction(irb, scope, source_node); instruction->target = target; + instruction->result_loc = result_loc; ir_ref_instruction(target, irb->current_basic_block); return &instruction->base; } -static IrInstruction *ir_build_from_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *dest_child_type, IrInstruction *target) { +static IrInstruction *ir_build_from_bytes(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *dest_child_type, IrInstruction *target, ResultLoc *result_loc) +{ IrInstructionFromBytes *instruction = ir_build_instruction(irb, scope, source_node); instruction->dest_child_type = dest_child_type; instruction->target = target; + instruction->result_loc = result_loc; ir_ref_instruction(dest_child_type, irb->current_basic_block); ir_ref_instruction(target, irb->current_basic_block); @@ -2217,14 +2329,15 @@ static IrInstruction *ir_build_memcpy(IrBuilder *irb, Scope *scope, AstNode *sou return &instruction->base; } -static IrInstruction *ir_build_slice(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *ptr, IrInstruction *start, IrInstruction *end, bool safety_check_on) +static IrInstruction *ir_build_slice_src(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *ptr, IrInstruction *start, IrInstruction *end, bool safety_check_on, ResultLoc *result_loc) { - IrInstructionSlice *instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionSliceSrc *instruction = ir_build_instruction(irb, scope, source_node); instruction->ptr = ptr; instruction->start = start; instruction->end = end; instruction->safety_check_on = safety_check_on; + instruction->result_loc = result_loc; ir_ref_instruction(ptr, irb->current_basic_block); ir_ref_instruction(start, irb->current_basic_block); @@ -2233,6 +2346,26 @@ static IrInstruction *ir_build_slice(IrBuilder *irb, Scope *scope, AstNode *sour return &instruction->base; } +static IrInstruction *ir_build_slice_gen(IrAnalyze *ira, IrInstruction *source_instruction, ZigType *slice_type, + IrInstruction *ptr, IrInstruction *start, IrInstruction *end, bool safety_check_on, IrInstruction *result_loc) +{ + IrInstructionSliceGen *instruction = ir_build_instruction( + &ira->new_irb, source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = slice_type; + instruction->ptr = ptr; + instruction->start = start; + instruction->end = end; + instruction->safety_check_on = safety_check_on; + instruction->result_loc = result_loc; + + ir_ref_instruction(ptr, ira->new_irb.current_basic_block); + ir_ref_instruction(start, ira->new_irb.current_basic_block); + if (end) ir_ref_instruction(end, ira->new_irb.current_basic_block); + ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_member_count(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *container) { IrInstructionMemberCount *instruction = ir_build_instruction(irb, scope, source_node); instruction->container = container; @@ -2308,6 +2441,75 @@ static IrInstruction *ir_build_overflow_op(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } + +//TODO Powi, Pow, minnum, maxnum, maximum, minimum, 
copysign, +// lround, llround, lrint, llrint +// So far this is only non-complicated type functions. +const char *float_op_to_name(BuiltinFnId op, bool llvm_name) { + const bool b = llvm_name; + + switch (op) { + case BuiltinFnIdSqrt: + return "sqrt"; + case BuiltinFnIdSin: + return "sin"; + case BuiltinFnIdCos: + return "cos"; + case BuiltinFnIdExp: + return "exp"; + case BuiltinFnIdExp2: + return "exp2"; + case BuiltinFnIdLn: + return b ? "log" : "ln"; + case BuiltinFnIdLog10: + return "log10"; + case BuiltinFnIdLog2: + return "log2"; + case BuiltinFnIdFabs: + return "fabs"; + case BuiltinFnIdFloor: + return "floor"; + case BuiltinFnIdCeil: + return "ceil"; + case BuiltinFnIdTrunc: + return "trunc"; + case BuiltinFnIdNearbyInt: + return b ? "nearbyint" : "nearbyInt"; + case BuiltinFnIdRound: + return "round"; + default: + zig_unreachable(); + } +} + +static IrInstruction *ir_build_float_op(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type, IrInstruction *op1, BuiltinFnId op) { + IrInstructionFloatOp *instruction = ir_build_instruction(irb, scope, source_node); + instruction->type = type; + instruction->op1 = op1; + instruction->op = op; + + if (type != nullptr) ir_ref_instruction(type, irb->current_basic_block); + ir_ref_instruction(op1, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_mul_add(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *type_value, IrInstruction *op1, IrInstruction *op2, IrInstruction *op3) { + IrInstructionMulAdd *instruction = ir_build_instruction(irb, scope, source_node); + instruction->type_value = type_value; + instruction->op1 = op1; + instruction->op2 = op2; + instruction->op3 = op3; + + ir_ref_instruction(type_value, irb->current_basic_block); + ir_ref_instruction(op1, irb->current_basic_block); + ir_ref_instruction(op2, irb->current_basic_block); + ir_ref_instruction(op3, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_align_of(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type_value) { IrInstructionAlignOf *instruction = ir_build_instruction(irb, scope, source_node); instruction->type_value = type_value; @@ -2317,34 +2519,49 @@ static IrInstruction *ir_build_align_of(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } -static IrInstruction *ir_build_test_err(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *value) +static IrInstruction *ir_build_test_err_src(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *base_ptr, bool resolve_err_set) { - IrInstructionTestErr *instruction = ir_build_instruction(irb, scope, source_node); - instruction->value = value; + IrInstructionTestErrSrc *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base_ptr = base_ptr; + instruction->resolve_err_set = resolve_err_set; - ir_ref_instruction(value, irb->current_basic_block); + ir_ref_instruction(base_ptr, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_test_err_gen(IrAnalyze *ira, IrInstruction *source_instruction, + IrInstruction *err_union) +{ + IrInstructionTestErrGen *instruction = ir_build_instruction( + &ira->new_irb, source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = ira->codegen->builtin_types.entry_bool; + instruction->err_union = err_union; + + ir_ref_instruction(err_union, ira->new_irb.current_basic_block); return &instruction->base; } static 
IrInstruction *ir_build_unwrap_err_code(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *err_union) + IrInstruction *err_union_ptr) { IrInstructionUnwrapErrCode *instruction = ir_build_instruction(irb, scope, source_node); - instruction->err_union = err_union; + instruction->err_union_ptr = err_union_ptr; - ir_ref_instruction(err_union, irb->current_basic_block); + ir_ref_instruction(err_union_ptr, irb->current_basic_block); return &instruction->base; } static IrInstruction *ir_build_unwrap_err_payload(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *value, bool safety_check_on) + IrInstruction *value, bool safety_check_on, bool initializing) { IrInstructionUnwrapErrPayload *instruction = ir_build_instruction(irb, scope, source_node); instruction->value = value; instruction->safety_check_on = safety_check_on; + instruction->initializing = initializing; ir_ref_instruction(value, irb->current_basic_block); @@ -2414,28 +2631,28 @@ static IrInstruction *ir_build_ptr_cast_gen(IrAnalyze *ira, IrInstruction *sourc } static IrInstruction *ir_build_load_ptr_gen(IrAnalyze *ira, IrInstruction *source_instruction, - IrInstruction *ptr, ZigType *ty) + IrInstruction *ptr, ZigType *ty, IrInstruction *result_loc) { IrInstructionLoadPtrGen *instruction = ir_build_instruction( &ira->new_irb, source_instruction->scope, source_instruction->source_node); instruction->base.value.type = ty; instruction->ptr = ptr; + instruction->result_loc = result_loc; ir_ref_instruction(ptr, ira->new_irb.current_basic_block); + if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); return &instruction->base; } -static IrInstruction *ir_build_bit_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *dest_type, IrInstruction *value) +static IrInstruction *ir_build_bit_cast_src(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *operand, ResultLocBitCast *result_loc_bit_cast) { - IrInstructionBitCast *instruction = ir_build_instruction( - irb, scope, source_node); - instruction->dest_type = dest_type; - instruction->value = value; + IrInstructionBitCastSrc *instruction = ir_build_instruction(irb, scope, source_node); + instruction->operand = operand; + instruction->result_loc_bit_cast = result_loc_bit_cast; - ir_ref_instruction(dest_type, irb->current_basic_block); - ir_ref_instruction(value, irb->current_basic_block); + ir_ref_instruction(operand, irb->current_basic_block); return &instruction->base; } @@ -2587,11 +2804,8 @@ static IrInstruction *ir_build_type_name(IrBuilder *irb, Scope *scope, AstNode * return &instruction->base; } -static IrInstruction *ir_build_decl_ref(IrBuilder *irb, Scope *scope, AstNode *source_node, - Tld *tld, LVal lval) -{ - IrInstructionDeclRef *instruction = ir_build_instruction( - irb, scope, source_node); +static IrInstruction *ir_build_decl_ref(IrBuilder *irb, Scope *scope, AstNode *source_node, Tld *tld, LVal lval) { + IrInstructionDeclRef *instruction = ir_build_instruction(irb, scope, source_node); instruction->tld = tld; instruction->lval = lval; @@ -2719,6 +2933,53 @@ static IrInstruction *ir_build_align_cast(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_implicit_cast(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *dest_type, IrInstruction *target, ResultLoc *result_loc) +{ + IrInstructionImplicitCast *instruction = ir_build_instruction(irb, scope, source_node); + instruction->dest_type = dest_type; + 
instruction->target = target; + instruction->result_loc = result_loc; + + ir_ref_instruction(dest_type, irb->current_basic_block); + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_resolve_result(IrBuilder *irb, Scope *scope, AstNode *source_node, + ResultLoc *result_loc, IrInstruction *ty) +{ + IrInstructionResolveResult *instruction = ir_build_instruction(irb, scope, source_node); + instruction->result_loc = result_loc; + instruction->ty = ty; + + ir_ref_instruction(ty, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_reset_result(IrBuilder *irb, Scope *scope, AstNode *source_node, + ResultLoc *result_loc) +{ + IrInstructionResetResult *instruction = ir_build_instruction(irb, scope, source_node); + instruction->result_loc = result_loc; + + return &instruction->base; +} + +static IrInstruction *ir_build_result_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, + ResultLoc *result_loc, IrInstruction *result) +{ + IrInstructionResultPtr *instruction = ir_build_instruction(irb, scope, source_node); + instruction->result_loc = result_loc; + instruction->result = result; + + ir_ref_instruction(result, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_opaque_type(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionOpaqueType *instruction = ir_build_instruction(irb, scope, source_node); @@ -3013,17 +3274,6 @@ static IrInstruction *ir_build_mark_err_ret_trace_ptr(IrBuilder *irb, Scope *sco return &instruction->base; } -static IrInstruction *ir_build_sqrt(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *type, IrInstruction *op) { - IrInstructionSqrt *instruction = ir_build_instruction(irb, scope, source_node); - instruction->type = type; - instruction->op = op; - - if (type != nullptr) ir_ref_instruction(type, irb->current_basic_block); - ir_ref_instruction(op, irb->current_basic_block); - - return &instruction->base; -} - static IrInstruction *ir_build_has_decl(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *container, IrInstruction *name) { @@ -3058,16 +3308,31 @@ static IrInstruction *ir_build_check_runtime_scope(IrBuilder *irb, Scope *scope, } static IrInstruction *ir_build_vector_to_array(IrAnalyze *ira, IrInstruction *source_instruction, - IrInstruction *vector, ZigType *result_type) + ZigType *result_type, IrInstruction *vector, IrInstruction *result_loc) { IrInstructionVectorToArray *instruction = ir_build_instruction(&ira->new_irb, source_instruction->scope, source_instruction->source_node); instruction->base.value.type = result_type; instruction->vector = vector; + instruction->result_loc = result_loc; ir_ref_instruction(vector, ira->new_irb.current_basic_block); + ir_ref_instruction(result_loc, ira->new_irb.current_basic_block); - ir_add_alloca(ira, &instruction->base, result_type); + return &instruction->base; +} + +static IrInstruction *ir_build_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruction *source_instruction, + ZigType *result_type, IrInstruction *operand, IrInstruction *result_loc) +{ + IrInstructionPtrOfArrayToSlice *instruction = ir_build_instruction(&ira->new_irb, + source_instruction->scope, source_instruction->source_node); + instruction->base.value.type = result_type; + instruction->operand = operand; + instruction->result_loc = result_loc; + + ir_ref_instruction(operand, ira->new_irb.current_basic_block); + ir_ref_instruction(result_loc, 
ira->new_irb.current_basic_block); return &instruction->base; } @@ -3111,6 +3376,45 @@ static IrInstruction *ir_build_assert_non_null(IrAnalyze *ira, IrInstruction *so return &instruction->base; } +static IrInstruction *ir_build_alloca_src(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *align, const char *name_hint, IrInstruction *is_comptime) +{ + IrInstructionAllocaSrc *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.is_gen = true; + instruction->align = align; + instruction->name_hint = name_hint; + instruction->is_comptime = is_comptime; + + if (align != nullptr) ir_ref_instruction(align, irb->current_basic_block); + if (is_comptime != nullptr) ir_ref_instruction(is_comptime, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstructionAllocaGen *ir_create_alloca_gen(IrAnalyze *ira, IrInstruction *source_instruction, + uint32_t align, const char *name_hint) +{ + IrInstructionAllocaGen *instruction = ir_create_instruction(&ira->new_irb, + source_instruction->scope, source_instruction->source_node); + instruction->align = align; + instruction->name_hint = name_hint; + + return instruction; +} + +static IrInstruction *ir_build_end_expr(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *value, ResultLoc *result_loc) +{ + IrInstructionEndExpr *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.is_gen = true; + instruction->value = value; + instruction->result_loc = result_loc; + + ir_ref_instruction(value, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -3211,6 +3515,7 @@ static void ir_set_cursor_at_end(IrBuilder *irb, IrBasicBlock *basic_block) { } static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock *basic_block) { + basic_block->index = irb->exec->basic_block_list.length; irb->exec->basic_block_list.append(basic_block); ir_set_cursor_at_end(irb, basic_block); } @@ -3299,7 +3604,7 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode return ir_build_cond_br(irb, scope, node, is_canceled_bool, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, is_comptime); } -static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { +static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) { assert(node->type == NodeTypeReturnExpr); ZigFn *fn_entry = exec_fn_entry(irb->exec); @@ -3323,12 +3628,16 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, switch (node->data.return_expr.kind) { case ReturnKindUnconditional: { + ResultLocReturn *result_loc_ret = allocate(1); + result_loc_ret->base.id = ResultLocIdReturn; + ir_build_reset_result(irb, scope, node, &result_loc_ret->base); + IrInstruction *return_value; if (expr_node) { // Temporarily set this so that if we return a type it gets the name of the function ZigFn *prev_name_fn = irb->exec->name_fn; irb->exec->name_fn = exec_fn_entry(irb->exec); - return_value = ir_gen_node(irb, expr_node, scope); + return_value = ir_gen_node_extra(irb, expr_node, scope, LValNone, &result_loc_ret->base); irb->exec->name_fn = prev_name_fn; if (return_value == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -3346,7 +3655,9 @@ static 
IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ir_gen_defers_for_block(irb, scope, outer_scope, false); } - IrInstruction *is_err = ir_build_test_err(irb, scope, node, return_value); + IrInstruction *ret_ptr = ir_build_result_ptr(irb, scope, node, &result_loc_ret->base, + return_value); + IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, ret_ptr, false); bool should_inline = ir_should_inline(irb->exec, scope); IrInstruction *is_comptime; @@ -3375,21 +3686,24 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); - return ir_gen_async_return(irb, scope, node, return_value, false); + IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false); + result_loc_ret->base.source_instruction = result; + return result; } else { // generate unconditional defers ir_gen_defers_for_block(irb, scope, outer_scope, false); - return ir_gen_async_return(irb, scope, node, return_value, false); + IrInstruction *result = ir_gen_async_return(irb, scope, node, return_value, false); + result_loc_ret->base.source_instruction = result; + return result; } } case ReturnKindError: { assert(expr_node); - IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr); + IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr); if (err_union_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - IrInstruction *err_union_val = ir_build_load_ptr(irb, scope, node, err_union_ptr); - IrInstruction *is_err_val = ir_build_test_err(irb, scope, node, err_union_val); + IrInstruction *is_err_val = ir_build_test_err_src(irb, scope, node, err_union_ptr, true); IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "ErrRetReturn"); IrBasicBlock *continue_block = ir_create_basic_block(irb, scope, "ErrRetContinue"); @@ -3404,19 +3718,27 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ir_set_cursor_at_end_and_append_block(irb, return_block); if (!ir_gen_defers_for_block(irb, scope, outer_scope, true)) { - IrInstruction *err_val = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr); + IrInstruction *err_val_ptr = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr); + IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr); + + ResultLocReturn *result_loc_ret = allocate(1); + result_loc_ret->base.id = ResultLocIdReturn; + ir_build_reset_result(irb, scope, node, &result_loc_ret->base); + ir_build_end_expr(irb, scope, node, err_val, &result_loc_ret->base); + if (irb->codegen->have_err_ret_tracing && !should_inline) { ir_build_save_err_ret_addr(irb, scope, node); } - ir_gen_async_return(irb, scope, node, err_val, false); + IrInstruction *ret_inst = ir_gen_async_return(irb, scope, node, err_val, false); + result_loc_ret->base.source_instruction = ret_inst; } ir_set_cursor_at_end_and_append_block(irb, continue_block); - IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, scope, node, err_union_ptr, false); + IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, scope, node, err_union_ptr, false, false); if (lval == LValPtr) return unwrapped_ptr; else - return ir_build_load_ptr(irb, scope, node, unwrapped_ptr); + return ir_expr_wrap(irb, scope, ir_build_load_ptr(irb, scope, node, unwrapped_ptr), result_loc); } } zig_unreachable(); @@ -3500,7 +3822,17 @@ static 
ZigVar *ir_create_var(IrBuilder *irb, AstNode *node, Scope *scope, Buf *n return var; } -static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode *block_node) { +static ResultLocPeer *create_peer_result(ResultLocPeerParent *peer_parent) { + ResultLocPeer *result = allocate(1); + result->base.id = ResultLocIdPeer; + result->base.source_instruction = peer_parent->base.source_instruction; + result->parent = peer_parent; + return result; +} + +static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode *block_node, LVal lval, + ResultLoc *result_loc) +{ assert(block_node->type == NodeTypeBlock); ZigList incoming_values = {0}; @@ -3518,15 +3850,24 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode if (block_node->data.block.statements.length == 0) { // {} - return ir_build_const_void(irb, child_scope, block_node); + return ir_lval_wrap(irb, parent_scope, ir_build_const_void(irb, child_scope, block_node), lval, result_loc); } if (block_node->data.block.name != nullptr) { + scope_block->lval = lval; scope_block->incoming_blocks = &incoming_blocks; scope_block->incoming_values = &incoming_values; scope_block->end_block = ir_create_basic_block(irb, parent_scope, "BlockEnd"); scope_block->is_comptime = ir_build_const_bool(irb, parent_scope, block_node, ir_should_inline(irb->exec, parent_scope)); + + scope_block->peer_parent = allocate(1); + scope_block->peer_parent->base.id = ResultLocIdPeerParent; + scope_block->peer_parent->base.source_instruction = scope_block->is_comptime; + scope_block->peer_parent->end_bb = scope_block->end_block; + scope_block->peer_parent->is_comptime = scope_block->is_comptime; + scope_block->peer_parent->parent = result_loc; + ir_build_reset_result(irb, parent_scope, block_node, &scope_block->peer_parent->base); } bool is_continuation_unreachable = false; @@ -3540,6 +3881,8 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode // keep the last noreturn statement value around in case we need to return it noreturn_return_value = statement_value; } + // This logic must be kept in sync with + // [STMT_EXPR_TEST_THING] <--- (search this token) if (statement_node->type == NodeTypeDefer && statement_value != irb->codegen->invalid_instruction) { // defer starts a new scope child_scope = statement_node->data.defer.child_scope; @@ -3560,21 +3903,41 @@ static IrInstruction *ir_gen_block(IrBuilder *irb, Scope *parent_scope, AstNode return noreturn_return_value; } + if (scope_block->peer_parent != nullptr && scope_block->peer_parent->peers.length != 0) { + scope_block->peer_parent->peers.last()->next_bb = scope_block->end_block; + } ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block); - return ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); + IrInstruction *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length, + incoming_blocks.items, incoming_values.items, scope_block->peer_parent); + return ir_expr_wrap(irb, parent_scope, phi, result_loc); } else { incoming_blocks.append(irb->current_basic_block); - incoming_values.append(ir_mark_gen(ir_build_const_void(irb, parent_scope, block_node))); + IrInstruction *else_expr_result = ir_mark_gen(ir_build_const_void(irb, parent_scope, block_node)); + + if (scope_block->peer_parent != nullptr) { + ResultLocPeer *peer_result = create_peer_result(scope_block->peer_parent); + scope_block->peer_parent->peers.append(peer_result); + 
ir_build_end_expr(irb, parent_scope, block_node, else_expr_result, &peer_result->base); + + if (scope_block->peer_parent->peers.length != 0) { + scope_block->peer_parent->peers.last()->next_bb = scope_block->end_block; + } + } + + incoming_values.append(else_expr_result); } if (block_node->data.block.name != nullptr) { ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); ir_mark_gen(ir_build_br(irb, parent_scope, block_node, scope_block->end_block, scope_block->is_comptime)); ir_set_cursor_at_end_and_append_block(irb, scope_block->end_block); - return ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); + IrInstruction *phi = ir_build_phi(irb, parent_scope, block_node, incoming_blocks.length, + incoming_blocks.items, incoming_values.items, scope_block->peer_parent); + return ir_expr_wrap(irb, parent_scope, phi, result_loc); } else { ir_gen_defers_for_block(irb, child_scope, outer_block_scope, false); - return ir_mark_gen(ir_mark_gen(ir_build_const_void(irb, child_scope, block_node))); + IrInstruction *void_inst = ir_mark_gen(ir_build_const_void(irb, child_scope, block_node)); + return ir_lval_wrap(irb, parent_scope, void_inst, lval, result_loc); } } @@ -3594,7 +3957,7 @@ static IrInstruction *ir_gen_bin_op_id(IrBuilder *irb, Scope *scope, AstNode *no } static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node) { - IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr); + IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr, nullptr); IrInstruction *rvalue = ir_gen_node(irb, node->data.bin_op_expr.op2, scope); if (lvalue == irb->codegen->invalid_instruction || rvalue == irb->codegen->invalid_instruction) @@ -3605,7 +3968,7 @@ static IrInstruction *ir_gen_assign(IrBuilder *irb, Scope *scope, AstNode *node) } static IrInstruction *ir_gen_assign_op(IrBuilder *irb, Scope *scope, AstNode *node, IrBinOp op_id) { - IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr); + IrInstruction *lvalue = ir_gen_node_extra(irb, node->data.bin_op_expr.op1, scope, LValPtr, nullptr); if (lvalue == irb->codegen->invalid_instruction) return lvalue; IrInstruction *op1 = ir_build_load_ptr(irb, scope, node->data.bin_op_expr.op1, lvalue); @@ -3656,7 +4019,7 @@ static IrInstruction *ir_gen_bool_or(IrBuilder *irb, Scope *scope, AstNode *node incoming_blocks[0] = post_val1_block; incoming_blocks[1] = post_val2_block; - return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr); } static IrInstruction *ir_gen_bool_and(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -3698,16 +4061,51 @@ static IrInstruction *ir_gen_bool_and(IrBuilder *irb, Scope *scope, AstNode *nod incoming_blocks[0] = post_val1_block; incoming_blocks[1] = post_val2_block; - return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr); } -static IrInstruction *ir_gen_orelse(IrBuilder *irb, Scope *parent_scope, AstNode *node) { +static ResultLocPeerParent *ir_build_result_peers(IrBuilder *irb, IrInstruction *cond_br_inst, + IrBasicBlock *end_block, ResultLoc *parent, IrInstruction *is_comptime) +{ + ResultLocPeerParent *peer_parent = allocate(1); + peer_parent->base.id = ResultLocIdPeerParent; + peer_parent->base.source_instruction = cond_br_inst; 
+ peer_parent->end_bb = end_block; + peer_parent->is_comptime = is_comptime; + peer_parent->parent = parent; + + IrInstruction *popped_inst = irb->current_basic_block->instruction_list.pop(); + ir_assert(popped_inst == cond_br_inst, cond_br_inst); + + ir_build_reset_result(irb, cond_br_inst->scope, cond_br_inst->source_node, &peer_parent->base); + irb->current_basic_block->instruction_list.append(popped_inst); + + return peer_parent; +} + +static ResultLocPeerParent *ir_build_binary_result_peers(IrBuilder *irb, IrInstruction *cond_br_inst, + IrBasicBlock *else_block, IrBasicBlock *end_block, ResultLoc *parent, IrInstruction *is_comptime) +{ + ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, parent, is_comptime); + + peer_parent->peers.append(create_peer_result(peer_parent)); + peer_parent->peers.last()->next_bb = else_block; + + peer_parent->peers.append(create_peer_result(peer_parent)); + peer_parent->peers.last()->next_bb = end_block; + + return peer_parent; +} + +static IrInstruction *ir_gen_orelse(IrBuilder *irb, Scope *parent_scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeBinOpExpr); AstNode *op1_node = node->data.bin_op_expr.op1; AstNode *op2_node = node->data.bin_op_expr.op2; - IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr); + IrInstruction *maybe_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr, nullptr); if (maybe_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -3724,10 +4122,14 @@ static IrInstruction *ir_gen_orelse(IrBuilder *irb, Scope *parent_scope, AstNode IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull"); IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull"); IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd"); - ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime); + IrInstruction *cond_br_inst = ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime); + + ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, ok_block, end_block, + result_loc, is_comptime); ir_set_cursor_at_end_and_append_block(irb, null_block); - IrInstruction *null_result = ir_gen_node(irb, op2_node, parent_scope); + IrInstruction *null_result = ir_gen_node_extra(irb, op2_node, parent_scope, LValNone, + &peer_parent->peers.at(0)->base); if (null_result == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; IrBasicBlock *after_null_block = irb->current_basic_block; @@ -3735,8 +4137,9 @@ static IrInstruction *ir_gen_orelse(IrBuilder *irb, Scope *parent_scope, AstNode ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime)); ir_set_cursor_at_end_and_append_block(irb, ok_block); - IrInstruction *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, parent_scope, node, maybe_ptr, false); + IrInstruction *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, parent_scope, node, maybe_ptr, false, false); IrInstruction *unwrapped_payload = ir_build_load_ptr(irb, parent_scope, node, unwrapped_ptr); + ir_build_end_expr(irb, parent_scope, node, unwrapped_payload, &peer_parent->peers.at(1)->base); IrBasicBlock *after_ok_block = irb->current_basic_block; ir_build_br(irb, parent_scope, node, end_block, is_comptime); @@ -3747,7 +4150,8 @@ static IrInstruction *ir_gen_orelse(IrBuilder *irb, Scope *parent_scope, AstNode IrBasicBlock 
**incoming_blocks = allocate(2); incoming_blocks[0] = after_null_block; incoming_blocks[1] = after_ok_block; - return ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values); + IrInstruction *phi = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, peer_parent); + return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc); } static IrInstruction *ir_gen_error_union(IrBuilder *irb, Scope *parent_scope, AstNode *node) { @@ -3767,7 +4171,7 @@ static IrInstruction *ir_gen_error_union(IrBuilder *irb, Scope *parent_scope, As return ir_build_error_union(irb, parent_scope, node, err_set, payload); } -static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) { assert(node->type == NodeTypeBinOpExpr); BinOpType bin_op_type = node->data.bin_op_expr.bin_op; @@ -3775,87 +4179,87 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node) case BinOpTypeInvalid: zig_unreachable(); case BinOpTypeAssign: - return ir_gen_assign(irb, scope, node); + return ir_lval_wrap(irb, scope, ir_gen_assign(irb, scope, node), lval, result_loc); case BinOpTypeAssignTimes: - return ir_gen_assign_op(irb, scope, node, IrBinOpMult); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpMult), lval, result_loc); case BinOpTypeAssignTimesWrap: - return ir_gen_assign_op(irb, scope, node, IrBinOpMultWrap); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpMultWrap), lval, result_loc); case BinOpTypeAssignDiv: - return ir_gen_assign_op(irb, scope, node, IrBinOpDivUnspecified); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpDivUnspecified), lval, result_loc); case BinOpTypeAssignMod: - return ir_gen_assign_op(irb, scope, node, IrBinOpRemUnspecified); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpRemUnspecified), lval, result_loc); case BinOpTypeAssignPlus: - return ir_gen_assign_op(irb, scope, node, IrBinOpAdd); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpAdd), lval, result_loc); case BinOpTypeAssignPlusWrap: - return ir_gen_assign_op(irb, scope, node, IrBinOpAddWrap); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpAddWrap), lval, result_loc); case BinOpTypeAssignMinus: - return ir_gen_assign_op(irb, scope, node, IrBinOpSub); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpSub), lval, result_loc); case BinOpTypeAssignMinusWrap: - return ir_gen_assign_op(irb, scope, node, IrBinOpSubWrap); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpSubWrap), lval, result_loc); case BinOpTypeAssignBitShiftLeft: - return ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftLeftLossy); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc); case BinOpTypeAssignBitShiftRight: - return ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftRightLossy); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc); case BinOpTypeAssignBitAnd: - return ir_gen_assign_op(irb, scope, node, IrBinOpBinAnd); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinAnd), lval, result_loc); case BinOpTypeAssignBitXor: - return ir_gen_assign_op(irb, scope, node, IrBinOpBinXor); + return ir_lval_wrap(irb, scope, 
ir_gen_assign_op(irb, scope, node, IrBinOpBinXor), lval, result_loc); case BinOpTypeAssignBitOr: - return ir_gen_assign_op(irb, scope, node, IrBinOpBinOr); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpBinOr), lval, result_loc); case BinOpTypeAssignMergeErrorSets: - return ir_gen_assign_op(irb, scope, node, IrBinOpMergeErrorSets); + return ir_lval_wrap(irb, scope, ir_gen_assign_op(irb, scope, node, IrBinOpMergeErrorSets), lval, result_loc); case BinOpTypeBoolOr: - return ir_gen_bool_or(irb, scope, node); + return ir_lval_wrap(irb, scope, ir_gen_bool_or(irb, scope, node), lval, result_loc); case BinOpTypeBoolAnd: - return ir_gen_bool_and(irb, scope, node); + return ir_lval_wrap(irb, scope, ir_gen_bool_and(irb, scope, node), lval, result_loc); case BinOpTypeCmpEq: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpEq); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpEq), lval, result_loc); case BinOpTypeCmpNotEq: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpNotEq); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpNotEq), lval, result_loc); case BinOpTypeCmpLessThan: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessThan); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessThan), lval, result_loc); case BinOpTypeCmpGreaterThan: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterThan); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterThan), lval, result_loc); case BinOpTypeCmpLessOrEq: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessOrEq); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpLessOrEq), lval, result_loc); case BinOpTypeCmpGreaterOrEq: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterOrEq); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpCmpGreaterOrEq), lval, result_loc); case BinOpTypeBinOr: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpBinOr); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinOr), lval, result_loc); case BinOpTypeBinXor: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpBinXor); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinXor), lval, result_loc); case BinOpTypeBinAnd: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpBinAnd); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBinAnd), lval, result_loc); case BinOpTypeBitShiftLeft: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftLeftLossy); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc); case BinOpTypeBitShiftRight: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftRightLossy); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc); case BinOpTypeAdd: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpAdd); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpAdd), lval, result_loc); case BinOpTypeAddWrap: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpAddWrap); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpAddWrap), lval, result_loc); case BinOpTypeSub: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpSub); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpSub), lval, result_loc); case BinOpTypeSubWrap: - return 
ir_gen_bin_op_id(irb, scope, node, IrBinOpSubWrap); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpSubWrap), lval, result_loc); case BinOpTypeMult: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpMult); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpMult), lval, result_loc); case BinOpTypeMultWrap: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpMultWrap); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpMultWrap), lval, result_loc); case BinOpTypeDiv: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpDivUnspecified); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpDivUnspecified), lval, result_loc); case BinOpTypeMod: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpRemUnspecified); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpRemUnspecified), lval, result_loc); case BinOpTypeArrayCat: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayCat); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayCat), lval, result_loc); case BinOpTypeArrayMult: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult), lval, result_loc); case BinOpTypeMergeErrorSets: - return ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets); + return ir_lval_wrap(irb, scope, ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets), lval, result_loc); case BinOpTypeUnwrapOptional: - return ir_gen_orelse(irb, scope, node); + return ir_gen_orelse(irb, scope, node, lval, result_loc); case BinOpTypeErrorUnion: - return ir_gen_error_union(irb, scope, node); + return ir_lval_wrap(irb, scope, ir_gen_error_union(irb, scope, node), lval, result_loc); } zig_unreachable(); } @@ -3900,12 +4304,12 @@ static void populate_invalid_variable_in_scope(CodeGen *g, Scope *scope, AstNode TldVar *tld_var = allocate(1); init_tld(&tld_var->base, TldIdVar, var_name, VisibModPub, node, &scope_decls->base); tld_var->base.resolution = TldResolutionInvalid; - tld_var->var = add_variable(g, node, &scope_decls->base, var_name, false, + tld_var->var = add_variable(g, node, &scope_decls->base, var_name, false, &g->invalid_instruction->value, &tld_var->base, g->builtin_types.entry_invalid); scope_decls->decl_table.put(var_name, &tld_var->base); } -static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { +static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) { Error err; assert(node->type == NodeTypeSymbol); @@ -3939,7 +4343,7 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, if (lval == LValPtr) { return ir_build_ref(irb, scope, node, value, false, false); } else { - return value; + return ir_expr_wrap(irb, scope, value, result_loc); } } @@ -3947,15 +4351,22 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, ZigVar *var = find_variable(irb->codegen, scope, variable_name, &crossed_fndef_scope); if (var) { IrInstruction *var_ptr = ir_build_var_ptr_x(irb, scope, node, var, crossed_fndef_scope); - if (lval == LValPtr) + if (lval == LValPtr) { return var_ptr; - else - return ir_build_load_ptr(irb, scope, node, var_ptr); + } else { + return ir_expr_wrap(irb, scope, ir_build_load_ptr(irb, scope, node, var_ptr), result_loc); + } } Tld *tld = find_decl(irb->codegen, scope, variable_name); - if (tld) - return 
ir_build_decl_ref(irb, scope, node, tld, lval); + if (tld) { + IrInstruction *decl_ref = ir_build_decl_ref(irb, scope, node, tld, lval); + if (lval == LValPtr) { + return decl_ref; + } else { + return ir_expr_wrap(irb, scope, decl_ref, result_loc); + } + } if (get_container_scope(node->owner)->any_imports_failed) { // skip the error message since we had a failing import in this file @@ -3966,11 +4377,13 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, return ir_build_undeclared_identifier(irb, scope, node, variable_name); } -static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { +static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeArrayAccessExpr); AstNode *array_ref_node = node->data.array_access_expr.array_ref_expr; - IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LValPtr); + IrInstruction *array_ref_instruction = ir_gen_node_extra(irb, array_ref_node, scope, LValPtr, nullptr); if (array_ref_instruction == irb->codegen->invalid_instruction) return array_ref_instruction; @@ -3980,11 +4393,12 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode return subscript_instruction; IrInstruction *ptr_instruction = ir_build_elem_ptr(irb, scope, node, array_ref_instruction, - subscript_instruction, true, PtrLenSingle); + subscript_instruction, true, PtrLenSingle, nullptr); if (lval == LValPtr) return ptr_instruction; - return ir_build_load_ptr(irb, scope, node, ptr_instruction); + IrInstruction *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction); + return ir_expr_wrap(irb, scope, load_ptr, result_loc); } static IrInstruction *ir_gen_field_access(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -3993,11 +4407,11 @@ static IrInstruction *ir_gen_field_access(IrBuilder *irb, Scope *scope, AstNode AstNode *container_ref_node = node->data.field_access_expr.struct_expr; Buf *field_name = node->data.field_access_expr.field_name; - IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LValPtr); + IrInstruction *container_ref_instruction = ir_gen_node_extra(irb, container_ref_node, scope, LValPtr, nullptr); if (container_ref_instruction == irb->codegen->invalid_instruction) return container_ref_instruction; - return ir_build_field_ptr(irb, scope, node, container_ref_instruction, field_name); + return ir_build_field_ptr(irb, scope, node, container_ref_instruction, field_name, false); } static IrInstruction *ir_gen_overflow_op(IrBuilder *irb, Scope *scope, AstNode *node, IrOverflowOp op) { @@ -4028,6 +4442,33 @@ static IrInstruction *ir_gen_overflow_op(IrBuilder *irb, Scope *scope, AstNode * return ir_build_overflow_op(irb, scope, node, op, type_value, op1, op2, result_ptr, nullptr); } +static IrInstruction *ir_gen_mul_add(IrBuilder *irb, Scope *scope, AstNode *node) { + assert(node->type == NodeTypeFnCallExpr); + + AstNode *type_node = node->data.fn_call_expr.params.at(0); + AstNode *op1_node = node->data.fn_call_expr.params.at(1); + AstNode *op2_node = node->data.fn_call_expr.params.at(2); + AstNode *op3_node = node->data.fn_call_expr.params.at(3); + + IrInstruction *type_value = ir_gen_node(irb, type_node, scope); + if (type_value == irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + IrInstruction *op1 = ir_gen_node(irb, op1_node, scope); + if (op1 == 
irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + IrInstruction *op2 = ir_gen_node(irb, op2_node, scope); + if (op2 == irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + IrInstruction *op3 = ir_gen_node(irb, op3_node, scope); + if (op3 == irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + return ir_build_mul_add(irb, scope, node, type_value, op1, op2, op3); +} + static IrInstruction *ir_gen_this(IrBuilder *irb, Scope *orig_scope, AstNode *node) { for (Scope *it_scope = orig_scope; it_scope != nullptr; it_scope = it_scope->parent) { if (it_scope->id == ScopeIdDecls) { @@ -4043,7 +4484,9 @@ static IrInstruction *ir_gen_this(IrBuilder *irb, Scope *orig_scope, AstNode *no zig_unreachable(); } -static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { +static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeFnCallExpr); AstNode *fn_ref_expr = node->data.fn_call_expr.fn_ref_expr; @@ -4079,7 +4522,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg; IrInstruction *type_of = ir_build_typeof(irb, scope, node, arg); - return ir_lval_wrap(irb, scope, type_of, lval); + return ir_lval_wrap(irb, scope, type_of, lval, result_loc); } case BuiltinFnIdSetCold: { @@ -4089,7 +4532,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *set_cold = ir_build_set_cold(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, set_cold, lval); + return ir_lval_wrap(irb, scope, set_cold, lval, result_loc); } case BuiltinFnIdSetRuntimeSafety: { @@ -4099,7 +4542,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *set_safety = ir_build_set_runtime_safety(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, set_safety, lval); + return ir_lval_wrap(irb, scope, set_safety, lval, result_loc); } case BuiltinFnIdSetFloatMode: { @@ -4109,7 +4552,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *set_float_mode = ir_build_set_float_mode(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, set_float_mode, lval); + return ir_lval_wrap(irb, scope, set_float_mode, lval, result_loc); } case BuiltinFnIdSizeof: { @@ -4119,7 +4562,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *size_of = ir_build_size_of(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, size_of, lval); + return ir_lval_wrap(irb, scope, size_of, lval, result_loc); } case BuiltinFnIdImport: { @@ -4129,12 +4572,12 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *import = ir_build_import(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, import, lval); + return ir_lval_wrap(irb, scope, import, lval, result_loc); } case BuiltinFnIdCImport: { IrInstruction *c_import = ir_build_c_import(irb, scope, node); - return ir_lval_wrap(irb, scope, c_import, lval); + return ir_lval_wrap(irb, scope, c_import, lval, result_loc); } case BuiltinFnIdCInclude: { @@ -4149,7 +4592,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } IrInstruction *c_include = 
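
The new ir_gen_mul_add helper above reads a type argument plus three operands (params.at(0) through params.at(3)) and lowers them to a single mul_add IR instruction. A minimal, hedged usage sketch of the builtin it wires up, assuming the same (T, a, b, c) operand order that the helper reads:

    // Sketch only: operand order mirrors params.at(0..3) read by ir_gen_mul_add.
    const std = @import("std");
    const assert = std.debug.assert;

    test "fused multiply-add sketch" {
        const a: f32 = 5.5;
        const b: f32 = 2.5;
        const c: f32 = 6.25;
        // (5.5 * 2.5) + 6.25 == 20.0; the fused form rounds only once.
        assert(@mulAdd(f32, a, b, c) == 20.0);
    }
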
ir_build_c_include(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, c_include, lval); + return ir_lval_wrap(irb, scope, c_include, lval, result_loc); } case BuiltinFnIdCDefine: { @@ -4169,7 +4612,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } IrInstruction *c_define = ir_build_c_define(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, c_define, lval); + return ir_lval_wrap(irb, scope, c_define, lval, result_loc); } case BuiltinFnIdCUndef: { @@ -4184,7 +4627,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } IrInstruction *c_undef = ir_build_c_undef(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, c_undef, lval); + return ir_lval_wrap(irb, scope, c_undef, lval, result_loc); } case BuiltinFnIdCompileErr: { @@ -4194,7 +4637,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *compile_err = ir_build_compile_err(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, compile_err, lval); + return ir_lval_wrap(irb, scope, compile_err, lval, result_loc); } case BuiltinFnIdCompileLog: { @@ -4208,7 +4651,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } IrInstruction *compile_log = ir_build_compile_log(irb, scope, node, actual_param_count, args); - return ir_lval_wrap(irb, scope, compile_log, lval); + return ir_lval_wrap(irb, scope, compile_log, lval, result_loc); } case BuiltinFnIdErrName: { @@ -4218,7 +4661,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *err_name = ir_build_err_name(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, err_name, lval); + return ir_lval_wrap(irb, scope, err_name, lval, result_loc); } case BuiltinFnIdEmbedFile: { @@ -4228,7 +4671,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *embed_file = ir_build_embed_file(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, embed_file, lval); + return ir_lval_wrap(irb, scope, embed_file, lval, result_loc); } case BuiltinFnIdCmpxchgWeak: case BuiltinFnIdCmpxchgStrong: @@ -4264,8 +4707,9 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg5_value; IrInstruction *cmpxchg = ir_build_cmpxchg_src(irb, scope, node, arg0_value, arg1_value, - arg2_value, arg3_value, arg4_value, arg5_value, (builtin_fn->id == BuiltinFnIdCmpxchgWeak)); - return ir_lval_wrap(irb, scope, cmpxchg, lval); + arg2_value, arg3_value, arg4_value, arg5_value, (builtin_fn->id == BuiltinFnIdCmpxchgWeak), + result_loc); + return ir_lval_wrap(irb, scope, cmpxchg, lval, result_loc); } case BuiltinFnIdFence: { @@ -4275,7 +4719,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *fence = ir_build_fence(irb, scope, node, arg0_value, AtomicOrderUnordered); - return ir_lval_wrap(irb, scope, fence, lval); + return ir_lval_wrap(irb, scope, fence, lval, result_loc); } case BuiltinFnIdDivExact: { @@ -4290,7 +4734,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivExact, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, bin_op, lval); + return ir_lval_wrap(irb, scope, bin_op, lval, result_loc); } case BuiltinFnIdDivTrunc: { @@ 
-4305,7 +4749,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivTrunc, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, bin_op, lval); + return ir_lval_wrap(irb, scope, bin_op, lval, result_loc); } case BuiltinFnIdDivFloor: { @@ -4320,7 +4764,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpDivFloor, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, bin_op, lval); + return ir_lval_wrap(irb, scope, bin_op, lval, result_loc); } case BuiltinFnIdRem: { @@ -4335,7 +4779,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemRem, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, bin_op, lval); + return ir_lval_wrap(irb, scope, bin_op, lval, result_loc); } case BuiltinFnIdMod: { @@ -4350,9 +4794,22 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpRemMod, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, bin_op, lval); + return ir_lval_wrap(irb, scope, bin_op, lval, result_loc); } case BuiltinFnIdSqrt: + case BuiltinFnIdSin: + case BuiltinFnIdCos: + case BuiltinFnIdExp: + case BuiltinFnIdExp2: + case BuiltinFnIdLn: + case BuiltinFnIdLog2: + case BuiltinFnIdLog10: + case BuiltinFnIdFabs: + case BuiltinFnIdFloor: + case BuiltinFnIdCeil: + case BuiltinFnIdTrunc: + case BuiltinFnIdNearbyInt: + case BuiltinFnIdRound: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); @@ -4364,8 +4821,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - IrInstruction *ir_sqrt = ir_build_sqrt(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, ir_sqrt, lval); + IrInstruction *ir_sqrt = ir_build_float_op(irb, scope, node, arg0_value, arg1_value, builtin_fn->id); + return ir_lval_wrap(irb, scope, ir_sqrt, lval, result_loc); } case BuiltinFnIdTruncate: { @@ -4380,7 +4837,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *truncate = ir_build_truncate(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, truncate, lval); + return ir_lval_wrap(irb, scope, truncate, lval, result_loc); } case BuiltinFnIdIntCast: { @@ -4395,7 +4852,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *result = ir_build_int_cast(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdFloatCast: { @@ -4410,7 +4867,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *result = ir_build_float_cast(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdErrSetCast: { @@ -4425,7 +4882,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo 
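
With BuiltinFnIdSqrt now grouped together with Sin, Cos, Exp, Exp2, Ln, Log2, Log10, Fabs, Floor, Ceil, Trunc, NearbyInt, and Round, all of these unary float builtins flow through a single ir_build_float_op call that records which builtin was used. A hedged sketch of the calling shape that case assumes (a type argument followed by the value, matching arg0_value/arg1_value); the @floor and @fabs spellings are inferred from the BuiltinFnId names:

    // Sketch: each unary float builtin takes (comptime T, value), as read above.
    const std = @import("std");
    const assert = std.debug.assert;

    test "unified float op builtins sketch" {
        const x: f32 = 4.0;
        assert(@sqrt(f32, x) == 2.0);
        assert(@floor(f32, 2.75) == 2.0);
        assert(@fabs(f32, -3.0) == 3.0);
    }
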
return arg1_value; IrInstruction *result = ir_build_err_set_cast(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdFromBytes: { @@ -4439,8 +4896,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - IrInstruction *result = ir_build_from_bytes(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, result, lval); + IrInstruction *result = ir_build_from_bytes(irb, scope, node, arg0_value, arg1_value, result_loc); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdToBytes: { @@ -4449,8 +4906,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; - IrInstruction *result = ir_build_to_bytes(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, result, lval); + IrInstruction *result = ir_build_to_bytes(irb, scope, node, arg0_value, result_loc); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdIntToFloat: { @@ -4465,7 +4922,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *result = ir_build_int_to_float(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdFloatToInt: { @@ -4480,7 +4937,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *result = ir_build_float_to_int(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdErrToInt: { @@ -4490,7 +4947,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *result = ir_build_err_to_int(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdIntToErr: { @@ -4500,7 +4957,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *result = ir_build_int_to_err(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdBoolToInt: { @@ -4510,7 +4967,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *result = ir_build_bool_to_int(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdIntType: { @@ -4525,7 +4982,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *int_type = ir_build_int_type(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, int_type, lval); + return ir_lval_wrap(irb, scope, int_type, lval, result_loc); } case BuiltinFnIdVectorType: { @@ -4540,7 +4997,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *vector_type = ir_build_vector_type(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, 
vector_type, lval); + return ir_lval_wrap(irb, scope, vector_type, lval, result_loc); } case BuiltinFnIdMemcpy: { @@ -4560,7 +5017,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg2_value; IrInstruction *ir_memcpy = ir_build_memcpy(irb, scope, node, arg0_value, arg1_value, arg2_value); - return ir_lval_wrap(irb, scope, ir_memcpy, lval); + return ir_lval_wrap(irb, scope, ir_memcpy, lval, result_loc); } case BuiltinFnIdMemset: { @@ -4580,7 +5037,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg2_value; IrInstruction *ir_memset = ir_build_memset(irb, scope, node, arg0_value, arg1_value, arg2_value); - return ir_lval_wrap(irb, scope, ir_memset, lval); + return ir_lval_wrap(irb, scope, ir_memset, lval, result_loc); } case BuiltinFnIdMemberCount: { @@ -4590,7 +5047,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *member_count = ir_build_member_count(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, member_count, lval); + return ir_lval_wrap(irb, scope, member_count, lval, result_loc); } case BuiltinFnIdMemberType: { @@ -4606,7 +5063,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo IrInstruction *member_type = ir_build_member_type(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, member_type, lval); + return ir_lval_wrap(irb, scope, member_type, lval, result_loc); } case BuiltinFnIdMemberName: { @@ -4622,12 +5079,12 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo IrInstruction *member_name = ir_build_member_name(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, member_name, lval); + return ir_lval_wrap(irb, scope, member_name, lval, result_loc); } case BuiltinFnIdField: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); - IrInstruction *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LValPtr); + IrInstruction *arg0_value = ir_gen_node_extra(irb, arg0_node, scope, LValPtr, nullptr); if (arg0_value == irb->codegen->invalid_instruction) return arg0_value; @@ -4641,7 +5098,8 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (lval == LValPtr) return ptr_instruction; - return ir_build_load_ptr(irb, scope, node, ptr_instruction); + IrInstruction *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction); + return ir_expr_wrap(irb, scope, load_ptr, result_loc); } case BuiltinFnIdTypeInfo: { @@ -4651,14 +5109,14 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *type_info = ir_build_type_info(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, type_info, lval); + return ir_lval_wrap(irb, scope, type_info, lval, result_loc); } case BuiltinFnIdBreakpoint: - return ir_lval_wrap(irb, scope, ir_build_breakpoint(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_build_breakpoint(irb, scope, node), lval, result_loc); case BuiltinFnIdReturnAddress: - return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_build_return_address(irb, scope, node), lval, result_loc); case BuiltinFnIdFrameAddress: - return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_build_frame_address(irb, scope, node), lval, result_loc); case BuiltinFnIdHandle: if 
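
In the BuiltinFnIdField case above, the operand is generated with LValPtr and the resulting field pointer is either returned directly (when the caller also wants a pointer) or loaded and wrapped into the surrounding result location. A hedged sketch of the two uses this distinguishes; the LValPtr path is what allows @field to appear on the left of an assignment:

    // Sketch: @field as an lvalue (pointer path) and as a value (load + wrap).
    const std = @import("std");
    const assert = std.debug.assert;

    const Point = struct {
        x: i32,
        y: i32,
    };

    test "field builtin lvalue sketch" {
        var p = Point{ .x = 1, .y = 2 };
        @field(p, "x") = 5;          // lvalue use: field pointer is returned
        assert(@field(p, "y") == 2); // value use: load_ptr wrapped in result_loc
    }
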
(!irb->exec->fn_entry) { add_node_error(irb->codegen, node, buf_sprintf("@handle() called outside of function definition")); @@ -4668,7 +5126,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo add_node_error(irb->codegen, node, buf_sprintf("@handle() in non-async function")); return irb->codegen->invalid_instruction; } - return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_build_handle(irb, scope, node), lval, result_loc); case BuiltinFnIdAlignOf: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -4677,16 +5135,18 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *align_of = ir_build_align_of(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, align_of, lval); + return ir_lval_wrap(irb, scope, align_of, lval, result_loc); } case BuiltinFnIdAddWithOverflow: - return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd), lval); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpAdd), lval, result_loc); case BuiltinFnIdSubWithOverflow: - return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub), lval); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpSub), lval, result_loc); case BuiltinFnIdMulWithOverflow: - return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul), lval); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpMul), lval, result_loc); case BuiltinFnIdShlWithOverflow: - return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl), lval); + return ir_lval_wrap(irb, scope, ir_gen_overflow_op(irb, scope, node, IrOverflowOpShl), lval, result_loc); + case BuiltinFnIdMulAdd: + return ir_lval_wrap(irb, scope, ir_gen_mul_add(irb, scope, node), lval, result_loc); case BuiltinFnIdTypeName: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -4695,7 +5155,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *type_name = ir_build_type_name(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, type_name, lval); + return ir_lval_wrap(irb, scope, type_name, lval, result_loc); } case BuiltinFnIdPanic: { @@ -4705,7 +5165,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *panic = ir_build_panic(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, panic, lval); + return ir_lval_wrap(irb, scope, panic, lval, result_loc); } case BuiltinFnIdPtrCast: { @@ -4720,22 +5180,31 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *ptr_cast = ir_build_ptr_cast_src(irb, scope, node, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, ptr_cast, lval); + return ir_lval_wrap(irb, scope, ptr_cast, lval, result_loc); } case BuiltinFnIdBitCast: { - AstNode *arg0_node = node->data.fn_call_expr.params.at(0); - IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); - if (arg0_value == irb->codegen->invalid_instruction) - return arg0_value; + AstNode *dest_type_node = node->data.fn_call_expr.params.at(0); + IrInstruction *dest_type = ir_gen_node(irb, dest_type_node, scope); + if (dest_type == irb->codegen->invalid_instruction) + return dest_type; + + ResultLocBitCast 
*result_loc_bit_cast = allocate(1); + result_loc_bit_cast->base.id = ResultLocIdBitCast; + result_loc_bit_cast->base.source_instruction = dest_type; + ir_ref_instruction(dest_type, irb->current_basic_block); + result_loc_bit_cast->parent = result_loc; + + ir_build_reset_result(irb, scope, node, &result_loc_bit_cast->base); AstNode *arg1_node = node->data.fn_call_expr.params.at(1); - IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + IrInstruction *arg1_value = ir_gen_node_extra(irb, arg1_node, scope, LValNone, + &result_loc_bit_cast->base); if (arg1_value == irb->codegen->invalid_instruction) return arg1_value; - IrInstruction *bit_cast = ir_build_bit_cast(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, bit_cast, lval); + IrInstruction *bitcast = ir_build_bit_cast_src(irb, scope, arg1_node, arg1_value, result_loc_bit_cast); + return ir_lval_wrap(irb, scope, bitcast, lval, result_loc); } case BuiltinFnIdIntToPtr: { @@ -4750,7 +5219,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *int_to_ptr = ir_build_int_to_ptr(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, int_to_ptr, lval); + return ir_lval_wrap(irb, scope, int_to_ptr, lval, result_loc); } case BuiltinFnIdPtrToInt: { @@ -4760,7 +5229,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *ptr_to_int = ir_build_ptr_to_int(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, ptr_to_int, lval); + return ir_lval_wrap(irb, scope, ptr_to_int, lval, result_loc); } case BuiltinFnIdTagName: { @@ -4771,7 +5240,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo IrInstruction *actual_tag = ir_build_union_tag(irb, scope, node, arg0_value); IrInstruction *tag_name = ir_build_tag_name(irb, scope, node, actual_tag); - return ir_lval_wrap(irb, scope, tag_name, lval); + return ir_lval_wrap(irb, scope, tag_name, lval, result_loc); } case BuiltinFnIdTagType: { @@ -4781,7 +5250,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *tag_type = ir_build_tag_type(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, tag_type, lval); + return ir_lval_wrap(irb, scope, tag_type, lval, result_loc); } case BuiltinFnIdFieldParentPtr: { @@ -4801,7 +5270,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg2_value; IrInstruction *field_parent_ptr = ir_build_field_parent_ptr(irb, scope, node, arg0_value, arg1_value, arg2_value, nullptr); - return ir_lval_wrap(irb, scope, field_parent_ptr, lval); + return ir_lval_wrap(irb, scope, field_parent_ptr, lval, result_loc); } case BuiltinFnIdByteOffsetOf: { @@ -4816,7 +5285,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *offset_of = ir_build_byte_offset_of(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, offset_of, lval); + return ir_lval_wrap(irb, scope, offset_of, lval, result_loc); } case BuiltinFnIdBitOffsetOf: { @@ -4831,7 +5300,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *offset_of = ir_build_bit_offset_of(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, offset_of, lval); + return ir_lval_wrap(irb, scope, offset_of, lval, result_loc); } case 
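
The reworked BuiltinFnIdBitCast case above no longer generates the operand first and casts afterwards; it allocates a ResultLocBitCast that forwards the surrounding result location, so the operand expression is generated directly into the destination viewed as the target type. A hedged sketch of the call shape this handles (destination type first, then the operand, per params.at(0) and params.at(1)):

    // Sketch: the @bitCast operand is written straight through the result location.
    const std = @import("std");
    const assert = std.debug.assert;

    test "bit cast result location sketch" {
        const f: f32 = 1.0;
        const bits: u32 = @bitCast(u32, f);
        assert(bits == 0x3f800000);
    }
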
BuiltinFnIdInlineCall: case BuiltinFnIdNoInlineCall: @@ -4857,8 +5326,9 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever; - IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr, nullptr); - return ir_lval_wrap(irb, scope, call, lval); + IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, + fn_inline, false, nullptr, nullptr, result_loc); + return ir_lval_wrap(irb, scope, call, lval, result_loc); } case BuiltinFnIdNewStackCall: { @@ -4887,8 +5357,9 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return args[i]; } - IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, false, nullptr, new_stack); - return ir_lval_wrap(irb, scope, call, lval); + IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, + FnInlineAuto, false, nullptr, new_stack, result_loc); + return ir_lval_wrap(irb, scope, call, lval, result_loc); } case BuiltinFnIdTypeId: { @@ -4898,7 +5369,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *type_id = ir_build_type_id(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, type_id, lval); + return ir_lval_wrap(irb, scope, type_id, lval, result_loc); } case BuiltinFnIdShlExact: { @@ -4913,7 +5384,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftLeftExact, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, bin_op, lval); + return ir_lval_wrap(irb, scope, bin_op, lval, result_loc); } case BuiltinFnIdShrExact: { @@ -4928,7 +5399,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *bin_op = ir_build_bin_op(irb, scope, node, IrBinOpBitShiftRightExact, arg0_value, arg1_value, true); - return ir_lval_wrap(irb, scope, bin_op, lval); + return ir_lval_wrap(irb, scope, bin_op, lval, result_loc); } case BuiltinFnIdSetEvalBranchQuota: { @@ -4938,7 +5409,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *set_eval_branch_quota = ir_build_set_eval_branch_quota(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, set_eval_branch_quota, lval); + return ir_lval_wrap(irb, scope, set_eval_branch_quota, lval, result_loc); } case BuiltinFnIdAlignCast: { @@ -4953,17 +5424,17 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *align_cast = ir_build_align_cast(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, align_cast, lval); + return ir_lval_wrap(irb, scope, align_cast, lval, result_loc); } case BuiltinFnIdOpaqueType: { IrInstruction *opaque_type = ir_build_opaque_type(irb, scope, node); - return ir_lval_wrap(irb, scope, opaque_type, lval); + return ir_lval_wrap(irb, scope, opaque_type, lval, result_loc); } case BuiltinFnIdThis: { IrInstruction *this_inst = ir_gen_this(irb, scope, node); - return ir_lval_wrap(irb, scope, this_inst, lval); + return ir_lval_wrap(irb, scope, this_inst, lval, result_loc); } case BuiltinFnIdSetAlignStack: { @@ -4973,7 +5444,7 @@ static 
IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *set_align_stack = ir_build_set_align_stack(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, set_align_stack, lval); + return ir_lval_wrap(irb, scope, set_align_stack, lval, result_loc); } case BuiltinFnIdArgType: { @@ -4988,7 +5459,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *arg_type = ir_build_arg_type(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, arg_type, lval); + return ir_lval_wrap(irb, scope, arg_type, lval, result_loc); } case BuiltinFnIdExport: { @@ -5008,12 +5479,12 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg2_value; IrInstruction *ir_export = ir_build_export(irb, scope, node, arg0_value, arg1_value, arg2_value); - return ir_lval_wrap(irb, scope, ir_export, lval); + return ir_lval_wrap(irb, scope, ir_export, lval, result_loc); } case BuiltinFnIdErrorReturnTrace: { IrInstruction *error_return_trace = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::Null); - return ir_lval_wrap(irb, scope, error_return_trace, lval); + return ir_lval_wrap(irb, scope, error_return_trace, lval, result_loc); } case BuiltinFnIdAtomicRmw: { @@ -5042,10 +5513,11 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg4_value == irb->codegen->invalid_instruction) return arg4_value; - return ir_build_atomic_rmw(irb, scope, node, arg0_value, arg1_value, arg2_value, arg3_value, + IrInstruction *inst = ir_build_atomic_rmw(irb, scope, node, arg0_value, arg1_value, arg2_value, arg3_value, arg4_value, // these 2 values don't mean anything since we passed non-null values for other args AtomicRmwOp_xchg, AtomicOrderMonotonic); + return ir_lval_wrap(irb, scope, inst, lval, result_loc); } case BuiltinFnIdAtomicLoad: { @@ -5064,9 +5536,10 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo if (arg2_value == irb->codegen->invalid_instruction) return arg2_value; - return ir_build_atomic_load(irb, scope, node, arg0_value, arg1_value, arg2_value, + IrInstruction *inst = ir_build_atomic_load(irb, scope, node, arg0_value, arg1_value, arg2_value, // this value does not mean anything since we passed non-null values for other arg AtomicOrderMonotonic); + return ir_lval_wrap(irb, scope, inst, lval, result_loc); } case BuiltinFnIdIntToEnum: { @@ -5081,7 +5554,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *result = ir_build_int_to_enum(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdEnumToInt: { @@ -5091,7 +5564,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg0_value; IrInstruction *result = ir_build_enum_to_int(irb, scope, node, arg0_value); - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdCtz: case BuiltinFnIdPopCount: @@ -5129,7 +5602,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo default: zig_unreachable(); } - return ir_lval_wrap(irb, scope, result, lval); + return ir_lval_wrap(irb, scope, result, lval, result_loc); } case BuiltinFnIdHasDecl: { @@ -5144,17 +5617,19 @@ static IrInstruction 
*ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return arg1_value; IrInstruction *has_decl = ir_build_has_decl(irb, scope, node, arg0_value, arg1_value); - return ir_lval_wrap(irb, scope, has_decl, lval); + return ir_lval_wrap(irb, scope, has_decl, lval, result_loc); } } zig_unreachable(); } -static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { +static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeFnCallExpr); if (node->data.fn_call_expr.is_builtin) - return ir_gen_builtin_fn_call(irb, scope, node, lval); + return ir_gen_builtin_fn_call(irb, scope, node, lval, result_loc); AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr; IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope); @@ -5180,12 +5655,14 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node } } - IrInstruction *fn_call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, - is_async, async_allocator, nullptr); - return ir_lval_wrap(irb, scope, fn_call, lval); + IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, + is_async, async_allocator, nullptr, result_loc); + return ir_lval_wrap(irb, scope, fn_call, lval, result_loc); } -static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeIfBoolExpr); IrInstruction *condition = ir_gen_node(irb, node->data.if_bool_expr.condition, scope); @@ -5206,12 +5683,16 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "Else"); IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "EndIf"); - ir_build_cond_br(irb, scope, condition->source_node, condition, then_block, else_block, is_comptime); + IrInstruction *cond_br_inst = ir_build_cond_br(irb, scope, node, condition, + then_block, else_block, is_comptime); + ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block, + result_loc, is_comptime); ir_set_cursor_at_end_and_append_block(irb, then_block); Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime); - IrInstruction *then_expr_result = ir_gen_node(irb, then_node, subexpr_scope); + IrInstruction *then_expr_result = ir_gen_node_extra(irb, then_node, subexpr_scope, lval, + &peer_parent->peers.at(0)->base); if (then_expr_result == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; IrBasicBlock *after_then_block = irb->current_basic_block; @@ -5221,11 +5702,12 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode ir_set_cursor_at_end_and_append_block(irb, else_block); IrInstruction *else_expr_result; if (else_node) { - else_expr_result = ir_gen_node(irb, else_node, subexpr_scope); + else_expr_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_parent->peers.at(1)->base); if (else_expr_result == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; } else { else_expr_result = ir_build_const_void(irb, scope, node); + ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base); } IrBasicBlock *after_else_block = 
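
ir_gen_fn_call now threads its ResultLoc into ir_build_call_src, so a call used as an initializer can construct its return value directly in the caller-provided location rather than through a temporary that is copied afterwards. A hedged illustration of the pattern this enables:

    // Sketch: an aggregate return value can be built in the caller's storage.
    const std = @import("std");
    const assert = std.debug.assert;

    const Pair = struct {
        a: i32,
        b: i32,
    };

    fn makePair(a: i32, b: i32) Pair {
        return Pair{ .a = a, .b = b };
    }

    test "call result location sketch" {
        const p = makePair(1, 2);
        assert(p.a == 1 and p.b == 2);
    }
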
irb->current_basic_block; if (!instr_is_unreachable(else_expr_result)) @@ -5239,14 +5721,15 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode incoming_blocks[0] = after_then_block; incoming_blocks[1] = after_else_block; - return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + IrInstruction *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent); + return ir_expr_wrap(irb, scope, phi, result_loc); } static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) { assert(node->type == NodeTypePrefixOpExpr); AstNode *expr_node = node->data.prefix_op_expr.primary_expr; - IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval); + IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval, nullptr); if (value == irb->codegen->invalid_instruction) return value; @@ -5257,15 +5740,34 @@ static IrInstruction *ir_gen_prefix_op_id(IrBuilder *irb, Scope *scope, AstNode return ir_gen_prefix_op_id_lval(irb, scope, node, op_id, LValNone); } -static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval) { - if (lval != LValPtr) - return value; - if (value == irb->codegen->invalid_instruction) - return value; +static IrInstruction *ir_expr_wrap(IrBuilder *irb, Scope *scope, IrInstruction *inst, ResultLoc *result_loc) { + ir_build_end_expr(irb, scope, inst->source_node, inst, result_loc); + return inst; +} + +static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval, + ResultLoc *result_loc) +{ + // This logic must be kept in sync with + // [STMT_EXPR_TEST_THING] <--- (search this token) + if (value == irb->codegen->invalid_instruction || + instr_is_unreachable(value) || + value->source_node->type == NodeTypeDefer || + value->id == IrInstructionIdDeclVarSrc) + { + return value; + } + + if (lval == LValPtr) { + // We needed a pointer to a value, but we got a value. So we create + // an instruction which just makes a pointer of it. + return ir_build_ref(irb, scope, value->source_node, value, false, false); + } else if (result_loc != nullptr) { + return ir_expr_wrap(irb, scope, value, result_loc); + } else { + return value; + } - // We needed a pointer to a value, but we got a value. So we create - // an instruction which just makes a const pointer of it. 
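
ir_gen_if_bool_expr above now builds binary result peers off the cond_br instruction, so the then and else branches each end their expression into a peer of the same result location and the phi simply names whichever peer ran. A hedged sketch of the source-level shape this serves:

    // Sketch: both branches are peers writing into the same result location.
    const std = @import("std");
    const assert = std.debug.assert;

    fn pick(cond: bool) i32 {
        const v: i32 = if (cond) 10 else 20;
        return v;
    }

    test "if expression peer result sketch" {
        assert(pick(true) == 10);
        assert(pick(false) == 20);
    }
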
- return ir_build_ref(irb, scope, value->source_node, value, false, false); } static PtrLen star_token_to_ptr_len(TokenId token_id) { @@ -5338,21 +5840,22 @@ static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode ptr_len, align_value, bit_offset_start, host_int_bytes, is_allow_zero); } -static IrInstruction *ir_gen_catch_unreachable(IrBuilder *irb, Scope *scope, AstNode *source_node, AstNode *expr_node, - LVal lval) +static IrInstruction *ir_gen_catch_unreachable(IrBuilder *irb, Scope *scope, AstNode *source_node, + AstNode *expr_node, LVal lval, ResultLoc *result_loc) { - IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr); + IrInstruction *err_union_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr); if (err_union_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - IrInstruction *payload_ptr = ir_build_unwrap_err_payload(irb, scope, source_node, err_union_ptr, true); + IrInstruction *payload_ptr = ir_build_unwrap_err_payload(irb, scope, source_node, err_union_ptr, true, false); if (payload_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; if (lval == LValPtr) return payload_ptr; - return ir_build_load_ptr(irb, scope, source_node, payload_ptr); + IrInstruction *load_ptr = ir_build_load_ptr(irb, scope, source_node, payload_ptr); + return ir_expr_wrap(irb, scope, load_ptr, result_loc); } static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -5366,7 +5869,9 @@ static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *nod return ir_build_bool_not(irb, scope, node, value); } -static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { +static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypePrefixOpExpr); PrefixOp prefix_op = node->data.prefix_op_expr.prefix_op; @@ -5375,24 +5880,26 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod case PrefixOpInvalid: zig_unreachable(); case PrefixOpBoolNot: - return ir_lval_wrap(irb, scope, ir_gen_bool_not(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_bool_not(irb, scope, node), lval, result_loc); case PrefixOpBinNot: - return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpBinNot), lval); + return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpBinNot), lval, result_loc); case PrefixOpNegation: - return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval); + return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval, result_loc); case PrefixOpNegationWrap: - return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval); + return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval, result_loc); case PrefixOpOptional: - return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval); + return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval, result_loc); case PrefixOpAddrOf: { AstNode *expr_node = node->data.prefix_op_expr.primary_expr; - return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LValPtr), lval); + return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr), lval, result_loc); 
} } zig_unreachable(); } -static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *parent_result_loc) +{ assert(node->type == NodeTypeContainerInitExpr); AstNodeContainerInitExpr *container_init_expr = &node->data.container_init_expr; @@ -5410,45 +5917,104 @@ static IrInstruction *ir_gen_container_init_expr(IrBuilder *irb, Scope *scope, A return container_type; } - if (kind == ContainerInitKindStruct) { - if (elem_type != nullptr) { - add_node_error(irb->codegen, container_init_expr->type, - buf_sprintf("initializing array with struct syntax")); - return irb->codegen->invalid_instruction; + switch (kind) { + case ContainerInitKindStruct: { + if (elem_type != nullptr) { + add_node_error(irb->codegen, container_init_expr->type, + buf_sprintf("initializing array with struct syntax")); + return irb->codegen->invalid_instruction; + } + + IrInstruction *container_ptr = ir_build_resolve_result(irb, scope, node, parent_result_loc, + container_type); + + size_t field_count = container_init_expr->entries.length; + IrInstructionContainerInitFieldsField *fields = allocate(field_count); + for (size_t i = 0; i < field_count; i += 1) { + AstNode *entry_node = container_init_expr->entries.at(i); + assert(entry_node->type == NodeTypeStructValueField); + + Buf *name = entry_node->data.struct_val_field.name; + AstNode *expr_node = entry_node->data.struct_val_field.expr; + + IrInstruction *field_ptr = ir_build_field_ptr(irb, scope, entry_node, container_ptr, name, true); + ResultLocInstruction *result_loc_inst = allocate(1); + result_loc_inst->base.id = ResultLocIdInstruction; + result_loc_inst->base.source_instruction = field_ptr; + ir_ref_instruction(field_ptr, irb->current_basic_block); + ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base); + + IrInstruction *expr_value = ir_gen_node_extra(irb, expr_node, scope, LValNone, + &result_loc_inst->base); + if (expr_value == irb->codegen->invalid_instruction) + return expr_value; + + fields[i].name = name; + fields[i].source_node = entry_node; + fields[i].result_loc = field_ptr; + } + IrInstruction *init_fields = ir_build_container_init_fields(irb, scope, node, container_type, + field_count, fields, container_ptr); + + return ir_lval_wrap(irb, scope, init_fields, lval, parent_result_loc); } + case ContainerInitKindArray: { + size_t item_count = container_init_expr->entries.length; - size_t field_count = container_init_expr->entries.length; - IrInstructionContainerInitFieldsField *fields = allocate(field_count); - for (size_t i = 0; i < field_count; i += 1) { - AstNode *entry_node = container_init_expr->entries.at(i); - assert(entry_node->type == NodeTypeStructValueField); + if (container_type == nullptr) { + IrInstruction *item_count_inst = ir_build_const_usize(irb, scope, node, item_count); + container_type = ir_build_array_type(irb, scope, node, item_count_inst, elem_type); + } - Buf *name = entry_node->data.struct_val_field.name; - AstNode *expr_node = entry_node->data.struct_val_field.expr; - IrInstruction *expr_value = ir_gen_node(irb, expr_node, scope); - if (expr_value == irb->codegen->invalid_instruction) - return expr_value; + IrInstruction *container_ptr = ir_build_resolve_result(irb, scope, node, parent_result_loc, + container_type); - fields[i].name = name; - fields[i].value = expr_value; - fields[i].source_node = entry_node; + IrInstruction **result_locs = allocate(item_count); 
+ for (size_t i = 0; i < item_count; i += 1) { + AstNode *expr_node = container_init_expr->entries.at(i); + + IrInstruction *elem_index = ir_build_const_usize(irb, scope, expr_node, i); + IrInstruction *elem_ptr = ir_build_elem_ptr(irb, scope, expr_node, container_ptr, elem_index, + false, PtrLenSingle, container_type); + ResultLocInstruction *result_loc_inst = allocate(1); + result_loc_inst->base.id = ResultLocIdInstruction; + result_loc_inst->base.source_instruction = elem_ptr; + ir_ref_instruction(elem_ptr, irb->current_basic_block); + ir_build_reset_result(irb, scope, expr_node, &result_loc_inst->base); + + IrInstruction *expr_value = ir_gen_node_extra(irb, expr_node, scope, LValNone, + &result_loc_inst->base); + if (expr_value == irb->codegen->invalid_instruction) + return expr_value; + + result_locs[i] = elem_ptr; + } + IrInstruction *init_list = ir_build_container_init_list(irb, scope, node, container_type, + item_count, result_locs, container_ptr); + return ir_lval_wrap(irb, scope, init_list, lval, parent_result_loc); } - return ir_build_container_init_fields(irb, scope, node, container_type, field_count, fields); - } else if (kind == ContainerInitKindArray) { - size_t item_count = container_init_expr->entries.length; - IrInstruction **values = allocate(item_count); - for (size_t i = 0; i < item_count; i += 1) { - AstNode *expr_node = container_init_expr->entries.at(i); - IrInstruction *expr_value = ir_gen_node(irb, expr_node, scope); - if (expr_value == irb->codegen->invalid_instruction) - return expr_value; - - values[i] = expr_value; - } - return ir_build_container_init_list(irb, scope, node, container_type, elem_type, item_count, values); - } else { - zig_unreachable(); } + zig_unreachable(); +} + +static ResultLocVar *ir_build_var_result_loc(IrBuilder *irb, IrInstruction *alloca, ZigVar *var) { + ResultLocVar *result_loc_var = allocate(1); + result_loc_var->base.id = ResultLocIdVar; + result_loc_var->base.source_instruction = alloca; + result_loc_var->var = var; + + ir_build_reset_result(irb, alloca->scope, alloca->source_node, &result_loc_var->base); + + return result_loc_var; +} + +static void build_decl_var_and_init(IrBuilder *irb, Scope *scope, AstNode *source_node, ZigVar *var, + IrInstruction *init, const char *name_hint, IrInstruction *is_comptime) +{ + IrInstruction *alloca = ir_build_alloca_src(irb, scope, source_node, nullptr, name_hint, is_comptime); + ResultLocVar *var_result_loc = ir_build_var_result_loc(irb, alloca, var); + ir_build_end_expr(irb, scope, source_node, init, &var_result_loc->base); + ir_build_var_decl_src(irb, scope, source_node, var, nullptr, alloca); } static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -5461,9 +6027,12 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod return irb->codegen->invalid_instruction; } + // Used for the type expr and the align expr + Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope); + IrInstruction *type_instruction; if (variable_declaration->type != nullptr) { - type_instruction = ir_gen_node(irb, variable_declaration->type, scope); + type_instruction = ir_gen_node(irb, variable_declaration->type, comptime_scope); if (type_instruction == irb->codegen->invalid_instruction) return type_instruction; } else { @@ -5474,8 +6043,8 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod bool is_const = variable_declaration->is_const; bool is_extern = variable_declaration->is_extern; - IrInstruction 
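
Both arms of the rewritten ir_gen_container_init_expr resolve the parent result location up front and then hand each field or element initializer its own field_ptr/elem_ptr result location, so literals are built in place rather than assembled from separately generated values. A hedged sketch of the two literal forms this covers, assuming the inferred-length array literal syntax of this era:

    // Sketch: struct fields and array elements are initialized in place.
    const std = @import("std");
    const assert = std.debug.assert;

    const Point = struct {
        x: i32,
        y: i32,
    };

    test "container init result location sketch" {
        const p = Point{ .x = 1, .y = 2 };
        const arr = [_]i32{ 10, 20, 30 };
        assert(p.x + p.y == 3);
        assert(arr[2] == 30);
    }
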
*is_comptime = ir_build_const_bool(irb, scope, node, - ir_should_inline(irb->exec, scope) || variable_declaration->is_comptime); + bool is_comptime_scalar = ir_should_inline(irb->exec, scope) || variable_declaration->is_comptime; + IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, is_comptime_scalar); ZigVar *var = ir_create_var(irb, node, scope, variable_declaration->symbol, is_const, is_const, is_shadowable, is_comptime); // we detect IrInstructionIdDeclVarSrc in gen_block to make sure the next node @@ -5489,7 +6058,7 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod IrInstruction *align_value = nullptr; if (variable_declaration->align_expr != nullptr) { - align_value = ir_gen_node(irb, variable_declaration->align_expr, scope); + align_value = ir_gen_node(irb, variable_declaration->align_expr, comptime_scope); if (align_value == irb->codegen->invalid_instruction) return align_value; } @@ -5502,20 +6071,39 @@ static IrInstruction *ir_gen_var_decl(IrBuilder *irb, Scope *scope, AstNode *nod // Parser should ensure that this never happens assert(variable_declaration->threadlocal_tok == nullptr); + IrInstruction *alloca = ir_build_alloca_src(irb, scope, node, align_value, + buf_ptr(variable_declaration->symbol), is_comptime); + + // Create a result location for the initialization expression. + ResultLocVar *result_loc_var = ir_build_var_result_loc(irb, alloca, var); + ResultLoc *init_result_loc = (type_instruction == nullptr) ? &result_loc_var->base : nullptr; + + Scope *init_scope = is_comptime_scalar ? + create_comptime_scope(irb->codegen, variable_declaration->expr, scope) : scope; + // Temporarily set the name of the IrExecutable to the VariableDeclaration // so that the struct or enum from the init expression inherits the name. 
Buf *old_exec_name = irb->exec->name; irb->exec->name = variable_declaration->symbol; - IrInstruction *init_value = ir_gen_node(irb, variable_declaration->expr, scope); + IrInstruction *init_value = ir_gen_node_extra(irb, variable_declaration->expr, init_scope, + LValNone, init_result_loc); irb->exec->name = old_exec_name; if (init_value == irb->codegen->invalid_instruction) - return init_value; + return irb->codegen->invalid_instruction; - return ir_build_var_decl_src(irb, scope, node, var, type_instruction, align_value, init_value); + if (type_instruction != nullptr) { + IrInstruction *implicit_cast = ir_build_implicit_cast(irb, scope, node, type_instruction, init_value, + &result_loc_var->base); + ir_build_end_expr(irb, scope, node, implicit_cast, &result_loc_var->base); + } + + return ir_build_var_decl_src(irb, scope, node, var, align_value, alloca); } -static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeWhileExpr); AstNode *continue_expr_node = node->data.while_expr.continue_expr; @@ -5550,25 +6138,33 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n } else { payload_scope = subexpr_scope; } - IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope, LValPtr); + IrInstruction *err_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope, + LValPtr, nullptr); if (err_val_ptr == irb->codegen->invalid_instruction) return err_val_ptr; - IrInstruction *err_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, err_val_ptr); - IrInstruction *is_err = ir_build_test_err(irb, scope, node->data.while_expr.condition, err_val); + IrInstruction *is_err = ir_build_test_err_src(irb, scope, node->data.while_expr.condition, err_val_ptr, true); IrBasicBlock *after_cond_block = irb->current_basic_block; IrInstruction *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node)); + IrInstruction *cond_br_inst; if (!instr_is_unreachable(is_err)) { - ir_mark_gen(ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_err, - else_block, body_block, is_comptime)); + cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_err, + else_block, body_block, is_comptime); + cond_br_inst->is_gen = true; + } else { + // for the purposes of the source instruction to ir_build_result_peers + cond_br_inst = irb->current_basic_block->instruction_list.last(); } + ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc, + is_comptime); + ir_set_cursor_at_end_and_append_block(irb, body_block); if (var_symbol) { - IrInstruction *var_ptr_value = ir_build_unwrap_err_payload(irb, payload_scope, symbol_node, - err_val_ptr, false); - IrInstruction *var_value = node->data.while_expr.var_is_ptr ? - var_ptr_value : ir_build_load_ptr(irb, payload_scope, symbol_node, var_ptr_value); - ir_build_var_decl_src(irb, payload_scope, symbol_node, payload_var, nullptr, nullptr, var_value); + IrInstruction *payload_ptr = ir_build_unwrap_err_payload(irb, payload_scope, symbol_node, + err_val_ptr, false, false); + IrInstruction *var_ptr = node->data.while_expr.var_is_ptr ? 
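
ir_gen_var_decl above now emits an alloca with a ResultLocVar before generating the initializer; when no explicit type is given the initializer writes straight into that location, and when a type is present the value is routed through ir_build_implicit_cast into the same location first. A hedged sketch of both declaration forms:

    // Sketch: inferred-type init uses the variable's result location directly;
    // an explicit type inserts an implicit cast of the init expression.
    const std = @import("std");
    const assert = std.debug.assert;

    test "var decl result location sketch" {
        const inferred = [_]u8{ 1, 2, 3 }; // literal built in the variable's storage
        const typed: f64 = 1;              // comptime_int implicitly cast to f64
        assert(inferred[1] == 2);
        assert(typed == 1.0);
    }
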
+ ir_build_ref(irb, payload_scope, symbol_node, payload_ptr, true, false) : payload_ptr; + ir_build_var_decl_src(irb, payload_scope, symbol_node, payload_var, nullptr, var_ptr); } ZigList incoming_values = {0}; @@ -5580,7 +6176,12 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n loop_scope->is_comptime = is_comptime; loop_scope->incoming_blocks = &incoming_blocks; loop_scope->incoming_values = &incoming_values; + loop_scope->lval = lval; + loop_scope->peer_parent = peer_parent; + // Note the body block of the loop is not the place that lval and result_loc are used - + // it's actually in break statements, handled similarly to return statements. + // That is why we set those values in loop_scope above and not in this ir_gen_node call. IrInstruction *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base); if (body_result == irb->codegen->invalid_instruction) return body_result; @@ -5609,10 +6210,15 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n ZigVar *err_var = ir_create_var(irb, err_symbol_node, scope, err_symbol, true, false, false, is_comptime); Scope *err_scope = err_var->child_scope; - IrInstruction *err_var_value = ir_build_unwrap_err_code(irb, err_scope, err_symbol_node, err_val_ptr); - ir_build_var_decl_src(irb, err_scope, symbol_node, err_var, nullptr, nullptr, err_var_value); + IrInstruction *err_ptr = ir_build_unwrap_err_code(irb, err_scope, err_symbol_node, err_val_ptr); + ir_build_var_decl_src(irb, err_scope, symbol_node, err_var, nullptr, err_ptr); - IrInstruction *else_result = ir_gen_node(irb, else_node, err_scope); + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = else_block; + } + ResultLocPeer *peer_result = create_peer_result(peer_parent); + peer_parent->peers.append(peer_result); + IrInstruction *else_result = ir_gen_node_extra(irb, else_node, err_scope, lval, &peer_result->base); if (else_result == irb->codegen->invalid_instruction) return else_result; if (!instr_is_unreachable(else_result)) @@ -5626,8 +6232,13 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n incoming_blocks.append(after_cond_block); incoming_values.append(void_else_result); } + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = end_block; + } - return ir_build_phi(irb, scope, node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); + IrInstruction *phi = ir_build_phi(irb, scope, node, incoming_blocks.length, + incoming_blocks.items, incoming_values.items, peer_parent); + return ir_expr_wrap(irb, scope, phi, result_loc); } else if (var_symbol != nullptr) { ir_set_cursor_at_end_and_append_block(irb, cond_block); Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime); @@ -5637,23 +6248,32 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n ZigVar *payload_var = ir_create_var(irb, symbol_node, subexpr_scope, var_symbol, true, false, false, is_comptime); Scope *child_scope = payload_var->child_scope; - IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope, LValPtr); + IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, node->data.while_expr.condition, subexpr_scope, + LValPtr, nullptr); if (maybe_val_ptr == irb->codegen->invalid_instruction) return maybe_val_ptr; IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node->data.while_expr.condition, maybe_val_ptr); IrInstruction *is_non_null = 
ir_build_test_nonnull(irb, scope, node->data.while_expr.condition, maybe_val); IrBasicBlock *after_cond_block = irb->current_basic_block; IrInstruction *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node)); + IrInstruction *cond_br_inst; if (!instr_is_unreachable(is_non_null)) { - ir_mark_gen(ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_non_null, - body_block, else_block, is_comptime)); + cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, is_non_null, + body_block, else_block, is_comptime); + cond_br_inst->is_gen = true; + } else { + // for the purposes of the source instruction to ir_build_result_peers + cond_br_inst = irb->current_basic_block->instruction_list.last(); } + ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc, + is_comptime); + ir_set_cursor_at_end_and_append_block(irb, body_block); - IrInstruction *var_ptr_value = ir_build_optional_unwrap_ptr(irb, child_scope, symbol_node, maybe_val_ptr, false); - IrInstruction *var_value = node->data.while_expr.var_is_ptr ? - var_ptr_value : ir_build_load_ptr(irb, child_scope, symbol_node, var_ptr_value); - ir_build_var_decl_src(irb, child_scope, symbol_node, payload_var, nullptr, nullptr, var_value); + IrInstruction *payload_ptr = ir_build_optional_unwrap_ptr(irb, child_scope, symbol_node, maybe_val_ptr, false, false); + IrInstruction *var_ptr = node->data.while_expr.var_is_ptr ? + ir_build_ref(irb, child_scope, symbol_node, payload_ptr, true, false) : payload_ptr; + ir_build_var_decl_src(irb, child_scope, symbol_node, payload_var, nullptr, var_ptr); ZigList incoming_values = {0}; ZigList incoming_blocks = {0}; @@ -5664,7 +6284,12 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n loop_scope->is_comptime = is_comptime; loop_scope->incoming_blocks = &incoming_blocks; loop_scope->incoming_values = &incoming_values; + loop_scope->lval = lval; + loop_scope->peer_parent = peer_parent; + // Note the body block of the loop is not the place that lval and result_loc are used - + // it's actually in break statements, handled similarly to return statements. + // That is why we set those values in loop_scope above and not in this ir_gen_node call. 
IrInstruction *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base); if (body_result == irb->codegen->invalid_instruction) return body_result; @@ -5689,7 +6314,12 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (else_node) { ir_set_cursor_at_end_and_append_block(irb, else_block); - else_result = ir_gen_node(irb, else_node, scope); + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = else_block; + } + ResultLocPeer *peer_result = create_peer_result(peer_parent); + peer_parent->peers.append(peer_result); + else_result = ir_gen_node_extra(irb, else_node, scope, lval, &peer_result->base); if (else_result == irb->codegen->invalid_instruction) return else_result; if (!instr_is_unreachable(else_result)) @@ -5704,8 +6334,13 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n incoming_blocks.append(after_cond_block); incoming_values.append(void_else_result); } + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = end_block; + } - return ir_build_phi(irb, scope, node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); + IrInstruction *phi = ir_build_phi(irb, scope, node, incoming_blocks.length, + incoming_blocks.items, incoming_values.items, peer_parent); + return ir_expr_wrap(irb, scope, phi, result_loc); } else { ir_set_cursor_at_end_and_append_block(irb, cond_block); IrInstruction *cond_val = ir_gen_node(irb, node->data.while_expr.condition, scope); @@ -5713,11 +6348,18 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n return cond_val; IrBasicBlock *after_cond_block = irb->current_basic_block; IrInstruction *void_else_result = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, scope, node)); + IrInstruction *cond_br_inst; if (!instr_is_unreachable(cond_val)) { - ir_mark_gen(ir_build_cond_br(irb, scope, node->data.while_expr.condition, cond_val, - body_block, else_block, is_comptime)); + cond_br_inst = ir_build_cond_br(irb, scope, node->data.while_expr.condition, cond_val, + body_block, else_block, is_comptime); + cond_br_inst->is_gen = true; + } else { + // for the purposes of the source instruction to ir_build_result_peers + cond_br_inst = irb->current_basic_block->instruction_list.last(); } + ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc, + is_comptime); ir_set_cursor_at_end_and_append_block(irb, body_block); ZigList incoming_values = {0}; @@ -5731,7 +6373,12 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n loop_scope->is_comptime = is_comptime; loop_scope->incoming_blocks = &incoming_blocks; loop_scope->incoming_values = &incoming_values; + loop_scope->lval = lval; + loop_scope->peer_parent = peer_parent; + // Note the body block of the loop is not the place that lval and result_loc are used - + // it's actually in break statements, handled similarly to return statements. + // That is why we set those values in loop_scope above and not in this ir_gen_node call. 
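The comment above is easier to see from the language side. Below is a small hand-written Zig sketch (not part of this diff; the `next` helper is invented for illustration) of a `while` over an optional used as an expression: the `break` value reached inside the body and the `else` value are the two peers that end up writing to one result location.

```zig
const assert = @import("std").debug.assert;

// Hypothetical iterator used only for this example: yields 0..4, then null.
fn next(i: *usize) ?usize {
    if (i.* >= 5) return null;
    const v = i.*;
    i.* += 1;
    return v;
}

test "while over an optional as an expression" {
    var i: usize = 0;
    // The break value and the else value are the peer results of this expression.
    const found: usize = while (next(&i)) |v| {
        if (v == 3) break v;
    } else 999;
    assert(found == 3);
}
```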
IrInstruction *body_result = ir_gen_node(irb, node->data.while_expr.body, &loop_scope->base); if (body_result == irb->codegen->invalid_instruction) return body_result; @@ -5756,7 +6403,13 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (else_node) { ir_set_cursor_at_end_and_append_block(irb, else_block); - else_result = ir_gen_node(irb, else_node, subexpr_scope); + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = else_block; + } + ResultLocPeer *peer_result = create_peer_result(peer_parent); + peer_parent->peers.append(peer_result); + + else_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_result->base); if (else_result == irb->codegen->invalid_instruction) return else_result; if (!instr_is_unreachable(else_result)) @@ -5771,12 +6424,19 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n incoming_blocks.append(after_cond_block); incoming_values.append(void_else_result); } + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = end_block; + } - return ir_build_phi(irb, scope, node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); + IrInstruction *phi = ir_build_phi(irb, scope, node, incoming_blocks.length, + incoming_blocks.items, incoming_values.items, peer_parent); + return ir_expr_wrap(irb, scope, phi, result_loc); } } -static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNode *node) { +static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeForExpr); AstNode *array_node = node->data.for_expr.array_expr; @@ -5791,76 +6451,67 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo } assert(elem_node->type == NodeTypeSymbol); - IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LValPtr); + IrInstruction *array_val_ptr = ir_gen_node_extra(irb, array_node, parent_scope, LValPtr, nullptr); if (array_val_ptr == irb->codegen->invalid_instruction) return array_val_ptr; - IrInstruction *pointer_type = ir_build_to_ptr_type(irb, parent_scope, array_node, array_val_ptr); - IrInstruction *elem_var_type; - if (node->data.for_expr.elem_is_ptr) { - elem_var_type = pointer_type; - } else { - elem_var_type = ir_build_ptr_type_child(irb, parent_scope, elem_node, pointer_type); - } - IrInstruction *is_comptime = ir_build_const_bool(irb, parent_scope, node, ir_should_inline(irb->exec, parent_scope) || node->data.for_expr.is_inline); + AstNode *index_var_source_node; + ZigVar *index_var; + const char *index_var_name; + if (index_node) { + index_var_source_node = index_node; + Buf *index_var_name_buf = index_node->data.symbol_expr.symbol; + index_var = ir_create_var(irb, index_node, parent_scope, index_var_name_buf, true, false, false, is_comptime); + index_var_name = buf_ptr(index_var_name_buf); + } else { + index_var_source_node = node; + index_var = ir_create_var(irb, node, parent_scope, nullptr, true, false, true, is_comptime); + index_var_name = "i"; + } + + IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0); + build_decl_var_and_init(irb, parent_scope, index_var_source_node, index_var, zero, index_var_name, is_comptime); + parent_scope = index_var->child_scope; + + IrInstruction *one = ir_build_const_usize(irb, parent_scope, node, 1); + IrInstruction *index_ptr = ir_build_var_ptr(irb, parent_scope, node, index_var); + + + IrBasicBlock 
*cond_block = ir_create_basic_block(irb, parent_scope, "ForCond"); + IrBasicBlock *body_block = ir_create_basic_block(irb, parent_scope, "ForBody"); + IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "ForEnd"); + IrBasicBlock *else_block = else_node ? ir_create_basic_block(irb, parent_scope, "ForElse") : end_block; + IrBasicBlock *continue_block = ir_create_basic_block(irb, parent_scope, "ForContinue"); + + Buf *len_field_name = buf_create_from_str("len"); + IrInstruction *len_ref = ir_build_field_ptr(irb, parent_scope, node, array_val_ptr, len_field_name, false); + IrInstruction *len_val = ir_build_load_ptr(irb, parent_scope, node, len_ref); + ir_build_br(irb, parent_scope, node, cond_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, cond_block); + IrInstruction *index_val = ir_build_load_ptr(irb, parent_scope, node, index_ptr); + IrInstruction *cond = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpLessThan, index_val, len_val, false); + IrBasicBlock *after_cond_block = irb->current_basic_block; + IrInstruction *void_else_value = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, parent_scope, node)); + IrInstruction *cond_br_inst = ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, cond, + body_block, else_block, is_comptime)); + + ResultLocPeerParent *peer_parent = ir_build_result_peers(irb, cond_br_inst, end_block, result_loc, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, body_block); + IrInstruction *elem_ptr = ir_build_elem_ptr(irb, parent_scope, node, array_val_ptr, index_val, false, + PtrLenSingle, nullptr); // TODO make it an error to write to element variable or i variable. Buf *elem_var_name = elem_node->data.symbol_expr.symbol; ZigVar *elem_var = ir_create_var(irb, elem_node, parent_scope, elem_var_name, true, false, false, is_comptime); Scope *child_scope = elem_var->child_scope; - IrInstruction *undefined_value = ir_build_const_undefined(irb, child_scope, elem_node); - ir_build_var_decl_src(irb, child_scope, elem_node, elem_var, elem_var_type, nullptr, undefined_value); - IrInstruction *elem_var_ptr = ir_build_var_ptr(irb, child_scope, node, elem_var); - - AstNode *index_var_source_node; - ZigVar *index_var; - if (index_node) { - index_var_source_node = index_node; - Buf *index_var_name = index_node->data.symbol_expr.symbol; - index_var = ir_create_var(irb, index_node, child_scope, index_var_name, true, false, false, is_comptime); - } else { - index_var_source_node = node; - index_var = ir_create_var(irb, node, child_scope, nullptr, true, false, true, is_comptime); - } - child_scope = index_var->child_scope; - - IrInstruction *usize = ir_build_const_type(irb, child_scope, node, irb->codegen->builtin_types.entry_usize); - IrInstruction *zero = ir_build_const_usize(irb, child_scope, node, 0); - IrInstruction *one = ir_build_const_usize(irb, child_scope, node, 1); - ir_build_var_decl_src(irb, child_scope, index_var_source_node, index_var, usize, nullptr, zero); - IrInstruction *index_ptr = ir_build_var_ptr(irb, child_scope, node, index_var); - - - IrBasicBlock *cond_block = ir_create_basic_block(irb, child_scope, "ForCond"); - IrBasicBlock *body_block = ir_create_basic_block(irb, child_scope, "ForBody"); - IrBasicBlock *end_block = ir_create_basic_block(irb, child_scope, "ForEnd"); - IrBasicBlock *else_block = else_node ? 
ir_create_basic_block(irb, child_scope, "ForElse") : end_block; - IrBasicBlock *continue_block = ir_create_basic_block(irb, child_scope, "ForContinue"); - - Buf *len_field_name = buf_create_from_str("len"); - IrInstruction *len_ref = ir_build_field_ptr(irb, child_scope, node, array_val_ptr, len_field_name); - IrInstruction *len_val = ir_build_load_ptr(irb, child_scope, node, len_ref); - ir_build_br(irb, child_scope, node, cond_block, is_comptime); - - ir_set_cursor_at_end_and_append_block(irb, cond_block); - IrInstruction *index_val = ir_build_load_ptr(irb, child_scope, node, index_ptr); - IrInstruction *cond = ir_build_bin_op(irb, child_scope, node, IrBinOpCmpLessThan, index_val, len_val, false); - IrBasicBlock *after_cond_block = irb->current_basic_block; - IrInstruction *void_else_value = else_node ? nullptr : ir_mark_gen(ir_build_const_void(irb, parent_scope, node)); - ir_mark_gen(ir_build_cond_br(irb, child_scope, node, cond, body_block, else_block, is_comptime)); - - ir_set_cursor_at_end_and_append_block(irb, body_block); - IrInstruction *elem_ptr = ir_build_elem_ptr(irb, child_scope, node, array_val_ptr, index_val, false, PtrLenSingle); - IrInstruction *elem_val; - if (node->data.for_expr.elem_is_ptr) { - elem_val = elem_ptr; - } else { - elem_val = ir_build_load_ptr(irb, child_scope, node, elem_ptr); - } - ir_mark_gen(ir_build_store_ptr(irb, child_scope, node, elem_var_ptr, elem_val)); + IrInstruction *var_ptr = node->data.for_expr.elem_is_ptr ? + ir_build_ref(irb, parent_scope, elem_node, elem_ptr, true, false) : elem_ptr; + ir_build_var_decl_src(irb, parent_scope, elem_node, elem_var, nullptr, var_ptr); ZigList incoming_values = {0}; ZigList incoming_blocks = {0}; @@ -5870,7 +6521,12 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo loop_scope->is_comptime = is_comptime; loop_scope->incoming_blocks = &incoming_blocks; loop_scope->incoming_values = &incoming_values; + loop_scope->lval = LValNone; + loop_scope->peer_parent = peer_parent; + // Note the body block of the loop is not the place that lval and result_loc are used - + // it's actually in break statements, handled similarly to return statements. + // That is why we set those values in loop_scope above and not in this ir_gen_node call. 
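For reference, the same break/else peer structure applies to `for` expressions; an illustrative Zig sketch (not from the patch) of both the break/else result and the by-pointer element capture that the `elem_is_ptr` branch above distinguishes:

```zig
const assert = @import("std").debug.assert;

test "for as an expression, and capture by pointer" {
    const items = [_]i32{ 10, 20, 30 };
    // The break value and the else value are the peer results of the for expression.
    const index_of_20: usize = for (items) |item, i| {
        if (item == 20) break i;
    } else items.len;
    assert(index_of_20 == 1);

    // Capturing by pointer lets the body mutate elements in place.
    var doubled = [_]i32{ 1, 2, 3 };
    for (doubled[0..]) |*item| {
        item.* *= 2;
    }
    assert(doubled[2] == 6);
}
```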
IrInstruction *body_result = ir_gen_node(irb, body_node, &loop_scope->base); if (!instr_is_unreachable(body_result)) { @@ -5887,7 +6543,12 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo if (else_node) { ir_set_cursor_at_end_and_append_block(irb, else_block); - else_result = ir_gen_node(irb, else_node, parent_scope); + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = else_block; + } + ResultLocPeer *peer_result = create_peer_result(peer_parent); + peer_parent->peers.append(peer_result); + else_result = ir_gen_node_extra(irb, else_node, parent_scope, LValNone, &peer_result->base); if (else_result == irb->codegen->invalid_instruction) return else_result; if (!instr_is_unreachable(else_result)) @@ -5903,8 +6564,13 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo incoming_blocks.append(after_cond_block); incoming_values.append(void_else_value); } + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = end_block; + } - return ir_build_phi(irb, parent_scope, node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); + IrInstruction *phi = ir_build_phi(irb, parent_scope, node, incoming_blocks.length, + incoming_blocks.items, incoming_values.items, peer_parent); + return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc); } static IrInstruction *ir_gen_bool_literal(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -6189,7 +6855,9 @@ static IrInstruction *ir_gen_asm_expr(IrBuilder *irb, Scope *scope, AstNode *nod input_list, output_types, output_vars, return_count, is_volatile); } -static IrInstruction *ir_gen_if_optional_expr(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_if_optional_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeIfOptional); Buf *var_symbol = node->data.test_expr.var_symbol; @@ -6198,7 +6866,7 @@ static IrInstruction *ir_gen_if_optional_expr(IrBuilder *irb, Scope *scope, AstN AstNode *else_node = node->data.test_expr.else_node; bool var_is_ptr = node->data.test_expr.var_is_ptr; - IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr); + IrInstruction *maybe_val_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr); if (maybe_val_ptr == irb->codegen->invalid_instruction) return maybe_val_ptr; @@ -6215,27 +6883,31 @@ static IrInstruction *ir_gen_if_optional_expr(IrBuilder *irb, Scope *scope, AstN } else { is_comptime = ir_build_test_comptime(irb, scope, node, is_non_null); } - ir_build_cond_br(irb, scope, node, is_non_null, then_block, else_block, is_comptime); + IrInstruction *cond_br_inst = ir_build_cond_br(irb, scope, node, is_non_null, + then_block, else_block, is_comptime); + + ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block, + result_loc, is_comptime); ir_set_cursor_at_end_and_append_block(irb, then_block); Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime); Scope *var_scope; if (var_symbol) { - IrInstruction *var_type = nullptr; bool is_shadowable = false; bool is_const = true; ZigVar *var = ir_create_var(irb, node, subexpr_scope, var_symbol, is_const, is_const, is_shadowable, is_comptime); - IrInstruction *var_ptr_value = ir_build_optional_unwrap_ptr(irb, subexpr_scope, node, maybe_val_ptr, false); - IrInstruction *var_value = var_is_ptr ? 
var_ptr_value : ir_build_load_ptr(irb, subexpr_scope, node, var_ptr_value); - ir_build_var_decl_src(irb, subexpr_scope, node, var, var_type, nullptr, var_value); + IrInstruction *payload_ptr = ir_build_optional_unwrap_ptr(irb, subexpr_scope, node, maybe_val_ptr, false, false); + IrInstruction *var_ptr = var_is_ptr ? ir_build_ref(irb, subexpr_scope, node, payload_ptr, true, false) : payload_ptr; + ir_build_var_decl_src(irb, subexpr_scope, node, var, nullptr, var_ptr); var_scope = var->child_scope; } else { var_scope = subexpr_scope; } - IrInstruction *then_expr_result = ir_gen_node(irb, then_node, var_scope); + IrInstruction *then_expr_result = ir_gen_node_extra(irb, then_node, var_scope, lval, + &peer_parent->peers.at(0)->base); if (then_expr_result == irb->codegen->invalid_instruction) return then_expr_result; IrBasicBlock *after_then_block = irb->current_basic_block; @@ -6245,11 +6917,12 @@ static IrInstruction *ir_gen_if_optional_expr(IrBuilder *irb, Scope *scope, AstN ir_set_cursor_at_end_and_append_block(irb, else_block); IrInstruction *else_expr_result; if (else_node) { - else_expr_result = ir_gen_node(irb, else_node, subexpr_scope); + else_expr_result = ir_gen_node_extra(irb, else_node, subexpr_scope, lval, &peer_parent->peers.at(1)->base); if (else_expr_result == irb->codegen->invalid_instruction) return else_expr_result; } else { else_expr_result = ir_build_const_void(irb, scope, node); + ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base); } IrBasicBlock *after_else_block = irb->current_basic_block; if (!instr_is_unreachable(else_expr_result)) @@ -6263,10 +6936,13 @@ static IrInstruction *ir_gen_if_optional_expr(IrBuilder *irb, Scope *scope, AstN incoming_blocks[0] = after_then_block; incoming_blocks[1] = after_else_block; - return ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + IrInstruction *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent); + return ir_expr_wrap(irb, scope, phi, result_loc); } -static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeIfErrorExpr); AstNode *target_node = node->data.if_err_expr.target_node; @@ -6277,12 +6953,12 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode * Buf *var_symbol = node->data.if_err_expr.var_symbol; Buf *err_symbol = node->data.if_err_expr.err_symbol; - IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr); + IrInstruction *err_val_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr, nullptr); if (err_val_ptr == irb->codegen->invalid_instruction) return err_val_ptr; IrInstruction *err_val = ir_build_load_ptr(irb, scope, node, err_val_ptr); - IrInstruction *is_err = ir_build_test_err(irb, scope, node, err_val); + IrInstruction *is_err = ir_build_test_err_src(irb, scope, node, err_val_ptr, true); IrBasicBlock *ok_block = ir_create_basic_block(irb, scope, "TryOk"); IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "TryElse"); @@ -6290,27 +6966,31 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode * bool force_comptime = ir_should_inline(irb->exec, scope); IrInstruction *is_comptime = force_comptime ? 
ir_build_const_bool(irb, scope, node, true) : ir_build_test_comptime(irb, scope, node, is_err); - ir_build_cond_br(irb, scope, node, is_err, else_block, ok_block, is_comptime); + IrInstruction *cond_br_inst = ir_build_cond_br(irb, scope, node, is_err, else_block, ok_block, is_comptime); + + ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, else_block, endif_block, + result_loc, is_comptime); ir_set_cursor_at_end_and_append_block(irb, ok_block); Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime); Scope *var_scope; if (var_symbol) { - IrInstruction *var_type = nullptr; bool is_shadowable = false; IrInstruction *var_is_comptime = force_comptime ? ir_build_const_bool(irb, subexpr_scope, node, true) : ir_build_test_comptime(irb, subexpr_scope, node, err_val); ZigVar *var = ir_create_var(irb, node, subexpr_scope, var_symbol, var_is_const, var_is_const, is_shadowable, var_is_comptime); - IrInstruction *var_ptr_value = ir_build_unwrap_err_payload(irb, subexpr_scope, node, err_val_ptr, false); - IrInstruction *var_value = var_is_ptr ? var_ptr_value : ir_build_load_ptr(irb, subexpr_scope, node, var_ptr_value); - ir_build_var_decl_src(irb, subexpr_scope, node, var, var_type, nullptr, var_value); + IrInstruction *payload_ptr = ir_build_unwrap_err_payload(irb, subexpr_scope, node, err_val_ptr, false, false); + IrInstruction *var_ptr = var_is_ptr ? + ir_build_ref(irb, subexpr_scope, node, payload_ptr, true, false) : payload_ptr; + ir_build_var_decl_src(irb, subexpr_scope, node, var, nullptr, var_ptr); var_scope = var->child_scope; } else { var_scope = subexpr_scope; } - IrInstruction *then_expr_result = ir_gen_node(irb, then_node, var_scope); + IrInstruction *then_expr_result = ir_gen_node_extra(irb, then_node, var_scope, lval, + &peer_parent->peers.at(0)->base); if (then_expr_result == irb->codegen->invalid_instruction) return then_expr_result; IrBasicBlock *after_then_block = irb->current_basic_block; @@ -6323,23 +7003,23 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode * if (else_node) { Scope *err_var_scope; if (err_symbol) { - IrInstruction *var_type = nullptr; bool is_shadowable = false; bool is_const = true; ZigVar *var = ir_create_var(irb, node, subexpr_scope, err_symbol, is_const, is_const, is_shadowable, is_comptime); - IrInstruction *var_value = ir_build_unwrap_err_code(irb, subexpr_scope, node, err_val_ptr); - ir_build_var_decl_src(irb, subexpr_scope, node, var, var_type, nullptr, var_value); + IrInstruction *err_ptr = ir_build_unwrap_err_code(irb, subexpr_scope, node, err_val_ptr); + ir_build_var_decl_src(irb, subexpr_scope, node, var, nullptr, err_ptr); err_var_scope = var->child_scope; } else { err_var_scope = subexpr_scope; } - else_expr_result = ir_gen_node(irb, else_node, err_var_scope); + else_expr_result = ir_gen_node_extra(irb, else_node, err_var_scope, lval, &peer_parent->peers.at(1)->base); if (else_expr_result == irb->codegen->invalid_instruction) return else_expr_result; } else { else_expr_result = ir_build_const_void(irb, scope, node); + ir_build_end_expr(irb, scope, node, else_expr_result, &peer_parent->peers.at(1)->base); } IrBasicBlock *after_else_block = irb->current_basic_block; if (!instr_is_unreachable(else_expr_result)) @@ -6353,14 +7033,15 @@ static IrInstruction *ir_gen_if_err_expr(IrBuilder *irb, Scope *scope, AstNode * incoming_blocks[0] = after_then_block; incoming_blocks[1] = after_else_block; - return ir_build_phi(irb, scope, node, 2, incoming_blocks, 
incoming_values); + IrInstruction *phi = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, peer_parent); + return ir_expr_wrap(irb, scope, phi, result_loc); } static bool ir_gen_switch_prong_expr(IrBuilder *irb, Scope *scope, AstNode *switch_node, AstNode *prong_node, IrBasicBlock *end_block, IrInstruction *is_comptime, IrInstruction *var_is_comptime, IrInstruction *target_value_ptr, IrInstruction **prong_values, size_t prong_values_len, ZigList *incoming_blocks, ZigList *incoming_values, - IrInstructionSwitchElseVar **out_switch_else_var) + IrInstructionSwitchElseVar **out_switch_else_var, LVal lval, ResultLoc *result_loc) { assert(switch_node->type == NodeTypeSwitchExpr); assert(prong_node->type == NodeTypeSwitchProng); @@ -6378,28 +7059,27 @@ static bool ir_gen_switch_prong_expr(IrBuilder *irb, Scope *scope, AstNode *swit ZigVar *var = ir_create_var(irb, var_symbol_node, scope, var_name, is_const, is_const, is_shadowable, var_is_comptime); child_scope = var->child_scope; - IrInstruction *var_value; + IrInstruction *var_ptr; if (out_switch_else_var != nullptr) { IrInstructionSwitchElseVar *switch_else_var = ir_build_switch_else_var(irb, scope, var_symbol_node, target_value_ptr); *out_switch_else_var = switch_else_var; - IrInstruction *var_ptr_value = &switch_else_var->base; - var_value = var_is_ptr ? var_ptr_value : ir_build_load_ptr(irb, scope, var_symbol_node, var_ptr_value); + IrInstruction *payload_ptr = &switch_else_var->base; + var_ptr = var_is_ptr ? ir_build_ref(irb, scope, var_symbol_node, payload_ptr, true, false) : payload_ptr; } else if (prong_values != nullptr) { - IrInstruction *var_ptr_value = ir_build_switch_var(irb, scope, var_symbol_node, target_value_ptr, + IrInstruction *payload_ptr = ir_build_switch_var(irb, scope, var_symbol_node, target_value_ptr, prong_values, prong_values_len); - var_value = var_is_ptr ? var_ptr_value : ir_build_load_ptr(irb, scope, var_symbol_node, var_ptr_value); + var_ptr = var_is_ptr ? ir_build_ref(irb, scope, var_symbol_node, payload_ptr, true, false) : payload_ptr; } else { - var_value = var_is_ptr ? target_value_ptr : ir_build_load_ptr(irb, scope, var_symbol_node, -target_value_ptr); + var_ptr = var_is_ptr ? 
+ ir_build_ref(irb, scope, var_symbol_node, target_value_ptr, true, false) : target_value_ptr; } - IrInstruction *var_type = nullptr; // infer the type - ir_build_var_decl_src(irb, scope, var_symbol_node, var, var_type, nullptr, var_value); + ir_build_var_decl_src(irb, scope, var_symbol_node, var, nullptr, var_ptr); } else { child_scope = scope; } - IrInstruction *expr_result = ir_gen_node(irb, expr_node, child_scope); + IrInstruction *expr_result = ir_gen_node_extra(irb, expr_node, child_scope, lval, result_loc); if (expr_result == irb->codegen->invalid_instruction) return false; if (!instr_is_unreachable(expr_result)) @@ -6409,11 +7089,13 @@ target_value_ptr); return true; } -static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ assert(node->type == NodeTypeSwitchExpr); AstNode *target_node = node->data.switch_expr.expr; - IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr); + IrInstruction *target_value_ptr = ir_gen_node_extra(irb, target_node, scope, LValPtr, nullptr); if (target_value_ptr == irb->codegen->invalid_instruction) return target_value_ptr; IrInstruction *target_value = ir_build_switch_target(irb, scope, node, target_value_ptr); @@ -6440,6 +7122,14 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * IrInstructionSwitchElseVar *switch_else_var = nullptr; + ResultLocPeerParent *peer_parent = allocate(1); + peer_parent->base.id = ResultLocIdPeerParent; + peer_parent->end_bb = end_block; + peer_parent->is_comptime = is_comptime; + peer_parent->parent = result_loc; + + ir_build_reset_result(irb, scope, node, &peer_parent->base); + // First do the else and the ranges Scope *subexpr_scope = create_runtime_scope(irb->codegen, node, scope, is_comptime); Scope *comptime_scope = create_comptime_scope(irb->codegen, node, scope); @@ -6448,6 +7138,7 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * AstNode *prong_node = node->data.switch_expr.prongs.at(prong_i); size_t prong_item_count = prong_node->data.switch_prong.items.length; if (prong_item_count == 0) { + ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent); if (else_prong) { ErrorMsg *msg = add_node_error(irb->codegen, prong_node, buf_sprintf("multiple else prongs in switch expression")); @@ -6458,15 +7149,21 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * else_prong = prong_node; IrBasicBlock *prev_block = irb->current_basic_block; + if (peer_parent->peers.length > 0) { + peer_parent->peers.last()->next_bb = else_block; + } + peer_parent->peers.append(this_peer_result_loc); ir_set_cursor_at_end_and_append_block(irb, else_block); if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block, is_comptime, var_is_comptime, target_value_ptr, nullptr, 0, &incoming_blocks, &incoming_values, - &switch_else_var)) + &switch_else_var, LValNone, &this_peer_result_loc->base)) { return irb->codegen->invalid_instruction; } ir_set_cursor_at_end(irb, prev_block); } else if (prong_node->data.switch_prong.any_items_are_range) { + ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent); + IrInstruction *ok_bit = nullptr; AstNode *last_item_node = nullptr; for (size_t item_i = 0; item_i < prong_item_count; item_i += 1) { @@ -6523,13 +7220,20 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, 
AstNode * assert(ok_bit); assert(last_item_node); - ir_mark_gen(ir_build_cond_br(irb, scope, last_item_node, ok_bit, range_block_yes, - range_block_no, is_comptime)); + IrInstruction *br_inst = ir_mark_gen(ir_build_cond_br(irb, scope, last_item_node, ok_bit, + range_block_yes, range_block_no, is_comptime)); + if (peer_parent->base.source_instruction == nullptr) { + peer_parent->base.source_instruction = br_inst; + } + if (peer_parent->peers.length > 0) { + peer_parent->peers.last()->next_bb = range_block_yes; + } + peer_parent->peers.append(this_peer_result_loc); ir_set_cursor_at_end_and_append_block(irb, range_block_yes); if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block, is_comptime, var_is_comptime, target_value_ptr, nullptr, 0, - &incoming_blocks, &incoming_values, nullptr)) + &incoming_blocks, &incoming_values, nullptr, LValNone, &this_peer_result_loc->base)) { return irb->codegen->invalid_instruction; } @@ -6547,6 +7251,8 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * if (prong_node->data.switch_prong.any_items_are_range) continue; + ResultLocPeer *this_peer_result_loc = create_peer_result(peer_parent); + IrBasicBlock *prong_block = ir_create_basic_block(irb, scope, "SwitchProng"); IrInstruction **items = allocate(prong_item_count); @@ -6570,10 +7276,14 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * } IrBasicBlock *prev_block = irb->current_basic_block; + if (peer_parent->peers.length > 0) { + peer_parent->peers.last()->next_bb = prong_block; + } + peer_parent->peers.append(this_peer_result_loc); ir_set_cursor_at_end_and_append_block(irb, prong_block); if (!ir_gen_switch_prong_expr(irb, subexpr_scope, node, prong_node, end_block, is_comptime, var_is_comptime, target_value_ptr, items, prong_item_count, - &incoming_blocks, &incoming_values, nullptr)) + &incoming_blocks, &incoming_values, nullptr, LValNone, &this_peer_result_loc->base)) { return irb->codegen->invalid_instruction; } @@ -6582,38 +7292,57 @@ static IrInstruction *ir_gen_switch_expr(IrBuilder *irb, Scope *scope, AstNode * } - IrInstruction *switch_prongs_void = ir_build_check_switch_prongs(irb, scope, node, target_value, check_ranges.items, check_ranges.length, - else_prong != nullptr); + IrInstruction *switch_prongs_void = ir_build_check_switch_prongs(irb, scope, node, target_value, + check_ranges.items, check_ranges.length, else_prong != nullptr); + IrInstruction *br_instruction; if (cases.length == 0) { - ir_build_br(irb, scope, node, else_block, is_comptime); + br_instruction = ir_build_br(irb, scope, node, else_block, is_comptime); } else { IrInstructionSwitchBr *switch_br = ir_build_switch_br(irb, scope, node, target_value, else_block, cases.length, cases.items, is_comptime, switch_prongs_void); if (switch_else_var != nullptr) { switch_else_var->switch_br = switch_br; } + br_instruction = &switch_br->base; + } + if (peer_parent->base.source_instruction == nullptr) { + peer_parent->base.source_instruction = br_instruction; + } + for (size_t i = 0; i < peer_parent->peers.length; i += 1) { + peer_parent->peers.at(i)->base.source_instruction = peer_parent->base.source_instruction; } if (!else_prong) { + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = else_block; + } ir_set_cursor_at_end_and_append_block(irb, else_block); ir_build_unreachable(irb, scope, node); + } else { + if (peer_parent->peers.length != 0) { + peer_parent->peers.last()->next_bb = end_block; + } } 
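To relate the per-prong peer bookkeeping above back to the language: each prong of a switch expression, including ranges and `else`, supplies one of the peer values of a single result. A minimal hand-written Zig sketch (the `describe` function is invented for illustration):

```zig
const assert = @import("std").debug.assert;

fn describe(x: u8) []const u8 {
    // Every prong is a peer result of the switch expression.
    return switch (x) {
        0 => "zero",
        1...9 => "small",
        else => "big",
    };
}

test "switch prongs as peer results" {
    assert(describe(0).len == 4);
    assert(describe(5).len == 5);
    assert(describe(200).len == 3);
}
```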
ir_set_cursor_at_end_and_append_block(irb, end_block); assert(incoming_blocks.length == incoming_values.length); + IrInstruction *result_instruction; if (incoming_blocks.length == 0) { - return ir_build_const_void(irb, scope, node); + result_instruction = ir_build_const_void(irb, scope, node); } else { - return ir_build_phi(irb, scope, node, incoming_blocks.length, incoming_blocks.items, incoming_values.items); + result_instruction = ir_build_phi(irb, scope, node, incoming_blocks.length, + incoming_blocks.items, incoming_values.items, peer_parent); } + return ir_lval_wrap(irb, scope, result_instruction, lval, result_loc); } static IrInstruction *ir_gen_comptime(IrBuilder *irb, Scope *parent_scope, AstNode *node, LVal lval) { assert(node->type == NodeTypeCompTime); Scope *child_scope = create_comptime_scope(irb->codegen, node, parent_scope); - return ir_gen_node_extra(irb, node->data.comptime_expr.expr, child_scope, lval); + // purposefully pass null for result_loc and let EndExpr handle it + return ir_gen_node_extra(irb, node->data.comptime_expr.expr, child_scope, lval, nullptr); } static IrInstruction *ir_gen_return_from_block(IrBuilder *irb, Scope *break_scope, AstNode *node, ScopeBlock *block_scope) { @@ -6626,7 +7355,11 @@ static IrInstruction *ir_gen_return_from_block(IrBuilder *irb, Scope *break_scop IrInstruction *result_value; if (node->data.break_expr.expr) { - result_value = ir_gen_node(irb, node->data.break_expr.expr, break_scope); + ResultLocPeer *peer_result = create_peer_result(block_scope->peer_parent); + block_scope->peer_parent->peers.append(peer_result); + + result_value = ir_gen_node_extra(irb, node->data.break_expr.expr, break_scope, block_scope->lval, + &peer_result->base); if (result_value == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; } else { @@ -6696,7 +7429,11 @@ static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode * IrInstruction *result_value; if (node->data.break_expr.expr) { - result_value = ir_gen_node(irb, node->data.break_expr.expr, break_scope); + ResultLocPeer *peer_result = create_peer_result(loop_scope->peer_parent); + loop_scope->peer_parent->peers.append(peer_result); + + result_value = ir_gen_node_extra(irb, node->data.break_expr.expr, break_scope, + loop_scope->lval, &peer_result->base); if (result_value == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; } else { @@ -6784,7 +7521,7 @@ static IrInstruction *ir_gen_defer(IrBuilder *irb, Scope *parent_scope, AstNode return ir_build_const_void(irb, parent_scope, node); } -static IrInstruction *ir_gen_slice(IrBuilder *irb, Scope *scope, AstNode *node) { +static IrInstruction *ir_gen_slice(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval, ResultLoc *result_loc) { assert(node->type == NodeTypeSliceExpr); AstNodeSliceExpr *slice_expr = &node->data.slice_expr; @@ -6792,7 +7529,7 @@ static IrInstruction *ir_gen_slice(IrBuilder *irb, Scope *scope, AstNode *node) AstNode *start_node = slice_expr->start; AstNode *end_node = slice_expr->end; - IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LValPtr); + IrInstruction *ptr_value = ir_gen_node_extra(irb, array_node, scope, LValPtr, nullptr); if (ptr_value == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; @@ -6809,11 +7546,14 @@ static IrInstruction *ir_gen_slice(IrBuilder *irb, Scope *scope, AstNode *node) end_value = nullptr; } - return ir_build_slice(irb, scope, node, ptr_value, start_value, end_value, true); + 
IrInstruction *slice = ir_build_slice_src(irb, scope, node, ptr_value, start_value, end_value, true, result_loc); + return ir_lval_wrap(irb, scope, slice, lval, result_loc); } -static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode *node) { - assert(node->type == NodeTypeUnwrapErrorExpr); +static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode *node, LVal lval, + ResultLoc *result_loc) +{ + assert(node->type == NodeTypeCatchExpr); AstNode *op1_node = node->data.unwrap_err_expr.op1; AstNode *op2_node = node->data.unwrap_err_expr.op2; @@ -6826,16 +7566,15 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode add_node_error(irb->codegen, var_node, buf_sprintf("unused variable: '%s'", buf_ptr(var_name))); return irb->codegen->invalid_instruction; } - return ir_gen_catch_unreachable(irb, parent_scope, node, op1_node, LValNone); + return ir_gen_catch_unreachable(irb, parent_scope, node, op1_node, lval, result_loc); } - IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr); + IrInstruction *err_union_ptr = ir_gen_node_extra(irb, op1_node, parent_scope, LValPtr, nullptr); if (err_union_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - IrInstruction *err_union_val = ir_build_load_ptr(irb, parent_scope, node, err_union_ptr); - IrInstruction *is_err = ir_build_test_err(irb, parent_scope, node, err_union_val); + IrInstruction *is_err = ir_build_test_err_src(irb, parent_scope, node, err_union_ptr, true); IrInstruction *is_comptime; if (ir_should_inline(irb->exec, parent_scope)) { @@ -6847,7 +7586,10 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrOk"); IrBasicBlock *err_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrError"); IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "UnwrapErrEnd"); - ir_build_cond_br(irb, parent_scope, node, is_err, err_block, ok_block, is_comptime); + IrInstruction *cond_br_inst = ir_build_cond_br(irb, parent_scope, node, is_err, err_block, ok_block, is_comptime); + + ResultLocPeerParent *peer_parent = ir_build_binary_result_peers(irb, cond_br_inst, ok_block, end_block, result_loc, + is_comptime); ir_set_cursor_at_end_and_append_block(irb, err_block); Scope *err_scope; @@ -6859,12 +7601,12 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode ZigVar *var = ir_create_var(irb, node, parent_scope, var_name, is_const, is_const, is_shadowable, is_comptime); err_scope = var->child_scope; - IrInstruction *err_val = ir_build_unwrap_err_code(irb, err_scope, node, err_union_ptr); - ir_build_var_decl_src(irb, err_scope, var_node, var, nullptr, nullptr, err_val); + IrInstruction *err_ptr = ir_build_unwrap_err_code(irb, err_scope, node, err_union_ptr); + ir_build_var_decl_src(irb, err_scope, var_node, var, nullptr, err_ptr); } else { err_scope = parent_scope; } - IrInstruction *err_result = ir_gen_node(irb, op2_node, err_scope); + IrInstruction *err_result = ir_gen_node_extra(irb, op2_node, err_scope, LValNone, &peer_parent->peers.at(0)->base); if (err_result == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; IrBasicBlock *after_err_block = irb->current_basic_block; @@ -6872,8 +7614,9 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode ir_mark_gen(ir_build_br(irb, err_scope, node, end_block, is_comptime)); 
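The two blocks built here ("UnwrapErrError" and "UnwrapErrOk") correspond to the two peer values of a `catch` expression: the handler's value on error and the unwrapped payload on success. A short illustrative Zig sketch, with a made-up `parsePositive` helper:

```zig
const assert = @import("std").debug.assert;

fn parsePositive(ok: bool) error{Invalid}!u32 {
    return if (ok) 42 else error.Invalid;
}

test "catch joins the payload and the handler value" {
    // Success: the unwrapped payload is the result of the expression.
    const a = parsePositive(true) catch |err| blk: {
        assert(err == error.Invalid);
        break :blk 0;
    };
    assert(a == 42);

    // Error: the handler's value is the result of the expression.
    const b = parsePositive(false) catch 0;
    assert(b == 0);
}
```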
ir_set_cursor_at_end_and_append_block(irb, ok_block); - IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, parent_scope, node, err_union_ptr, false); + IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, parent_scope, node, err_union_ptr, false, false); IrInstruction *unwrapped_payload = ir_build_load_ptr(irb, parent_scope, node, unwrapped_ptr); + ir_build_end_expr(irb, parent_scope, node, unwrapped_payload, &peer_parent->peers.at(1)->base); IrBasicBlock *after_ok_block = irb->current_basic_block; ir_build_br(irb, parent_scope, node, end_block, is_comptime); @@ -6884,7 +7627,8 @@ static IrInstruction *ir_gen_catch(IrBuilder *irb, Scope *parent_scope, AstNode IrBasicBlock **incoming_blocks = allocate(2); incoming_blocks[0] = after_err_block; incoming_blocks[1] = after_ok_block; - return ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values); + IrInstruction *phi = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, peer_parent); + return ir_lval_wrap(irb, parent_scope, phi, lval, result_loc); } static bool render_instance_name_recursive(CodeGen *codegen, Buf *name, Scope *outer_scope, Scope *inner_scope) { @@ -7160,7 +7904,7 @@ static IrInstruction *ir_gen_cancel_target(IrBuilder *irb, Scope *scope, AstNode IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst); Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - atomic_state_field_name); + atomic_state_field_name, false); // set the is_canceled bit IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, @@ -7239,7 +7983,7 @@ static IrInstruction *ir_gen_resume_target(IrBuilder *irb, Scope *scope, AstNode IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, casted_target_inst); Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - atomic_state_field_name); + atomic_state_field_name, false); // clear the is_suspended bit IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, @@ -7306,12 +8050,12 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, scope, node, target_inst); Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); - IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name); + IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false); if (irb->codegen->have_err_ret_tracing) { IrInstruction *err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull); Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME); - IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name); + IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false); ir_build_store_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr); } @@ -7333,11 +8077,11 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); 
IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - atomic_state_field_name); + atomic_state_field_name, false); IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise); IrInstruction *const_bool_false = ir_build_const_bool(irb, scope, node, false); - IrInstruction *undefined_value = ir_build_const_undefined(irb, scope, node); + IrInstruction *undef = ir_build_const_undefined(irb, scope, node); IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111 @@ -7351,7 +8095,8 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n IrInstruction *target_promise_type = ir_build_typeof(irb, scope, node, target_inst); IrInstruction *promise_result_type = ir_build_promise_result_type(irb, scope, node, target_promise_type); ir_build_await_bookkeeping(irb, scope, node, promise_result_type); - ir_build_var_decl_src(irb, scope, node, result_var, promise_result_type, nullptr, undefined_value); + IrInstruction *undef_promise_result = ir_build_implicit_cast(irb, scope, node, promise_result_type, undef, nullptr); + build_decl_var_and_init(irb, scope, node, result_var, undef_promise_result, "result", const_bool_false); IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, scope, node, result_var); ir_build_store_ptr(irb, scope, node, result_ptr_field_ptr, my_result_var_ptr); IrInstruction *save_token = ir_build_coro_save(irb, scope, node, irb->exec->coro_handle); @@ -7386,12 +8131,12 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *scope, AstNode *n ir_set_cursor_at_end_and_append_block(irb, no_suspend_block); if (irb->codegen->have_err_ret_tracing) { Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME); - IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name); + IrInstruction *src_err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false); IrInstruction *dest_err_ret_trace_ptr = ir_build_error_return_trace(irb, scope, node, IrInstructionErrorReturnTrace::NonNull); ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, src_err_ret_trace_ptr, dest_err_ret_trace_ptr); } Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); - IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); + IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false); // If the type of the result handle_is_ptr then this does not actually perform a load. But we need it to, // because we're about to destroy the memory. So we store it into our result variable. 
IrInstruction *no_suspend_result = ir_build_load_ptr(irb, scope, node, promise_result_ptr); @@ -7567,7 +8312,8 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod incoming_values[0] = const_bool_true; incoming_blocks[1] = post_cancel_awaiter_block; incoming_values[1] = const_bool_false; - IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values); + IrInstruction *destroy_ourselves = ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values, + nullptr); ir_gen_defers_for_block(irb, parent_scope, outer_scope, true); ir_mark_gen(ir_build_cond_br(irb, parent_scope, node, destroy_ourselves, irb->exec->coro_final_cleanup_block, irb->exec->coro_early_final, const_bool_false)); @@ -7576,7 +8322,7 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod } static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, - LVal lval) + LVal lval, ResultLoc *result_loc) { assert(scope); switch (node->type) { @@ -7590,37 +8336,37 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop case NodeTypeTestDecl: zig_unreachable(); case NodeTypeBlock: - return ir_lval_wrap(irb, scope, ir_gen_block(irb, scope, node), lval); + return ir_gen_block(irb, scope, node, lval, result_loc); case NodeTypeGroupedExpr: - return ir_gen_node_raw(irb, node->data.grouped_expr, scope, lval); + return ir_gen_node_raw(irb, node->data.grouped_expr, scope, lval, result_loc); case NodeTypeBinOpExpr: - return ir_lval_wrap(irb, scope, ir_gen_bin_op(irb, scope, node), lval); + return ir_gen_bin_op(irb, scope, node, lval, result_loc); case NodeTypeIntLiteral: - return ir_lval_wrap(irb, scope, ir_gen_int_lit(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_int_lit(irb, scope, node), lval, result_loc); case NodeTypeFloatLiteral: - return ir_lval_wrap(irb, scope, ir_gen_float_lit(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_float_lit(irb, scope, node), lval, result_loc); case NodeTypeCharLiteral: - return ir_lval_wrap(irb, scope, ir_gen_char_lit(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_char_lit(irb, scope, node), lval, result_loc); case NodeTypeSymbol: - return ir_gen_symbol(irb, scope, node, lval); + return ir_gen_symbol(irb, scope, node, lval, result_loc); case NodeTypeFnCallExpr: - return ir_gen_fn_call(irb, scope, node, lval); + return ir_gen_fn_call(irb, scope, node, lval, result_loc); case NodeTypeIfBoolExpr: - return ir_lval_wrap(irb, scope, ir_gen_if_bool_expr(irb, scope, node), lval); + return ir_gen_if_bool_expr(irb, scope, node, lval, result_loc); case NodeTypePrefixOpExpr: - return ir_gen_prefix_op_expr(irb, scope, node, lval); + return ir_gen_prefix_op_expr(irb, scope, node, lval, result_loc); case NodeTypeContainerInitExpr: - return ir_lval_wrap(irb, scope, ir_gen_container_init_expr(irb, scope, node), lval); + return ir_gen_container_init_expr(irb, scope, node, lval, result_loc); case NodeTypeVariableDeclaration: - return ir_lval_wrap(irb, scope, ir_gen_var_decl(irb, scope, node), lval); + return ir_gen_var_decl(irb, scope, node); case NodeTypeWhileExpr: - return ir_lval_wrap(irb, scope, ir_gen_while_expr(irb, scope, node), lval); + return ir_gen_while_expr(irb, scope, node, lval, result_loc); case NodeTypeForExpr: - return ir_lval_wrap(irb, scope, ir_gen_for_expr(irb, scope, node), lval); + return ir_gen_for_expr(irb, scope, node, lval, result_loc); case NodeTypeArrayAccessExpr: - return 
ir_gen_array_access(irb, scope, node, lval); + return ir_gen_array_access(irb, scope, node, lval, result_loc); case NodeTypeReturnExpr: - return ir_gen_return(irb, scope, node, lval); + return ir_gen_return(irb, scope, node, lval, result_loc); case NodeTypeFieldAccessExpr: { IrInstruction *ptr_instruction = ir_gen_field_access(irb, scope, node); @@ -7629,86 +8375,89 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop if (lval == LValPtr) return ptr_instruction; - return ir_build_load_ptr(irb, scope, node, ptr_instruction); + IrInstruction *load_ptr = ir_build_load_ptr(irb, scope, node, ptr_instruction); + return ir_expr_wrap(irb, scope, load_ptr, result_loc); } case NodeTypePtrDeref: { AstNode *expr_node = node->data.ptr_deref_expr.target; - IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval); + IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval, nullptr); if (value == irb->codegen->invalid_instruction) return value; // We essentially just converted any lvalue from &(x.*) to (&x).*; // this inhibits checking that x is a pointer later, so we directly // record whether the pointer check is needed - return ir_build_un_op_lval(irb, scope, node, IrUnOpDereference, value, lval); + IrInstruction *un_op = ir_build_un_op_lval(irb, scope, node, IrUnOpDereference, value, lval, result_loc); + return ir_expr_wrap(irb, scope, un_op, result_loc); } case NodeTypeUnwrapOptional: { AstNode *expr_node = node->data.unwrap_optional.expr; - IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr); + IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LValPtr, nullptr); if (maybe_ptr == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - IrInstruction *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, maybe_ptr, true); + IrInstruction *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, maybe_ptr, true, false); if (lval == LValPtr) return unwrapped_ptr; - return ir_build_load_ptr(irb, scope, node, unwrapped_ptr); + IrInstruction *load_ptr = ir_build_load_ptr(irb, scope, node, unwrapped_ptr); + return ir_expr_wrap(irb, scope, load_ptr, result_loc); } case NodeTypeBoolLiteral: - return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval, result_loc); case NodeTypeArrayType: - return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval, result_loc); case NodeTypePointerType: - return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval, result_loc); case NodeTypePromiseType: - return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval, result_loc); case NodeTypeStringLiteral: - return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_string_literal(irb, scope, node), lval, result_loc); case NodeTypeUndefinedLiteral: - return ir_lval_wrap(irb, scope, ir_gen_undefined_literal(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, ir_gen_undefined_literal(irb, scope, node), lval, result_loc); case NodeTypeAsmExpr: - return ir_lval_wrap(irb, scope, ir_gen_asm_expr(irb, scope, node), lval); + return ir_lval_wrap(irb, scope, 
ir_gen_asm_expr(irb, scope, node), lval, result_loc);
         case NodeTypeNullLiteral:
-            return ir_lval_wrap(irb, scope, ir_gen_null_literal(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_null_literal(irb, scope, node), lval, result_loc);
         case NodeTypeIfErrorExpr:
-            return ir_lval_wrap(irb, scope, ir_gen_if_err_expr(irb, scope, node), lval);
+            return ir_gen_if_err_expr(irb, scope, node, lval, result_loc);
         case NodeTypeIfOptional:
-            return ir_lval_wrap(irb, scope, ir_gen_if_optional_expr(irb, scope, node), lval);
+            return ir_gen_if_optional_expr(irb, scope, node, lval, result_loc);
         case NodeTypeSwitchExpr:
-            return ir_lval_wrap(irb, scope, ir_gen_switch_expr(irb, scope, node), lval);
+            return ir_gen_switch_expr(irb, scope, node, lval, result_loc);
         case NodeTypeCompTime:
-            return ir_gen_comptime(irb, scope, node, lval);
+            return ir_expr_wrap(irb, scope, ir_gen_comptime(irb, scope, node, lval), result_loc);
         case NodeTypeErrorType:
-            return ir_lval_wrap(irb, scope, ir_gen_error_type(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_error_type(irb, scope, node), lval, result_loc);
         case NodeTypeBreak:
-            return ir_lval_wrap(irb, scope, ir_gen_break(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_break(irb, scope, node), lval, result_loc);
         case NodeTypeContinue:
-            return ir_lval_wrap(irb, scope, ir_gen_continue(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_continue(irb, scope, node), lval, result_loc);
         case NodeTypeUnreachable:
-            return ir_lval_wrap(irb, scope, ir_build_unreachable(irb, scope, node), lval);
+            return ir_build_unreachable(irb, scope, node);
         case NodeTypeDefer:
-            return ir_lval_wrap(irb, scope, ir_gen_defer(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_defer(irb, scope, node), lval, result_loc);
         case NodeTypeSliceExpr:
-            return ir_lval_wrap(irb, scope, ir_gen_slice(irb, scope, node), lval);
-        case NodeTypeUnwrapErrorExpr:
-            return ir_lval_wrap(irb, scope, ir_gen_catch(irb, scope, node), lval);
+            return ir_gen_slice(irb, scope, node, lval, result_loc);
+        case NodeTypeCatchExpr:
+            return ir_gen_catch(irb, scope, node, lval, result_loc);
         case NodeTypeContainerDecl:
-            return ir_lval_wrap(irb, scope, ir_gen_container_decl(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_container_decl(irb, scope, node), lval, result_loc);
         case NodeTypeFnProto:
-            return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval, result_loc);
         case NodeTypeErrorSetDecl:
-            return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval, result_loc);
         case NodeTypeCancel:
-            return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval, result_loc);
         case NodeTypeResume:
-            return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval, result_loc);
         case NodeTypeAwaitExpr:
-            return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval, result_loc);
         case NodeTypeSuspend:
-            return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval, result_loc);
         case NodeTypeEnumLiteral:
-            return ir_lval_wrap(irb, scope, ir_gen_enum_literal(irb, scope, node), lval);
+            return ir_lval_wrap(irb, scope, ir_gen_enum_literal(irb, scope, node), lval, result_loc);
         case NodeTypeInferredArrayType:
             add_node_error(irb->codegen, node, buf_sprintf("inferred array size invalid here"));
@@ -7717,14 +8466,28 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop
     zig_unreachable();
 }

-static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval) {
-    IrInstruction *result = ir_gen_node_raw(irb, node, scope, lval);
+static ResultLoc *no_result_loc(void) {
+    ResultLocNone *result_loc_none = allocate<ResultLocNone>(1);
+    result_loc_none->base.id = ResultLocIdNone;
+    return &result_loc_none->base;
+}
+
+static IrInstruction *ir_gen_node_extra(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval,
+        ResultLoc *result_loc)
+{
+    if (result_loc == nullptr) {
+        // Create a result location indicating there is none - but if one gets created
+        // it will be properly distributed.
+        result_loc = no_result_loc();
+        ir_build_reset_result(irb, scope, node, result_loc);
+    }
+    IrInstruction *result = ir_gen_node_raw(irb, node, scope, lval, result_loc);
     irb->exec->invalid = irb->exec->invalid || (result == irb->codegen->invalid_instruction);
     return result;
 }

 static IrInstruction *ir_gen_node(IrBuilder *irb, AstNode *node, Scope *scope) {
-    return ir_gen_node_extra(irb, node, scope, LValNone);
+    return ir_gen_node_extra(irb, node, scope, LValNone, nullptr);
 }

 static void invalidate_exec(IrExecutable *exec) {
@@ -7775,17 +8538,19 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
         return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type;
         IrInstruction *undef = ir_build_const_undefined(irb, coro_scope, node);
+        // TODO mark this var decl as "no safety" e.g. disable initializing the undef value to 0xaa
         ZigType *coro_frame_type = get_promise_frame_type(irb->codegen, return_type);
         IrInstruction *coro_frame_type_value = ir_build_const_type(irb, coro_scope, node, coro_frame_type);
-        // TODO mark this var decl as
disable initializing the undef value to 0xaa - ir_build_var_decl_src(irb, coro_scope, node, promise_var, coro_frame_type_value, nullptr, undef); + IrInstruction *undef_coro_frame = ir_build_implicit_cast(irb, coro_scope, node, coro_frame_type_value, undef, nullptr); + build_decl_var_and_init(irb, coro_scope, node, promise_var, undef_coro_frame, "promise", const_bool_false); coro_promise_ptr = ir_build_var_ptr(irb, coro_scope, node, promise_var); ZigVar *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false); IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node); IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node, get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); - ir_build_var_decl_src(irb, coro_scope, node, await_handle_var, await_handle_type_val, nullptr, null_value); + IrInstruction *null_await_handle = ir_build_implicit_cast(irb, coro_scope, node, await_handle_type_val, null_value, nullptr); + build_decl_var_and_init(irb, coro_scope, node, await_handle_var, null_await_handle, "await_handle", const_bool_false); irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var); u8_ptr_type = ir_build_const_type(irb, coro_scope, node, @@ -7795,13 +8560,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec coro_id = ir_build_coro_id(irb, coro_scope, node, promise_as_u8_ptr); coro_size_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false); IrInstruction *coro_size = ir_build_coro_size(irb, coro_scope, node); - ir_build_var_decl_src(irb, coro_scope, node, coro_size_var, nullptr, nullptr, coro_size); + build_decl_var_and_init(irb, coro_scope, node, coro_size_var, coro_size, "coro_size", const_bool_false); IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, coro_scope, node, ImplicitAllocatorIdArg); irb->exec->coro_allocator_var = ir_create_var(irb, node, coro_scope, nullptr, true, true, true, const_bool_false); - ir_build_var_decl_src(irb, coro_scope, node, irb->exec->coro_allocator_var, nullptr, nullptr, implicit_allocator_ptr); + build_decl_var_and_init(irb, coro_scope, node, irb->exec->coro_allocator_var, implicit_allocator_ptr, + "allocator", const_bool_false); Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME); - IrInstruction *realloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, realloc_field_name); + IrInstruction *realloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, realloc_field_name, false); IrInstruction *realloc_fn = ir_build_load_ptr(irb, coro_scope, node, realloc_fn_ptr); IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, coro_scope, node, realloc_fn, coro_size); IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, coro_scope, node, maybe_coro_mem_ptr); @@ -7821,32 +8587,32 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); irb->exec->atomic_state_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - atomic_state_field_name); + atomic_state_field_name, false); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); ir_build_store_ptr(irb, scope, node, irb->exec->atomic_state_field_ptr, zero); Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); - irb->exec->coro_result_field_ptr = 
ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); + irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name, false); result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); - irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name); + irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_ptr_field_name, false); ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, irb->exec->coro_result_field_ptr); if (irb->codegen->have_err_ret_tracing) { // initialize the error return trace Buf *return_addresses_field_name = buf_create_from_str(RETURN_ADDRESSES_FIELD_NAME); - IrInstruction *return_addresses_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, return_addresses_field_name); + IrInstruction *return_addresses_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, return_addresses_field_name, false); Buf *err_ret_trace_field_name = buf_create_from_str(ERR_RET_TRACE_FIELD_NAME); - err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name); + err_ret_trace_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_field_name, false); ir_build_mark_err_ret_trace_ptr(irb, scope, node, err_ret_trace_ptr); // coordinate with builtin.zig Buf *index_name = buf_create_from_str("index"); - IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name); + IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name, false); ir_build_store_ptr(irb, scope, node, index_ptr, zero); Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses"); - IrInstruction *addrs_slice_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, instruction_addresses_name); + IrInstruction *addrs_slice_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, instruction_addresses_name, false); - IrInstruction *slice_value = ir_build_slice(irb, scope, node, return_addresses_ptr, zero, nullptr, false); + IrInstruction *slice_value = ir_build_slice_src(irb, scope, node, return_addresses_ptr, zero, nullptr, false, no_result_loc()); ir_build_store_ptr(irb, scope, node, addrs_slice_ptr, slice_value); } @@ -7857,7 +8623,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup"); } - IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone); + IrInstruction *result = ir_gen_node_extra(irb, node, scope, LValNone, nullptr); assert(result); if (irb->exec->invalid) return false; @@ -7905,7 +8671,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec } if (irb->codegen->have_err_ret_tracing) { Buf *err_ret_trace_ptr_field_name = buf_create_from_str(ERR_RET_TRACE_PTR_FIELD_NAME); - IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name); + IrInstruction *err_ret_trace_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr_field_name, false); IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr); ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr); } @@ -7913,7 +8679,7 @@ bool ir_gen(CodeGen 
*codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec // a register or local variable which does not get spilled into the frame, // otherwise llvm tries to access memory inside the destroyed frame. IrInstruction *unwrapped_await_handle_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, - irb->exec->await_handle_var_ptr, false); + irb->exec->await_handle_var_ptr, false, false); IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); ir_build_br(irb, scope, node, check_free_block, const_bool_false); @@ -7927,7 +8693,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec incoming_values[0] = const_bool_false; incoming_blocks[1] = irb->exec->coro_normal_final; incoming_values[1] = const_bool_true; - IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values, nullptr); IrBasicBlock **merge_incoming_blocks = allocate(2); IrInstruction **merge_incoming_values = allocate(2); @@ -7935,12 +8701,12 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node); merge_incoming_blocks[1] = irb->exec->coro_normal_final; merge_incoming_values[1] = await_handle_in_block; - IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values); + IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values, nullptr); Buf *shrink_field_name = buf_create_from_str(ASYNC_SHRINK_FIELD_NAME); IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdLocalVar); - IrInstruction *shrink_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, shrink_field_name); + IrInstruction *shrink_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, shrink_field_name, false); IrInstruction *shrink_fn = ir_build_load_ptr(irb, scope, node, shrink_fn_ptr); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle); @@ -7952,7 +8718,8 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false); IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var); IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr); - IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false); + IrInstruction *mem_slice = ir_build_slice_src(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false, + no_result_loc()); size_t arg_count = 5; IrInstruction **args = allocate(arg_count); args[0] = implicit_allocator_ptr; // self @@ -7966,7 +8733,8 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec // non-allocating. Basically coroutines are not supported right now until they are reworked. 
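Aside on the pattern used here: the surrounding hunks thread a ResultLoc through instructions that produce aggregate values (ir_build_slice_src just above and ir_build_call_src just below both now take one), falling back to no_result_loc() when pass-1 IR generation has no destination to hand over. The standalone C++ sketch below is only an illustration of that idea under invented names (ResultSlot, make_temporary_slot, produce_slice); it is not the actual ir.cpp API.

    #include <cstdio>
    #include <memory>
    #include <vector>

    // A destination slot that a value-producing operation writes into,
    // standing in for the patch's ResultLoc / result pointer.
    struct ResultSlot {
        std::vector<int> storage;
        bool is_temporary = false; // true when the producer had to invent the slot
    };

    // Rough analogue of no_result_loc(): the caller has nowhere to put the value,
    // so a temporary slot is created and handed to the producer.
    static ResultSlot *make_temporary_slot(std::vector<std::unique_ptr<ResultSlot>> &arena) {
        arena.push_back(std::make_unique<ResultSlot>());
        arena.back()->is_temporary = true;
        return arena.back().get();
    }

    // The producer writes directly into the slot instead of returning the
    // aggregate by value, which is the copy this mechanism is meant to avoid.
    static void produce_slice(ResultSlot *dest, int start, int len) {
        for (int i = 0; i < len; i += 1)
            dest->storage.push_back(start + i);
    }

    int main() {
        std::vector<std::unique_ptr<ResultSlot>> arena;

        ResultSlot named_dest;                          // caller supplies a destination
        produce_slice(&named_dest, 10, 3);

        ResultSlot *temp = make_temporary_slot(arena);  // caller has none: fall back
        produce_slice(temp, 0, 3);

        std::printf("named: %zu elems; temporary(%d): %zu elems\n",
                named_dest.storage.size(), (int)temp->is_temporary, temp->storage.size());
        return 0;
    }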
args[3] = ir_build_const_usize(irb, scope, node, 1); // new_size args[4] = ir_build_const_usize(irb, scope, node, 1); // new_align - ir_build_call(irb, scope, node, nullptr, shrink_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr); + ir_build_call_src(irb, scope, node, nullptr, shrink_fn, arg_count, args, false, FnInlineAuto, false, nullptr, + nullptr, no_result_loc()); IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false); @@ -8073,6 +8841,15 @@ static ConstExprValue *ir_exec_const_result(CodeGen *codegen, IrExecutable *exec } return &value->value; } else if (ir_has_side_effects(instruction)) { + if (instr_is_comptime(instruction)) { + switch (instruction->id) { + case IrInstructionIdUnwrapErrPayload: + case IrInstructionIdUnionFieldPtr: + continue; + default: + break; + } + } exec_add_error_node(codegen, exec, instruction->source_node, buf_sprintf("unable to evaluate constant expression")); return &codegen->invalid_instruction->value; @@ -9050,15 +9827,6 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc return false; } -static bool is_slice(ZigType *type) { - return type->id == ZigTypeIdStruct && type->data.structure.is_slice; -} - -static bool slice_is_const(ZigType *type) { - assert(is_slice(type)); - return type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const; -} - static bool is_tagged_union(ZigType *type) { if (type->id != ZigTypeIdUnion) return false; @@ -9429,9 +10197,21 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT { Error err; assert(instruction_count >= 1); - IrInstruction *prev_inst = instructions[0]; - if (type_is_invalid(prev_inst->value.type)) { - return ira->codegen->builtin_types.entry_invalid; + IrInstruction *prev_inst; + size_t i = 0; + for (;;) { + prev_inst = instructions[i]; + if (type_is_invalid(prev_inst->value.type)) { + return ira->codegen->builtin_types.entry_invalid; + } + if (prev_inst->value.type->id == ZigTypeIdUnreachable) { + i += 1; + if (i == instruction_count) { + return prev_inst->value.type; + } + continue; + } + break; } ErrorTableEntry **errors = nullptr; size_t errors_count = 0; @@ -9456,7 +10236,7 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT bool any_are_null = (prev_inst->value.type->id == ZigTypeIdNull); bool convert_to_const_slice = false; - for (size_t i = 1; i < instruction_count; i += 1) { + for (; i < instruction_count; i += 1) { IrInstruction *cur_inst = instructions[i]; ZigType *cur_type = cur_inst->value.type; ZigType *prev_type = prev_inst->value.type; @@ -9475,7 +10255,7 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT } if (prev_type->id == ZigTypeIdErrorSet) { - assert(err_set_type != nullptr); + ir_assert(err_set_type != nullptr, prev_inst); if (cur_type->id == ZigTypeIdErrorSet) { if (type_is_global_error_set(err_set_type)) { continue; @@ -9735,6 +10515,7 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT if (prev_type->id == ZigTypeIdNull) { prev_inst = cur_inst; + any_are_null = true; continue; } @@ -10049,6 +10830,8 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT } else if (prev_inst->value.type->id == ZigTypeIdOptional) { return prev_inst->value.type; } else { + if ((err = type_resolve(ira->codegen, prev_inst->value.type, 
ResolveStatusSizeKnown)))
+                return ira->codegen->builtin_types.entry_invalid;
             return get_optional_type(ira->codegen, prev_inst->value.type);
         }
     } else {
@@ -10056,24 +10839,18 @@ static ZigType *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, ZigT
     }
 }

-static void ir_add_alloca(IrAnalyze *ira, IrInstruction *instruction, ZigType *type_entry) {
-    if (type_has_bits(type_entry) && handle_is_ptr(type_entry)) {
-        ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
-        if (fn_entry != nullptr) {
-            fn_entry->alloca_list.append(instruction);
-        }
-    }
-}
-
 static void copy_const_val(ConstExprValue *dest, ConstExprValue *src, bool same_global_refs) {
     ConstGlobalRefs *global_refs = dest->global_refs;
-    assert(!same_global_refs || src->global_refs != nullptr);
-    *dest = *src;
+    memcpy(dest, src, sizeof(ConstExprValue));
     if (!same_global_refs) {
         dest->global_refs = global_refs;
+        if (src->special == ConstValSpecialUndef)
+            return;
         if (dest->type->id == ZigTypeIdStruct) {
-            dest->data.x_struct.fields = allocate_nonzero<ConstExprValue>(dest->type->data.structure.src_field_count);
-            memcpy(dest->data.x_struct.fields, src->data.x_struct.fields, sizeof(ConstExprValue) * dest->type->data.structure.src_field_count);
+            dest->data.x_struct.fields = create_const_vals(dest->type->data.structure.src_field_count);
+            for (size_t i = 0; i < dest->type->data.structure.src_field_count; i += 1) {
+                copy_const_val(&dest->data.x_struct.fields[i], &src->data.x_struct.fields[i], false);
+            }
         }
     }
 }
@@ -10091,7 +10868,6 @@ static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInstruction *source_
             zig_unreachable();
         case CastOpErrSet:
         case CastOpBitCast:
-        case CastOpPtrOfArrayToSlice:
             zig_panic("TODO");
         case CastOpNoop: {
@@ -10191,7 +10967,7 @@ static IrInstruction *ir_const(IrAnalyze *ira, IrInstruction *old_instruction, Z
 }

 static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value,
-        ZigType *wanted_type, CastOp cast_op, bool need_alloca)
+        ZigType *wanted_type, CastOp cast_op)
 {
     if (instr_is_comptime(value) || !type_has_bits(wanted_type)) {
         IrInstruction *result = ir_const(ira, source_instr, wanted_type);
@@ -10204,9 +10980,6 @@ static IrInstruction *ir_resolve_cast(IrAnalyze *ira, IrInstruction *source_inst
     } else {
         IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
                 wanted_type, value, cast_op);
         result->value.type = wanted_type;
-        if (need_alloca) {
-            ir_add_alloca(ira, result, wanted_type);
-        }
         return result;
     }
 }
@@ -10248,7 +11021,7 @@ static IrInstruction *ir_resolve_ptr_of_array_to_unknown_len_ptr(IrAnalyze *ira,
 }

 static IrInstruction *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruction *source_instr,
-        IrInstruction *value, ZigType *wanted_type)
+        IrInstruction *value, ZigType *wanted_type, ResultLoc *result_loc)
 {
     Error err;
@@ -10279,11 +11052,12 @@ static IrInstruction *ir_resolve_ptr_of_array_to_slice(IrAnalyze *ira, IrInstruc
         }
     }

-    IrInstruction *result = ir_build_cast(&ira->new_irb, source_instr->scope, source_instr->source_node,
-            wanted_type, value, CastOpPtrOfArrayToSlice);
-    result->value.type = wanted_type;
-    ir_add_alloca(ira, result, wanted_type);
-    return result;
+    if (result_loc == nullptr) result_loc = no_result_loc();
+    IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false);
+    if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
+        return result_loc_inst;
+    }
+    return
ir_build_ptr_of_array_to_slice(ira, source_instr, wanted_type, value, result_loc_inst); } static IrBasicBlock *ir_get_new_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrInstruction *ref_old_instruction) { @@ -10315,13 +11089,123 @@ static IrBasicBlock *ir_get_new_bb_runtime(IrAnalyze *ira, IrBasicBlock *old_bb, } static void ir_start_bb(IrAnalyze *ira, IrBasicBlock *old_bb, IrBasicBlock *const_predecessor_bb) { + assert(!old_bb->suspended); ira->instruction_index = 0; ira->old_irb.current_basic_block = old_bb; ira->const_predecessor_bb = const_predecessor_bb; + ira->old_bb_index = old_bb->index; +} + +static IrInstruction *ira_suspend(IrAnalyze *ira, IrInstruction *old_instruction, IrBasicBlock *next_bb, + IrSuspendPosition *suspend_pos) +{ + if (ira->codegen->verbose_ir) { + fprintf(stderr, "suspend %s_%zu %s_%zu #%zu (%zu,%zu)\n", ira->old_irb.current_basic_block->name_hint, + ira->old_irb.current_basic_block->debug_id, + ira->old_irb.exec->basic_block_list.at(ira->old_bb_index)->name_hint, + ira->old_irb.exec->basic_block_list.at(ira->old_bb_index)->debug_id, + ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index)->debug_id, + ira->old_bb_index, ira->instruction_index); + } + suspend_pos->basic_block_index = ira->old_bb_index; + suspend_pos->instruction_index = ira->instruction_index; + + ira->old_irb.current_basic_block->suspended = true; + + // null next_bb means that the caller plans to call ira_resume before returning + if (next_bb != nullptr) { + ira->old_bb_index = next_bb->index; + ira->old_irb.current_basic_block = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index); + assert(ira->old_irb.current_basic_block == next_bb); + ira->instruction_index = 0; + ira->const_predecessor_bb = nullptr; + next_bb->other = ir_get_new_bb_runtime(ira, next_bb, old_instruction); + ira->new_irb.current_basic_block = next_bb->other; + } + return ira->codegen->unreach_instruction; +} + +static IrInstruction *ira_resume(IrAnalyze *ira) { + IrSuspendPosition pos = ira->resume_stack.pop(); + if (ira->codegen->verbose_ir) { + fprintf(stderr, "resume (%zu,%zu) ", pos.basic_block_index, pos.instruction_index); + } + ira->old_bb_index = pos.basic_block_index; + ira->old_irb.current_basic_block = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index); + assert(ira->old_irb.current_basic_block->in_resume_stack); + ira->old_irb.current_basic_block->in_resume_stack = false; + ira->old_irb.current_basic_block->suspended = false; + ira->instruction_index = pos.instruction_index; + assert(pos.instruction_index < ira->old_irb.current_basic_block->instruction_list.length); + if (ira->codegen->verbose_ir) { + fprintf(stderr, "%s_%zu #%zu\n", ira->old_irb.current_basic_block->name_hint, + ira->old_irb.current_basic_block->debug_id, + ira->old_irb.current_basic_block->instruction_list.at(pos.instruction_index)->debug_id); + } + ira->const_predecessor_bb = nullptr; + ira->new_irb.current_basic_block = ira->old_irb.current_basic_block->other; + assert(ira->new_irb.current_basic_block != nullptr); + return ira->codegen->unreach_instruction; +} + +static void ir_start_next_bb(IrAnalyze *ira) { + ira->old_bb_index += 1; + + bool need_repeat = true; + for (;;) { + while (ira->old_bb_index < ira->old_irb.exec->basic_block_list.length) { + IrBasicBlock *old_bb = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index); + if (old_bb->other == nullptr && old_bb->suspend_instruction_ref == nullptr) { + ira->old_bb_index += 1; + continue; + } + // if it's already started, or + // if it's a suspended 
block, + // then skip it + if (old_bb->suspended || + (old_bb->other != nullptr && old_bb->other->instruction_list.length != 0) || + (old_bb->other != nullptr && old_bb->other->already_appended)) + { + ira->old_bb_index += 1; + continue; + } + + // if there is a resume_stack, pop one from there rather than moving on. + // the last item of the resume stack will be a basic block that will + // move on to the next one below + if (ira->resume_stack.length != 0) { + ira_resume(ira); + return; + } + + if (old_bb->other == nullptr) { + old_bb->other = ir_get_new_bb_runtime(ira, old_bb, old_bb->suspend_instruction_ref); + } + ira->new_irb.current_basic_block = old_bb->other; + ir_start_bb(ira, old_bb, nullptr); + return; + } + if (!need_repeat) { + if (ira->resume_stack.length != 0) { + ira_resume(ira); + } + return; + } + need_repeat = false; + ira->old_bb_index = 0; + continue; + } } static void ir_finish_bb(IrAnalyze *ira) { - ira->new_irb.exec->basic_block_list.append(ira->new_irb.current_basic_block); + if (!ira->new_irb.current_basic_block->already_appended) { + ira->new_irb.current_basic_block->already_appended = true; + if (ira->codegen->verbose_ir) { + fprintf(stderr, "append new bb %s_%zu\n", ira->new_irb.current_basic_block->name_hint, + ira->new_irb.current_basic_block->debug_id); + } + ira->new_irb.exec->basic_block_list.append(ira->new_irb.current_basic_block); + } ira->instruction_index += 1; while (ira->instruction_index < ira->old_irb.current_basic_block->instruction_list.length) { IrInstruction *next_instruction = ira->old_irb.current_basic_block->instruction_list.at(ira->instruction_index); @@ -10332,32 +11216,7 @@ static void ir_finish_bb(IrAnalyze *ira) { ira->instruction_index += 1; } - size_t my_old_bb_index = ira->old_bb_index; - ira->old_bb_index += 1; - - bool need_repeat = true; - for (;;) { - while (ira->old_bb_index < ira->old_irb.exec->basic_block_list.length) { - IrBasicBlock *old_bb = ira->old_irb.exec->basic_block_list.at(ira->old_bb_index); - if (old_bb->other == nullptr) { - ira->old_bb_index += 1; - continue; - } - if (old_bb->other->instruction_list.length != 0 || ira->old_bb_index == my_old_bb_index) { - ira->old_bb_index += 1; - continue; - } - ira->new_irb.current_basic_block = old_bb->other; - - ir_start_bb(ira, old_bb, nullptr); - return; - } - if (!need_repeat) - return; - need_repeat = false; - ira->old_bb_index = 0; - continue; - } + ir_start_next_bb(ira); } static IrInstruction *ir_unreach_error(IrAnalyze *ira) { @@ -10420,6 +11279,12 @@ static IrInstruction *ir_const_undef(IrAnalyze *ira, IrInstruction *source_instr return result; } +static IrInstruction *ir_const_unreachable(IrAnalyze *ira, IrInstruction *source_instruction) { + IrInstruction *result = ir_const(ira, source_instruction, ira->codegen->builtin_types.entry_unreachable); + result->value.special = ConstValSpecialStatic; + return result; +} + static IrInstruction *ir_const_void(IrAnalyze *ira, IrInstruction *source_instruction) { return ir_const(ira, source_instruction, ira->codegen->builtin_types.entry_void); } @@ -10617,7 +11482,7 @@ static ZigFn *ir_resolve_fn(IrAnalyze *ira, IrInstruction *fn_value) { } static IrInstruction *ir_analyze_optional_wrap(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, - ZigType *wanted_type) + ZigType *wanted_type, ResultLoc *result_loc) { assert(wanted_type->id == ZigTypeIdOptional); @@ -10643,20 +11508,29 @@ static IrInstruction *ir_analyze_optional_wrap(IrAnalyze *ira, IrInstruction *so return &const_instruction->base; } - IrInstruction 
*result = ir_build_maybe_wrap(&ira->new_irb, source_instr->scope, source_instr->source_node, value); - result->value.type = wanted_type; + if (result_loc == nullptr && handle_is_ptr(wanted_type)) { + result_loc = no_result_loc(); + } + IrInstruction *result_loc_inst = nullptr; + if (result_loc != nullptr) { + result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false); + if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) { + return result_loc_inst; + } + } + IrInstruction *result = ir_build_optional_wrap(ira, source_instr, wanted_type, value, result_loc_inst); result->value.data.rh_maybe = RuntimeHintOptionalNonNull; - ir_add_alloca(ira, result, wanted_type); return result; } static IrInstruction *ir_analyze_err_wrap_payload(IrAnalyze *ira, IrInstruction *source_instr, - IrInstruction *value, ZigType *wanted_type) + IrInstruction *value, ZigType *wanted_type, ResultLoc *result_loc) { assert(wanted_type->id == ZigTypeIdErrorUnion); + ZigType *payload_type = wanted_type->data.error_union.payload_type; + ZigType *err_set_type = wanted_type->data.error_union.err_set_type; if (instr_is_comptime(value)) { - ZigType *payload_type = wanted_type->data.error_union.payload_type; IrInstruction *casted_payload = ir_implicit_cast(ira, value, payload_type); if (type_is_invalid(casted_payload->value.type)) return ira->codegen->invalid_instruction; @@ -10666,7 +11540,7 @@ static IrInstruction *ir_analyze_err_wrap_payload(IrAnalyze *ira, IrInstruction return ira->codegen->invalid_instruction; ConstExprValue *err_set_val = create_const_vals(1); - err_set_val->type = wanted_type->data.error_union.err_set_type; + err_set_val->type = err_set_type; err_set_val->special = ConstValSpecialStatic; err_set_val->data.x_err_set = nullptr; @@ -10679,10 +11553,19 @@ static IrInstruction *ir_analyze_err_wrap_payload(IrAnalyze *ira, IrInstruction return &const_instruction->base; } - IrInstruction *result = ir_build_err_wrap_payload(&ira->new_irb, source_instr->scope, source_instr->source_node, value); - result->value.type = wanted_type; + IrInstruction *result_loc_inst; + if (handle_is_ptr(wanted_type)) { + if (result_loc == nullptr) result_loc = no_result_loc(); + result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false); + if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) { + return result_loc_inst; + } + } else { + result_loc_inst = nullptr; + } + + IrInstruction *result = ir_build_err_wrap_payload(ira, source_instr, wanted_type, value, result_loc_inst); result->value.data.rh_error_union = RuntimeHintErrorUnionNonError; - ir_add_alloca(ira, result, wanted_type); return result; } @@ -10729,7 +11612,9 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou return result; } -static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, ZigType *wanted_type) { +static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, + ZigType *wanted_type, ResultLoc *result_loc) +{ assert(wanted_type->id == ZigTypeIdErrorUnion); IrInstruction *casted_value = ir_implicit_cast(ira, value, wanted_type->data.error_union.err_set_type); @@ -10753,10 +11638,20 @@ static IrInstruction *ir_analyze_err_wrap_code(IrAnalyze *ira, IrInstruction *so return &const_instruction->base; } - IrInstruction *result = ir_build_err_wrap_code(&ira->new_irb, 
source_instr->scope, source_instr->source_node, value); - result->value.type = wanted_type; + IrInstruction *result_loc_inst; + if (handle_is_ptr(wanted_type)) { + if (result_loc == nullptr) result_loc = no_result_loc(); + result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false); + if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) { + return result_loc_inst; + } + } else { + result_loc_inst = nullptr; + } + + + IrInstruction *result = ir_build_err_wrap_code(ira, source_instr, wanted_type, value, result_loc_inst); result->value.data.rh_error_union = RuntimeHintErrorUnionError; - ir_add_alloca(ira, result, wanted_type); return result; } @@ -10816,20 +11711,21 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, value->value.type, is_const, is_volatile, PtrLenSingle, 0, 0, 0, false); - IrInstruction *new_instruction = ir_build_ref(&ira->new_irb, source_instruction->scope, - source_instruction->source_node, value, is_const, is_volatile); - new_instruction->value.type = ptr_type; - new_instruction->value.data.rh_ptr = RuntimeHintPtrStack; + + IrInstruction *result_loc; if (type_has_bits(ptr_type) && !handle_is_ptr(value->value.type)) { - ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); - assert(fn_entry); - fn_entry->alloca_list.append(new_instruction); + result_loc = ir_resolve_result(ira, source_instruction, no_result_loc(), value->value.type, nullptr, true, false); + } else { + result_loc = nullptr; } + + IrInstruction *new_instruction = ir_build_ref_gen(ira, source_instruction, ptr_type, value, result_loc); + new_instruction->value.data.rh_ptr = RuntimeHintPtrStack; return new_instruction; } static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *source_instr, - IrInstruction *array_arg, ZigType *wanted_type) + IrInstruction *array_arg, ZigType *wanted_type, ResultLoc *result_loc) { assert(is_slice(wanted_type)); // In this function we honor the const-ness of wanted_type, because @@ -10838,7 +11734,7 @@ static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *s IrInstruction *array_ptr = nullptr; IrInstruction *array; if (array_arg->value.type->id == ZigTypeIdPointer) { - array = ir_get_deref(ira, source_instr, array_arg); + array = ir_get_deref(ira, source_instr, array_arg, nullptr); array_ptr = array_arg; } else { array = array_arg; @@ -10861,12 +11757,14 @@ static IrInstruction *ir_analyze_array_to_slice(IrAnalyze *ira, IrInstruction *s if (!array_ptr) array_ptr = ir_get_ref(ira, source_instr, array, true, false); - IrInstruction *result = ir_build_slice(&ira->new_irb, source_instr->scope, - source_instr->source_node, array_ptr, start, end, false); - result->value.type = wanted_type; + if (result_loc == nullptr) result_loc = no_result_loc(); + IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, wanted_type, nullptr, true, false); + if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) { + return result_loc_inst; + } + IrInstruction *result = ir_build_slice_gen(ira, source_instr, wanted_type, array_ptr, start, end, false, result_loc_inst); result->value.data.rh_slice.id = RuntimeHintSliceIdLen; result->value.data.rh_slice.len = array_type->data.array.len; - ir_add_alloca(ira, result, result->value.type); return result; } @@ -11504,7 +12402,7 @@ static IrInstruction *ir_analyze_array_to_vector(IrAnalyze 
*ira, IrInstruction * } static IrInstruction *ir_analyze_vector_to_array(IrAnalyze *ira, IrInstruction *source_instr, - IrInstruction *vector, ZigType *array_type) + IrInstruction *vector, ZigType *array_type, ResultLoc *result_loc) { if (instr_is_comptime(vector)) { // arrays and vectors have the same ConstExprValue representation @@ -11513,7 +12411,14 @@ static IrInstruction *ir_analyze_vector_to_array(IrAnalyze *ira, IrInstruction * result->value.type = array_type; return result; } - return ir_build_vector_to_array(ira, source_instr, vector, array_type); + if (result_loc == nullptr) { + result_loc = no_result_loc(); + } + IrInstruction *result_loc_inst = ir_resolve_result(ira, source_instr, result_loc, array_type, nullptr, true, false); + if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) { + return result_loc_inst; + } + return ir_build_vector_to_array(ira, source_instr, array_type, vector, result_loc_inst); } static IrInstruction *ir_analyze_int_to_c_ptr(IrAnalyze *ira, IrInstruction *source_instr, @@ -11563,7 +12468,7 @@ static bool is_pointery_and_elem_is_not_pointery(ZigType *ty) { } static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr, - ZigType *wanted_type, IrInstruction *value) + ZigType *wanted_type, IrInstruction *value, ResultLoc *result_loc) { Error err; ZigType *actual_type = value->value.type; @@ -11579,7 +12484,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst if (const_cast_result.id == ConstCastResultIdInvalid) return ira->codegen->invalid_instruction; if (const_cast_result.id == ConstCastResultIdOk) { - return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop, false); + return ir_resolve_cast(ira, source_instr, value, wanted_type, CastOpNoop); } // cast from T to ?T @@ -11589,12 +12494,12 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node, false).id == ConstCastResultIdOk) { - return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type); + return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, result_loc); } else if (actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) { - return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type); + return ir_analyze_optional_wrap(ira, source_instr, value, wanted_type, result_loc); } else { return ira->codegen->invalid_instruction; } @@ -11618,7 +12523,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst wanted_child_type); if (type_is_invalid(cast1->value.type)) return ira->codegen->invalid_instruction; - return ir_analyze_optional_wrap(ira, source_instr, cast1, wanted_type); + return ir_analyze_optional_wrap(ira, source_instr, cast1, wanted_type, result_loc); } } } @@ -11628,12 +12533,12 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type, source_node, false).id == ConstCastResultIdOk) { - return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type); + return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, result_loc); } else if (actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, value, 
wanted_type->data.error_union.payload_type, true)) { - return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type); + return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type, result_loc); } else { return ira->codegen->invalid_instruction; } @@ -11651,11 +12556,11 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst actual_type->id == ZigTypeIdComptimeInt || actual_type->id == ZigTypeIdComptimeFloat) { - IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value); + IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value, nullptr); if (type_is_invalid(cast1->value.type)) return ira->codegen->invalid_instruction; - IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1); + IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1, result_loc); if (type_is_invalid(cast2->value.type)) return ira->codegen->invalid_instruction; @@ -11737,7 +12642,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk) { - return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type); + return ir_analyze_array_to_slice(ira, source_instr, value, wanted_type, result_loc); } } @@ -11754,11 +12659,11 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk) { - IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value); + IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.maybe.child_type, value, nullptr); if (type_is_invalid(cast1->value.type)) return ira->codegen->invalid_instruction; - IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1); + IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1, result_loc); if (type_is_invalid(cast2->value.type)) return ira->codegen->invalid_instruction; @@ -11801,7 +12706,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst array_type->data.array.child_type, source_node, !slice_ptr_type->data.pointer.is_const).id == ConstCastResultIdOk) { - return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type); + return ir_resolve_ptr_of_array_to_slice(ira, source_instr, value, wanted_type, result_loc); } } @@ -11832,11 +12737,11 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst types_match_const_cast_only(ira, ptr_type->data.pointer.child_type, actual_type->data.array.child_type, source_node, false).id == ConstCastResultIdOk) { - IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value); + IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value, nullptr); if (type_is_invalid(cast1->value.type)) return ira->codegen->invalid_instruction; - IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1); + IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1, result_loc); if (type_is_invalid(cast2->value.type)) return ira->codegen->invalid_instruction; @@ -11848,7 +12753,7 @@ static IrInstruction 
*ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst if (wanted_type->id == ZigTypeIdErrorUnion && actual_type->id == ZigTypeIdErrorSet) { - return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type); + return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type, result_loc); } // cast from typed number to integer or float literal. @@ -11972,7 +12877,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst types_match_const_cast_only(ira, wanted_type->data.array.child_type, actual_type->data.vector.elem_type, source_node, false).id == ConstCastResultIdOk) { - return ir_analyze_vector_to_array(ira, source_instr, value, wanted_type); + return ir_analyze_vector_to_array(ira, source_instr, value, wanted_type, result_loc); } // cast from [N]T to @Vector(N, T) @@ -12014,7 +12919,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst return ira->codegen->invalid_instruction; } -static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, ZigType *expected_type) { +static IrInstruction *ir_implicit_cast_with_result(IrAnalyze *ira, IrInstruction *value, ZigType *expected_type, + ResultLoc *result_loc) +{ assert(value); assert(value != ira->codegen->invalid_instruction); assert(!expected_type || !type_is_invalid(expected_type)); @@ -12027,63 +12934,76 @@ static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, Zig if (value->value.type->id == ZigTypeIdUnreachable) return value; - return ir_analyze_cast(ira, value, expected_type, value); + return ir_analyze_cast(ira, value, expected_type, value, result_loc); } -static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr) { +static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, ZigType *expected_type) { + return ir_implicit_cast_with_result(ira, value, expected_type, nullptr); +} + +static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr, + ResultLoc *result_loc) +{ Error err; ZigType *type_entry = ptr->value.type; - if (type_is_invalid(type_entry)) { + if (type_is_invalid(type_entry)) return ira->codegen->invalid_instruction; - } else if (type_entry->id == ZigTypeIdPointer) { - ZigType *child_type = type_entry->data.pointer.child_type; - // if the child type has one possible value, the deref is comptime - switch (type_has_one_possible_value(ira->codegen, child_type)) { - case OnePossibleValueInvalid: - return ira->codegen->invalid_instruction; - case OnePossibleValueYes: - return ir_const(ira, source_instruction, child_type); - case OnePossibleValueNo: - break; - } - if (instr_is_comptime(ptr)) { - if (ptr->value.special == ConstValSpecialUndef) { - ir_add_error(ira, ptr, buf_sprintf("attempt to dereference undefined value")); - return ira->codegen->invalid_instruction; - } - if (ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst || - ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar) - { - ConstExprValue *pointee = const_ptr_pointee_unchecked(ira->codegen, &ptr->value); - if (pointee->special != ConstValSpecialRuntime) { - IrInstruction *result = ir_const(ira, source_instruction, child_type); - if ((err = ir_read_const_ptr(ira, ira->codegen, source_instruction->source_node, &result->value, - &ptr->value))) - { - return ira->codegen->invalid_instruction; - } - result->value.type = child_type; - return result; - } - } - } - // if the instruction is a const ref instruction we can skip it - if (ptr->id == 
IrInstructionIdRef) {
            IrInstructionRef *ref_inst = reinterpret_cast<IrInstructionRef *>(ptr);
            return ref_inst->value;
        }
-        IrInstruction *result = ir_build_load_ptr_gen(ira, source_instruction, ptr, child_type);
-        if (type_entry->data.pointer.host_int_bytes != 0 && handle_is_ptr(child_type)) {
-            ir_add_alloca(ira, result, child_type);
-        }
-        return result;
-    } else {
+    if (type_entry->id != ZigTypeIdPointer) {
         ir_add_error_node(ira, source_instruction->source_node,
             buf_sprintf("attempt to dereference non-pointer type '%s'", buf_ptr(&type_entry->name)));
         return ira->codegen->invalid_instruction;
     }
+
+    ZigType *child_type = type_entry->data.pointer.child_type;
+    // if the child type has one possible value, the deref is comptime
+    switch (type_has_one_possible_value(ira->codegen, child_type)) {
+        case OnePossibleValueInvalid:
+            return ira->codegen->invalid_instruction;
+        case OnePossibleValueYes:
+            return ir_const(ira, source_instruction, child_type);
+        case OnePossibleValueNo:
+            break;
+    }
+    if (instr_is_comptime(ptr)) {
+        if (ptr->value.special == ConstValSpecialUndef) {
+            ir_add_error(ira, ptr, buf_sprintf("attempt to dereference undefined value"));
+            return ira->codegen->invalid_instruction;
+        }
+        if (ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) {
+            ConstExprValue *pointee = const_ptr_pointee_unchecked(ira->codegen, &ptr->value);
+            if (pointee->special != ConstValSpecialRuntime) {
+                IrInstruction *result = ir_const(ira, source_instruction, child_type);
+
+                if ((err = ir_read_const_ptr(ira, ira->codegen, source_instruction->source_node, &result->value,
+                                &ptr->value)))
+                {
+                    return ira->codegen->invalid_instruction;
+                }
+                result->value.type = child_type;
+                return result;
+            }
+        }
+    }
+    // if the instruction is a const ref instruction we can skip it
+    if (ptr->id == IrInstructionIdRef) {
+        IrInstructionRef *ref_inst = reinterpret_cast<IrInstructionRef *>(ptr);
+        return ref_inst->value;
+    }
+
+    IrInstruction *result_loc_inst;
+    if (type_entry->data.pointer.host_int_bytes != 0 && handle_is_ptr(child_type)) {
+        if (result_loc == nullptr) result_loc = no_result_loc();
+        result_loc_inst = ir_resolve_result(ira, source_instruction, result_loc, child_type, nullptr, true, false);
+        if (type_is_invalid(result_loc_inst->value.type) || instr_is_unreachable(result_loc_inst)) {
+            return result_loc_inst;
+        }
+    } else {
+        result_loc_inst = nullptr;
+    }
+
+    return ir_build_load_ptr_gen(ira, source_instruction, ptr, child_type, result_loc_inst);
 }

 static bool ir_resolve_align(IrAnalyze *ira, IrInstruction *value, uint32_t *out) {
@@ -12297,6 +13217,14 @@ static IrInstruction *ir_analyze_instruction_return(IrAnalyze *ira, IrInstructio
     if (type_is_invalid(value->value.type))
         return ir_unreach_error(ira);

+    if (!instr_is_comptime(value) && handle_is_ptr(ira->explicit_return_type)) {
+        // result location mechanism took care of it.
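Aside: the rewritten ir_get_deref just above folds a dereference at compile time whenever the pointer operand is comptime-known and the pointee is not a runtime value, and only otherwise builds a load instruction (resolving a result location first when the loaded type is handled by pointer). The sketch below is a minimal, self-contained model of that decision with invented types (Value, Pointer, LoadResult); it is not the compiler's real data structures.

    #include <cstdio>

    // Toy pointee as the analyzer sees it.
    struct Value {
        bool comptime_known; // roughly ConstValSpecialStatic vs ConstValSpecialRuntime
        int payload;         // the constant, when known
    };

    // Toy pointer operand: do we statically know which object it refers to?
    struct Pointer {
        bool comptime_known;
        const Value *pointee;
    };

    // Outcome of analyzing a dereference: either a folded constant, or a marker
    // meaning "emit a runtime load instruction (with a result location if the
    // loaded type is passed by pointer)".
    struct LoadResult {
        bool folded;
        int constant;
    };

    static LoadResult analyze_deref(const Pointer &ptr) {
        if (ptr.comptime_known && ptr.pointee->comptime_known) {
            // Mirrors the comptime branch: read through the pointer right now.
            return LoadResult{true, ptr.pointee->payload};
        }
        // Otherwise a load instruction would be built and executed at runtime.
        return LoadResult{false, 0};
    }

    int main() {
        Value known{true, 42};
        Value runtime_only{false, 0};

        LoadResult a = analyze_deref(Pointer{true, &known});
        LoadResult b = analyze_deref(Pointer{true, &runtime_only});

        std::printf("a: folded=%d value=%d; b: folded=%d (needs a runtime load)\n",
                (int)a.folded, a.constant, (int)b.folded);
        return 0;
    }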
+ IrInstruction *result = ir_build_return(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, nullptr); + result->value.type = ira->codegen->builtin_types.entry_unreachable; + return ir_finish_anal(ira, result); + } + IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->explicit_return_type); if (type_is_invalid(casted_value->value.type)) { AstNode *source_node = ira->explicit_return_type_source_node; @@ -12459,7 +13387,7 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp * } else { return is_non_null; } - } else if (is_equality_cmp && + } else if (is_equality_cmp && ((op1->value.type->id == ZigTypeIdNull && op2->value.type->id == ZigTypeIdPointer && op2->value.type->data.pointer.ptr_len == PtrLenC) || (op2->value.type->id == ZigTypeIdNull && op1->value.type->id == ZigTypeIdPointer && @@ -12901,7 +13829,7 @@ static ErrorMsg *ir_eval_math_op_scalar(IrAnalyze *ira, IrInstruction *source_in } } else { float_div_trunc(out_val, op1_val, op2_val); - ConstExprValue remainder; + ConstExprValue remainder = {}; float_rem(&remainder, op1_val, op2_val); if (float_cmp_zero(&remainder) != CmpEQ) { return ir_add_error(ira, source_instr, buf_sprintf("exact division had a remainder")); @@ -13276,8 +14204,8 @@ static IrInstruction *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp // have a remainder function ambiguity problem ok = true; } else { - ConstExprValue rem_result; - ConstExprValue mod_result; + ConstExprValue rem_result = {}; + ConstExprValue mod_result = {}; float_rem(&rem_result, op1_val, op2_val); float_mod(&mod_result, op1_val, op2_val); ok = float_cmp(&rem_result, &mod_result) == CmpEQ; @@ -13500,10 +14428,12 @@ static IrInstruction *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp *i size_t next_index = 0; for (size_t i = op1_array_index; i < op1_array_end; i += 1, next_index += 1) { - out_array_val->data.x_array.data.s_none.elements[next_index] = op1_array_val->data.x_array.data.s_none.elements[i]; + copy_const_val(&out_array_val->data.x_array.data.s_none.elements[next_index], + &op1_array_val->data.x_array.data.s_none.elements[i], true); } for (size_t i = op2_array_index; i < op2_array_end; i += 1, next_index += 1) { - out_array_val->data.x_array.data.s_none.elements[next_index] = op2_array_val->data.x_array.data.s_none.elements[i]; + copy_const_val(&out_array_val->data.x_array.data.s_none.elements[next_index], + &op2_array_val->data.x_array.data.s_none.elements[i], true); } if (next_index < new_len) { ConstExprValue *null_byte = &out_array_val->data.x_array.data.s_none.elements[next_index]; @@ -13564,7 +14494,8 @@ static IrInstruction *ir_analyze_array_mult(IrAnalyze *ira, IrInstructionBinOp * uint64_t i = 0; for (uint64_t x = 0; x < mult_amt; x += 1) { for (uint64_t y = 0; y < old_array_len; y += 1) { - out_val->data.x_array.data.s_none.elements[i] = array_val->data.x_array.data.s_none.elements[y]; + copy_const_val(&out_val->data.x_array.data.s_none.elements[i], + &array_val->data.x_array.data.s_none.elements[y], true); i += 1; } } @@ -13661,12 +14592,6 @@ static IrInstruction *ir_analyze_instruction_decl_var(IrAnalyze *ira, Error err; ZigVar *var = decl_var_instruction->var; - IrInstruction *init_value = decl_var_instruction->init_value->child; - if (type_is_invalid(init_value->value.type)) { - var->var_type = ira->codegen->builtin_types.entry_invalid; - return ira->codegen->invalid_instruction; - } - ZigType *explicit_type = nullptr; IrInstruction *var_type = nullptr; if (decl_var_instruction->var_type != 
nullptr) { @@ -13681,18 +14606,40 @@ static IrInstruction *ir_analyze_instruction_decl_var(IrAnalyze *ira, AstNode *source_node = decl_var_instruction->base.source_node; - IrInstruction *casted_init_value = ir_implicit_cast(ira, init_value, explicit_type); bool is_comptime_var = ir_get_var_is_comptime(var); bool var_class_requires_const = false; - ZigType *result_type = casted_init_value->value.type; + IrInstruction *var_ptr = decl_var_instruction->ptr->child; + // if this is null, a compiler error happened and did not initialize the variable. + // if there are no compile errors there may be a missing ir_expr_wrap in pass1 IR generation. + if (var_ptr == nullptr || type_is_invalid(var_ptr->value.type)) { + ir_assert(var_ptr != nullptr || ira->codegen->errors.length != 0, &decl_var_instruction->base); + var->var_type = ira->codegen->builtin_types.entry_invalid; + return ira->codegen->invalid_instruction; + } + + // The ir_build_var_decl_src call is supposed to pass a pointer to the allocation, not an initialization value. + ir_assert(var_ptr->value.type->id == ZigTypeIdPointer, &decl_var_instruction->base); + + ZigType *result_type = var_ptr->value.type->data.pointer.child_type; if (type_is_invalid(result_type)) { result_type = ira->codegen->builtin_types.entry_invalid; } else if (result_type->id == ZigTypeIdUnreachable || result_type->id == ZigTypeIdOpaque) { - ir_add_error_node(ira, source_node, - buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name))); - result_type = ira->codegen->builtin_types.entry_invalid; + zig_unreachable(); + } + + ConstExprValue *init_val = nullptr; + if (instr_is_comptime(var_ptr) && var_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) { + init_val = const_ptr_pointee(ira, ira->codegen, &var_ptr->value, decl_var_instruction->base.source_node); + if (is_comptime_var) { + if (var->gen_is_const) { + var->const_value = init_val; + } else { + var->const_value = create_const_vals(1); + copy_const_val(var->const_value, init_val, false); + } + } } switch (type_requires_comptime(ira->codegen, result_type)) { @@ -13709,18 +14656,20 @@ static IrInstruction *ir_analyze_instruction_decl_var(IrAnalyze *ira, } break; case ReqCompTimeNo: - if (casted_init_value->value.special == ConstValSpecialStatic && - casted_init_value->value.type->id == ZigTypeIdFn && - casted_init_value->value.data.x_ptr.special != ConstPtrSpecialHardCodedAddr && - casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways) - { - var_class_requires_const = true; - if (!var->src_is_const && !is_comptime_var) { - ErrorMsg *msg = ir_add_error_node(ira, source_node, - buf_sprintf("functions marked inline must be stored in const or comptime var")); - AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node; - add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here")); - result_type = ira->codegen->builtin_types.entry_invalid; + if (init_val != nullptr) { + if (init_val->special == ConstValSpecialStatic && + init_val->type->id == ZigTypeIdFn && + init_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr && + init_val->data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways) + { + var_class_requires_const = true; + if (!var->src_is_const && !is_comptime_var) { + ErrorMsg *msg = ir_add_error_node(ira, source_node, + buf_sprintf("functions marked inline must be stored in const or comptime var")); + AstNode *proto_node = init_val->data.x_ptr.data.fn.fn_entry->proto_node; + add_error_note(ira->codegen, msg, 
proto_node, buf_sprintf("declared here")); + result_type = ira->codegen->builtin_types.entry_invalid; + } } } break; @@ -13767,11 +14716,29 @@ static IrInstruction *ir_analyze_instruction_decl_var(IrAnalyze *ira, } } - if (casted_init_value->value.special != ConstValSpecialRuntime) { - if (var->mem_slot_index != SIZE_MAX) { + if (init_val != nullptr && init_val->special != ConstValSpecialRuntime) { + // Resolve ConstPtrMutInfer + if (var->gen_is_const) { + var_ptr->value.data.x_ptr.mut = ConstPtrMutComptimeConst; + } else if (is_comptime_var) { + var_ptr->value.data.x_ptr.mut = ConstPtrMutComptimeVar; + } else { + // we need a runtime ptr but we have a comptime val. + // since it's a comptime val there are no instructions for it. + // we memcpy the init value here + IrInstruction *deref = ir_get_deref(ira, var_ptr, var_ptr, nullptr); + // If this assertion trips, something is wrong with the IR instructions, because + // we expected the above deref to return a constant value, but it created a runtime + // instruction. + assert(deref->value.special != ConstValSpecialRuntime); + var_ptr->value.special = ConstValSpecialRuntime; + ir_analyze_store_ptr(ira, var_ptr, var_ptr, deref); + } + + if (var_ptr->value.special == ConstValSpecialStatic && var->mem_slot_index != SIZE_MAX) { assert(var->mem_slot_index < ira->exec_context.mem_slot_list.length); ConstExprValue *mem_slot = ira->exec_context.mem_slot_list.at(var->mem_slot_index); - copy_const_val(mem_slot, &casted_init_value->value, !is_comptime_var || var->gen_is_const); + copy_const_val(mem_slot, init_val, !is_comptime_var || var->gen_is_const); if (is_comptime_var || (var_class_requires_const && var->gen_is_const)) { return ir_const_void(ira, &decl_var_instruction->base); @@ -13788,7 +14755,7 @@ static IrInstruction *ir_analyze_instruction_decl_var(IrAnalyze *ira, if (fn_entry) fn_entry->variable_list.append(var); - return ir_build_var_decl_gen(ira, &decl_var_instruction->base, var, casted_init_value); + return ir_build_var_decl_gen(ira, &decl_var_instruction->base, var, var_ptr); } static IrInstruction *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructionExport *instruction) { @@ -14082,7 +15049,7 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i ZigVar *coro_allocator_var = ira->old_irb.exec->coro_allocator_var; assert(coro_allocator_var != nullptr); IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var); - IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst); + IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst, nullptr); assert(result->value.type != nullptr); return result; } @@ -14090,7 +15057,436 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i zig_unreachable(); } -static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *call_instruction, ZigFn *fn_entry, +static IrInstruction *ir_analyze_alloca(IrAnalyze *ira, IrInstruction *source_inst, ZigType *var_type, + uint32_t align, const char *name_hint, bool force_comptime) +{ + Error err; + + ConstExprValue *pointee = create_const_vals(1); + pointee->special = ConstValSpecialUndef; + + IrInstructionAllocaGen *result = ir_create_alloca_gen(ira, source_inst, align, name_hint); + result->base.value.special = ConstValSpecialStatic; + result->base.value.data.x_ptr.special = ConstPtrSpecialRef; + result->base.value.data.x_ptr.mut = force_comptime ? 
ConstPtrMutComptimeVar : ConstPtrMutInfer; + result->base.value.data.x_ptr.data.ref.pointee = pointee; + + if ((err = type_resolve(ira->codegen, var_type, ResolveStatusZeroBitsKnown))) + return ira->codegen->invalid_instruction; + assert(result->base.value.data.x_ptr.special != ConstPtrSpecialInvalid); + + pointee->type = var_type; + result->base.value.type = get_pointer_to_type_extra(ira->codegen, var_type, false, false, + PtrLenSingle, align, 0, 0, false); + + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); + if (fn_entry != nullptr) { + fn_entry->alloca_gen_list.append(result); + } + result->base.is_gen = true; + return &result->base; +} + +static ZigType *ir_result_loc_expected_type(IrAnalyze *ira, IrInstruction *suspend_source_instr, + ResultLoc *result_loc) +{ + switch (result_loc->id) { + case ResultLocIdInvalid: + case ResultLocIdPeerParent: + zig_unreachable(); + case ResultLocIdNone: + case ResultLocIdVar: + case ResultLocIdBitCast: + return nullptr; + case ResultLocIdInstruction: + return result_loc->source_instruction->child->value.type; + case ResultLocIdReturn: + return ira->explicit_return_type; + case ResultLocIdPeer: + return reinterpret_cast(result_loc)->parent->resolved_type; + } + zig_unreachable(); +} + +static bool type_can_bit_cast(ZigType *t) { + switch (t->id) { + case ZigTypeIdInvalid: + zig_unreachable(); + case ZigTypeIdMetaType: + case ZigTypeIdOpaque: + case ZigTypeIdBoundFn: + case ZigTypeIdArgTuple: + case ZigTypeIdUnreachable: + case ZigTypeIdComptimeFloat: + case ZigTypeIdComptimeInt: + case ZigTypeIdEnumLiteral: + case ZigTypeIdUndefined: + case ZigTypeIdNull: + case ZigTypeIdPointer: + return false; + default: + // TODO list these types out explicitly, there are probably some other invalid ones here + return true; + } +} + +static void set_up_result_loc_for_inferred_comptime(IrInstruction *ptr) { + ConstExprValue *undef_child = create_const_vals(1); + undef_child->type = ptr->value.type->data.pointer.child_type; + undef_child->special = ConstValSpecialUndef; + ptr->value.special = ConstValSpecialStatic; + ptr->value.data.x_ptr.mut = ConstPtrMutInfer; + ptr->value.data.x_ptr.special = ConstPtrSpecialRef; + ptr->value.data.x_ptr.data.ref.pointee = undef_child; +} + +// when calling this function, at the callsite must check for result type noreturn and propagate it up +static IrInstruction *ir_resolve_result_raw(IrAnalyze *ira, IrInstruction *suspend_source_instr, + ResultLoc *result_loc, ZigType *value_type, IrInstruction *value, bool force_runtime, bool non_null_comptime) +{ + Error err; + if (result_loc->resolved_loc != nullptr) { + // allow to redo the result location if the value is known and comptime and the previous one isn't + if (value == nullptr || !instr_is_comptime(value) || instr_is_comptime(result_loc->resolved_loc)) { + return result_loc->resolved_loc; + } + } + result_loc->gen_instruction = value; + result_loc->implicit_elem_type = value_type; + switch (result_loc->id) { + case ResultLocIdInvalid: + case ResultLocIdPeerParent: + zig_unreachable(); + case ResultLocIdNone: { + if (value != nullptr) { + return nullptr; + } + // need to return a result location and don't have one. 
use a stack allocation + IrInstructionAllocaGen *alloca_gen = ir_create_alloca_gen(ira, suspend_source_instr, 0, ""); + if ((err = type_resolve(ira->codegen, value_type, ResolveStatusZeroBitsKnown))) + return ira->codegen->invalid_instruction; + alloca_gen->base.value.type = get_pointer_to_type_extra(ira->codegen, value_type, false, false, + PtrLenSingle, 0, 0, 0, false); + set_up_result_loc_for_inferred_comptime(&alloca_gen->base); + ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec); + if (fn_entry != nullptr) { + fn_entry->alloca_gen_list.append(alloca_gen); + } + result_loc->written = true; + result_loc->resolved_loc = &alloca_gen->base; + return result_loc->resolved_loc; + } + case ResultLocIdVar: { + ResultLocVar *result_loc_var = reinterpret_cast(result_loc); + assert(result_loc->source_instruction->id == IrInstructionIdAllocaSrc); + + if (value_type->id == ZigTypeIdUnreachable || value_type->id == ZigTypeIdOpaque) { + ir_add_error(ira, result_loc->source_instruction, + buf_sprintf("variable of type '%s' not allowed", buf_ptr(&value_type->name))); + return ira->codegen->invalid_instruction; + } + + IrInstructionAllocaSrc *alloca_src = + reinterpret_cast(result_loc->source_instruction); + bool force_comptime; + if (!ir_resolve_comptime(ira, alloca_src->is_comptime->child, &force_comptime)) + return ira->codegen->invalid_instruction; + bool is_comptime = force_comptime || (value != nullptr && + value->value.special != ConstValSpecialRuntime && result_loc_var->var->gen_is_const); + + if (alloca_src->base.child == nullptr || is_comptime) { + uint32_t align = 0; + if (alloca_src->align != nullptr && !ir_resolve_align(ira, alloca_src->align->child, &align)) { + return ira->codegen->invalid_instruction; + } + IrInstruction *alloca_gen; + if (is_comptime && value != nullptr) { + if (align > value->value.global_refs->align) { + value->value.global_refs->align = align; + } + alloca_gen = ir_get_ref(ira, result_loc->source_instruction, value, true, false); + } else { + alloca_gen = ir_analyze_alloca(ira, result_loc->source_instruction, value_type, align, + alloca_src->name_hint, force_comptime); + } + if (alloca_src->base.child != nullptr) { + alloca_src->base.child->ref_count = 0; + } + alloca_src->base.child = alloca_gen; + } + result_loc->written = true; + result_loc->resolved_loc = is_comptime ? 
nullptr : alloca_src->base.child; + return result_loc->resolved_loc; + } + case ResultLocIdInstruction: { + result_loc->written = true; + result_loc->resolved_loc = result_loc->source_instruction->child; + return result_loc->resolved_loc; + } + case ResultLocIdReturn: { + if (!non_null_comptime) { + bool is_comptime = value != nullptr && value->value.special != ConstValSpecialRuntime; + if (is_comptime) + return nullptr; + } + if ((err = type_resolve(ira->codegen, ira->explicit_return_type, ResolveStatusZeroBitsKnown))) { + return ira->codegen->invalid_instruction; + } + if (!type_has_bits(ira->explicit_return_type) || !handle_is_ptr(ira->explicit_return_type)) + return nullptr; + + ZigType *ptr_return_type = get_pointer_to_type(ira->codegen, ira->explicit_return_type, false); + result_loc->written = true; + result_loc->resolved_loc = ir_build_return_ptr(ira, result_loc->source_instruction, ptr_return_type); + if (ir_should_inline(ira->old_irb.exec, result_loc->source_instruction->scope)) { + set_up_result_loc_for_inferred_comptime(result_loc->resolved_loc); + } + return result_loc->resolved_loc; + } + case ResultLocIdPeer: { + ResultLocPeer *result_peer = reinterpret_cast(result_loc); + ResultLocPeerParent *peer_parent = result_peer->parent; + + if (peer_parent->peers.length == 1) { + IrInstruction *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent, + value_type, value, force_runtime, non_null_comptime); + result_peer->suspend_pos.basic_block_index = SIZE_MAX; + result_peer->suspend_pos.instruction_index = SIZE_MAX; + if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value.type) || + parent_result_loc->value.type->id == ZigTypeIdUnreachable) + { + return parent_result_loc; + } + result_loc->written = true; + result_loc->resolved_loc = parent_result_loc; + return result_loc->resolved_loc; + } + + bool is_comptime; + if (!ir_resolve_comptime(ira, peer_parent->is_comptime->child, &is_comptime)) + return ira->codegen->invalid_instruction; + peer_parent->skipped = is_comptime; + if (peer_parent->skipped) { + if (non_null_comptime) { + return ir_resolve_result(ira, suspend_source_instr, peer_parent->parent, + value_type, value, force_runtime, non_null_comptime); + } + return nullptr; + } + + if (peer_parent->resolved_type == nullptr) { + if (peer_parent->end_bb->suspend_instruction_ref == nullptr) { + peer_parent->end_bb->suspend_instruction_ref = suspend_source_instr; + } + IrInstruction *unreach_inst = ira_suspend(ira, suspend_source_instr, result_peer->next_bb, + &result_peer->suspend_pos); + if (result_peer->next_bb == nullptr) { + ir_start_next_bb(ira); + } + return unreach_inst; + } + + IrInstruction *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, peer_parent->parent, + peer_parent->resolved_type, nullptr, force_runtime, non_null_comptime); + if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value.type) || + parent_result_loc->value.type->id == ZigTypeIdUnreachable) + { + return parent_result_loc; + } + // because is_comptime is false, we mark this a runtime pointer + parent_result_loc->value.special = ConstValSpecialRuntime; + result_loc->written = true; + result_loc->resolved_loc = parent_result_loc; + return result_loc->resolved_loc; + } + case ResultLocIdBitCast: { + ResultLocBitCast *result_bit_cast = reinterpret_cast(result_loc); + ZigType *dest_type = ir_resolve_type(ira, result_bit_cast->base.source_instruction->child); + if (type_is_invalid(dest_type)) + return 
ira->codegen->invalid_instruction; + + if (get_codegen_ptr_type(dest_type) != nullptr) { + ir_add_error(ira, result_loc->source_instruction, + buf_sprintf("unable to @bitCast to pointer type '%s'", buf_ptr(&dest_type->name))); + return ira->codegen->invalid_instruction; + } + + if (!type_can_bit_cast(dest_type)) { + ir_add_error(ira, result_loc->source_instruction, + buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name))); + return ira->codegen->invalid_instruction; + } + + if (get_codegen_ptr_type(value_type) != nullptr) { + ir_add_error(ira, suspend_source_instr, + buf_sprintf("unable to @bitCast from pointer type '%s'", buf_ptr(&value_type->name))); + return ira->codegen->invalid_instruction; + } + + if (!type_can_bit_cast(value_type)) { + ir_add_error(ira, suspend_source_instr, + buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&value_type->name))); + return ira->codegen->invalid_instruction; + } + + IrInstruction *bitcasted_value; + if (value != nullptr) { + bitcasted_value = ir_analyze_bit_cast(ira, result_loc->source_instruction, value, dest_type); + } else { + bitcasted_value = nullptr; + } + + IrInstruction *parent_result_loc = ir_resolve_result(ira, suspend_source_instr, result_bit_cast->parent, + dest_type, bitcasted_value, force_runtime, non_null_comptime); + if (parent_result_loc == nullptr || type_is_invalid(parent_result_loc->value.type) || + parent_result_loc->value.type->id == ZigTypeIdUnreachable) + { + return parent_result_loc; + } + ZigType *parent_ptr_type = parent_result_loc->value.type; + assert(parent_ptr_type->id == ZigTypeIdPointer); + if ((err = type_resolve(ira->codegen, parent_ptr_type->data.pointer.child_type, + ResolveStatusAlignmentKnown))) + { + return ira->codegen->invalid_instruction; + } + uint64_t parent_ptr_align = get_ptr_align(ira->codegen, parent_ptr_type); + ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, value_type, + parent_ptr_type->data.pointer.is_const, parent_ptr_type->data.pointer.is_volatile, PtrLenSingle, + parent_ptr_align, 0, 0, parent_ptr_type->data.pointer.allow_zero); + + result_loc->written = true; + result_loc->resolved_loc = ir_analyze_ptr_cast(ira, suspend_source_instr, parent_result_loc, + ptr_type, result_bit_cast->base.source_instruction, false); + return result_loc->resolved_loc; + } + } + zig_unreachable(); +} + +static IrInstruction *ir_resolve_result(IrAnalyze *ira, IrInstruction *suspend_source_instr, + ResultLoc *result_loc_pass1, ZigType *value_type, IrInstruction *value, bool force_runtime, + bool non_null_comptime) +{ + IrInstruction *result_loc = ir_resolve_result_raw(ira, suspend_source_instr, result_loc_pass1, value_type, + value, force_runtime, non_null_comptime); + if (result_loc == nullptr || (instr_is_unreachable(result_loc) || type_is_invalid(result_loc->value.type))) + return result_loc; + + if ((force_runtime || (value != nullptr && !instr_is_comptime(value))) && + result_loc_pass1->written && result_loc->value.data.x_ptr.mut == ConstPtrMutInfer) + { + result_loc->value.special = ConstValSpecialRuntime; + } + + ir_assert(result_loc->value.type->id == ZigTypeIdPointer, suspend_source_instr); + ZigType *actual_elem_type = result_loc->value.type->data.pointer.child_type; + if (actual_elem_type->id == ZigTypeIdOptional && value_type->id != ZigTypeIdOptional && + value_type->id != ZigTypeIdNull) + { + return ir_analyze_unwrap_optional_payload(ira, suspend_source_instr, result_loc, false, true); + } else if (actual_elem_type->id == ZigTypeIdErrorUnion && value_type->id != 
ZigTypeIdErrorUnion) { + if (value_type->id == ZigTypeIdErrorSet) { + return ir_analyze_unwrap_err_code(ira, suspend_source_instr, result_loc, true); + } else { + IrInstruction *unwrapped_err_ptr = ir_analyze_unwrap_error_payload(ira, suspend_source_instr, + result_loc, false, true); + ZigType *actual_payload_type = actual_elem_type->data.error_union.payload_type; + if (actual_payload_type->id == ZigTypeIdOptional && value_type->id != ZigTypeIdOptional) { + return ir_analyze_unwrap_optional_payload(ira, suspend_source_instr, unwrapped_err_ptr, false, true); + } else { + return unwrapped_err_ptr; + } + } + } else if (is_slice(actual_elem_type) && value_type->id == ZigTypeIdArray) { + // need to allow EndExpr to do the implicit cast from array to slice + result_loc_pass1->written = false; + } + return result_loc; +} + +static IrInstruction *ir_analyze_instruction_implicit_cast(IrAnalyze *ira, IrInstructionImplicitCast *instruction) { + ZigType *dest_type = ir_resolve_type(ira, instruction->dest_type->child); + if (type_is_invalid(dest_type)) + return ira->codegen->invalid_instruction; + + IrInstruction *target = instruction->target->child; + if (type_is_invalid(target->value.type)) + return ira->codegen->invalid_instruction; + + return ir_implicit_cast_with_result(ira, target, dest_type, instruction->result_loc); +} + +static IrInstruction *ir_analyze_instruction_resolve_result(IrAnalyze *ira, IrInstructionResolveResult *instruction) { + ZigType *implicit_elem_type = ir_resolve_type(ira, instruction->ty->child); + if (type_is_invalid(implicit_elem_type)) + return ira->codegen->invalid_instruction; + IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, + implicit_elem_type, nullptr, false, true); + if (result_loc != nullptr) + return result_loc; + + ZigFn *fn = exec_fn_entry(ira->new_irb.exec); + if (fn != nullptr && fn->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync && + instruction->result_loc->id == ResultLocIdReturn) + { + result_loc = ir_resolve_result(ira, &instruction->base, no_result_loc(), + implicit_elem_type, nullptr, false, true); + if (result_loc != nullptr && + (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) + { + return result_loc; + } + result_loc->value.special = ConstValSpecialRuntime; + return result_loc; + } + + IrInstruction *result = ir_const(ira, &instruction->base, implicit_elem_type); + result->value.special = ConstValSpecialUndef; + IrInstruction *ptr = ir_get_ref(ira, &instruction->base, result, false, false); + ptr->value.data.x_ptr.mut = ConstPtrMutComptimeVar; + return ptr; +} + +static void ir_reset_result(ResultLoc *result_loc) { + result_loc->written = false; + result_loc->resolved_loc = nullptr; + result_loc->gen_instruction = nullptr; + result_loc->implicit_elem_type = nullptr; + switch (result_loc->id) { + case ResultLocIdInvalid: + zig_unreachable(); + case ResultLocIdPeerParent: { + ResultLocPeerParent *peer_parent = reinterpret_cast(result_loc); + peer_parent->skipped = false; + peer_parent->done_resuming = false; + peer_parent->resolved_type = nullptr; + for (size_t i = 0; i < peer_parent->peers.length; i += 1) { + ir_reset_result(&peer_parent->peers.at(i)->base); + } + break; + } + case ResultLocIdVar: { + IrInstructionAllocaSrc *alloca_src = + reinterpret_cast(result_loc->source_instruction); + alloca_src->base.child = nullptr; + break; + } + case ResultLocIdPeer: + case ResultLocIdNone: + case ResultLocIdReturn: + case ResultLocIdInstruction: + case 
ResultLocIdBitCast: + break; + } +} + +static IrInstruction *ir_analyze_instruction_reset_result(IrAnalyze *ira, IrInstructionResetResult *instruction) { + ir_reset_result(instruction->result_loc); + return ir_const_void(ira, &instruction->base); +} + +static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, ZigType *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst) { @@ -14098,7 +15494,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *c ir_assert(async_allocator_inst->value.type->id == ZigTypeIdPointer, &call_instruction->base); ZigType *container_type = async_allocator_inst->value.type->data.pointer.child_type; IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, realloc_field_name, &call_instruction->base, - async_allocator_inst, container_type); + async_allocator_inst, container_type, false); if (type_is_invalid(field_ptr_inst->value.type)) { return ira->codegen->invalid_instruction; } @@ -14123,10 +15519,15 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *c ZigType *promise_type = get_promise_type(ira->codegen, return_type); ZigType *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type); - IrInstruction *result = ir_build_call(&ira->new_irb, call_instruction->base.scope, call_instruction->base.source_node, - fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst, nullptr); - result->value.type = async_return_type; - return result; + IrInstruction *result_loc = ir_resolve_result(ira, &call_instruction->base, no_result_loc(), + async_return_type, nullptr, true, true); + if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { + return result_loc; + } + + return ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, arg_count, + casted_args, FnInlineAuto, true, async_allocator_inst, nullptr, result_loc, + async_return_type); } static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node, @@ -14149,7 +15550,7 @@ static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node casted_arg = arg; } - ConstExprValue *arg_val = ir_resolve_const(ira, casted_arg, UndefBad); + ConstExprValue *arg_val = ir_resolve_const(ira, casted_arg, UndefOk); if (!arg_val) return false; @@ -14205,7 +15606,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod arg_val = create_const_runtime(casted_arg->value.type); } if (arg_part_of_generic_id) { - generic_id->params[generic_id->param_count] = *arg_val; + copy_const_val(&generic_id->params[generic_id->param_count], arg_val, true); generic_id->param_count += 1; } @@ -14376,7 +15777,9 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source ir_add_error(ira, source_instr, buf_sprintf("cannot assign to constant")); return ira->codegen->invalid_instruction; } - if (ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar) { + if (ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar || + ptr->value.data.x_ptr.mut == ConstPtrMutInfer) + { if (instr_is_comptime(value)) { ConstExprValue *dest_val = const_ptr_pointee(ira, ira->codegen, &ptr->value, source_instr->source_node); if (dest_val == nullptr) @@ -14390,18 +15793,24 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source // ConstPtrMutComptimeVar, thus defeating the logic below. 
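+                // ConstPtrMutInfer pointers also reach this code: a comptime-known value is
+                // copied into the inferred pointee here, while a runtime store (handled after
+                // this block) sets the pointer's value to ConstValSpecialRuntime instead of
+                // reporting an error. For example, `var x = foo();` with a runtime-only foo()
+                // starts out with an inferred-comptime alloca that is downgraded at that point.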
bool same_global_refs = ptr->value.data.x_ptr.mut != ConstPtrMutComptimeVar; copy_const_val(dest_val, &value->value, same_global_refs); - if (!ira->new_irb.current_basic_block->must_be_comptime_source_instr) { + if (ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar && + !ira->new_irb.current_basic_block->must_be_comptime_source_instr) + { ira->new_irb.current_basic_block->must_be_comptime_source_instr = source_instr; } return ir_const_void(ira, source_instr); } } - ir_add_error(ira, source_instr, - buf_sprintf("cannot store runtime value in compile time variable")); - ConstExprValue *dest_val = const_ptr_pointee_unchecked(ira->codegen, &ptr->value); - dest_val->type = ira->codegen->builtin_types.entry_invalid; + if (ptr->value.data.x_ptr.mut == ConstPtrMutInfer) { + ptr->value.special = ConstValSpecialRuntime; + } else { + ir_add_error(ira, source_instr, + buf_sprintf("cannot store runtime value in compile time variable")); + ConstExprValue *dest_val = const_ptr_pointee_unchecked(ira->codegen, &ptr->value); + dest_val->type = ira->codegen->builtin_types.entry_invalid; - return ira->codegen->invalid_instruction; + return ira->codegen->invalid_instruction; + } } } @@ -14430,7 +15839,7 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source return result; } -static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call_instruction, +static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction, ZigFn *fn_entry, ZigType *fn_type, IrInstruction *fn_ref, IrInstruction *first_arg_ptr, bool comptime_fn_call, FnInline fn_inline) { @@ -14533,7 +15942,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call if (!first_arg_known_bare && handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) { first_arg = first_arg_ptr; } else { - first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr); + first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr, nullptr); if (type_is_invalid(first_arg->value.type)) return ira->codegen->invalid_instruction; } @@ -14692,7 +16101,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call if (!first_arg_known_bare && handle_is_ptr(first_arg_ptr->value.type->data.pointer.child_type)) { first_arg = first_arg_ptr; } else { - first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr); + first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr, nullptr); if (type_is_invalid(first_arg->value.type)) return ira->codegen->invalid_instruction; } @@ -14736,7 +16145,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call if (type_is_invalid(arg_var_ptr_inst->value.type)) return ira->codegen->invalid_instruction; - IrInstruction *arg_tuple_arg = ir_get_deref(ira, arg, arg_var_ptr_inst); + IrInstruction *arg_tuple_arg = ir_get_deref(ira, arg, arg_var_ptr_inst, nullptr); if (type_is_invalid(arg_tuple_arg->value.type)) return ira->codegen->invalid_instruction; @@ -14785,7 +16194,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call nullptr, nullptr, fn_proto_node->data.fn_proto.align_expr, nullptr, ira->new_irb.exec, nullptr); IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb, impl_fn->child_scope, fn_proto_node->data.fn_proto.align_expr); - const_instruction->base.value = *align_result; + copy_const_val(&const_instruction->base.value, align_result, true); uint32_t align_bytes = 0; ir_resolve_align(ira, &const_instruction->base, &align_bytes); 
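+            // (The copy above feeds ir_resolve_align a standalone comptime IrInstructionConst
+            // without modifying the original align_result value.)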
@@ -14867,6 +16276,19 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call } FnTypeId *impl_fn_type_id = &impl_fn->type_entry->data.fn.fn_type_id; + IrInstruction *result_loc; + if (handle_is_ptr(impl_fn_type_id->return_type)) { + result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, + impl_fn_type_id->return_type, nullptr, true, true); + if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || + instr_is_unreachable(result_loc))) + { + return result_loc; + } + } else { + result_loc = nullptr; + } + if (fn_type_can_fail(impl_fn_type_id)) { parent_fn_entry->calls_or_awaits_errorable_fn = true; } @@ -14875,18 +16297,14 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call if (call_instruction->is_async) { IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, fn_ref, casted_args, impl_param_count, async_allocator_inst); - ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result); } assert(async_allocator_inst == nullptr); - IrInstruction *new_call_instruction = ir_build_call(&ira->new_irb, - call_instruction->base.scope, call_instruction->base.source_node, - impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, - call_instruction->is_async, nullptr, casted_new_stack); - new_call_instruction->value.type = impl_fn_type_id->return_type; - - ir_add_alloca(ira, new_call_instruction, impl_fn_type_id->return_type); + IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, + impl_fn, nullptr, impl_param_count, casted_args, fn_inline, + call_instruction->is_async, nullptr, casted_new_stack, result_loc, + impl_fn_type_id->return_type); return ir_finish_anal(ira, new_call_instruction); } @@ -14914,7 +16332,7 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call { first_arg = first_arg_ptr; } else { - first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr); + first_arg = ir_get_deref(ira, first_arg_ptr, first_arg_ptr, nullptr); if (type_is_invalid(first_arg->value.type)) return ira->codegen->invalid_instruction; } @@ -14971,7 +16389,6 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, casted_args, call_param_count, async_allocator_inst); - ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result); } @@ -14981,15 +16398,24 @@ static IrInstruction *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *call return ira->codegen->invalid_instruction; } - IrInstruction *new_call_instruction = ir_build_call(&ira->new_irb, - call_instruction->base.scope, call_instruction->base.source_node, - fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr, casted_new_stack); - new_call_instruction->value.type = return_type; - ir_add_alloca(ira, new_call_instruction, return_type); + IrInstruction *result_loc; + if (handle_is_ptr(return_type)) { + result_loc = ir_resolve_result(ira, &call_instruction->base, call_instruction->result_loc, + return_type, nullptr, true, true); + if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { + return result_loc; + } + } else { + result_loc = nullptr; + } + + IrInstruction *new_call_instruction = ir_build_call_gen(ira, &call_instruction->base, fn_entry, fn_ref, + call_param_count, casted_args, 
fn_inline, false, nullptr, casted_new_stack, + result_loc, return_type); return ir_finish_anal(ira, new_call_instruction); } -static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCall *call_instruction) { +static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction) { IrInstruction *fn_ref = call_instruction->fn_ref->child; if (type_is_invalid(fn_ref->value.type)) return ira->codegen->invalid_instruction; @@ -15013,7 +16439,8 @@ static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionC IrInstruction *arg = call_instruction->args[0]->child; - IrInstruction *cast_instruction = ir_analyze_cast(ira, &call_instruction->base, dest_type, arg); + IrInstruction *cast_instruction = ir_analyze_cast(ira, &call_instruction->base, dest_type, arg, + call_instruction->result_loc); if (type_is_invalid(cast_instruction->value.type)) return ira->codegen->invalid_instruction; return ir_finish_anal(ira, cast_instruction); @@ -15066,7 +16493,7 @@ static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source if (dst_size <= src_size) { if (src_size == dst_size && types_have_same_zig_comptime_repr(pointee->type, out_val->type)) { - copy_const_val(out_val, pointee, ptr_val->data.x_ptr.mut == ConstPtrMutComptimeConst); + copy_const_val(out_val, pointee, ptr_val->data.x_ptr.mut != ConstPtrMutComptimeVar); return ErrorNone; } Buf buf = BUF_INIT; @@ -15315,7 +16742,7 @@ static IrInstruction *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstruction return ira->codegen->invalid_instruction; } - IrInstruction *result = ir_get_deref(ira, &instruction->base, ptr); + IrInstruction *result = ir_get_deref(ira, &instruction->base, ptr, instruction->result_loc); if (result == ira->codegen->invalid_instruction) return ira->codegen->invalid_instruction; @@ -15334,6 +16761,19 @@ static IrInstruction *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstruction zig_unreachable(); } +static void ir_push_resume(IrAnalyze *ira, IrSuspendPosition pos) { + IrBasicBlock *old_bb = ira->old_irb.exec->basic_block_list.at(pos.basic_block_index); + if (old_bb->in_resume_stack) return; + ira->resume_stack.append(pos); + old_bb->in_resume_stack = true; +} + +static void ir_push_resume_block(IrAnalyze *ira, IrBasicBlock *old_bb) { + if (ira->resume_stack.length != 0) { + ir_push_resume(ira, {old_bb->index, 0}); + } +} + static IrInstruction *ir_analyze_instruction_br(IrAnalyze *ira, IrInstructionBr *br_instruction) { IrBasicBlock *old_dest_block = br_instruction->dest_block; @@ -15341,13 +16781,15 @@ static IrInstruction *ir_analyze_instruction_br(IrAnalyze *ira, IrInstructionBr if (!ir_resolve_comptime(ira, br_instruction->is_comptime->child, &is_comptime)) return ir_unreach_error(ira); - if (is_comptime || old_dest_block->ref_count == 1) + if (is_comptime || (old_dest_block->ref_count == 1 && old_dest_block->suspend_instruction_ref == nullptr)) return ir_inline_bb(ira, &br_instruction->base, old_dest_block); IrBasicBlock *new_bb = ir_get_new_bb_runtime(ira, old_dest_block, &br_instruction->base); if (new_bb == nullptr) return ir_unreach_error(ira); + ir_push_resume_block(ira, old_dest_block); + IrInstruction *result = ir_build_br(&ira->new_irb, br_instruction->base.scope, br_instruction->base.source_node, new_bb, nullptr); result->value.type = ira->codegen->builtin_types.entry_unreachable; @@ -15376,13 +16818,15 @@ static IrInstruction *ir_analyze_instruction_cond_br(IrAnalyze *ira, IrInstructi IrBasicBlock *old_dest_block = 
cond_is_true ? cond_br_instruction->then_block : cond_br_instruction->else_block; - if (is_comptime || old_dest_block->ref_count == 1) + if (is_comptime || (old_dest_block->ref_count == 1 && old_dest_block->suspend_instruction_ref == nullptr)) return ir_inline_bb(ira, &cond_br_instruction->base, old_dest_block); IrBasicBlock *new_dest_block = ir_get_new_bb_runtime(ira, old_dest_block, &cond_br_instruction->base); if (new_dest_block == nullptr) return ir_unreach_error(ira); + ir_push_resume_block(ira, old_dest_block); + IrInstruction *result = ir_build_br(&ira->new_irb, cond_br_instruction->base.scope, cond_br_instruction->base.source_node, new_dest_block, nullptr); result->value.type = ira->codegen->builtin_types.entry_unreachable; @@ -15398,6 +16842,9 @@ static IrInstruction *ir_analyze_instruction_cond_br(IrAnalyze *ira, IrInstructi if (new_else_block == nullptr) return ir_unreach_error(ira); + ir_push_resume_block(ira, cond_br_instruction->else_block); + ir_push_resume_block(ira, cond_br_instruction->then_block); + IrInstruction *result = ir_build_cond_br(&ira->new_irb, cond_br_instruction->base.scope, cond_br_instruction->base.source_node, casted_condition, new_then_block, new_else_block, nullptr); @@ -15436,6 +16883,80 @@ static IrInstruction *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionPh zig_unreachable(); } + ResultLocPeerParent *peer_parent = phi_instruction->peer_parent; + if (peer_parent != nullptr && !peer_parent->skipped && !peer_parent->done_resuming && + peer_parent->peers.length >= 2) + { + if (peer_parent->resolved_type == nullptr) { + IrInstruction **instructions = allocate(peer_parent->peers.length); + for (size_t i = 0; i < peer_parent->peers.length; i += 1) { + ResultLocPeer *this_peer = peer_parent->peers.at(i); + + IrInstruction *gen_instruction = this_peer->base.gen_instruction; + if (gen_instruction == nullptr) { + // unreachable instructions will cause implicit_elem_type to be null + if (this_peer->base.implicit_elem_type == nullptr) { + instructions[i] = ir_const_unreachable(ira, this_peer->base.source_instruction); + } else { + instructions[i] = ir_const(ira, this_peer->base.source_instruction, + this_peer->base.implicit_elem_type); + instructions[i]->value.special = ConstValSpecialRuntime; + } + } else { + instructions[i] = gen_instruction; + } + + } + ZigType *expected_type = ir_result_loc_expected_type(ira, &phi_instruction->base, peer_parent->parent); + peer_parent->resolved_type = ir_resolve_peer_types(ira, + peer_parent->base.source_instruction->source_node, expected_type, instructions, + peer_parent->peers.length); + + // the logic below assumes there are no instructions in the new current basic block yet + ir_assert(ira->new_irb.current_basic_block->instruction_list.length == 0, &phi_instruction->base); + + // In case resolving the parent activates a suspend, do it now + IrInstruction *parent_result_loc = ir_resolve_result(ira, &phi_instruction->base, peer_parent->parent, + peer_parent->resolved_type, nullptr, false, false); + if (parent_result_loc != nullptr && + (type_is_invalid(parent_result_loc->value.type) || instr_is_unreachable(parent_result_loc))) + { + return parent_result_loc; + } + // If the above code generated any instructions in the current basic block, we need + // to move them to the peer parent predecessor. 
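+            // The moved instructions are spliced in just before the predecessor's terminating
+            // branch: the branch is popped, the instructions are re-appended in their original
+            // order, and the branch is pushed back so it remains the block terminator.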
+ ZigList instrs_to_move = {}; + while (ira->new_irb.current_basic_block->instruction_list.length != 0) { + instrs_to_move.append(ira->new_irb.current_basic_block->instruction_list.pop()); + } + if (instrs_to_move.length != 0) { + IrBasicBlock *predecessor = peer_parent->base.source_instruction->child->owner_bb; + IrInstruction *branch_instruction = predecessor->instruction_list.pop(); + ir_assert(branch_instruction->value.type->id == ZigTypeIdUnreachable, &phi_instruction->base); + while (instrs_to_move.length != 0) { + predecessor->instruction_list.append(instrs_to_move.pop()); + } + predecessor->instruction_list.append(branch_instruction); + } + } + + IrSuspendPosition suspend_pos; + ira_suspend(ira, &phi_instruction->base, nullptr, &suspend_pos); + ir_push_resume(ira, suspend_pos); + + for (size_t i = 0; i < peer_parent->peers.length; i += 1) { + ResultLocPeer *opposite_peer = peer_parent->peers.at(peer_parent->peers.length - i - 1); + if (opposite_peer->base.implicit_elem_type != nullptr && + opposite_peer->base.implicit_elem_type->id != ZigTypeIdUnreachable) + { + ir_push_resume(ira, opposite_peer->suspend_pos); + } + } + + peer_parent->done_resuming = true; + return ira_resume(ira); + } + ZigList new_incoming_blocks = {0}; ZigList new_incoming_values = {0}; @@ -15508,7 +17029,7 @@ static IrInstruction *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionPh IrInstruction *branch_instruction = predecessor->instruction_list.pop(); ir_set_cursor_at_end(&ira->new_irb, predecessor); IrInstruction *casted_value = ir_implicit_cast(ira, new_value, resolved_type); - if (casted_value == ira->codegen->invalid_instruction) { + if (type_is_invalid(casted_value->value.type)) { return ira->codegen->invalid_instruction; } new_incoming_values.items[i] = casted_value; @@ -15524,7 +17045,7 @@ static IrInstruction *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionPh IrInstruction *result = ir_build_phi(&ira->new_irb, phi_instruction->base.scope, phi_instruction->base.source_node, - new_incoming_blocks.length, new_incoming_blocks.items, new_incoming_values.items); + new_incoming_blocks.length, new_incoming_blocks.items, new_incoming_values.items, nullptr); result->value.type = resolved_type; if (all_stack_ptrs) { @@ -15742,6 +17263,52 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct if (array_ptr_val == nullptr) return ira->codegen->invalid_instruction; + if (array_ptr_val->special == ConstValSpecialUndef && elem_ptr_instruction->init_array_type != nullptr) { + if (array_type->id == ZigTypeIdArray) { + array_ptr_val->data.x_array.special = ConstArraySpecialNone; + array_ptr_val->data.x_array.data.s_none.elements = create_const_vals(array_type->data.array.len); + array_ptr_val->special = ConstValSpecialStatic; + for (size_t i = 0; i < array_type->data.array.len; i += 1) { + ConstExprValue *elem_val = &array_ptr_val->data.x_array.data.s_none.elements[i]; + elem_val->special = ConstValSpecialUndef; + elem_val->type = array_type->data.array.child_type; + elem_val->parent.id = ConstParentIdArray; + elem_val->parent.data.p_array.array_val = array_ptr_val; + elem_val->parent.data.p_array.elem_index = i; + } + } else if (is_slice(array_type)) { + ZigType *actual_array_type = ir_resolve_type(ira, elem_ptr_instruction->init_array_type->child); + if (type_is_invalid(actual_array_type)) + return ira->codegen->invalid_instruction; + if (actual_array_type->id != ZigTypeIdArray) { + ir_add_error(ira, elem_ptr_instruction->init_array_type, + buf_sprintf("expected array type 
or [_], found slice")); + return ira->codegen->invalid_instruction; + } + + ConstExprValue *array_init_val = create_const_vals(1); + array_init_val->special = ConstValSpecialStatic; + array_init_val->type = actual_array_type; + array_init_val->data.x_array.special = ConstArraySpecialNone; + array_init_val->data.x_array.data.s_none.elements = create_const_vals(actual_array_type->data.array.len); + array_init_val->special = ConstValSpecialStatic; + for (size_t i = 0; i < actual_array_type->data.array.len; i += 1) { + ConstExprValue *elem_val = &array_init_val->data.x_array.data.s_none.elements[i]; + elem_val->special = ConstValSpecialUndef; + elem_val->type = actual_array_type->data.array.child_type; + elem_val->parent.id = ConstParentIdArray; + elem_val->parent.data.p_array.array_val = array_init_val; + elem_val->parent.data.p_array.elem_index = i; + } + + init_const_slice(ira->codegen, array_ptr_val, array_init_val, 0, actual_array_type->data.array.len, + false); + array_ptr_val->data.x_struct.fields[slice_ptr_index].data.x_ptr.mut = ConstPtrMutInfer; + } else { + zig_unreachable(); + } + } + if (array_ptr_val->special != ConstValSpecialRuntime && (array_type->id != ZigTypeIdPointer || array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr)) @@ -15807,8 +17374,9 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct } else if (is_slice(array_type)) { ConstExprValue *ptr_field = &array_ptr_val->data.x_struct.fields[slice_ptr_index]; if (ptr_field->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) { - IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node, - array_ptr, casted_elem_index, false, elem_ptr_instruction->ptr_len); + IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, + elem_ptr_instruction->base.source_node, array_ptr, casted_elem_index, false, + elem_ptr_instruction->ptr_len, nullptr); result->value.type = return_type; return result; } @@ -15861,7 +17429,16 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct } return result; } else if (array_type->id == ZigTypeIdArray) { - IrInstruction *result = ir_const(ira, &elem_ptr_instruction->base, return_type); + IrInstruction *result; + if (orig_array_ptr_val->data.x_ptr.mut == ConstPtrMutInfer) { + result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, + elem_ptr_instruction->base.source_node, array_ptr, casted_elem_index, + false, elem_ptr_instruction->ptr_len, elem_ptr_instruction->init_array_type); + result->value.type = return_type; + result->value.special = ConstValSpecialStatic; + } else { + result = ir_const(ira, &elem_ptr_instruction->base, return_type); + } ConstExprValue *out_val = &result->value; out_val->data.x_ptr.special = ConstPtrSpecialBaseArray; out_val->data.x_ptr.mut = orig_array_ptr_val->data.x_ptr.mut; @@ -15873,7 +17450,6 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct } } } - } else { // runtime known element index switch (type_requires_comptime(ira->codegen, return_type)) { @@ -15899,8 +17475,9 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct } } - IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node, - array_ptr, casted_elem_index, safety_check_on, elem_ptr_instruction->ptr_len); + IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, 
elem_ptr_instruction->base.scope, + elem_ptr_instruction->base.source_node, array_ptr, casted_elem_index, safety_check_on, + elem_ptr_instruction->ptr_len, elem_ptr_instruction->init_array_type); result->value.type = return_type; return result; } @@ -15945,8 +17522,80 @@ static IrInstruction *ir_analyze_container_member_access_inner(IrAnalyze *ira, return ira->codegen->invalid_instruction; } +static IrInstruction *ir_analyze_struct_field_ptr(IrAnalyze *ira, IrInstruction *source_instr, + TypeStructField *field, IrInstruction *struct_ptr, ZigType *struct_type, bool initializing) +{ + switch (type_has_one_possible_value(ira->codegen, field->type_entry)) { + case OnePossibleValueInvalid: + return ira->codegen->invalid_instruction; + case OnePossibleValueYes: { + IrInstruction *elem = ir_const(ira, source_instr, field->type_entry); + return ir_get_ref(ira, source_instr, elem, false, false); + } + case OnePossibleValueNo: + break; + } + assert(struct_ptr->value.type->id == ZigTypeIdPointer); + bool is_packed = (struct_type->data.structure.layout == ContainerLayoutPacked); + uint32_t align_bytes = is_packed ? 1 : get_abi_alignment(ira->codegen, field->type_entry); + uint32_t ptr_bit_offset = struct_ptr->value.type->data.pointer.bit_offset_in_host; + uint32_t ptr_host_int_bytes = struct_ptr->value.type->data.pointer.host_int_bytes; + uint32_t host_int_bytes_for_result_type = (ptr_host_int_bytes == 0) ? + get_host_int_bytes(ira->codegen, struct_type, field) : ptr_host_int_bytes; + bool is_const = struct_ptr->value.type->data.pointer.is_const; + bool is_volatile = struct_ptr->value.type->data.pointer.is_volatile; + ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, field->type_entry, + is_const, is_volatile, PtrLenSingle, align_bytes, + (uint32_t)(ptr_bit_offset + field->bit_offset_in_host), + (uint32_t)host_int_bytes_for_result_type, false); + if (instr_is_comptime(struct_ptr)) { + ConstExprValue *ptr_val = ir_resolve_const(ira, struct_ptr, UndefBad); + if (!ptr_val) + return ira->codegen->invalid_instruction; + + if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) { + ConstExprValue *struct_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node); + if (struct_val == nullptr) + return ira->codegen->invalid_instruction; + if (type_is_invalid(struct_val->type)) + return ira->codegen->invalid_instruction; + if (struct_val->special == ConstValSpecialUndef && initializing) { + struct_val->data.x_struct.fields = create_const_vals(struct_type->data.structure.src_field_count); + struct_val->special = ConstValSpecialStatic; + for (size_t i = 0; i < struct_type->data.structure.src_field_count; i += 1) { + ConstExprValue *field_val = &struct_val->data.x_struct.fields[i]; + field_val->special = ConstValSpecialUndef; + field_val->type = struct_type->data.structure.fields[i].type_entry; + field_val->parent.id = ConstParentIdStruct; + field_val->parent.data.p_struct.struct_val = struct_val; + field_val->parent.data.p_struct.field_index = i; + } + } + IrInstruction *result; + if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) { + result = ir_build_struct_field_ptr(&ira->new_irb, source_instr->scope, + source_instr->source_node, struct_ptr, field); + result->value.type = ptr_type; + result->value.special = ConstValSpecialStatic; + } else { + result = ir_const(ira, source_instr, ptr_type); + } + ConstExprValue *const_val = &result->value; + const_val->data.x_ptr.special = ConstPtrSpecialBaseStruct; + const_val->data.x_ptr.mut = ptr_val->data.x_ptr.mut; + 
const_val->data.x_ptr.data.base_struct.struct_val = struct_val; + const_val->data.x_ptr.data.base_struct.field_index = field->src_index; + return result; + } + } + IrInstruction *result = ir_build_struct_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, + struct_ptr, field); + result->value.type = ptr_type; + return result; +} + static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, - IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type) + IrInstruction *source_instr, IrInstruction *container_ptr, ZigType *container_type, bool initializing) { Error err; @@ -15955,81 +17604,55 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ return ira->codegen->invalid_instruction; assert(container_ptr->value.type->id == ZigTypeIdPointer); - bool is_const = container_ptr->value.type->data.pointer.is_const; - bool is_volatile = container_ptr->value.type->data.pointer.is_volatile; if (bare_type->id == ZigTypeIdStruct) { TypeStructField *field = find_struct_type_field(bare_type, field_name); - if (field) { - switch (type_has_one_possible_value(ira->codegen, field->type_entry)) { - case OnePossibleValueInvalid: - return ira->codegen->invalid_instruction; - case OnePossibleValueYes: { - IrInstruction *elem = ir_const(ira, source_instr, field->type_entry); - return ir_get_ref(ira, source_instr, elem, false, false); - } - case OnePossibleValueNo: - break; - } - bool is_packed = (bare_type->data.structure.layout == ContainerLayoutPacked); - uint32_t align_bytes = is_packed ? 1 : get_abi_alignment(ira->codegen, field->type_entry); - uint32_t ptr_bit_offset = container_ptr->value.type->data.pointer.bit_offset_in_host; - uint32_t ptr_host_int_bytes = container_ptr->value.type->data.pointer.host_int_bytes; - uint32_t host_int_bytes_for_result_type = (ptr_host_int_bytes == 0) ? 
- get_host_int_bytes(ira->codegen, bare_type, field) : ptr_host_int_bytes; - if (instr_is_comptime(container_ptr)) { - ConstExprValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad); - if (!ptr_val) - return ira->codegen->invalid_instruction; - - if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) { - ConstExprValue *struct_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node); - if (struct_val == nullptr) - return ira->codegen->invalid_instruction; - if (type_is_invalid(struct_val->type)) - return ira->codegen->invalid_instruction; - ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, field->type_entry, - is_const, is_volatile, PtrLenSingle, align_bytes, - (uint32_t)(ptr_bit_offset + field->bit_offset_in_host), - (uint32_t)host_int_bytes_for_result_type, false); - IrInstruction *result = ir_const(ira, source_instr, ptr_type); - ConstExprValue *const_val = &result->value; - const_val->data.x_ptr.special = ConstPtrSpecialBaseStruct; - const_val->data.x_ptr.mut = container_ptr->value.data.x_ptr.mut; - const_val->data.x_ptr.data.base_struct.struct_val = struct_val; - const_val->data.x_ptr.data.base_struct.field_index = field->src_index; - return result; - } - } - IrInstruction *result = ir_build_struct_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, - container_ptr, field); - result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, - PtrLenSingle, - align_bytes, - (uint32_t)(ptr_bit_offset + field->bit_offset_in_host), - host_int_bytes_for_result_type, false); - return result; + if (field != nullptr) { + return ir_analyze_struct_field_ptr(ira, source_instr, field, container_ptr, bare_type, initializing); } else { return ir_analyze_container_member_access_inner(ira, bare_type, field_name, source_instr, container_ptr, container_type); } - } else if (bare_type->id == ZigTypeIdEnum) { + } + + if (bare_type->id == ZigTypeIdEnum) { return ir_analyze_container_member_access_inner(ira, bare_type, field_name, source_instr, container_ptr, container_type); - } else if (bare_type->id == ZigTypeIdUnion) { + } + + if (bare_type->id == ZigTypeIdUnion) { + bool is_const = container_ptr->value.type->data.pointer.is_const; + bool is_volatile = container_ptr->value.type->data.pointer.is_volatile; + TypeUnionField *field = find_union_type_field(bare_type, field_name); - if (field) { - if (instr_is_comptime(container_ptr)) { - ConstExprValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad); - if (!ptr_val) + if (field == nullptr) { + return ir_analyze_container_member_access_inner(ira, bare_type, field_name, + source_instr, container_ptr, container_type); + } + ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, field->type_entry, + is_const, is_volatile, PtrLenSingle, 0, 0, 0, false); + if (instr_is_comptime(container_ptr)) { + ConstExprValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad); + if (!ptr_val) + return ira->codegen->invalid_instruction; + + if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) { + ConstExprValue *union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node); + if (union_val == nullptr) + return ira->codegen->invalid_instruction; + if (type_is_invalid(union_val->type)) return ira->codegen->invalid_instruction; - if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) { - ConstExprValue *union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node); - if (union_val == 
nullptr) - return ira->codegen->invalid_instruction; - if (type_is_invalid(union_val->type)) - return ira->codegen->invalid_instruction; + if (initializing) { + ConstExprValue *payload_val = create_const_vals(1); + payload_val->special = ConstValSpecialUndef; + payload_val->type = field->type_entry; + payload_val->parent.id = ConstParentIdUnion; + payload_val->parent.data.p_union.union_val = union_val; + union_val->special = ConstValSpecialStatic; + bigint_init_bigint(&union_val->data.x_union.tag, &field->enum_field->value); + union_val->data.x_union.payload = payload_val; + } else { TypeUnionField *actual_field = find_union_field_by_tag(bare_type, &union_val->data.x_union.tag); if (actual_field == nullptr) zig_unreachable(); @@ -16040,33 +17663,35 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ buf_ptr(actual_field->name))); return ira->codegen->invalid_instruction; } - - ConstExprValue *payload_val = union_val->data.x_union.payload; - - ZigType *field_type = field->type_entry; - ZigType *ptr_type = get_pointer_to_type_extra(ira->codegen, field_type, - is_const, is_volatile, PtrLenSingle, 0, 0, 0, false); - - IrInstruction *result = ir_const(ira, source_instr, ptr_type); - ConstExprValue *const_val = &result->value; - const_val->data.x_ptr.special = ConstPtrSpecialRef; - const_val->data.x_ptr.mut = container_ptr->value.data.x_ptr.mut; - const_val->data.x_ptr.data.ref.pointee = payload_val; - return result; } - } - IrInstruction *result = ir_build_union_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, container_ptr, field); - result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, - PtrLenSingle, 0, 0, 0, false); - return result; - } else { - return ir_analyze_container_member_access_inner(ira, bare_type, field_name, - source_instr, container_ptr, container_type); + ConstExprValue *payload_val = union_val->data.x_union.payload; + + + IrInstruction *result; + if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) { + result = ir_build_union_field_ptr(&ira->new_irb, source_instr->scope, + source_instr->source_node, container_ptr, field, true, initializing); + result->value.type = ptr_type; + result->value.special = ConstValSpecialStatic; + } else { + result = ir_const(ira, source_instr, ptr_type); + } + ConstExprValue *const_val = &result->value; + const_val->data.x_ptr.special = ConstPtrSpecialRef; + const_val->data.x_ptr.mut = container_ptr->value.data.x_ptr.mut; + const_val->data.x_ptr.data.ref.pointee = payload_val; + return result; + } } - } else { - zig_unreachable(); + + IrInstruction *result = ir_build_union_field_ptr(&ira->new_irb, source_instr->scope, + source_instr->source_node, container_ptr, field, true, initializing); + result->value.type = ptr_type; + return result; } + + zig_unreachable(); } static void add_link_lib_symbol(IrAnalyze *ira, Buf *lib_name, Buf *symbol_name, AstNode *source_node) { @@ -16104,6 +17729,11 @@ static void add_link_lib_symbol(IrAnalyze *ira, Buf *lib_name, Buf *symbol_name, link_lib->symbols.append(symbol_name); } +static IrInstruction *ir_error_dependency_loop(IrAnalyze *ira, IrInstruction *source_instr) { + ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("dependency loop detected")); + emit_error_notes_for_ref_stack(ira->codegen, msg); + return ira->codegen->invalid_instruction; +} static IrInstruction *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source_instruction, Tld *tld) { resolve_top_level_decl(ira->codegen, tld, 
source_instruction->source_node); @@ -16118,6 +17748,9 @@ static IrInstruction *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source_ { TldVar *tld_var = (TldVar *)tld; ZigVar *var = tld_var->var; + if (var == nullptr) { + return ir_error_dependency_loop(ira, source_instruction); + } if (tld_var->extern_lib_name != nullptr) { add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, source_instruction->source_node); } @@ -16133,23 +17766,13 @@ static IrInstruction *ir_analyze_decl_ref(IrAnalyze *ira, IrInstruction *source_ if (type_is_invalid(fn_entry->type_entry)) return ira->codegen->invalid_instruction; - // TODO instead of allocating this every time, put it in the tld value and we can reference - // the same one every time - ConstExprValue *const_val = create_const_vals(1); - const_val->special = ConstValSpecialStatic; - const_val->type = fn_entry->type_entry; - const_val->data.x_ptr.data.fn.fn_entry = fn_entry; - const_val->data.x_ptr.special = ConstPtrSpecialFunction; - const_val->data.x_ptr.mut = ConstPtrMutComptimeConst; - if (tld_fn->extern_lib_name != nullptr) { add_link_lib_symbol(ira, tld_fn->extern_lib_name, &fn_entry->symbol_name, source_instruction->source_node); } - bool ptr_is_const = true; - bool ptr_is_volatile = false; - return ir_get_const_ptr(ira, source_instruction, const_val, fn_entry->type_entry, - ConstPtrMutComptimeConst, ptr_is_const, ptr_is_volatile, 0); + IrInstruction *fn_inst = ir_create_const_fn(&ira->new_irb, source_instruction->scope, + source_instruction->source_node, fn_entry); + return ir_get_ref(ira, source_instruction, fn_inst, true, false); } } zig_unreachable(); @@ -16191,14 +17814,14 @@ static IrInstruction *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstruc assert(container_ptr->value.type->id == ZigTypeIdPointer); if (container_type->id == ZigTypeIdPointer) { ZigType *bare_type = container_ref_type(container_type); - IrInstruction *container_child = ir_get_deref(ira, &field_ptr_instruction->base, container_ptr); - IrInstruction *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base, container_child, bare_type); + IrInstruction *container_child = ir_get_deref(ira, &field_ptr_instruction->base, container_ptr, nullptr); + IrInstruction *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base, container_child, bare_type, field_ptr_instruction->initializing); return result; } else { - IrInstruction *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base, container_ptr, container_type); + IrInstruction *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base, container_ptr, container_type, field_ptr_instruction->initializing); return result; } - } else if (is_array_ref(container_type)) { + } else if (is_array_ref(container_type) && !field_ptr_instruction->initializing) { if (buf_eql_str(field_name, "len")) { ConstExprValue *len_val = create_const_vals(1); if (container_type->id == ZigTypeIdPointer) { @@ -16513,6 +18136,10 @@ static IrInstruction *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstruc buf_sprintf("type '%s' does not support field access", buf_ptr(&child_type->name))); return ira->codegen->invalid_instruction; } + } else if (field_ptr_instruction->initializing) { + ir_add_error(ira, &field_ptr_instruction->base, + buf_sprintf("type '%s' does not support struct initialization syntax", buf_ptr(&container_type->name))); + return ira->codegen->invalid_instruction; } else { ir_add_error_node(ira, 
field_ptr_instruction->base.source_node, buf_sprintf("type '%s' does not support field access", buf_ptr(&container_type->name))); @@ -16536,7 +18163,7 @@ static IrInstruction *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstruct IrInstruction *ptr = instruction->ptr->child; if (type_is_invalid(ptr->value.type)) return ira->codegen->invalid_instruction; - return ir_get_deref(ira, &instruction->base, ptr); + return ir_get_deref(ira, &instruction->base, ptr, nullptr); } static IrInstruction *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructionTypeOf *typeof_instruction) { @@ -16547,64 +18174,6 @@ static IrInstruction *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructio return ir_const_type(ira, &typeof_instruction->base, type_entry); } -static IrInstruction *ir_analyze_instruction_to_ptr_type(IrAnalyze *ira, - IrInstructionToPtrType *to_ptr_type_instruction) -{ - Error err; - IrInstruction *ptr_ptr = to_ptr_type_instruction->ptr->child; - if (type_is_invalid(ptr_ptr->value.type)) - return ira->codegen->invalid_instruction; - - ZigType *ptr_ptr_type = ptr_ptr->value.type; - assert(ptr_ptr_type->id == ZigTypeIdPointer); - ZigType *type_entry = ptr_ptr_type->data.pointer.child_type; - - ZigType *ptr_type; - if (type_entry->id == ZigTypeIdArray) { - ptr_type = get_pointer_to_type(ira->codegen, type_entry->data.array.child_type, ptr_ptr_type->data.pointer.is_const); - } else if (is_array_ref(type_entry)) { - ptr_type = get_pointer_to_type(ira->codegen, - type_entry->data.pointer.child_type->data.array.child_type, type_entry->data.pointer.is_const); - } else if (is_slice(type_entry)) { - ZigType *slice_ptr_type = type_entry->data.structure.fields[0].type_entry; - ptr_type = adjust_ptr_len(ira->codegen, slice_ptr_type, PtrLenSingle); - // If the pointer is over-aligned, we may have to reduce it based on the alignment of the element type. 
- if (slice_ptr_type->data.pointer.explicit_alignment != 0) { - ZigType *elem_type = slice_ptr_type->data.pointer.child_type; - if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusAlignmentKnown))) - return ira->codegen->invalid_instruction; - uint32_t elem_align = get_abi_alignment(ira->codegen, elem_type); - uint32_t reduced_align = min(elem_align, slice_ptr_type->data.pointer.explicit_alignment); - ptr_type = adjust_ptr_align(ira->codegen, ptr_type, reduced_align); - } - } else if (type_entry->id == ZigTypeIdArgTuple) { - zig_panic("TODO for loop on var args"); - } else { - ir_add_error_node(ira, to_ptr_type_instruction->base.source_node, - buf_sprintf("expected array type, found '%s'", buf_ptr(&type_entry->name))); - return ira->codegen->invalid_instruction; - } - - return ir_const_type(ira, &to_ptr_type_instruction->base, ptr_type); -} - -static IrInstruction *ir_analyze_instruction_ptr_type_child(IrAnalyze *ira, - IrInstructionPtrTypeChild *ptr_type_child_instruction) -{ - IrInstruction *type_value = ptr_type_child_instruction->value->child; - ZigType *type_entry = ir_resolve_type(ira, type_value); - if (type_is_invalid(type_entry)) - return ira->codegen->invalid_instruction; - - if (type_entry->id != ZigTypeIdPointer) { - ir_add_error_node(ira, ptr_type_child_instruction->base.source_node, - buf_sprintf("expected pointer type, found '%s'", buf_ptr(&type_entry->name))); - return ira->codegen->invalid_instruction; - } - - return ir_const_type(ira, &ptr_type_child_instruction->base, type_entry->data.pointer.child_type); -} - static IrInstruction *ir_analyze_instruction_set_cold(IrAnalyze *ira, IrInstructionSetCold *instruction) { if (ira->new_irb.exec->is_inline) { // ignore setCold when running functions at compile time @@ -17034,7 +18603,7 @@ static IrInstruction *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIns } static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstruction *source_instr, - IrInstruction *base_ptr, bool safety_check_on) + IrInstruction *base_ptr, bool safety_check_on, bool initializing) { ZigType *ptr_type = base_ptr->value.type; assert(ptr_type->id == ZigTypeIdPointer); @@ -17064,7 +18633,7 @@ static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstr } if (!safety_check_on) return base_ptr; - IrInstruction *c_ptr_val = ir_get_deref(ira, source_instr, base_ptr); + IrInstruction *c_ptr_val = ir_get_deref(ira, source_instr, base_ptr, nullptr); ir_build_assert_non_null(ira, source_instr, c_ptr_val); return base_ptr; } @@ -17079,34 +18648,84 @@ static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstr ZigType *result_type = get_pointer_to_type_extra(ira->codegen, child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, PtrLenSingle, 0, 0, 0, false); + bool same_comptime_repr = types_have_same_zig_comptime_repr(type_entry, child_type); + if (instr_is_comptime(base_ptr)) { - ConstExprValue *val = ir_resolve_const(ira, base_ptr, UndefBad); - if (!val) + ConstExprValue *ptr_val = ir_resolve_const(ira, base_ptr, UndefBad); + if (!ptr_val) return ira->codegen->invalid_instruction; - if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) { - ConstExprValue *maybe_val = const_ptr_pointee(ira, ira->codegen, val, source_instr->source_node); - if (maybe_val == nullptr) + if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar) { + ConstExprValue *optional_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node); + if (optional_val == nullptr) return 
ira->codegen->invalid_instruction; - if (optional_value_is_null(maybe_val)) { + if (initializing && optional_val->special == ConstValSpecialUndef) { + switch (type_has_one_possible_value(ira->codegen, child_type)) { + case OnePossibleValueInvalid: + return ira->codegen->invalid_instruction; + case OnePossibleValueNo: + if (!same_comptime_repr) { + ConstExprValue *payload_val = create_const_vals(1); + payload_val->type = child_type; + payload_val->special = ConstValSpecialUndef; + payload_val->parent.id = ConstParentIdOptionalPayload; + payload_val->parent.data.p_optional_payload.optional_val = optional_val; + + optional_val->data.x_optional = payload_val; + optional_val->special = ConstValSpecialStatic; + } + break; + case OnePossibleValueYes: { + ConstExprValue *pointee = create_const_vals(1); + pointee->special = ConstValSpecialStatic; + pointee->type = child_type; + pointee->parent.id = ConstParentIdOptionalPayload; + pointee->parent.data.p_optional_payload.optional_val = optional_val; + + optional_val->special = ConstValSpecialStatic; + optional_val->data.x_optional = pointee; + break; + } + } + } else if (optional_value_is_null(optional_val)) { ir_add_error(ira, source_instr, buf_sprintf("unable to unwrap null")); return ira->codegen->invalid_instruction; } - IrInstruction *result = ir_const(ira, source_instr, result_type); - ConstExprValue *out_val = &result->value; - out_val->data.x_ptr.special = ConstPtrSpecialRef; - out_val->data.x_ptr.mut = val->data.x_ptr.mut; - if (types_have_same_zig_comptime_repr(type_entry, child_type)) { - out_val->data.x_ptr.data.ref.pointee = maybe_val; + + IrInstruction *result; + if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) { + result = ir_build_optional_unwrap_ptr(&ira->new_irb, source_instr->scope, + source_instr->source_node, base_ptr, false, initializing); + result->value.type = result_type; + result->value.special = ConstValSpecialStatic; } else { - out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_optional; + result = ir_const(ira, source_instr, result_type); + } + ConstExprValue *result_val = &result->value; + result_val->data.x_ptr.special = ConstPtrSpecialRef; + result_val->data.x_ptr.mut = ptr_val->data.x_ptr.mut; + switch (type_has_one_possible_value(ira->codegen, child_type)) { + case OnePossibleValueInvalid: + return ira->codegen->invalid_instruction; + case OnePossibleValueNo: + if (same_comptime_repr) { + result_val->data.x_ptr.data.ref.pointee = optional_val; + } else { + assert(optional_val->data.x_optional != nullptr); + result_val->data.x_ptr.data.ref.pointee = optional_val->data.x_optional; + } + break; + case OnePossibleValueYes: + assert(optional_val->data.x_optional != nullptr); + result_val->data.x_ptr.data.ref.pointee = optional_val->data.x_optional; + break; } return result; } } IrInstruction *result = ir_build_optional_unwrap_ptr(&ira->new_irb, source_instr->scope, - source_instr->source_node, base_ptr, safety_check_on); + source_instr->source_node, base_ptr, safety_check_on, initializing); result->value.type = result_type; return result; } @@ -17118,7 +18737,8 @@ static IrInstruction *ir_analyze_instruction_optional_unwrap_ptr(IrAnalyze *ira, if (type_is_invalid(base_ptr->value.type)) return ira->codegen->invalid_instruction; - return ir_analyze_unwrap_optional_payload(ira, &instruction->base, base_ptr, instruction->safety_check_on); + return ir_analyze_unwrap_optional_payload(ira, &instruction->base, base_ptr, + instruction->safety_check_on, false); } static IrInstruction *ir_analyze_instruction_ctz(IrAnalyze 
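`ir_analyze_unwrap_optional_payload` now takes an `initializing` flag so a result location can materialize an undefined payload slot; the non-initializing comptime path still rejects unwrapping a null, as in this illustrative snippet (not from the patch):

    comptime {
        var x: ?i32 = null;
        _ = x.?; // error: unable to unwrap null
    }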
*ira, IrInstructionCtz *instruction) { @@ -17419,7 +19039,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira, return result; } - IrInstruction *result = ir_get_deref(ira, &switch_target_instruction->base, target_value_ptr); + IrInstruction *result = ir_get_deref(ira, &switch_target_instruction->base, target_value_ptr, nullptr); result->value.type = target_type; return result; } @@ -17449,7 +19069,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira, return result; } - IrInstruction *union_value = ir_get_deref(ira, &switch_target_instruction->base, target_value_ptr); + IrInstruction *union_value = ir_get_deref(ira, &switch_target_instruction->base, target_value_ptr, nullptr); union_value->value.type = target_type; IrInstruction *union_tag_inst = ir_build_union_tag(&ira->new_irb, switch_target_instruction->base.scope, @@ -17473,7 +19093,7 @@ static IrInstruction *ir_analyze_instruction_switch_target(IrAnalyze *ira, return result; } - IrInstruction *enum_value = ir_get_deref(ira, &switch_target_instruction->base, target_value_ptr); + IrInstruction *enum_value = ir_get_deref(ira, &switch_target_instruction->base, target_value_ptr, nullptr); enum_value->value.type = target_type; return enum_value; } @@ -17546,7 +19166,7 @@ static IrInstruction *ir_analyze_instruction_switch_var(IrAnalyze *ira, IrInstru } IrInstruction *result = ir_build_union_field_ptr(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, target_value_ptr, field); + instruction->base.scope, instruction->base.source_node, target_value_ptr, field, false, false); result->value.type = get_pointer_to_type(ira->codegen, field->type_entry, target_value_ptr->value.type->data.pointer.is_const); return result; @@ -17756,7 +19376,8 @@ static IrInstruction *ir_analyze_instruction_ref(IrAnalyze *ira, IrInstructionRe } static IrInstruction *ir_analyze_container_init_fields_union(IrAnalyze *ira, IrInstruction *instruction, - ZigType *container_type, size_t instr_field_count, IrInstructionContainerInitFieldsField *fields) + ZigType *container_type, size_t instr_field_count, IrInstructionContainerInitFieldsField *fields, + IrInstruction *result_loc) { Error err; assert(container_type->id == ZigTypeIdUnion); @@ -17771,12 +19392,12 @@ static IrInstruction *ir_analyze_container_init_fields_union(IrAnalyze *ira, IrI } IrInstructionContainerInitFieldsField *field = &fields[0]; - IrInstruction *field_value = field->value->child; - if (type_is_invalid(field_value->value.type)) + IrInstruction *field_result_loc = field->result_loc->child; + if (type_is_invalid(field_result_loc->value.type)) return ira->codegen->invalid_instruction; TypeUnionField *type_field = find_union_type_field(container_type, field->name); - if (!type_field) { + if (type_field == nullptr) { ir_add_error_node(ira, field->source_node, buf_sprintf("no member named '%s' in union '%s'", buf_ptr(field->name), buf_ptr(&container_type->name))); @@ -17786,46 +19407,36 @@ static IrInstruction *ir_analyze_container_init_fields_union(IrAnalyze *ira, IrI if (type_is_invalid(type_field->type_entry)) return ira->codegen->invalid_instruction; - IrInstruction *casted_field_value = ir_implicit_cast(ira, field_value, type_field->type_entry); - if (casted_field_value == ira->codegen->invalid_instruction) - return ira->codegen->invalid_instruction; - - if ((err = type_resolve(ira->codegen, casted_field_value->value.type, ResolveStatusZeroBitsKnown))) - return ira->codegen->invalid_instruction; + if 
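Union initialization continues to require exactly one active field and to reject unknown field names; what changes in `ir_analyze_container_init_fields_union` is that the field now arrives as a result location (`field->result_loc`) instead of a value. A small sketch of the construct being analyzed (type and field names are made up):

    const Value = union(enum) {
        int: i32,
        boolean: bool,
    };

    test "union init has exactly one active field" {
        const v = Value{ .int = 42 };
        _ = v;
        // `Value{ .missing = 0 }` would be rejected with:
        //   no member named 'missing' in union 'Value'
    }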
(result_loc->value.data.x_ptr.mut == ConstPtrMutInfer) { + if (instr_is_comptime(field_result_loc) && + field_result_loc->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) + { + // nothing + } else { + result_loc->value.special = ConstValSpecialRuntime; + } + } bool is_comptime = ir_should_inline(ira->new_irb.exec, instruction->scope) || type_requires_comptime(ira->codegen, container_type) == ReqCompTimeYes; - if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime || - !type_has_bits(casted_field_value->value.type)) - { - ConstExprValue *field_val = ir_resolve_const(ira, casted_field_value, UndefOk); - if (!field_val) - return ira->codegen->invalid_instruction; - IrInstruction *result = ir_const(ira, instruction, container_type); - ConstExprValue *out_val = &result->value; - out_val->data.x_union.payload = field_val; - out_val->data.x_union.tag = type_field->enum_field->value; - out_val->parent.id = ConstParentIdUnion; - out_val->parent.data.p_union.union_val = out_val; - - return result; + IrInstruction *result = ir_get_deref(ira, instruction, result_loc, nullptr); + if (is_comptime && !instr_is_comptime(result)) { + ir_add_error(ira, field->result_loc, + buf_sprintf("unable to evaluate constant expression")); + return ira->codegen->invalid_instruction; } - - IrInstruction *new_instruction = ir_build_union_init(&ira->new_irb, - instruction->scope, instruction->source_node, - container_type, type_field, casted_field_value); - new_instruction->value.type = container_type; - ir_add_alloca(ira, new_instruction, container_type); - return new_instruction; + return result; } static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruction *instruction, - ZigType *container_type, size_t instr_field_count, IrInstructionContainerInitFieldsField *fields) + ZigType *container_type, size_t instr_field_count, IrInstructionContainerInitFieldsField *fields, + IrInstruction *result_loc) { Error err; if (container_type->id == ZigTypeIdUnion) { - return ir_analyze_container_init_fields_union(ira, instruction, container_type, instr_field_count, fields); + return ir_analyze_container_init_fields_union(ira, instruction, container_type, instr_field_count, + fields, result_loc); } if (container_type->id != ZigTypeIdStruct || is_slice(container_type)) { ir_add_error(ira, instruction, @@ -17842,22 +19453,28 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc IrInstruction *first_non_const_instruction = nullptr; AstNode **field_assign_nodes = allocate(actual_field_count); - - IrInstructionStructInitField *new_fields = allocate(actual_field_count); + ZigList const_ptrs = {}; bool is_comptime = ir_should_inline(ira->new_irb.exec, instruction->scope) || type_requires_comptime(ira->codegen, container_type) == ReqCompTimeYes; - ConstExprValue const_val = {}; - const_val.special = ConstValSpecialStatic; - const_val.type = container_type; - // const_val.global_refs = allocate(1); - const_val.data.x_struct.fields = create_const_vals(actual_field_count); + + // Here we iterate over the fields that have been initialized, and emit + // compile errors for missing fields and duplicate fields. + // It is only now that we find out whether the struct initialization can be a comptime + // value, but we have already emitted runtime instructions for the fields that + // were initialized with runtime values, and have omitted instructions that would have + // initialized fields with comptime values. + // So now we must clean up this situation. 
If it turns out the struct initialization can + // be a comptime value, overwrite ConstPtrMutInfer with ConstPtrMutComptimeConst. + // Otherwise, we must emit instructions to runtime-initialize the fields that have + // comptime-known values. + for (size_t i = 0; i < instr_field_count; i += 1) { IrInstructionContainerInitFieldsField *field = &fields[i]; - IrInstruction *field_value = field->value->child; - if (type_is_invalid(field_value->value.type)) + IrInstruction *field_result_loc = field->result_loc->child; + if (type_is_invalid(field_result_loc->value.type)) return ira->codegen->invalid_instruction; TypeStructField *type_field = find_struct_type_field(container_type, field->name); @@ -17871,10 +19488,6 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc if (type_is_invalid(type_field->type_entry)) return ira->codegen->invalid_instruction; - IrInstruction *casted_field_value = ir_implicit_cast(ira, field_value, type_field->type_entry); - if (casted_field_value == ira->codegen->invalid_instruction) - return ira->codegen->invalid_instruction; - size_t field_index = type_field->src_index; AstNode *existing_assign_node = field_assign_nodes[field_index]; if (existing_assign_node) { @@ -17884,26 +19497,18 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc } field_assign_nodes[field_index] = field->source_node; - new_fields[field_index].value = casted_field_value; - new_fields[field_index].type_struct_field = type_field; - - if (const_val.special == ConstValSpecialStatic) { - if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime) { - ConstExprValue *field_val = ir_resolve_const(ira, casted_field_value, UndefOk); - if (!field_val) - return ira->codegen->invalid_instruction; - - copy_const_val(&const_val.data.x_struct.fields[field_index], field_val, true); - } else { - first_non_const_instruction = casted_field_value; - const_val.special = ConstValSpecialRuntime; - } + if (instr_is_comptime(field_result_loc) && + field_result_loc->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) + { + const_ptrs.append(field_result_loc); + } else { + first_non_const_instruction = field_result_loc; } } bool any_missing = false; for (size_t i = 0; i < actual_field_count; i += 1) { - if (field_assign_nodes[i]) continue; + if (field_assign_nodes[i] != nullptr) continue; // look for a default field value TypeStructField *field = &container_type->data.structure.fields[i]; @@ -17929,182 +19534,177 @@ static IrInstruction *ir_analyze_container_init_fields(IrAnalyze *ira, IrInstruc IrInstruction *runtime_inst = ir_const(ira, instruction, field->init_val->type); copy_const_val(&runtime_inst->value, field->init_val, true); - new_fields[i].value = runtime_inst; - new_fields[i].type_struct_field = field; - - if (const_val.special == ConstValSpecialStatic) { - copy_const_val(&const_val.data.x_struct.fields[i], field->init_val, true); + IrInstruction *field_ptr = ir_analyze_struct_field_ptr(ira, instruction, field, result_loc, + container_type, true); + ir_analyze_store_ptr(ira, instruction, field_ptr, runtime_inst); + if (instr_is_comptime(field_ptr) && field_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) { + const_ptrs.append(field_ptr); + } else { + first_non_const_instruction = result_loc; } } if (any_missing) return ira->codegen->invalid_instruction; - if (const_val.special == ConstValSpecialStatic) { - IrInstruction *result = ir_const(ira, instruction, nullptr); - ConstExprValue *out_val = &result->value; - 
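The comment above is the core of this hunk: field initializers are written through per-field result locations, and only afterwards does the analyzer learn whether the whole struct value is comptime-known, back-filling defaulted fields from `field->init_val` along the way. A hedged sketch of the two situations it has to reconcile (type and field names are invented):

    const std = @import("std");

    const Config = struct {
        level: u8,
        verbose: bool = false, // default value, back-filled when the field is omitted
    };

    fn makeConfig(level: u8) Config {
        // `level` is runtime-known while `verbose` gets a comptime-known default,
        // so the whole init is runtime and the default still has to be stored
        // through its field pointer.
        return Config{ .level = level };
    }

    test "struct init with a defaulted field" {
        std.debug.assert(makeConfig(3).verbose == false);
        const comptime_config = Config{ .level = 3, .verbose = true }; // fully comptime-known
        _ = comptime_config;
    }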
copy_const_val(out_val, &const_val, false); - out_val->type = container_type; - - for (size_t i = 0; i < instr_field_count; i += 1) { - ConstExprValue *field_val = &out_val->data.x_struct.fields[i]; - ConstParent *parent = get_const_val_parent(ira->codegen, field_val); - if (parent != nullptr) { - parent->id = ConstParentIdStruct; - parent->data.p_struct.field_index = i; - parent->data.p_struct.struct_val = out_val; + if (result_loc->value.data.x_ptr.mut == ConstPtrMutInfer) { + if (const_ptrs.length != actual_field_count) { + result_loc->value.special = ConstValSpecialRuntime; + for (size_t i = 0; i < const_ptrs.length; i += 1) { + IrInstruction *field_result_loc = const_ptrs.at(i); + IrInstruction *deref = ir_get_deref(ira, field_result_loc, field_result_loc, nullptr); + field_result_loc->value.special = ConstValSpecialRuntime; + ir_analyze_store_ptr(ira, field_result_loc, field_result_loc, deref); } } - - return result; } + IrInstruction *result = ir_get_deref(ira, instruction, result_loc, nullptr); + + if (is_comptime && !instr_is_comptime(result)) { + ir_add_error_node(ira, first_non_const_instruction->source_node, + buf_sprintf("unable to evaluate constant expression")); + return ira->codegen->invalid_instruction; + } + + return result; +} + +static IrInstruction *ir_analyze_instruction_container_init_list(IrAnalyze *ira, + IrInstructionContainerInitList *instruction) +{ + ZigType *container_type = ir_resolve_type(ira, instruction->container_type->child); + if (type_is_invalid(container_type)) + return ira->codegen->invalid_instruction; + + size_t elem_count = instruction->item_count; + + if (is_slice(container_type)) { + ir_add_error(ira, instruction->container_type, + buf_sprintf("expected array type or [_], found slice")); + return ira->codegen->invalid_instruction; + } + + if (container_type->id == ZigTypeIdVoid) { + if (elem_count != 0) { + ir_add_error_node(ira, instruction->base.source_node, + buf_sprintf("void expression expects no arguments")); + return ira->codegen->invalid_instruction; + } + return ir_const_void(ira, &instruction->base); + } + + if (container_type->id == ZigTypeIdStruct && elem_count == 0) { + ir_assert(instruction->result_loc != nullptr, &instruction->base); + IrInstruction *result_loc = instruction->result_loc->child; + if (type_is_invalid(result_loc->value.type)) + return result_loc; + return ir_analyze_container_init_fields(ira, &instruction->base, container_type, 0, nullptr, result_loc); + } + + if (container_type->id != ZigTypeIdArray) { + ir_add_error_node(ira, instruction->base.source_node, + buf_sprintf("type '%s' does not support array initialization", + buf_ptr(&container_type->name))); + return ira->codegen->invalid_instruction; + } + + ir_assert(instruction->result_loc != nullptr, &instruction->base); + IrInstruction *result_loc = instruction->result_loc->child; + if (type_is_invalid(result_loc->value.type)) + return result_loc; + ir_assert(result_loc->value.type->id == ZigTypeIdPointer, &instruction->base); + + ZigType *child_type = container_type->data.array.child_type; + if (container_type->data.array.len != elem_count) { + ZigType *literal_type = get_array_type(ira->codegen, child_type, elem_count); + + ir_add_error(ira, &instruction->base, + buf_sprintf("expected %s literal, found %s literal", + buf_ptr(&container_type->name), buf_ptr(&literal_type->name))); + return ira->codegen->invalid_instruction; + } + + switch (type_has_one_possible_value(ira->codegen, container_type)) { + case OnePossibleValueInvalid: + return 
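`ir_analyze_instruction_container_init_list` keeps the earlier diagnostics, now issued before any result-location work: slice types are rejected in favor of `[_]`, and a length mismatch between the array type and the initializer is still an error. Illustrative only:

    const inferred = [_]i32{ 1, 2, 3 }; // length inferred from the initializer
    const exact = [3]i32{ 1, 2, 3 };    // lengths match

    // const bad = [2]i32{ 1, 2, 3 };
    //   error: expected [2]i32 literal, found [3]i32 literal
    // const slc = []i32{ 1, 2, 3 };
    //   error: expected array type or [_], found slice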
ira->codegen->invalid_instruction; + case OnePossibleValueYes: + return ir_const(ira, &instruction->base, container_type); + case OnePossibleValueNo: + break; + } + + bool is_comptime; + switch (type_requires_comptime(ira->codegen, container_type)) { + case ReqCompTimeInvalid: + return ira->codegen->invalid_instruction; + case ReqCompTimeNo: + is_comptime = ir_should_inline(ira->new_irb.exec, instruction->base.scope); + break; + case ReqCompTimeYes: + is_comptime = true; + break; + } + + IrInstruction *first_non_const_instruction = nullptr; + + // The Result Location Mechanism has already emitted runtime instructions to + // initialize runtime elements and has omitted instructions for the comptime + // elements. However it is only now that we find out whether the array initialization + // can be a comptime value. So we must clean up the situation. If it turns out + // array initialization can be a comptime value, overwrite ConstPtrMutInfer with + // ConstPtrMutComptimeConst. Otherwise, emit instructions to runtime-initialize the + // elements that have comptime-known values. + ZigList const_ptrs = {}; + + for (size_t i = 0; i < elem_count; i += 1) { + IrInstruction *elem_result_loc = instruction->elem_result_loc_list[i]->child; + if (type_is_invalid(elem_result_loc->value.type)) + return ira->codegen->invalid_instruction; + + assert(elem_result_loc->value.type->id == ZigTypeIdPointer); + + if (instr_is_comptime(elem_result_loc) && + elem_result_loc->value.data.x_ptr.mut != ConstPtrMutRuntimeVar) + { + const_ptrs.append(elem_result_loc); + } else { + first_non_const_instruction = elem_result_loc; + } + } + + if (result_loc->value.data.x_ptr.mut == ConstPtrMutInfer) { + if (const_ptrs.length != elem_count) { + result_loc->value.special = ConstValSpecialRuntime; + for (size_t i = 0; i < const_ptrs.length; i += 1) { + IrInstruction *elem_result_loc = const_ptrs.at(i); + assert(elem_result_loc->value.special == ConstValSpecialStatic); + IrInstruction *deref = ir_get_deref(ira, elem_result_loc, elem_result_loc, nullptr); + elem_result_loc->value.special = ConstValSpecialRuntime; + ir_analyze_store_ptr(ira, elem_result_loc, elem_result_loc, deref); + } + } + } + + IrInstruction *result = ir_get_deref(ira, &instruction->base, result_loc, nullptr); + if (instr_is_comptime(result)) + return result; + if (is_comptime) { ir_add_error_node(ira, first_non_const_instruction->source_node, buf_sprintf("unable to evaluate constant expression")); return ira->codegen->invalid_instruction; } - IrInstruction *new_instruction = ir_build_struct_init(&ira->new_irb, - instruction->scope, instruction->source_node, - container_type, actual_field_count, new_fields); - new_instruction->value.type = container_type; - ir_add_alloca(ira, new_instruction, container_type); - return new_instruction; -} - -static IrInstruction *ir_analyze_instruction_container_init_list(IrAnalyze *ira, - IrInstructionContainerInitList *instruction) -{ - Error err; - - size_t elem_count = instruction->item_count; - - ZigType *container_type; - if (instruction->container_type != nullptr) { - container_type = ir_resolve_type(ira, instruction->container_type->child); - if (type_is_invalid(container_type)) - return ira->codegen->invalid_instruction; - } else { - ZigType *elem_type = ir_resolve_type(ira, instruction->elem_type->child); - if (type_is_invalid(elem_type)) - return ira->codegen->invalid_instruction; - if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusSizeKnown))) { - return ira->codegen->invalid_instruction; - } - 
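The array comment above mirrors the struct case: elements written through `ConstPtrMutInfer` result locations are either promoted to a single comptime value or re-emitted as runtime stores. A rough illustration of the mixed case (assumed, not from the patch):

    fn fill(runtime_val: i32) [3]i32 {
        // elements 0 and 2 are comptime-known, element 1 is runtime, so the whole
        // initializer is runtime and the comptime-known elements are re-emitted
        // as runtime stores into the result location.
        return [3]i32{ 100, runtime_val, 300 };
    }

    test "mixed comptime/runtime array init" {
        var v: i32 = 200;
        const a = fill(v);
        @import("std").debug.assert(a[1] == 200);
    }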
container_type = get_array_type(ira->codegen, elem_type, elem_count); - } - - if (is_slice(container_type)) { - ir_add_error(ira, &instruction->base, - buf_sprintf("expected array type or [_], found slice")); - return ira->codegen->invalid_instruction; - } else if (container_type->id == ZigTypeIdStruct && !is_slice(container_type) && elem_count == 0) { - return ir_analyze_container_init_fields(ira, &instruction->base, container_type, - 0, nullptr); - } else if (container_type->id == ZigTypeIdArray) { - // array is same as slice init but we make a compile error if the length is wrong - ZigType *child_type; - if (container_type->id == ZigTypeIdArray) { - child_type = container_type->data.array.child_type; - if (container_type->data.array.len != elem_count) { - ZigType *literal_type = get_array_type(ira->codegen, child_type, elem_count); - - ir_add_error(ira, &instruction->base, - buf_sprintf("expected %s literal, found %s literal", - buf_ptr(&container_type->name), buf_ptr(&literal_type->name))); - return ira->codegen->invalid_instruction; - } - } else { - ZigType *pointer_type = container_type->data.structure.fields[slice_ptr_index].type_entry; - assert(pointer_type->id == ZigTypeIdPointer); - child_type = pointer_type->data.pointer.child_type; - } - - if ((err = type_resolve(ira->codegen, child_type, ResolveStatusSizeKnown))) { - return ira->codegen->invalid_instruction; - } - - ZigType *fixed_size_array_type = get_array_type(ira->codegen, child_type, elem_count); - - ConstExprValue const_val = {}; - const_val.special = ConstValSpecialStatic; - const_val.type = fixed_size_array_type; - // const_val.global_refs = allocate(1); - const_val.data.x_array.data.s_none.elements = create_const_vals(elem_count); - - bool is_comptime = ir_should_inline(ira->new_irb.exec, instruction->base.scope); - - IrInstruction **new_items = allocate(elem_count); - - IrInstruction *first_non_const_instruction = nullptr; - - for (size_t i = 0; i < elem_count; i += 1) { - IrInstruction *arg_value = instruction->items[i]->child; - if (type_is_invalid(arg_value->value.type)) - return ira->codegen->invalid_instruction; - - IrInstruction *casted_arg = ir_implicit_cast(ira, arg_value, child_type); - if (casted_arg == ira->codegen->invalid_instruction) - return ira->codegen->invalid_instruction; - - new_items[i] = casted_arg; - - if (const_val.special == ConstValSpecialStatic) { - if (is_comptime || casted_arg->value.special != ConstValSpecialRuntime) { - ConstExprValue *elem_val = ir_resolve_const(ira, casted_arg, UndefBad); - if (!elem_val) - return ira->codegen->invalid_instruction; - - copy_const_val(&const_val.data.x_array.data.s_none.elements[i], elem_val, true); - } else { - first_non_const_instruction = casted_arg; - const_val.special = ConstValSpecialRuntime; - } - } - } - - if (const_val.special == ConstValSpecialStatic) { - IrInstruction *result = ir_const(ira, &instruction->base, nullptr); - ConstExprValue *out_val = &result->value; - copy_const_val(out_val, &const_val, false); - result->value.type = fixed_size_array_type; - for (size_t i = 0; i < elem_count; i += 1) { - ConstExprValue *elem_val = &out_val->data.x_array.data.s_none.elements[i]; - ConstParent *parent = get_const_val_parent(ira->codegen, elem_val); - if (parent != nullptr) { - parent->id = ConstParentIdArray; - parent->data.p_array.array_val = out_val; - parent->data.p_array.elem_index = i; - } - } - return result; - } - - if (is_comptime) { - ir_add_error_node(ira, first_non_const_instruction->source_node, - buf_sprintf("unable to evaluate 
constant expression")); - return ira->codegen->invalid_instruction; - } - - IrInstruction *new_instruction = ir_build_container_init_list(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, - nullptr, nullptr, elem_count, new_items); - new_instruction->value.type = fixed_size_array_type; - ir_add_alloca(ira, new_instruction, fixed_size_array_type); - return new_instruction; - } else if (container_type->id == ZigTypeIdVoid) { - if (elem_count != 0) { - ir_add_error_node(ira, instruction->base.source_node, - buf_sprintf("void expression expects no arguments")); - return ira->codegen->invalid_instruction; - } - return ir_const_void(ira, &instruction->base); - } else { - ir_add_error_node(ira, instruction->base.source_node, - buf_sprintf("type '%s' does not support array initialization", - buf_ptr(&container_type->name))); + ZigType *result_elem_type = result_loc->value.type->data.pointer.child_type; + if (is_slice(result_elem_type)) { + ErrorMsg *msg = ir_add_error(ira, &instruction->base, + buf_sprintf("runtime-initialized array cannot be casted to slice type '%s'", + buf_ptr(&result_elem_type->name))); + add_error_note(ira->codegen, msg, first_non_const_instruction->source_node, + buf_sprintf("this value is not comptime-known")); return ira->codegen->invalid_instruction; } + return result; } static IrInstruction *ir_analyze_instruction_container_init_fields(IrAnalyze *ira, @@ -18115,8 +19715,13 @@ static IrInstruction *ir_analyze_instruction_container_init_fields(IrAnalyze *ir if (type_is_invalid(container_type)) return ira->codegen->invalid_instruction; + ir_assert(instruction->result_loc != nullptr, &instruction->base); + IrInstruction *result_loc = instruction->result_loc->child; + if (type_is_invalid(result_loc->value.type)) + return result_loc; + return ir_analyze_container_init_fields(ira, &instruction->base, container_type, - instruction->field_count, instruction->fields); + instruction->field_count, instruction->fields, result_loc); } static IrInstruction *ir_analyze_instruction_compile_err(IrAnalyze *ira, @@ -18385,12 +19990,6 @@ static IrInstruction *ir_analyze_instruction_bit_offset_of(IrAnalyze *ira, return ir_const_unsigned(ira, &instruction->base, bit_offset); } -static IrInstruction *ir_error_dependency_loop(IrAnalyze *ira, IrInstruction *source_instr) { - ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("dependency loop detected")); - emit_error_notes_for_ref_stack(ira->codegen, msg); - return ira->codegen->invalid_instruction; -} - static void ensure_field_index(ZigType *type, const char *field_name, size_t index) { Buf *field_name_buf; @@ -19509,7 +21108,7 @@ static IrInstruction *ir_analyze_instruction_c_import(IrAnalyze *ira, IrInstruct ir_add_error_node(ira, node, buf_sprintf("C import failed: unable to make dir: %s", err_str(err))); return ira->codegen->invalid_instruction; } - + if ((err = os_write_file(&tmp_c_file_path, &cimport_scope->buf))) { ir_add_error_node(ira, node, buf_sprintf("C import failed: unable to write .h file: %s", err_str(err))); return ira->codegen->invalid_instruction; @@ -19796,12 +21395,21 @@ static IrInstruction *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstructi zig_panic("TODO compile-time execution of cmpxchg"); } - IrInstruction *result = ir_build_cmpxchg_gen(ira, &instruction->base, + ZigType *result_type = get_optional_type(ira->codegen, operand_type); + IrInstruction *result_loc; + if (handle_is_ptr(result_type)) { + result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, + 
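The new error at the end of the list-init path covers asking a runtime-initialized array literal to become a slice; only comptime-known array values may coerce that way. A hypothetical trigger (identifier names invented):

    fn takesRuntime(x: i32) void {
        // error: runtime-initialized array cannot be casted to slice type '[]const i32'
        // note: this value is not comptime-known
        const s: []const i32 = [_]i32{ x, 2, 3 };
        _ = s;
    }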
result_type, nullptr, true, false); + if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { + return result_loc; + } + } else { + result_loc = nullptr; + } + + return ir_build_cmpxchg_gen(ira, &instruction->base, result_type, casted_ptr, casted_cmp_value, casted_new_value, - success_order, failure_order, instruction->is_weak); - result->value.type = get_optional_type(ira->codegen, operand_type); - ir_add_alloca(ira, result, result->value.type); - return result; + success_order, failure_order, instruction->is_weak, result_loc); } static IrInstruction *ir_analyze_instruction_fence(IrAnalyze *ira, IrInstructionFence *instruction) { @@ -19939,7 +21547,7 @@ static IrInstruction *ir_analyze_instruction_float_cast(IrAnalyze *ira, IrInstru } else { op = CastOpNumLitToConcrete; } - return ir_resolve_cast(ira, &instruction->base, target, dest_type, op, false); + return ir_resolve_cast(ira, &instruction->base, target, dest_type, op); } else { return ira->codegen->invalid_instruction; } @@ -20047,6 +21655,12 @@ static IrInstruction *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstru } } + IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, + dest_slice_type, nullptr, true, false); + if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) { + return result_loc; + } + if (casted_value->value.data.rh_slice.id == RuntimeHintSliceIdLen) { known_len = casted_value->value.data.rh_slice.len; have_known_len = true; @@ -20066,9 +21680,7 @@ static IrInstruction *ir_analyze_instruction_from_bytes(IrAnalyze *ira, IrInstru } } - IrInstruction *result = ir_build_resize_slice(ira, &instruction->base, casted_value, dest_slice_type); - ir_add_alloca(ira, result, dest_slice_type); - return result; + return ir_build_resize_slice(ira, &instruction->base, casted_value, dest_slice_type, result_loc); } static IrInstruction *ir_analyze_instruction_to_bytes(IrAnalyze *ira, IrInstructionToBytes *instruction) { @@ -20120,9 +21732,13 @@ static IrInstruction *ir_analyze_instruction_to_bytes(IrAnalyze *ira, IrInstruct return result; } - IrInstruction *result = ir_build_resize_slice(ira, &instruction->base, target, dest_slice_type); - ir_add_alloca(ira, result, dest_slice_type); - return result; + IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, + dest_slice_type, nullptr, true, false); + if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { + return result_loc; + } + + return ir_build_resize_slice(ira, &instruction->base, target, dest_slice_type, result_loc); } static Error resolve_ptr_align(IrAnalyze *ira, ZigType *ty, uint32_t *result_align) { @@ -20154,7 +21770,7 @@ static IrInstruction *ir_analyze_instruction_int_to_float(IrAnalyze *ira, IrInst return ira->codegen->invalid_instruction; } - return ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpIntToFloat, false); + return ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpIntToFloat); } static IrInstruction *ir_analyze_instruction_float_to_int(IrAnalyze *ira, IrInstructionFloatToInt *instruction) { @@ -20176,7 +21792,7 @@ static IrInstruction *ir_analyze_instruction_float_to_int(IrAnalyze *ira, IrInst return ira->codegen->invalid_instruction; } - return ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpFloatToInt, false); + return ir_resolve_cast(ira, &instruction->base, target, dest_type, CastOpFloatToInt); } static 
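The cast hunks here only drop the trailing bool argument from `ir_resolve_cast`; the builtins themselves behave as before. For orientation, a simple sketch in era syntax:

    test "int and float casts" {
        const i: i32 = 42;
        const f = @intToFloat(f64, i);    // CastOpIntToFloat
        const back = @floatToInt(i32, f); // CastOpFloatToInt
        @import("std").debug.assert(back == 42);
    }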
IrInstruction *ir_analyze_instruction_err_to_int(IrAnalyze *ira, IrInstructionErrToInt *instruction) { @@ -20228,7 +21844,7 @@ static IrInstruction *ir_analyze_instruction_bool_to_int(IrAnalyze *ira, IrInstr } ZigType *u1_type = get_int_type(ira->codegen, false, 1); - return ir_resolve_cast(ira, &instruction->base, target, u1_type, CastOpBoolToInt, false); + return ir_resolve_cast(ira, &instruction->base, target, u1_type, CastOpBoolToInt); } static IrInstruction *ir_analyze_instruction_int_type(IrAnalyze *ira, IrInstructionIntType *instruction) { @@ -20389,7 +22005,7 @@ static IrInstruction *ir_analyze_instruction_memset(IrAnalyze *ira, IrInstructio ConstExprValue *byte_val = &casted_byte->value; for (size_t i = start; i < end; i += 1) { - dest_elements[i] = *byte_val; + copy_const_val(&dest_elements[i], byte_val, true); } return ir_const_void(ira, &instruction->base); @@ -20459,7 +22075,7 @@ static IrInstruction *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstructio return ira->codegen->invalid_instruction; // TODO test this at comptime with u8 and non-u8 types - // TODO test with dest ptr being a global runtime variable + // TODO test with dest ptr being a global runtime variable if (casted_dest_ptr->value.special == ConstValSpecialStatic && casted_src_ptr->value.special == ConstValSpecialStatic && casted_count->value.special == ConstValSpecialStatic && @@ -20557,7 +22173,7 @@ static IrInstruction *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstructio // TODO check for noalias violations - this should be generalized to work for any function for (size_t i = 0; i < count; i += 1) { - dest_elements[dest_start + i] = src_elements[src_start + i]; + copy_const_val(&dest_elements[dest_start + i], &src_elements[src_start + i], true); } return ir_const_void(ira, &instruction->base); @@ -20569,7 +22185,7 @@ static IrInstruction *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstructio return result; } -static IrInstruction *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructionSlice *instruction) { +static IrInstruction *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructionSliceSrc *instruction) { IrInstruction *ptr_ptr = instruction->ptr->child; if (type_is_invalid(ptr_ptr->value.type)) return ira->codegen->invalid_instruction; @@ -20858,12 +22474,13 @@ static IrInstruction *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstruction return result; } - IrInstruction *new_instruction = ir_build_slice(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, - ptr_ptr, casted_start, end, instruction->safety_check_on); - new_instruction->value.type = return_type; - ir_add_alloca(ira, new_instruction, return_type); - return new_instruction; + IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, + return_type, nullptr, true, false); + if (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc)) { + return result_loc; + } + return ir_build_slice_gen(ira, &instruction->base, return_type, + ptr_ptr, casted_start, end, instruction->safety_check_on, result_loc); } static IrInstruction *ir_analyze_instruction_member_count(IrAnalyze *ira, IrInstructionMemberCount *instruction) { @@ -21187,15 +22804,149 @@ static IrInstruction *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInstr return result; } -static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstructionTestErr *instruction) { - IrInstruction *value = instruction->value->child; - if (type_is_invalid(value->value.type)) +static 
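Slicing now produces its `{ptr, len}` pair directly in a result location via `ir_build_slice_gen` instead of going through `ir_add_alloca`. The operation being analyzed is ordinary slice syntax, e.g.:

    const std = @import("std");

    test "slice of an array" {
        var array = [_]u8{ 1, 2, 3, 4 };
        const s = array[1..3];
        std.debug.assert(s.len == 2);
        std.debug.assert(s[0] == 2);
    }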
IrInstruction *ir_analyze_instruction_result_ptr(IrAnalyze *ira, IrInstructionResultPtr *instruction) { + IrInstruction *result = instruction->result->child; + if (type_is_invalid(result->value.type)) + return result; + + if (instruction->result_loc->written && instruction->result_loc->resolved_loc != nullptr && + !instr_is_comptime(result)) + { + return instruction->result_loc->resolved_loc; + } + return ir_get_ref(ira, &instruction->base, result, true, false); +} + +static void ir_eval_mul_add(IrAnalyze *ira, IrInstructionMulAdd *source_instr, ZigType *float_type, + ConstExprValue *op1, ConstExprValue *op2, ConstExprValue *op3, ConstExprValue *out_val) { + if (float_type->id == ZigTypeIdComptimeFloat) { + f128M_mulAdd(&out_val->data.x_bigfloat.value, &op1->data.x_bigfloat.value, &op2->data.x_bigfloat.value, + &op3->data.x_bigfloat.value); + } else if (float_type->id == ZigTypeIdFloat) { + switch (float_type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_mulAdd(op1->data.x_f16, op2->data.x_f16, op3->data.x_f16); + break; + case 32: + out_val->data.x_f32 = fmaf(op1->data.x_f32, op2->data.x_f32, op3->data.x_f32); + break; + case 64: + out_val->data.x_f64 = fma(op1->data.x_f64, op2->data.x_f64, op3->data.x_f64); + break; + case 128: + f128M_mulAdd(&op1->data.x_f128, &op2->data.x_f128, &op3->data.x_f128, &out_val->data.x_f128); + break; + default: + zig_unreachable(); + } + } else { + zig_unreachable(); + } +} + +static IrInstruction *ir_analyze_instruction_mul_add(IrAnalyze *ira, IrInstructionMulAdd *instruction) { + IrInstruction *type_value = instruction->type_value->child; + if (type_is_invalid(type_value->value.type)) + return ira->codegen->invalid_instruction; + + ZigType *expr_type = ir_resolve_type(ira, type_value); + if (type_is_invalid(expr_type)) + return ira->codegen->invalid_instruction; + + // Only allow float types, and vectors of floats. + ZigType *float_type = (expr_type->id == ZigTypeIdVector) ? 
expr_type->data.vector.elem_type : expr_type; + if (float_type->id != ZigTypeIdFloat) { + ir_add_error(ira, type_value, + buf_sprintf("expected float or vector of float type, found '%s'", buf_ptr(&float_type->name))); + return ira->codegen->invalid_instruction; + } + + IrInstruction *op1 = instruction->op1->child; + if (type_is_invalid(op1->value.type)) return ira->codegen->invalid_instruction; - ZigType *type_entry = value->value.type; - if (type_is_invalid(type_entry)) { + IrInstruction *casted_op1 = ir_implicit_cast(ira, op1, expr_type); + if (type_is_invalid(casted_op1->value.type)) return ira->codegen->invalid_instruction; - } else if (type_entry->id == ZigTypeIdErrorUnion) { + + IrInstruction *op2 = instruction->op2->child; + if (type_is_invalid(op2->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *casted_op2 = ir_implicit_cast(ira, op2, expr_type); + if (type_is_invalid(casted_op2->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *op3 = instruction->op3->child; + if (type_is_invalid(op3->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *casted_op3 = ir_implicit_cast(ira, op3, expr_type); + if (type_is_invalid(casted_op3->value.type)) + return ira->codegen->invalid_instruction; + + if (instr_is_comptime(casted_op1) && + instr_is_comptime(casted_op2) && + instr_is_comptime(casted_op3)) { + ConstExprValue *op1_const = ir_resolve_const(ira, casted_op1, UndefBad); + if (!op1_const) + return ira->codegen->invalid_instruction; + ConstExprValue *op2_const = ir_resolve_const(ira, casted_op2, UndefBad); + if (!op2_const) + return ira->codegen->invalid_instruction; + ConstExprValue *op3_const = ir_resolve_const(ira, casted_op3, UndefBad); + if (!op3_const) + return ira->codegen->invalid_instruction; + + IrInstruction *result = ir_const(ira, &instruction->base, expr_type); + ConstExprValue *out_val = &result->value; + + if (expr_type->id == ZigTypeIdVector) { + expand_undef_array(ira->codegen, op1_const); + expand_undef_array(ira->codegen, op2_const); + expand_undef_array(ira->codegen, op3_const); + out_val->special = ConstValSpecialUndef; + expand_undef_array(ira->codegen, out_val); + size_t len = expr_type->data.vector.len; + for (size_t i = 0; i < len; i += 1) { + ConstExprValue *float_operand_op1 = &op1_const->data.x_array.data.s_none.elements[i]; + ConstExprValue *float_operand_op2 = &op2_const->data.x_array.data.s_none.elements[i]; + ConstExprValue *float_operand_op3 = &op3_const->data.x_array.data.s_none.elements[i]; + ConstExprValue *float_out_val = &out_val->data.x_array.data.s_none.elements[i]; + assert(float_operand_op1->type == float_type); + assert(float_operand_op2->type == float_type); + assert(float_operand_op3->type == float_type); + assert(float_out_val->type == float_type); + ir_eval_mul_add(ira, instruction, float_type, + op1_const, op2_const, op3_const, float_out_val); + float_out_val->type = float_type; + } + out_val->type = expr_type; + out_val->special = ConstValSpecialStatic; + } else { + ir_eval_mul_add(ira, instruction, float_type, op1_const, op2_const, op3_const, out_val); + } + return result; + } + + IrInstruction *result = ir_build_mul_add(&ira->new_irb, + instruction->base.scope, instruction->base.source_node, + type_value, casted_op1, casted_op2, casted_op3); + result->value.type = expr_type; + return result; +} + +static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstructionTestErrSrc *instruction) { + IrInstruction *base_ptr = instruction->base_ptr->child; + if 
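This is the semantic-analysis half of the new `@mulAdd` builtin: every operand is implicitly cast to the requested type, the result is computed at comptime when all three operands are comptime-known (element-wise for vectors, via `ir_eval_mul_add`), and otherwise a runtime `mul_add` instruction is emitted. A small usage sketch:

    const std = @import("std");

    test "@mulAdd fuses the multiply and the add" {
        // (a * b) + c with a single rounding step
        comptime std.debug.assert(@mulAdd(f32, 2.0, 3.0, 4.0) == 10.0);

        var a: f64 = 1.5;
        const y = @mulAdd(f64, a, 2.0, 0.25); // runtime path
        std.debug.assert(y == 3.25);
    }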
(type_is_invalid(base_ptr->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *value = ir_get_deref(ira, &instruction->base, base_ptr, nullptr); + ZigType *type_entry = value->value.type; + if (type_is_invalid(type_entry)) + return ira->codegen->invalid_instruction; + + if (type_entry->id == ZigTypeIdErrorUnion) { if (instr_is_comptime(value)) { ConstExprValue *err_union_val = ir_resolve_const(ira, value, UndefBad); if (!err_union_val) @@ -21207,21 +22958,20 @@ static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruct } } - ZigType *err_set_type = type_entry->data.error_union.err_set_type; - if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.source_node)) { - return ira->codegen->invalid_instruction; - } - if (!type_is_global_error_set(err_set_type) && - err_set_type->data.error_set.err_count == 0) - { - assert(err_set_type->data.error_set.infer_fn == nullptr); - return ir_const_bool(ira, &instruction->base, false); + if (instruction->resolve_err_set) { + ZigType *err_set_type = type_entry->data.error_union.err_set_type; + if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.source_node)) { + return ira->codegen->invalid_instruction; + } + if (!type_is_global_error_set(err_set_type) && + err_set_type->data.error_set.err_count == 0) + { + assert(err_set_type->data.error_set.infer_fn == nullptr); + return ir_const_bool(ira, &instruction->base, false); + } } - IrInstruction *result = ir_build_test_err(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, value); - result->value.type = ira->codegen->builtin_types.entry_bool; - return result; + return ir_build_test_err_gen(ira, &instruction->base, value); } else if (type_entry->id == ZigTypeIdErrorSet) { return ir_const_bool(ira, &instruction->base, true); } else { @@ -21229,10 +22979,9 @@ static IrInstruction *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruct } } -static IrInstruction *ir_analyze_instruction_unwrap_err_code(IrAnalyze *ira, IrInstructionUnwrapErrCode *instruction) { - IrInstruction *base_ptr = instruction->err_union->child; - if (type_is_invalid(base_ptr->value.type)) - return ira->codegen->invalid_instruction; +static IrInstruction *ir_analyze_unwrap_err_code(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *base_ptr, bool initializing) +{ ZigType *ptr_type = base_ptr->value.type; // This will be a pointer type because unwrap err payload IR instruction operates on a pointer to a thing. 
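`ir_analyze_instruction_test_err` now receives a pointer (`IrInstructionTestErrSrc`) and dereferences it itself, and the empty-error-set folding sits behind the new `resolve_err_set` flag. The construct being analyzed is the error test in `if`/`catch`, roughly:

    fn neverFails() !i32 {
        return 7; // the inferred error set ends up empty
    }

    test "error test on an empty error set" {
        if (neverFails()) |value| {
            // with an empty, non-global error set the "is error" test is
            // comptime-known to be false, so this is the branch that runs
            @import("std").debug.assert(value == 7);
        } else |err| {
            _ = err;
            unreachable;
        }
    }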
@@ -21248,40 +22997,79 @@ static IrInstruction *ir_analyze_instruction_unwrap_err_code(IrAnalyze *ira, IrI return ira->codegen->invalid_instruction; } + ZigType *err_set_type = type_entry->data.error_union.err_set_type; + ZigType *result_type = get_pointer_to_type_extra(ira->codegen, err_set_type, + ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, PtrLenSingle, + ptr_type->data.pointer.explicit_alignment, 0, 0, false); + if (instr_is_comptime(base_ptr)) { ConstExprValue *ptr_val = ir_resolve_const(ira, base_ptr, UndefBad); if (!ptr_val) return ira->codegen->invalid_instruction; - if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar) { - ConstExprValue *err_union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.source_node); + if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar && + ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) + { + ConstExprValue *err_union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node); if (err_union_val == nullptr) return ira->codegen->invalid_instruction; - if (err_union_val->special != ConstValSpecialRuntime) { - ErrorTableEntry *err = err_union_val->data.x_err_union.error_set->data.x_err_set; - assert(err); - IrInstruction *result = ir_const(ira, &instruction->base, - type_entry->data.error_union.err_set_type); - result->value.data.x_err_set = err; - return result; + if (initializing && err_union_val->special == ConstValSpecialUndef) { + ConstExprValue *vals = create_const_vals(2); + ConstExprValue *err_set_val = &vals[0]; + ConstExprValue *payload_val = &vals[1]; + + err_set_val->special = ConstValSpecialUndef; + err_set_val->type = err_set_type; + err_set_val->parent.id = ConstParentIdErrUnionCode; + err_set_val->parent.data.p_err_union_code.err_union_val = err_union_val; + + payload_val->special = ConstValSpecialUndef; + payload_val->type = type_entry->data.error_union.payload_type; + payload_val->parent.id = ConstParentIdErrUnionPayload; + payload_val->parent.data.p_err_union_payload.err_union_val = err_union_val; + + err_union_val->special = ConstValSpecialStatic; + err_union_val->data.x_err_union.error_set = err_set_val; + err_union_val->data.x_err_union.payload = payload_val; } + ir_assert(err_union_val->special != ConstValSpecialRuntime, source_instr); + + IrInstruction *result; + if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) { + result = ir_build_unwrap_err_code(&ira->new_irb, source_instr->scope, + source_instr->source_node, base_ptr); + result->value.type = result_type; + result->value.special = ConstValSpecialStatic; + } else { + result = ir_const(ira, source_instr, result_type); + } + ConstExprValue *const_val = &result->value; + const_val->data.x_ptr.special = ConstPtrSpecialBaseErrorUnionCode; + const_val->data.x_ptr.data.base_err_union_code.err_union_val = err_union_val; + const_val->data.x_ptr.mut = ptr_val->data.x_ptr.mut; + return result; } } IrInstruction *result = ir_build_unwrap_err_code(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, base_ptr); - result->value.type = type_entry->data.error_union.err_set_type; + source_instr->scope, source_instr->source_node, base_ptr); + result->value.type = result_type; return result; } -static IrInstruction *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira, - IrInstructionUnwrapErrPayload *instruction) +static IrInstruction *ir_analyze_instruction_unwrap_err_code(IrAnalyze *ira, + IrInstructionUnwrapErrCode *instruction) { - assert(instruction->value->child); - IrInstruction *value = 
instruction->value->child; - if (type_is_invalid(value->value.type)) + IrInstruction *base_ptr = instruction->err_union_ptr->child; + if (type_is_invalid(base_ptr->value.type)) return ira->codegen->invalid_instruction; - ZigType *ptr_type = value->value.type; + return ir_analyze_unwrap_err_code(ira, &instruction->base, base_ptr, false); +} + +static IrInstruction *ir_analyze_unwrap_error_payload(IrAnalyze *ira, IrInstruction *source_instr, + IrInstruction *base_ptr, bool safety_check_on, bool initializing) +{ + ZigType *ptr_type = base_ptr->value.type; // This will be a pointer type because unwrap err payload IR instruction operates on a pointer to a thing. assert(ptr_type->id == ZigTypeIdPointer); @@ -21291,7 +23079,7 @@ static IrInstruction *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira, return ira->codegen->invalid_instruction; if (type_entry->id != ZigTypeIdErrorUnion) { - ir_add_error(ira, value, + ir_add_error(ira, base_ptr, buf_sprintf("expected error union type, found '%s'", buf_ptr(&type_entry->name))); return ira->codegen->invalid_instruction; } @@ -21303,36 +23091,73 @@ static IrInstruction *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira, ZigType *result_type = get_pointer_to_type_extra(ira->codegen, payload_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, PtrLenSingle, 0, 0, 0, false); - if (instr_is_comptime(value)) { - ConstExprValue *ptr_val = ir_resolve_const(ira, value, UndefBad); + if (instr_is_comptime(base_ptr)) { + ConstExprValue *ptr_val = ir_resolve_const(ira, base_ptr, UndefBad); if (!ptr_val) return ira->codegen->invalid_instruction; if (ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar) { - ConstExprValue *err_union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.source_node); + ConstExprValue *err_union_val = const_ptr_pointee(ira, ira->codegen, ptr_val, source_instr->source_node); if (err_union_val == nullptr) return ira->codegen->invalid_instruction; + if (err_union_val->special == ConstValSpecialUndef && initializing) { + ConstExprValue *vals = create_const_vals(2); + ConstExprValue *err_set_val = &vals[0]; + ConstExprValue *payload_val = &vals[1]; + + err_set_val->special = ConstValSpecialStatic; + err_set_val->type = type_entry->data.error_union.err_set_type; + err_set_val->data.x_err_set = nullptr; + + payload_val->special = ConstValSpecialUndef; + payload_val->type = payload_type; + + err_union_val->special = ConstValSpecialStatic; + err_union_val->data.x_err_union.error_set = err_set_val; + err_union_val->data.x_err_union.payload = payload_val; + } + if (err_union_val->special != ConstValSpecialRuntime) { ErrorTableEntry *err = err_union_val->data.x_err_union.error_set->data.x_err_set; if (err != nullptr) { - ir_add_error(ira, &instruction->base, + ir_add_error(ira, source_instr, buf_sprintf("caught unexpected error '%s'", buf_ptr(&err->name))); return ira->codegen->invalid_instruction; } - IrInstruction *result = ir_const(ira, &instruction->base, result_type); + IrInstruction *result; + if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) { + result = ir_build_unwrap_err_payload(&ira->new_irb, source_instr->scope, + source_instr->source_node, base_ptr, safety_check_on, initializing); + result->value.type = result_type; + result->value.special = ConstValSpecialStatic; + } else { + result = ir_const(ira, source_instr, result_type); + } result->value.data.x_ptr.special = ConstPtrSpecialRef; result->value.data.x_ptr.data.ref.pointee = err_union_val->data.x_err_union.payload; + 
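The comptime branch of `ir_analyze_unwrap_error_payload` still rejects unwrapping a value that actually holds an error; the `initializing` flag only adds the undef-materialization path used by result locations. A hedged sketch of the comptime unwrap (error and function names invented):

    fn mayFail(bad: bool) error{Oops}!i32 {
        if (bad) return error.Oops;
        return 7;
    }

    comptime {
        // comptime-known error union holding a payload: the unwrap folds to a
        // constant pointer at the payload. Had the value held an error instead,
        // analysis would report: caught unexpected error 'Oops'
        const v = mayFail(false) catch unreachable;
        _ = v;
    }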
result->value.data.x_ptr.mut = ptr_val->data.x_ptr.mut; return result; } } } - IrInstruction *result = ir_build_unwrap_err_payload(&ira->new_irb, - instruction->base.scope, instruction->base.source_node, value, instruction->safety_check_on); + IrInstruction *result = ir_build_unwrap_err_payload(&ira->new_irb, source_instr->scope, + source_instr->source_node, base_ptr, safety_check_on, initializing); result->value.type = result_type; return result; } +static IrInstruction *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira, + IrInstructionUnwrapErrPayload *instruction) +{ + assert(instruction->value->child); + IrInstruction *value = instruction->value->child; + if (type_is_invalid(value->value.type)) + return ira->codegen->invalid_instruction; + + return ir_analyze_unwrap_error_payload(ira, &instruction->base, value, instruction->safety_check_on, false); +} + static IrInstruction *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstructionFnProto *instruction) { AstNode *proto_node = instruction->base.source_node; assert(proto_node->type == NodeTypeFnProto); @@ -21824,14 +23649,19 @@ static IrInstruction *ir_analyze_ptr_cast(IrAnalyze *ira, IrInstruction *source_ } } - IrInstruction *result = ir_const(ira, source_instr, dest_type); + IrInstruction *result; + if (ptr->value.data.x_ptr.mut == ConstPtrMutInfer) { + result = ir_build_ptr_cast_gen(ira, source_instr, dest_type, ptr, safety_check_on); + } else { + result = ir_const(ira, source_instr, dest_type); + } copy_const_val(&result->value, val, true); result->value.type = dest_type; // Keep the bigger alignment, it can only help- // unless the target is zero bits. if (src_align_bytes > dest_align_bytes && type_has_bits(dest_type)) { - result = ir_align_cast(ira, result, src_align_bytes, false); + result = ir_align_cast(ira, result, src_align_bytes, false); } return result; @@ -21915,6 +23745,8 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case ZigTypeIdUndefined: case ZigTypeIdNull: case ZigTypeIdPromise: + case ZigTypeIdErrorUnion: + case ZigTypeIdErrorSet: zig_unreachable(); case ZigTypeIdVoid: return; @@ -22018,10 +23850,6 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue } case ZigTypeIdOptional: zig_panic("TODO buf_write_value_bytes maybe type"); - case ZigTypeIdErrorUnion: - zig_panic("TODO buf_write_value_bytes error union"); - case ZigTypeIdErrorSet: - zig_panic("TODO buf_write_value_bytes pure error type"); case ZigTypeIdFn: zig_panic("TODO buf_write_value_bytes fn type"); case ZigTypeIdUnion: @@ -22210,28 +24038,6 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou zig_unreachable(); } -static bool type_can_bit_cast(ZigType *t) { - switch (t->id) { - case ZigTypeIdInvalid: - zig_unreachable(); - case ZigTypeIdMetaType: - case ZigTypeIdOpaque: - case ZigTypeIdBoundFn: - case ZigTypeIdArgTuple: - case ZigTypeIdUnreachable: - case ZigTypeIdComptimeFloat: - case ZigTypeIdComptimeInt: - case ZigTypeIdEnumLiteral: - case ZigTypeIdUndefined: - case ZigTypeIdNull: - case ZigTypeIdPointer: - return false; - default: - // TODO list these types out explicitly, there are probably some other invalid ones here - return true; - } -} - static IrInstruction *ir_analyze_bit_cast(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, ZigType *dest_type) { @@ -22283,49 +24089,7 @@ static IrInstruction *ir_analyze_bit_cast(IrAnalyze *ira, IrInstruction *source_ return result; } - IrInstruction *result = ir_build_bit_cast_gen(ira, 
source_instr, value, dest_type); - if (handle_is_ptr(dest_type) && !handle_is_ptr(src_type)) { - ir_add_alloca(ira, result, dest_type); - } - return result; -} - -static IrInstruction *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstructionBitCast *instruction) { - IrInstruction *dest_type_value = instruction->dest_type->child; - ZigType *dest_type = ir_resolve_type(ira, dest_type_value); - if (type_is_invalid(dest_type)) - return ira->codegen->invalid_instruction; - - IrInstruction *value = instruction->value->child; - ZigType *src_type = value->value.type; - if (type_is_invalid(src_type)) - return ira->codegen->invalid_instruction; - - if (get_codegen_ptr_type(src_type) != nullptr) { - ir_add_error(ira, value, - buf_sprintf("unable to @bitCast from pointer type '%s'", buf_ptr(&src_type->name))); - return ira->codegen->invalid_instruction; - } - - if (!type_can_bit_cast(src_type)) { - ir_add_error(ira, dest_type_value, - buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&src_type->name))); - return ira->codegen->invalid_instruction; - } - - if (get_codegen_ptr_type(dest_type) != nullptr) { - ir_add_error(ira, dest_type_value, - buf_sprintf("unable to @bitCast to pointer type '%s'", buf_ptr(&dest_type->name))); - return ira->codegen->invalid_instruction; - } - - if (!type_can_bit_cast(dest_type)) { - ir_add_error(ira, dest_type_value, - buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name))); - return ira->codegen->invalid_instruction; - } - - return ir_analyze_bit_cast(ira, &instruction->base, value, dest_type); + return ir_build_bit_cast_gen(ira, source_instr, value, dest_type); } static IrInstruction *ir_analyze_int_to_ptr(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *target, @@ -22395,58 +24159,15 @@ static IrInstruction *ir_analyze_instruction_int_to_ptr(IrAnalyze *ira, IrInstru static IrInstruction *ir_analyze_instruction_decl_ref(IrAnalyze *ira, IrInstructionDeclRef *instruction) { - Tld *tld = instruction->tld; - LVal lval = instruction->lval; - - resolve_top_level_decl(ira->codegen, tld, instruction->base.source_node); - if (tld->resolution == TldResolutionInvalid) + IrInstruction *ref_instruction = ir_analyze_decl_ref(ira, &instruction->base, instruction->tld); + if (type_is_invalid(ref_instruction->value.type)) return ira->codegen->invalid_instruction; - switch (tld->id) { - case TldIdContainer: - case TldIdCompTime: - zig_unreachable(); - case TldIdVar: { - TldVar *tld_var = (TldVar *)tld; - ZigVar *var = tld_var->var; - - if (var == nullptr) { - return ir_error_dependency_loop(ira, &instruction->base); - } - - IrInstruction *var_ptr = ir_get_var_ptr(ira, &instruction->base, var); - if (type_is_invalid(var_ptr->value.type)) - return ira->codegen->invalid_instruction; - - if (tld_var->extern_lib_name != nullptr) { - add_link_lib_symbol(ira, tld_var->extern_lib_name, &var->name, instruction->base.source_node); - } - - if (lval == LValPtr) { - return var_ptr; - } else { - return ir_get_deref(ira, &instruction->base, var_ptr); - } - } - case TldIdFn: { - TldFn *tld_fn = (TldFn *)tld; - ZigFn *fn_entry = tld_fn->fn_entry; - ir_assert(fn_entry->type_entry, &instruction->base); - - if (tld_fn->extern_lib_name != nullptr) { - add_link_lib_symbol(ira, tld_fn->extern_lib_name, &fn_entry->symbol_name, instruction->base.source_node); - } - - IrInstruction *ref_instruction = ir_create_const_fn(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, fn_entry); - if (lval == LValPtr) { - return ir_get_ref(ira, &instruction->base, 
ref_instruction, true, false); - } else { - return ref_instruction; - } - } + if (instruction->lval == LValPtr) { + return ref_instruction; + } else { + return ir_get_deref(ira, &instruction->base, ref_instruction, nullptr); } - zig_unreachable(); } static IrInstruction *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstructionPtrToInt *instruction) { @@ -22960,7 +24681,7 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr } if (instr_is_comptime(casted_ptr)) { - IrInstruction *result = ir_get_deref(ira, &instruction->base, casted_ptr); + IrInstruction *result = ir_get_deref(ira, &instruction->base, casted_ptr, nullptr); ir_assert(result->value.type != nullptr, &instruction->base); return result; } @@ -23048,70 +24769,254 @@ static IrInstruction *ir_analyze_instruction_mark_err_ret_trace_ptr(IrAnalyze *i return result; } -static IrInstruction *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstructionSqrt *instruction) { - ZigType *float_type = ir_resolve_type(ira, instruction->type->child); - if (type_is_invalid(float_type)) +static void ir_eval_float_op(IrAnalyze *ira, IrInstructionFloatOp *source_instr, ZigType *float_type, + ConstExprValue *op, ConstExprValue *out_val) { + assert(ira && source_instr && float_type && out_val && op); + assert(float_type->id == ZigTypeIdFloat || + float_type->id == ZigTypeIdComptimeFloat); + + BuiltinFnId fop = source_instr->op; + unsigned bits; + + switch (float_type->id) { + case ZigTypeIdComptimeFloat: + bits = 128; + break; + case ZigTypeIdFloat: + bits = float_type->data.floating.bit_count; + break; + default: + zig_unreachable(); + } + + switch (bits) { + case 16: { + switch (fop) { + case BuiltinFnIdSqrt: + out_val->data.x_f16 = f16_sqrt(op->data.x_f16); + break; + case BuiltinFnIdSin: + case BuiltinFnIdCos: + case BuiltinFnIdExp: + case BuiltinFnIdExp2: + case BuiltinFnIdLn: + case BuiltinFnIdLog10: + case BuiltinFnIdLog2: + case BuiltinFnIdFabs: + case BuiltinFnIdFloor: + case BuiltinFnIdCeil: + case BuiltinFnIdTrunc: + case BuiltinFnIdNearbyInt: + case BuiltinFnIdRound: + zig_panic("unimplemented f16 builtin"); + default: + zig_unreachable(); + }; + break; + }; + case 32: { + switch (fop) { + case BuiltinFnIdSqrt: + out_val->data.x_f32 = sqrtf(op->data.x_f32); + break; + case BuiltinFnIdSin: + out_val->data.x_f32 = sinf(op->data.x_f32); + break; + case BuiltinFnIdCos: + out_val->data.x_f32 = cosf(op->data.x_f32); + break; + case BuiltinFnIdExp: + out_val->data.x_f32 = expf(op->data.x_f32); + break; + case BuiltinFnIdExp2: + out_val->data.x_f32 = exp2f(op->data.x_f32); + break; + case BuiltinFnIdLn: + out_val->data.x_f32 = logf(op->data.x_f32); + break; + case BuiltinFnIdLog10: + out_val->data.x_f32 = log10f(op->data.x_f32); + break; + case BuiltinFnIdLog2: + out_val->data.x_f32 = log2f(op->data.x_f32); + break; + case BuiltinFnIdFabs: + out_val->data.x_f32 = fabsf(op->data.x_f32); + break; + case BuiltinFnIdFloor: + out_val->data.x_f32 = floorf(op->data.x_f32); + break; + case BuiltinFnIdCeil: + out_val->data.x_f32 = ceilf(op->data.x_f32); + break; + case BuiltinFnIdTrunc: + out_val->data.x_f32 = truncf(op->data.x_f32); + break; + case BuiltinFnIdNearbyInt: + out_val->data.x_f32 = nearbyintf(op->data.x_f32); + break; + case BuiltinFnIdRound: + out_val->data.x_f32 = roundf(op->data.x_f32); + break; + default: + zig_unreachable(); + }; + break; + }; + case 64: { + switch (fop) { + case BuiltinFnIdSqrt: + out_val->data.x_f64 = sqrt(op->data.x_f64); + break; + case BuiltinFnIdSin: + out_val->data.x_f64 = 
sin(op->data.x_f64); + break; + case BuiltinFnIdCos: + out_val->data.x_f64 = cos(op->data.x_f64); + break; + case BuiltinFnIdExp: + out_val->data.x_f64 = exp(op->data.x_f64); + break; + case BuiltinFnIdExp2: + out_val->data.x_f64 = exp2(op->data.x_f64); + break; + case BuiltinFnIdLn: + out_val->data.x_f64 = log(op->data.x_f64); + break; + case BuiltinFnIdLog10: + out_val->data.x_f64 = log10(op->data.x_f64); + break; + case BuiltinFnIdLog2: + out_val->data.x_f64 = log2(op->data.x_f64); + break; + case BuiltinFnIdFabs: + out_val->data.x_f64 = fabs(op->data.x_f64); + break; + case BuiltinFnIdFloor: + out_val->data.x_f64 = floor(op->data.x_f64); + break; + case BuiltinFnIdCeil: + out_val->data.x_f64 = ceil(op->data.x_f64); + break; + case BuiltinFnIdTrunc: + out_val->data.x_f64 = trunc(op->data.x_f64); + break; + case BuiltinFnIdNearbyInt: + out_val->data.x_f64 = nearbyint(op->data.x_f64); + break; + case BuiltinFnIdRound: + out_val->data.x_f64 = round(op->data.x_f64); + break; + default: + zig_unreachable(); + } + break; + }; + case 128: { + float128_t *out, *in; + if (float_type->id == ZigTypeIdComptimeFloat) { + out = &out_val->data.x_bigfloat.value; + in = &op->data.x_bigfloat.value; + } else { + out = &out_val->data.x_f128; + in = &op->data.x_f128; + } + switch (fop) { + case BuiltinFnIdSqrt: + f128M_sqrt(in, out); + break; + case BuiltinFnIdNearbyInt: + case BuiltinFnIdSin: + case BuiltinFnIdCos: + case BuiltinFnIdExp: + case BuiltinFnIdExp2: + case BuiltinFnIdLn: + case BuiltinFnIdLog10: + case BuiltinFnIdLog2: + case BuiltinFnIdFabs: + case BuiltinFnIdFloor: + case BuiltinFnIdCeil: + case BuiltinFnIdTrunc: + case BuiltinFnIdRound: + zig_panic("unimplemented f128 builtin"); + default: + zig_unreachable(); + } + break; + }; + default: + zig_unreachable(); + } +} + +static IrInstruction *ir_analyze_instruction_float_op(IrAnalyze *ira, IrInstructionFloatOp *instruction) { + IrInstruction *type = instruction->type->child; + if (type_is_invalid(type->value.type)) + return ira->codegen->invalid_instruction; + + ZigType *expr_type = ir_resolve_type(ira, type); + if (type_is_invalid(expr_type)) return ira->codegen->invalid_instruction; - IrInstruction *op = instruction->op->child; - if (type_is_invalid(op->value.type)) - return ira->codegen->invalid_instruction; - - bool ok_type = float_type->id == ZigTypeIdComptimeFloat || float_type->id == ZigTypeIdFloat; - if (!ok_type) { - ir_add_error(ira, instruction->type, buf_sprintf("@sqrt does not support type '%s'", buf_ptr(&float_type->name))); + // Only allow float types, and vectors of floats. + ZigType *float_type = (expr_type->id == ZigTypeIdVector) ? 
expr_type->data.vector.elem_type : expr_type; + if (float_type->id != ZigTypeIdFloat && float_type->id != ZigTypeIdComptimeFloat) { + ir_add_error(ira, instruction->type, buf_sprintf("@%s does not support type '%s'", float_op_to_name(instruction->op, false), buf_ptr(&float_type->name))); return ira->codegen->invalid_instruction; } - IrInstruction *casted_op = ir_implicit_cast(ira, op, float_type); - if (type_is_invalid(casted_op->value.type)) + IrInstruction *op1 = instruction->op1->child; + if (type_is_invalid(op1->value.type)) return ira->codegen->invalid_instruction; - if (instr_is_comptime(casted_op)) { - ConstExprValue *val = ir_resolve_const(ira, casted_op, UndefBad); - if (!val) + IrInstruction *casted_op1 = ir_implicit_cast(ira, op1, float_type); + if (type_is_invalid(casted_op1->value.type)) + return ira->codegen->invalid_instruction; + + if (instr_is_comptime(casted_op1)) { + // Our comptime 16-bit and 128-bit support is quite limited. + if ((float_type->id == ZigTypeIdComptimeFloat || + float_type->data.floating.bit_count == 16 || + float_type->data.floating.bit_count == 128) && + instruction->op != BuiltinFnIdSqrt) { + ir_add_error(ira, instruction->type, buf_sprintf("@%s does not support type '%s'", float_op_to_name(instruction->op, false), buf_ptr(&float_type->name))); return ira->codegen->invalid_instruction; - - IrInstruction *result = ir_const(ira, &instruction->base, float_type); - ConstExprValue *out_val = &result->value; - - if (float_type->id == ZigTypeIdComptimeFloat) { - bigfloat_sqrt(&out_val->data.x_bigfloat, &val->data.x_bigfloat); - } else if (float_type->id == ZigTypeIdFloat) { - switch (float_type->data.floating.bit_count) { - case 16: - out_val->data.x_f16 = f16_sqrt(val->data.x_f16); - break; - case 32: - out_val->data.x_f32 = sqrtf(val->data.x_f32); - break; - case 64: - out_val->data.x_f64 = sqrt(val->data.x_f64); - break; - case 128: - f128M_sqrt(&val->data.x_f128, &out_val->data.x_f128); - break; - default: - zig_unreachable(); - } - } else { - zig_unreachable(); } + ConstExprValue *op1_const = ir_resolve_const(ira, casted_op1, UndefBad); + if (!op1_const) + return ira->codegen->invalid_instruction; + + IrInstruction *result = ir_const(ira, &instruction->base, expr_type); + ConstExprValue *out_val = &result->value; + + if (expr_type->id == ZigTypeIdVector) { + expand_undef_array(ira->codegen, op1_const); + out_val->special = ConstValSpecialUndef; + expand_undef_array(ira->codegen, out_val); + size_t len = expr_type->data.vector.len; + for (size_t i = 0; i < len; i += 1) { + ConstExprValue *float_operand_op1 = &op1_const->data.x_array.data.s_none.elements[i]; + ConstExprValue *float_out_val = &out_val->data.x_array.data.s_none.elements[i]; + assert(float_operand_op1->type == float_type); + assert(float_out_val->type == float_type); + ir_eval_float_op(ira, instruction, float_type, + op1_const, float_out_val); + float_out_val->type = float_type; + } + out_val->type = expr_type; + out_val->special = ConstValSpecialStatic; + } else { + ir_eval_float_op(ira, instruction, float_type, op1_const, out_val); + } return result; } ir_assert(float_type->id == ZigTypeIdFloat, &instruction->base); - if (float_type->data.floating.bit_count != 16 && - float_type->data.floating.bit_count != 32 && - float_type->data.floating.bit_count != 64) { - ir_add_error(ira, instruction->type, buf_sprintf("compiler TODO: add implementation of sqrt for '%s'", buf_ptr(&float_type->name))); - return ira->codegen->invalid_instruction; - } - IrInstruction *result = 
ir_build_sqrt(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, nullptr, casted_op); - result->value.type = float_type; + IrInstruction *result = ir_build_float_op(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, nullptr, casted_op1, instruction->op); + result->value.type = expr_type; return result; } @@ -23314,12 +25219,56 @@ static IrInstruction *ir_analyze_instruction_undeclared_ident(IrAnalyze *ira, Ir return ira->codegen->invalid_instruction; } -static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { +static IrInstruction *ir_analyze_instruction_end_expr(IrAnalyze *ira, IrInstructionEndExpr *instruction) { + IrInstruction *value = instruction->value->child; + if (type_is_invalid(value->value.type)) + return ira->codegen->invalid_instruction; + + bool was_written = instruction->result_loc->written; + IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, instruction->result_loc, + value->value.type, value, false, false); + if (result_loc != nullptr) { + if (type_is_invalid(result_loc->value.type)) + return ira->codegen->invalid_instruction; + if (result_loc->value.type->id == ZigTypeIdUnreachable) + return result_loc; + + if (!was_written) { + IrInstruction *store_ptr = ir_analyze_store_ptr(ira, &instruction->base, result_loc, value); + if (type_is_invalid(store_ptr->value.type)) { + return ira->codegen->invalid_instruction; + } + } + + if (result_loc->value.data.x_ptr.mut == ConstPtrMutInfer) { + if (instr_is_comptime(value)) { + result_loc->value.data.x_ptr.mut = ConstPtrMutComptimeConst; + } else { + result_loc->value.special = ConstValSpecialRuntime; + } + } + } + + return ir_const_void(ira, &instruction->base); +} + +static IrInstruction *ir_analyze_instruction_bit_cast_src(IrAnalyze *ira, IrInstructionBitCastSrc *instruction) { + IrInstruction *operand = instruction->operand->child; + if (type_is_invalid(operand->value.type)) + return operand; + + IrInstruction *result_loc = ir_resolve_result(ira, &instruction->base, + &instruction->result_loc_bit_cast->base, operand->value.type, operand, false, false); + if (result_loc != nullptr && (type_is_invalid(result_loc->value.type) || instr_is_unreachable(result_loc))) + return result_loc; + + return instruction->result_loc_bit_cast->parent->gen_instruction; +} + +static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: case IrInstructionIdWidenOrShorten: - case IrInstructionIdStructInit: - case IrInstructionIdUnionInit: case IrInstructionIdStructFieldPtr: case IrInstructionIdUnionFieldPtr: case IrInstructionIdOptionalWrap: @@ -23331,11 +25280,18 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio case IrInstructionIdCmpxchgGen: case IrInstructionIdArrayToVector: case IrInstructionIdVectorToArray: + case IrInstructionIdPtrOfArrayToSlice: case IrInstructionIdAssertZero: case IrInstructionIdAssertNonNull: case IrInstructionIdResizeSlice: case IrInstructionIdLoadPtrGen: case IrInstructionIdBitCastGen: + case IrInstructionIdCallGen: + case IrInstructionIdReturnPtr: + case IrInstructionIdAllocaGen: + case IrInstructionIdSliceGen: + case IrInstructionIdRefGen: + case IrInstructionIdTestErrGen: zig_unreachable(); case IrInstructionIdReturn: @@ -23358,8 +25314,8 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_var_ptr(ira, (IrInstructionVarPtr 
*)instruction); case IrInstructionIdFieldPtr: return ir_analyze_instruction_field_ptr(ira, (IrInstructionFieldPtr *)instruction); - case IrInstructionIdCall: - return ir_analyze_instruction_call(ira, (IrInstructionCall *)instruction); + case IrInstructionIdCallSrc: + return ir_analyze_instruction_call(ira, (IrInstructionCallSrc *)instruction); case IrInstructionIdBr: return ir_analyze_instruction_br(ira, (IrInstructionBr *)instruction); case IrInstructionIdCondBr: @@ -23370,10 +25326,6 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_phi(ira, (IrInstructionPhi *)instruction); case IrInstructionIdTypeOf: return ir_analyze_instruction_typeof(ira, (IrInstructionTypeOf *)instruction); - case IrInstructionIdToPtrType: - return ir_analyze_instruction_to_ptr_type(ira, (IrInstructionToPtrType *)instruction); - case IrInstructionIdPtrTypeChild: - return ir_analyze_instruction_ptr_type_child(ira, (IrInstructionPtrTypeChild *)instruction); case IrInstructionIdSetCold: return ir_analyze_instruction_set_cold(ira, (IrInstructionSetCold *)instruction); case IrInstructionIdSetRuntimeSafety: @@ -23474,8 +25426,8 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_memset(ira, (IrInstructionMemset *)instruction); case IrInstructionIdMemcpy: return ir_analyze_instruction_memcpy(ira, (IrInstructionMemcpy *)instruction); - case IrInstructionIdSlice: - return ir_analyze_instruction_slice(ira, (IrInstructionSlice *)instruction); + case IrInstructionIdSliceSrc: + return ir_analyze_instruction_slice(ira, (IrInstructionSliceSrc *)instruction); case IrInstructionIdMemberCount: return ir_analyze_instruction_member_count(ira, (IrInstructionMemberCount *)instruction); case IrInstructionIdMemberType: @@ -23494,8 +25446,8 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_align_of(ira, (IrInstructionAlignOf *)instruction); case IrInstructionIdOverflowOp: return ir_analyze_instruction_overflow_op(ira, (IrInstructionOverflowOp *)instruction); - case IrInstructionIdTestErr: - return ir_analyze_instruction_test_err(ira, (IrInstructionTestErr *)instruction); + case IrInstructionIdTestErrSrc: + return ir_analyze_instruction_test_err(ira, (IrInstructionTestErrSrc *)instruction); case IrInstructionIdUnwrapErrCode: return ir_analyze_instruction_unwrap_err_code(ira, (IrInstructionUnwrapErrCode *)instruction); case IrInstructionIdUnwrapErrPayload: @@ -23514,8 +25466,6 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_panic(ira, (IrInstructionPanic *)instruction); case IrInstructionIdPtrCastSrc: return ir_analyze_instruction_ptr_cast(ira, (IrInstructionPtrCastSrc *)instruction); - case IrInstructionIdBitCast: - return ir_analyze_instruction_bit_cast(ira, (IrInstructionBitCast *)instruction); case IrInstructionIdIntToPtr: return ir_analyze_instruction_int_to_ptr(ira, (IrInstructionIntToPtr *)instruction); case IrInstructionIdPtrToInt: @@ -23538,6 +25488,14 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_ptr_type(ira, (IrInstructionPtrType *)instruction); case IrInstructionIdAlignCast: return ir_analyze_instruction_align_cast(ira, (IrInstructionAlignCast *)instruction); + case IrInstructionIdImplicitCast: + return ir_analyze_instruction_implicit_cast(ira, (IrInstructionImplicitCast *)instruction); + case 
IrInstructionIdResolveResult: + return ir_analyze_instruction_resolve_result(ira, (IrInstructionResolveResult *)instruction); + case IrInstructionIdResetResult: + return ir_analyze_instruction_reset_result(ira, (IrInstructionResetResult *)instruction); + case IrInstructionIdResultPtr: + return ir_analyze_instruction_result_ptr(ira, (IrInstructionResultPtr *)instruction); case IrInstructionIdOpaqueType: return ir_analyze_instruction_opaque_type(ira, (IrInstructionOpaqueType *)instruction); case IrInstructionIdSetAlignStack: @@ -23596,8 +25554,10 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_merge_err_ret_traces(ira, (IrInstructionMergeErrRetTraces *)instruction); case IrInstructionIdMarkErrRetTracePtr: return ir_analyze_instruction_mark_err_ret_trace_ptr(ira, (IrInstructionMarkErrRetTracePtr *)instruction); - case IrInstructionIdSqrt: - return ir_analyze_instruction_sqrt(ira, (IrInstructionSqrt *)instruction); + case IrInstructionIdFloatOp: + return ir_analyze_instruction_float_op(ira, (IrInstructionFloatOp *)instruction); + case IrInstructionIdMulAdd: + return ir_analyze_instruction_mul_add(ira, (IrInstructionMulAdd *)instruction); case IrInstructionIdIntToErr: return ir_analyze_instruction_int_to_err(ira, (IrInstructionIntToErr *)instruction); case IrInstructionIdErrToInt: @@ -23612,17 +25572,16 @@ static IrInstruction *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructio return ir_analyze_instruction_has_decl(ira, (IrInstructionHasDecl *)instruction); case IrInstructionIdUndeclaredIdent: return ir_analyze_instruction_undeclared_ident(ira, (IrInstructionUndeclaredIdent *)instruction); + case IrInstructionIdAllocaSrc: + return nullptr; + case IrInstructionIdEndExpr: + return ir_analyze_instruction_end_expr(ira, (IrInstructionEndExpr *)instruction); + case IrInstructionIdBitCastSrc: + return ir_analyze_instruction_bit_cast_src(ira, (IrInstructionBitCastSrc *)instruction); } zig_unreachable(); } -static IrInstruction *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *old_instruction) { - IrInstruction *new_instruction = ir_analyze_instruction_nocast(ira, old_instruction); - ir_assert(new_instruction->value.type != nullptr, old_instruction); - old_instruction->child = new_instruction; - return new_instruction; -} - // This function attempts to evaluate IR code while doing type checking and other analysis. // It emits a new IrExecutable which is partially evaluated IR code. ZigType *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutable *new_exec, @@ -23668,14 +25627,22 @@ ZigType *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutable *new_ continue; } - IrInstruction *new_instruction = ir_analyze_instruction(ira, old_instruction); - if (type_is_invalid(new_instruction->value.type) && ir_should_inline(new_exec, old_instruction->scope)) { - return ira->codegen->builtin_types.entry_invalid; + if (ira->codegen->verbose_ir) { + fprintf(stderr, "analyze #%zu\n", old_instruction->debug_id); } + IrInstruction *new_instruction = ir_analyze_instruction_base(ira, old_instruction); + if (new_instruction != nullptr) { + ir_assert(new_instruction->value.type != nullptr || new_instruction->value.type != nullptr, old_instruction); + old_instruction->child = new_instruction; - // unreachable instructions do their own control flow. 
- if (new_instruction->value.type->id == ZigTypeIdUnreachable) - continue; + if (type_is_invalid(new_instruction->value.type)) { + return ira->codegen->builtin_types.entry_invalid; + } + + // unreachable instructions do their own control flow. + if (new_instruction->value.type->id == ZigTypeIdUnreachable) + continue; + } ira->instruction_index += 1; } @@ -23700,7 +25667,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdDeclVarSrc: case IrInstructionIdDeclVarGen: case IrInstructionIdStorePtr: - case IrInstructionIdCall: + case IrInstructionIdCallSrc: + case IrInstructionIdCallGen: case IrInstructionIdReturn: case IrInstructionIdUnreachable: case IrInstructionIdSetCold: @@ -23747,27 +25715,28 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdResizeSlice: case IrInstructionIdGlobalAsm: case IrInstructionIdUndeclaredIdent: + case IrInstructionIdEndExpr: + case IrInstructionIdPtrOfArrayToSlice: + case IrInstructionIdSliceGen: + case IrInstructionIdOptionalWrap: + case IrInstructionIdVectorToArray: + case IrInstructionIdResetResult: return true; case IrInstructionIdPhi: case IrInstructionIdUnOp: case IrInstructionIdBinOp: case IrInstructionIdLoadPtr: - case IrInstructionIdLoadPtrGen: case IrInstructionIdConst: case IrInstructionIdCast: case IrInstructionIdContainerInitList: case IrInstructionIdContainerInitFields: - case IrInstructionIdStructInit: - case IrInstructionIdUnionInit: case IrInstructionIdFieldPtr: case IrInstructionIdElemPtr: case IrInstructionIdVarPtr: + case IrInstructionIdReturnPtr: case IrInstructionIdTypeOf: - case IrInstructionIdToPtrType: - case IrInstructionIdPtrTypeChild: case IrInstructionIdStructFieldPtr: - case IrInstructionIdUnionFieldPtr: case IrInstructionIdArrayType: case IrInstructionIdPromiseType: case IrInstructionIdSliceType: @@ -23789,7 +25758,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdIntType: case IrInstructionIdVectorType: case IrInstructionIdBoolNot: - case IrInstructionIdSlice: + case IrInstructionIdSliceSrc: case IrInstructionIdMemberCount: case IrInstructionIdMemberType: case IrInstructionIdMemberName: @@ -23797,16 +25766,13 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdReturnAddress: case IrInstructionIdFrameAddress: case IrInstructionIdHandle: - case IrInstructionIdTestErr: - case IrInstructionIdUnwrapErrCode: - case IrInstructionIdOptionalWrap: - case IrInstructionIdErrWrapCode: - case IrInstructionIdErrWrapPayload: + case IrInstructionIdTestErrSrc: + case IrInstructionIdTestErrGen: case IrInstructionIdFnProto: case IrInstructionIdTestComptime: case IrInstructionIdPtrCastSrc: case IrInstructionIdPtrCastGen: - case IrInstructionIdBitCast: + case IrInstructionIdBitCastSrc: case IrInstructionIdBitCastGen: case IrInstructionIdWidenOrShorten: case IrInstructionIdPtrToInt: @@ -23824,6 +25790,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdTypeInfo: case IrInstructionIdTypeId: case IrInstructionIdAlignCast: + case IrInstructionIdImplicitCast: + case IrInstructionIdResolveResult: case IrInstructionIdOpaqueType: case IrInstructionIdArgType: case IrInstructionIdTagType: @@ -23836,7 +25804,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroFree: case IrInstructionIdCoroPromise: case IrInstructionIdPromiseResultType: - case IrInstructionIdSqrt: + case IrInstructionIdFloatOp: + case IrInstructionIdMulAdd: case IrInstructionIdAtomicLoad: case IrInstructionIdIntCast: case 
IrInstructionIdFloatCast: @@ -23847,9 +25816,11 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdFromBytes: case IrInstructionIdToBytes: case IrInstructionIdEnumToInt: - case IrInstructionIdVectorToArray: case IrInstructionIdArrayToVector: case IrInstructionIdHasDecl: + case IrInstructionIdAllocaSrc: + case IrInstructionIdAllocaGen: + case IrInstructionIdResultPtr: return false; case IrInstructionIdAsm: @@ -23861,8 +25832,21 @@ bool ir_has_side_effects(IrInstruction *instruction) { { IrInstructionUnwrapErrPayload *unwrap_err_payload_instruction = (IrInstructionUnwrapErrPayload *)instruction; - return unwrap_err_payload_instruction->safety_check_on; + return unwrap_err_payload_instruction->safety_check_on || + unwrap_err_payload_instruction->initializing; } + case IrInstructionIdUnwrapErrCode: + return reinterpret_cast(instruction)->initializing; + case IrInstructionIdUnionFieldPtr: + return reinterpret_cast(instruction)->initializing; + case IrInstructionIdErrWrapPayload: + return reinterpret_cast(instruction)->result_loc != nullptr; + case IrInstructionIdErrWrapCode: + return reinterpret_cast(instruction)->result_loc != nullptr; + case IrInstructionIdLoadPtrGen: + return reinterpret_cast(instruction)->result_loc != nullptr; + case IrInstructionIdRefGen: + return reinterpret_cast(instruction)->result_loc != nullptr; } zig_unreachable(); } diff --git a/src/ir.hpp b/src/ir.hpp index 4fb7552212..597624e2e6 100644 --- a/src/ir.hpp +++ b/src/ir.hpp @@ -26,5 +26,6 @@ bool ir_has_side_effects(IrInstruction *instruction); struct IrAnalyze; ConstExprValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ConstExprValue *const_val, AstNode *source_node); +const char *float_op_to_name(BuiltinFnId op, bool llvm_name); #endif diff --git a/src/ir_print.cpp b/src/ir_print.cpp index bf9ced89c5..a973079900 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -57,13 +57,18 @@ static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction) } static void ir_print_other_block(IrPrint *irp, IrBasicBlock *bb) { - fprintf(irp->f, "$%s_%" ZIG_PRI_usize "", bb->name_hint, bb->debug_id); + if (bb == nullptr) { + fprintf(irp->f, "(null block)"); + } else { + fprintf(irp->f, "$%s_%" ZIG_PRI_usize "", bb->name_hint, bb->debug_id); + } } static void ir_print_return(IrPrint *irp, IrInstructionReturn *return_instruction) { - assert(return_instruction->value); fprintf(irp->f, "return "); - ir_print_other_instruction(irp, return_instruction->value); + if (return_instruction->value != nullptr) { + ir_print_other_instruction(irp, return_instruction->value); + } } static void ir_print_const(IrPrint *irp, IrInstructionConst *const_instruction) { @@ -188,7 +193,7 @@ static void ir_print_decl_var_src(IrPrint *irp, IrInstructionDeclVarSrc *decl_va fprintf(irp->f, " "); } fprintf(irp->f, "= "); - ir_print_other_instruction(irp, decl_var_instruction->init_value); + ir_print_other_instruction(irp, decl_var_instruction->ptr); if (decl_var_instruction->var->is_comptime != nullptr) { fprintf(irp->f, " // comptime = "); ir_print_other_instruction(irp, decl_var_instruction->var->is_comptime); @@ -201,7 +206,56 @@ static void ir_print_cast(IrPrint *irp, IrInstructionCast *cast_instruction) { fprintf(irp->f, " to %s", buf_ptr(&cast_instruction->dest_type->name)); } -static void ir_print_call(IrPrint *irp, IrInstructionCall *call_instruction) { +static void ir_print_result_loc_var(IrPrint *irp, ResultLocVar *result_loc_var) { + fprintf(irp->f, "var("); + ir_print_other_instruction(irp, 
result_loc_var->base.source_instruction); + fprintf(irp->f, ")"); +} + +static void ir_print_result_loc_instruction(IrPrint *irp, ResultLocInstruction *result_loc_inst) { + fprintf(irp->f, "inst("); + ir_print_other_instruction(irp, result_loc_inst->base.source_instruction); + fprintf(irp->f, ")"); +} + +static void ir_print_result_loc_peer(IrPrint *irp, ResultLocPeer *result_loc_peer) { + fprintf(irp->f, "peer(next="); + ir_print_other_block(irp, result_loc_peer->next_bb); + fprintf(irp->f, ")"); +} + +static void ir_print_result_loc_bit_cast(IrPrint *irp, ResultLocBitCast *result_loc_bit_cast) { + fprintf(irp->f, "bitcast(ty="); + ir_print_other_instruction(irp, result_loc_bit_cast->base.source_instruction); + fprintf(irp->f, ")"); +} + +static void ir_print_result_loc(IrPrint *irp, ResultLoc *result_loc) { + switch (result_loc->id) { + case ResultLocIdInvalid: + zig_unreachable(); + case ResultLocIdNone: + fprintf(irp->f, "none"); + return; + case ResultLocIdReturn: + fprintf(irp->f, "return"); + return; + case ResultLocIdVar: + return ir_print_result_loc_var(irp, (ResultLocVar *)result_loc); + case ResultLocIdInstruction: + return ir_print_result_loc_instruction(irp, (ResultLocInstruction *)result_loc); + case ResultLocIdPeer: + return ir_print_result_loc_peer(irp, (ResultLocPeer *)result_loc); + case ResultLocIdBitCast: + return ir_print_result_loc_bit_cast(irp, (ResultLocBitCast *)result_loc); + case ResultLocIdPeerParent: + fprintf(irp->f, "peer_parent"); + return; + } + zig_unreachable(); +} + +static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) { if (call_instruction->is_async) { fprintf(irp->f, "async"); if (call_instruction->async_allocator != nullptr) { @@ -224,7 +278,35 @@ static void ir_print_call(IrPrint *irp, IrInstructionCall *call_instruction) { fprintf(irp->f, ", "); ir_print_other_instruction(irp, arg); } - fprintf(irp->f, ")"); + fprintf(irp->f, ")result="); + ir_print_result_loc(irp, call_instruction->result_loc); +} + +static void ir_print_call_gen(IrPrint *irp, IrInstructionCallGen *call_instruction) { + if (call_instruction->is_async) { + fprintf(irp->f, "async"); + if (call_instruction->async_allocator != nullptr) { + fprintf(irp->f, "<"); + ir_print_other_instruction(irp, call_instruction->async_allocator); + fprintf(irp->f, ">"); + } + fprintf(irp->f, " "); + } + if (call_instruction->fn_entry) { + fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name)); + } else { + assert(call_instruction->fn_ref); + ir_print_other_instruction(irp, call_instruction->fn_ref); + } + fprintf(irp->f, "("); + for (size_t i = 0; i < call_instruction->arg_count; i += 1) { + IrInstruction *arg = call_instruction->args[i]; + if (i != 0) + fprintf(irp->f, ", "); + ir_print_other_instruction(irp, arg); + } + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, call_instruction->result_loc); } static void ir_print_cond_br(IrPrint *irp, IrInstructionCondBr *cond_br_instruction) { @@ -270,10 +352,10 @@ static void ir_print_container_init_list(IrPrint *irp, IrInstructionContainerIni fprintf(irp->f, "...(%" ZIG_PRI_usize " items)...", instruction->item_count); } else { for (size_t i = 0; i < instruction->item_count; i += 1) { - IrInstruction *item = instruction->items[i]; + IrInstruction *result_loc = instruction->elem_result_loc_list[i]; if (i != 0) fprintf(irp->f, ", "); - ir_print_other_instruction(irp, item); + ir_print_other_instruction(irp, result_loc); } } fprintf(irp->f, "}"); @@ -286,32 +368,11 @@ static void 
ir_print_container_init_fields(IrPrint *irp, IrInstructionContainerI IrInstructionContainerInitFieldsField *field = &instruction->fields[i]; const char *comma = (i == 0) ? "" : ", "; fprintf(irp->f, "%s.%s = ", comma, buf_ptr(field->name)); - ir_print_other_instruction(irp, field->value); + ir_print_other_instruction(irp, field->result_loc); } fprintf(irp->f, "} // container init"); } -static void ir_print_struct_init(IrPrint *irp, IrInstructionStructInit *instruction) { - fprintf(irp->f, "%s {", buf_ptr(&instruction->struct_type->name)); - for (size_t i = 0; i < instruction->field_count; i += 1) { - IrInstructionStructInitField *field = &instruction->fields[i]; - Buf *field_name = field->type_struct_field->name; - const char *comma = (i == 0) ? "" : ", "; - fprintf(irp->f, "%s.%s = ", comma, buf_ptr(field_name)); - ir_print_other_instruction(irp, field->value); - } - fprintf(irp->f, "} // struct init"); -} - -static void ir_print_union_init(IrPrint *irp, IrInstructionUnionInit *instruction) { - Buf *field_name = instruction->field->enum_field->name; - - fprintf(irp->f, "%s {", buf_ptr(&instruction->union_type->name)); - fprintf(irp->f, ".%s = ", buf_ptr(field_name)); - ir_print_other_instruction(irp, instruction->init_value); - fprintf(irp->f, "} // union init"); -} - static void ir_print_unreachable(IrPrint *irp, IrInstructionUnreachable *instruction) { fprintf(irp->f, "unreachable"); } @@ -331,14 +392,20 @@ static void ir_print_var_ptr(IrPrint *irp, IrInstructionVarPtr *instruction) { fprintf(irp->f, "&%s", buf_ptr(&instruction->var->name)); } +static void ir_print_return_ptr(IrPrint *irp, IrInstructionReturnPtr *instruction) { + fprintf(irp->f, "@ReturnPtr"); +} + static void ir_print_load_ptr(IrPrint *irp, IrInstructionLoadPtr *instruction) { ir_print_other_instruction(irp, instruction->ptr); fprintf(irp->f, ".*"); } static void ir_print_load_ptr_gen(IrPrint *irp, IrInstructionLoadPtrGen *instruction) { + fprintf(irp->f, "loadptr("); ir_print_other_instruction(irp, instruction->ptr); - fprintf(irp->f, ".*"); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); } static void ir_print_store_ptr(IrPrint *irp, IrInstructionStorePtr *instruction) { @@ -354,18 +421,6 @@ static void ir_print_typeof(IrPrint *irp, IrInstructionTypeOf *instruction) { fprintf(irp->f, ")"); } -static void ir_print_to_ptr_type(IrPrint *irp, IrInstructionToPtrType *instruction) { - fprintf(irp->f, "@toPtrType("); - ir_print_other_instruction(irp, instruction->ptr); - fprintf(irp->f, ")"); -} - -static void ir_print_ptr_type_child(IrPrint *irp, IrInstructionPtrTypeChild *instruction) { - fprintf(irp->f, "@ptrTypeChild("); - ir_print_other_instruction(irp, instruction->value); - fprintf(irp->f, ")"); -} - static void ir_print_field_ptr(IrPrint *irp, IrInstructionFieldPtr *instruction) { if (instruction->field_name_buffer) { fprintf(irp->f, "fieldptr "); @@ -618,6 +673,13 @@ static void ir_print_ref(IrPrint *irp, IrInstructionRef *instruction) { ir_print_other_instruction(irp, instruction->value); } +static void ir_print_ref_gen(IrPrint *irp, IrInstructionRefGen *instruction) { + fprintf(irp->f, "@ref("); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); +} + static void ir_print_compile_err(IrPrint *irp, IrInstructionCompileErr *instruction) { fprintf(irp->f, "@compileError("); ir_print_other_instruction(irp, instruction->msg); @@ -682,7 +744,8 @@ static void 
ir_print_cmpxchg_src(IrPrint *irp, IrInstructionCmpxchgSrc *instruct ir_print_other_instruction(irp, instruction->success_order_value); fprintf(irp->f, ", "); ir_print_other_instruction(irp, instruction->failure_order_value); - fprintf(irp->f, ")"); + fprintf(irp->f, ")result="); + ir_print_result_loc(irp, instruction->result_loc); } static void ir_print_cmpxchg_gen(IrPrint *irp, IrInstructionCmpxchgGen *instruction) { @@ -692,7 +755,8 @@ static void ir_print_cmpxchg_gen(IrPrint *irp, IrInstructionCmpxchgGen *instruct ir_print_other_instruction(irp, instruction->cmp_value); fprintf(irp->f, ", "); ir_print_other_instruction(irp, instruction->new_value); - fprintf(irp->f, ", TODO print atomic orders)"); + fprintf(irp->f, ", TODO print atomic orders)result="); + ir_print_other_instruction(irp, instruction->result_loc); } static void ir_print_fence(IrPrint *irp, IrInstructionFence *instruction) { @@ -810,14 +874,26 @@ static void ir_print_memcpy(IrPrint *irp, IrInstructionMemcpy *instruction) { fprintf(irp->f, ")"); } -static void ir_print_slice(IrPrint *irp, IrInstructionSlice *instruction) { +static void ir_print_slice_src(IrPrint *irp, IrInstructionSliceSrc *instruction) { ir_print_other_instruction(irp, instruction->ptr); fprintf(irp->f, "["); ir_print_other_instruction(irp, instruction->start); fprintf(irp->f, ".."); if (instruction->end) ir_print_other_instruction(irp, instruction->end); - fprintf(irp->f, "]"); + fprintf(irp->f, "]result="); + ir_print_result_loc(irp, instruction->result_loc); +} + +static void ir_print_slice_gen(IrPrint *irp, IrInstructionSliceGen *instruction) { + ir_print_other_instruction(irp, instruction->ptr); + fprintf(irp->f, "["); + ir_print_other_instruction(irp, instruction->start); + fprintf(irp->f, ".."); + if (instruction->end) + ir_print_other_instruction(irp, instruction->end); + fprintf(irp->f, "]result="); + ir_print_other_instruction(irp, instruction->result_loc); } static void ir_print_member_count(IrPrint *irp, IrInstructionMemberCount *instruction) { @@ -889,43 +965,49 @@ static void ir_print_overflow_op(IrPrint *irp, IrInstructionOverflowOp *instruct fprintf(irp->f, ")"); } -static void ir_print_test_err(IrPrint *irp, IrInstructionTestErr *instruction) { +static void ir_print_test_err_src(IrPrint *irp, IrInstructionTestErrSrc *instruction) { fprintf(irp->f, "@testError("); - ir_print_other_instruction(irp, instruction->value); + ir_print_other_instruction(irp, instruction->base_ptr); + fprintf(irp->f, ")"); +} + +static void ir_print_test_err_gen(IrPrint *irp, IrInstructionTestErrGen *instruction) { + fprintf(irp->f, "@testError("); + ir_print_other_instruction(irp, instruction->err_union); fprintf(irp->f, ")"); } static void ir_print_unwrap_err_code(IrPrint *irp, IrInstructionUnwrapErrCode *instruction) { fprintf(irp->f, "UnwrapErrorCode("); - ir_print_other_instruction(irp, instruction->err_union); + ir_print_other_instruction(irp, instruction->err_union_ptr); fprintf(irp->f, ")"); } static void ir_print_unwrap_err_payload(IrPrint *irp, IrInstructionUnwrapErrPayload *instruction) { fprintf(irp->f, "ErrorUnionFieldPayload("); ir_print_other_instruction(irp, instruction->value); - fprintf(irp->f, ")"); - if (!instruction->safety_check_on) { - fprintf(irp->f, " // no safety"); - } + fprintf(irp->f, ")safety=%d,init=%d",instruction->safety_check_on, instruction->initializing); } -static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionOptionalWrap *instruction) { - fprintf(irp->f, "@maybeWrap("); - ir_print_other_instruction(irp, 
instruction->value); - fprintf(irp->f, ")"); +static void ir_print_optional_wrap(IrPrint *irp, IrInstructionOptionalWrap *instruction) { + fprintf(irp->f, "@optionalWrap("); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); } static void ir_print_err_wrap_code(IrPrint *irp, IrInstructionErrWrapCode *instruction) { fprintf(irp->f, "@errWrapCode("); - ir_print_other_instruction(irp, instruction->value); - fprintf(irp->f, ")"); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); } static void ir_print_err_wrap_payload(IrPrint *irp, IrInstructionErrWrapPayload *instruction) { fprintf(irp->f, "@errWrapPayload("); - ir_print_other_instruction(irp, instruction->value); - fprintf(irp->f, ")"); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); } static void ir_print_fn_proto(IrPrint *irp, IrInstructionFnProto *instruction) { @@ -971,12 +1053,11 @@ static void ir_print_ptr_cast_gen(IrPrint *irp, IrInstructionPtrCastGen *instruc fprintf(irp->f, ")"); } -static void ir_print_bit_cast(IrPrint *irp, IrInstructionBitCast *instruction) { +static void ir_print_bit_cast_src(IrPrint *irp, IrInstructionBitCastSrc *instruction) { fprintf(irp->f, "@bitCast("); - ir_print_other_instruction(irp, instruction->dest_type); - fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->value); - fprintf(irp->f, ")"); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")result="); + ir_print_result_loc(irp, &instruction->result_loc_bit_cast->base); } static void ir_print_bit_cast_gen(IrPrint *irp, IrInstructionBitCastGen *instruction) { @@ -1043,7 +1124,15 @@ static void ir_print_array_to_vector(IrPrint *irp, IrInstructionArrayToVector *i static void ir_print_vector_to_array(IrPrint *irp, IrInstructionVectorToArray *instruction) { fprintf(irp->f, "VectorToArray("); ir_print_other_instruction(irp, instruction->vector); - fprintf(irp->f, ")"); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); +} + +static void ir_print_ptr_of_array_to_slice(IrPrint *irp, IrInstructionPtrOfArrayToSlice *instruction) { + fprintf(irp->f, "PtrOfArrayToSlice("); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); } static void ir_print_assert_zero(IrPrint *irp, IrInstructionAssertZero *instruction) { @@ -1061,6 +1150,25 @@ static void ir_print_assert_non_null(IrPrint *irp, IrInstructionAssertNonNull *i static void ir_print_resize_slice(IrPrint *irp, IrInstructionResizeSlice *instruction) { fprintf(irp->f, "@resizeSlice("); ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ")result="); + ir_print_other_instruction(irp, instruction->result_loc); +} + +static void ir_print_alloca_src(IrPrint *irp, IrInstructionAllocaSrc *instruction) { + fprintf(irp->f, "Alloca(align="); + ir_print_other_instruction(irp, instruction->align); + fprintf(irp->f, ",name=%s)", instruction->name_hint); +} + +static void ir_print_alloca_gen(IrPrint *irp, IrInstructionAllocaGen *instruction) { + fprintf(irp->f, "Alloca(align=%" PRIu32 ",name=%s)", instruction->align, instruction->name_hint); +} + +static void ir_print_end_expr(IrPrint *irp, IrInstructionEndExpr *instruction) { + fprintf(irp->f, 
"EndExpr(result="); + ir_print_result_loc(irp, instruction->result_loc); + fprintf(irp->f, ",value="); + ir_print_other_instruction(irp, instruction->value); fprintf(irp->f, ")"); } @@ -1186,6 +1294,34 @@ static void ir_print_align_cast(IrPrint *irp, IrInstructionAlignCast *instructio fprintf(irp->f, ")"); } +static void ir_print_implicit_cast(IrPrint *irp, IrInstructionImplicitCast *instruction) { + fprintf(irp->f, "@implicitCast("); + ir_print_other_instruction(irp, instruction->dest_type); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->target); + fprintf(irp->f, ")"); +} + +static void ir_print_resolve_result(IrPrint *irp, IrInstructionResolveResult *instruction) { + fprintf(irp->f, "ResolveResult("); + ir_print_result_loc(irp, instruction->result_loc); + fprintf(irp->f, ")"); +} + +static void ir_print_reset_result(IrPrint *irp, IrInstructionResetResult *instruction) { + fprintf(irp->f, "ResetResult("); + ir_print_result_loc(irp, instruction->result_loc); + fprintf(irp->f, ")"); +} + +static void ir_print_result_ptr(IrPrint *irp, IrInstructionResultPtr *instruction) { + fprintf(irp->f, "ResultPtr("); + ir_print_result_loc(irp, instruction->result_loc); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->result); + fprintf(irp->f, ")"); +} + static void ir_print_opaque_type(IrPrint *irp, IrInstructionOpaqueType *instruction) { fprintf(irp->f, "@OpaqueType()"); } @@ -1427,15 +1563,32 @@ static void ir_print_mark_err_ret_trace_ptr(IrPrint *irp, IrInstructionMarkErrRe fprintf(irp->f, ")"); } -static void ir_print_sqrt(IrPrint *irp, IrInstructionSqrt *instruction) { - fprintf(irp->f, "@sqrt("); +static void ir_print_float_op(IrPrint *irp, IrInstructionFloatOp *instruction) { + + fprintf(irp->f, "@%s(", float_op_to_name(instruction->op, false)); if (instruction->type != nullptr) { ir_print_other_instruction(irp, instruction->type); } else { fprintf(irp->f, "null"); } fprintf(irp->f, ","); - ir_print_other_instruction(irp, instruction->op); + ir_print_other_instruction(irp, instruction->op1); + fprintf(irp->f, ")"); +} + +static void ir_print_mul_add(IrPrint *irp, IrInstructionMulAdd *instruction) { + fprintf(irp->f, "@mulAdd("); + if (instruction->type_value != nullptr) { + ir_print_other_instruction(irp, instruction->type_value); + } else { + fprintf(irp->f, "null"); + } + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->op1); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->op2); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->op3); fprintf(irp->f, ")"); } @@ -1446,7 +1599,7 @@ static void ir_print_decl_var_gen(IrPrint *irp, IrInstructionDeclVarGen *decl_va fprintf(irp->f, "%s %s: %s align(%u) = ", var_or_const, name, buf_ptr(&var->var_type->name), var->align_bytes); - ir_print_other_instruction(irp, decl_var_instruction->init_value); + ir_print_other_instruction(irp, decl_var_instruction->var_ptr); if (decl_var_instruction->var->is_comptime != nullptr) { fprintf(irp->f, " // comptime = "); ir_print_other_instruction(irp, decl_var_instruction->var->is_comptime); @@ -1485,8 +1638,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCast: ir_print_cast(irp, (IrInstructionCast *)instruction); break; - case IrInstructionIdCall: - ir_print_call(irp, (IrInstructionCall *)instruction); + case IrInstructionIdCallSrc: + ir_print_call_src(irp, (IrInstructionCallSrc *)instruction); + break; + case IrInstructionIdCallGen: + ir_print_call_gen(irp, 
(IrInstructionCallGen *)instruction); break; case IrInstructionIdUnOp: ir_print_un_op(irp, (IrInstructionUnOp *)instruction); @@ -1506,12 +1662,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdContainerInitFields: ir_print_container_init_fields(irp, (IrInstructionContainerInitFields *)instruction); break; - case IrInstructionIdStructInit: - ir_print_struct_init(irp, (IrInstructionStructInit *)instruction); - break; - case IrInstructionIdUnionInit: - ir_print_union_init(irp, (IrInstructionUnionInit *)instruction); - break; case IrInstructionIdUnreachable: ir_print_unreachable(irp, (IrInstructionUnreachable *)instruction); break; @@ -1521,6 +1671,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdVarPtr: ir_print_var_ptr(irp, (IrInstructionVarPtr *)instruction); break; + case IrInstructionIdReturnPtr: + ir_print_return_ptr(irp, (IrInstructionReturnPtr *)instruction); + break; case IrInstructionIdLoadPtr: ir_print_load_ptr(irp, (IrInstructionLoadPtr *)instruction); break; @@ -1533,12 +1686,6 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdTypeOf: ir_print_typeof(irp, (IrInstructionTypeOf *)instruction); break; - case IrInstructionIdToPtrType: - ir_print_to_ptr_type(irp, (IrInstructionToPtrType *)instruction); - break; - case IrInstructionIdPtrTypeChild: - ir_print_ptr_type_child(irp, (IrInstructionPtrTypeChild *)instruction); - break; case IrInstructionIdFieldPtr: ir_print_field_ptr(irp, (IrInstructionFieldPtr *)instruction); break; @@ -1617,6 +1764,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdRef: ir_print_ref(irp, (IrInstructionRef *)instruction); break; + case IrInstructionIdRefGen: + ir_print_ref_gen(irp, (IrInstructionRefGen *)instruction); + break; case IrInstructionIdCompileErr: ir_print_compile_err(irp, (IrInstructionCompileErr *)instruction); break; @@ -1692,8 +1842,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdMemcpy: ir_print_memcpy(irp, (IrInstructionMemcpy *)instruction); break; - case IrInstructionIdSlice: - ir_print_slice(irp, (IrInstructionSlice *)instruction); + case IrInstructionIdSliceSrc: + ir_print_slice_src(irp, (IrInstructionSliceSrc *)instruction); + break; + case IrInstructionIdSliceGen: + ir_print_slice_gen(irp, (IrInstructionSliceGen *)instruction); break; case IrInstructionIdMemberCount: ir_print_member_count(irp, (IrInstructionMemberCount *)instruction); @@ -1722,8 +1875,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdOverflowOp: ir_print_overflow_op(irp, (IrInstructionOverflowOp *)instruction); break; - case IrInstructionIdTestErr: - ir_print_test_err(irp, (IrInstructionTestErr *)instruction); + case IrInstructionIdTestErrSrc: + ir_print_test_err_src(irp, (IrInstructionTestErrSrc *)instruction); + break; + case IrInstructionIdTestErrGen: + ir_print_test_err_gen(irp, (IrInstructionTestErrGen *)instruction); break; case IrInstructionIdUnwrapErrCode: ir_print_unwrap_err_code(irp, (IrInstructionUnwrapErrCode *)instruction); @@ -1732,7 +1888,7 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_unwrap_err_payload(irp, (IrInstructionUnwrapErrPayload *)instruction); break; case IrInstructionIdOptionalWrap: - ir_print_maybe_wrap(irp, (IrInstructionOptionalWrap *)instruction); + ir_print_optional_wrap(irp, 
(IrInstructionOptionalWrap *)instruction); break; case IrInstructionIdErrWrapCode: ir_print_err_wrap_code(irp, (IrInstructionErrWrapCode *)instruction); @@ -1752,8 +1908,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdPtrCastGen: ir_print_ptr_cast_gen(irp, (IrInstructionPtrCastGen *)instruction); break; - case IrInstructionIdBitCast: - ir_print_bit_cast(irp, (IrInstructionBitCast *)instruction); + case IrInstructionIdBitCastSrc: + ir_print_bit_cast_src(irp, (IrInstructionBitCastSrc *)instruction); break; case IrInstructionIdBitCastGen: ir_print_bit_cast_gen(irp, (IrInstructionBitCastGen *)instruction); @@ -1818,6 +1974,18 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdAlignCast: ir_print_align_cast(irp, (IrInstructionAlignCast *)instruction); break; + case IrInstructionIdImplicitCast: + ir_print_implicit_cast(irp, (IrInstructionImplicitCast *)instruction); + break; + case IrInstructionIdResolveResult: + ir_print_resolve_result(irp, (IrInstructionResolveResult *)instruction); + break; + case IrInstructionIdResetResult: + ir_print_reset_result(irp, (IrInstructionResetResult *)instruction); + break; + case IrInstructionIdResultPtr: + ir_print_result_ptr(irp, (IrInstructionResultPtr *)instruction); + break; case IrInstructionIdOpaqueType: ir_print_opaque_type(irp, (IrInstructionOpaqueType *)instruction); break; @@ -1902,8 +2070,11 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdMarkErrRetTracePtr: ir_print_mark_err_ret_trace_ptr(irp, (IrInstructionMarkErrRetTracePtr *)instruction); break; - case IrInstructionIdSqrt: - ir_print_sqrt(irp, (IrInstructionSqrt *)instruction); + case IrInstructionIdFloatOp: + ir_print_float_op(irp, (IrInstructionFloatOp *)instruction); + break; + case IrInstructionIdMulAdd: + ir_print_mul_add(irp, (IrInstructionMulAdd *)instruction); break; case IrInstructionIdAtomicLoad: ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction); @@ -1923,6 +2094,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdVectorToArray: ir_print_vector_to_array(irp, (IrInstructionVectorToArray *)instruction); break; + case IrInstructionIdPtrOfArrayToSlice: + ir_print_ptr_of_array_to_slice(irp, (IrInstructionPtrOfArrayToSlice *)instruction); + break; case IrInstructionIdAssertZero: ir_print_assert_zero(irp, (IrInstructionAssertZero *)instruction); break; @@ -1938,6 +2112,15 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdUndeclaredIdent: ir_print_undeclared_ident(irp, (IrInstructionUndeclaredIdent *)instruction); break; + case IrInstructionIdAllocaSrc: + ir_print_alloca_src(irp, (IrInstructionAllocaSrc *)instruction); + break; + case IrInstructionIdAllocaGen: + ir_print_alloca_gen(irp, (IrInstructionAllocaGen *)instruction); + break; + case IrInstructionIdEndExpr: + ir_print_end_expr(irp, (IrInstructionEndExpr *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/src/main.cpp b/src/main.cpp index 9b1892061b..57eeef59df 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -913,8 +913,20 @@ int main(int argc, char **argv) { get_native_target(&target); } else { if ((err = target_parse_triple(&target, target_string))) { - fprintf(stderr, "invalid target: %s\n", err_str(err)); - return print_error_usage(arg0); + if (err == ErrorUnknownArchitecture && target.arch != ZigLLVM_UnknownArch) { + fprintf(stderr, "'%s' requires a 
sub-architecture. Try one of these:\n", + target_arch_name(target.arch)); + SubArchList sub_arch_list = target_subarch_list(target.arch); + size_t subarch_count = target_subarch_count(sub_arch_list); + for (size_t sub_i = 0; sub_i < subarch_count; sub_i += 1) { + ZigLLVM_SubArchType sub = target_subarch_enum(sub_arch_list, sub_i); + fprintf(stderr, " %s%s\n", target_arch_name(target.arch), target_subarch_name(sub)); + } + return print_error_usage(arg0); + } else { + fprintf(stderr, "invalid target: %s\n", err_str(err)); + return print_error_usage(arg0); + } } } diff --git a/src/parser.cpp b/src/parser.cpp index 33f8836ef3..f35e54f6de 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -344,7 +344,7 @@ static AstNode *ast_parse_bin_op_expr( op->data.bin_op_expr.op1 = left; op->data.bin_op_expr.op2 = right; break; - case NodeTypeUnwrapErrorExpr: + case NodeTypeCatchExpr: op->data.unwrap_err_expr.op1 = left; op->data.unwrap_err_expr.op2 = right; break; @@ -2404,7 +2404,7 @@ static AstNode *ast_parse_bitwise_op(ParseContext *pc) { Token *catch_token = eat_token_if(pc, TokenIdKeywordCatch); if (catch_token != nullptr) { Token *payload = ast_parse_payload(pc); - AstNode *res = ast_create_node(pc, NodeTypeUnwrapErrorExpr, catch_token); + AstNode *res = ast_create_node(pc, NodeTypeCatchExpr, catch_token); if (payload != nullptr) res->data.unwrap_err_expr.symbol = token_symbol(pc, payload); @@ -2897,7 +2897,7 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont visit_field(&node->data.bin_op_expr.op1, visit, context); visit_field(&node->data.bin_op_expr.op2, visit, context); break; - case NodeTypeUnwrapErrorExpr: + case NodeTypeCatchExpr: visit_field(&node->data.unwrap_err_expr.op1, visit, context); visit_field(&node->data.unwrap_err_expr.symbol, visit, context); visit_field(&node->data.unwrap_err_expr.op2, visit, context); diff --git a/src/target.cpp b/src/target.cpp index 1d74304584..f646b33e22 100644 --- a/src/target.cpp +++ b/src/target.cpp @@ -486,17 +486,17 @@ void get_native_target(ZigTarget *target) { Error target_parse_archsub(ZigLLVM_ArchType *out_arch, ZigLLVM_SubArchType *out_sub, const char *archsub_ptr, size_t archsub_len) { + *out_arch = ZigLLVM_UnknownArch; + *out_sub = ZigLLVM_NoSubArch; for (size_t arch_i = 0; arch_i < array_length(arch_list); arch_i += 1) { ZigLLVM_ArchType arch = arch_list[arch_i]; SubArchList sub_arch_list = target_subarch_list(arch); size_t subarch_count = target_subarch_count(sub_arch_list); - if (subarch_count == 0) { - if (mem_eql_str(archsub_ptr, archsub_len, target_arch_name(arch))) { - *out_arch = arch; - *out_sub = ZigLLVM_NoSubArch; + if (mem_eql_str(archsub_ptr, archsub_len, target_arch_name(arch))) { + *out_arch = arch; + if (subarch_count == 0) { return ErrorNone; } - continue; } for (size_t sub_i = 0; sub_i < subarch_count; sub_i += 1) { ZigLLVM_SubArchType sub = target_subarch_enum(sub_arch_list, sub_i); diff --git a/std/event/fs.zig b/std/event/fs.zig index 0f42375270..c25426b98a 100644 --- a/std/event/fs.zig +++ b/std/event/fs.zig @@ -1290,10 +1290,9 @@ pub fn Watch(comptime V: type) type { error.FileDescriptorAlreadyPresentInSet => unreachable, error.OperationCausesCircularLoop => unreachable, error.FileDescriptorNotRegistered => unreachable, - error.SystemResources => error.SystemResources, - error.UserResourceLimitReached => error.UserResourceLimitReached, error.FileDescriptorIncompatibleWithEpoll => unreachable, error.Unexpected => unreachable, + else => |e| e, }; await (async channel.put(transformed_err) catch 
unreachable); }; diff --git a/std/event/lock.zig b/std/event/lock.zig index a759c1e0a5..d86902cc06 100644 --- a/std/event/lock.zig +++ b/std/event/lock.zig @@ -123,7 +123,8 @@ pub const Lock = struct { }; test "std.event.Lock" { - // https://github.com/ziglang/zig/issues/1908 + // TODO https://github.com/ziglang/zig/issues/2377 + if (true) return error.SkipZigTest; if (builtin.single_threaded) return error.SkipZigTest; const allocator = std.heap.direct_allocator; diff --git a/std/event/net.zig b/std/event/net.zig index 413bf1432c..46b724e32e 100644 --- a/std/event/net.zig +++ b/std/event/net.zig @@ -263,8 +263,8 @@ pub async fn connect(loop: *Loop, _address: *const std.net.Address) !File { } test "listen on a port, send bytes, receive bytes" { - // https://github.com/ziglang/zig/issues/1908 - if (builtin.single_threaded) return error.SkipZigTest; + // https://github.com/ziglang/zig/issues/2377 + if (true) return error.SkipZigTest; if (builtin.os != builtin.Os.linux) { // TODO build abstractions for other operating systems diff --git a/std/event/rwlock.zig b/std/event/rwlock.zig index 03e6f5ab92..7b97fa24c1 100644 --- a/std/event/rwlock.zig +++ b/std/event/rwlock.zig @@ -212,8 +212,8 @@ pub const RwLock = struct { }; test "std.event.RwLock" { - // https://github.com/ziglang/zig/issues/1908 - if (builtin.single_threaded or builtin.os != builtin.Os.linux) return error.SkipZigTest; + // https://github.com/ziglang/zig/issues/2377 + if (true) return error.SkipZigTest; const allocator = std.heap.direct_allocator; diff --git a/std/fmt.zig b/std/fmt.zig index 7bf1fa3d4d..2e9527f4ca 100644 --- a/std/fmt.zig +++ b/std/fmt.zig @@ -10,6 +10,42 @@ const lossyCast = std.math.lossyCast; pub const default_max_depth = 3; +pub const Alignment = enum { + Left, + Center, + Right, +}; + +pub const FormatOptions = struct { + precision: ?usize = null, + width: ?usize = null, + alignment: ?Alignment = null, + fill: u8 = ' ', +}; + +fn nextArg(comptime used_pos_args: *u32, comptime maybe_pos_arg: ?comptime_int, comptime next_arg: *comptime_int) comptime_int { + if (maybe_pos_arg) |pos_arg| { + used_pos_args.* |= 1 << pos_arg; + return pos_arg; + } else { + const arg = next_arg.*; + next_arg.* += 1; + return arg; + } +} + +fn peekIsAlign(comptime fmt: []const u8) bool { + // Should only be called during a state transition to the format segment. + std.debug.assert(fmt[0] == ':'); + + inline for (([_]u8{ 1, 2 })[0..]) |i| { + if (fmt.len > i and (fmt[i] == '<' or fmt[i] == '^' or fmt[i] == '>')) { + return true; + } + } + return false; +} + /// Renders fmt string with args, calling output with slices of bytes. /// If `output` returns an error, the error is returned from `format` and /// `output` is not called again. 
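+// Sketch of the placeholder syntax implemented by the rework below (informal summary, not
+// normative): {[position][specifier]:[fill][alignment][width].[precision]}
+// The cases mirror tests added further down in this file; testFmt is the helper those
+// tests use.
+test "format syntax sketch" {
+    try testFmt("u8: '0001'", "u8: '{:4}'", u8(1)); // width only
+    try testFmt("buf: Test \n", "buf: {s:5}\n", "Test"); // string width (previously "{s5}")
+    try testFmt("f64: 1.40971e-42", "f64: {e:.5}", f64(1.409706e-42)); // precision (previously "{e5}")
+    try testFmt("2 1 0", "{2} {1} {0}", usize(0), usize(1), usize(2)); // positional arguments
+    try testFmt("10.0", "{0d: >3.1}", f64(9.999)); // position + fill/align + width + precision
+}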
@@ -20,17 +56,30 @@ pub fn format( comptime fmt: []const u8, args: ..., ) Errors!void { + const ArgSetType = @IntType(false, 32); + if (args.len > ArgSetType.bit_count) { + @compileError("32 arguments max are supported per format call"); + } + const State = enum { Start, - OpenBrace, + Positional, CloseBrace, - FormatString, + Specifier, + FormatFillAndAlign, + FormatWidth, + FormatPrecision, Pointer, }; comptime var start_index = 0; comptime var state = State.Start; comptime var next_arg = 0; + comptime var maybe_pos_arg: ?comptime_int = null; + comptime var used_pos_args: ArgSetType = 0; + comptime var specifier_start = 0; + comptime var specifier_end = 0; + comptime var options = FormatOptions{}; inline for (fmt) |c, i| { switch (state) { @@ -39,58 +88,183 @@ pub fn format( if (start_index < i) { try output(context, fmt[start_index..i]); } - start_index = i; - state = State.OpenBrace; - }, + start_index = i; + specifier_start = i + 1; + specifier_end = i + 1; + maybe_pos_arg = null; + state = .Positional; + options = FormatOptions{}; + }, '}' => { if (start_index < i) { try output(context, fmt[start_index..i]); } - state = State.CloseBrace; + state = .CloseBrace; }, else => {}, }, - .OpenBrace => switch (c) { + .Positional => switch (c) { '{' => { - state = State.Start; + state = .Start; start_index = i; }, + '*' => { + state = .Pointer; + }, + ':' => { + state = if (comptime peekIsAlign(fmt[i..])) State.FormatFillAndAlign else State.FormatWidth; + specifier_end = i; + }, + '0'...'9' => { + if (maybe_pos_arg == null) { + maybe_pos_arg = 0; + } + + maybe_pos_arg.? *= 10; + maybe_pos_arg.? += c - '0'; + specifier_start = i + 1; + + if (maybe_pos_arg.? >= args.len) { + @compileError("Positional value refers to non-existent argument"); + } + }, '}' => { - try formatType(args[next_arg], fmt[0..0], context, Errors, output, default_max_depth); - next_arg += 1; - state = State.Start; + const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg); + + try formatType( + args[arg_to_print], + fmt[0..0], + options, + context, + Errors, + output, + default_max_depth, + ); + + state = .Start; start_index = i + 1; }, - '*' => state = State.Pointer, else => { - state = State.FormatString; + state = .Specifier; + specifier_start = i; }, }, .CloseBrace => switch (c) { '}' => { - state = State.Start; + state = .Start; start_index = i; }, else => @compileError("Single '}' encountered in format string"), }, - .FormatString => switch (c) { + .Specifier => switch (c) { + ':' => { + specifier_end = i; + state = if (comptime peekIsAlign(fmt[i..])) State.FormatFillAndAlign else State.FormatWidth; + }, '}' => { - const s = start_index + 1; - try formatType(args[next_arg], fmt[s..i], context, Errors, output, default_max_depth); - next_arg += 1; - state = State.Start; + const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg); + + try formatType( + args[arg_to_print], + fmt[specifier_start..i], + options, + context, + Errors, + output, + default_max_depth, + ); + state = .Start; start_index = i + 1; }, else => {}, }, + // Only entered if the format string contains a fill/align segment. 
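+            // (Illustration, matching peekIsAlign above: for "{: >3}" one of the two characters
+            // after the ':' is '>', so this state is entered and ' ' becomes the fill character;
+            // for "{:3}" neither of the next two characters is '<', '^' or '>', so parsing goes
+            // straight to FormatWidth.)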
+ .FormatFillAndAlign => switch (c) { + '<' => { + options.alignment = Alignment.Left; + state = .FormatWidth; + }, + '^' => { + options.alignment = Alignment.Center; + state = .FormatWidth; + }, + '>' => { + options.alignment = Alignment.Right; + state = .FormatWidth; + }, + else => { + options.fill = c; + }, + }, + .FormatWidth => switch (c) { + '0'...'9' => { + if (options.width == null) { + options.width = 0; + } + + options.width.? *= 10; + options.width.? += c - '0'; + }, + '.' => { + state = .FormatPrecision; + }, + '}' => { + const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg); + + try formatType( + args[arg_to_print], + fmt[specifier_start..specifier_end], + options, + context, + Errors, + output, + default_max_depth, + ); + state = .Start; + start_index = i + 1; + }, + else => { + @compileError("Unexpected character in width value: " ++ [_]u8{c}); + }, + }, + .FormatPrecision => switch (c) { + '0'...'9' => { + if (options.precision == null) { + options.precision = 0; + } + + options.precision.? *= 10; + options.precision.? += c - '0'; + }, + '}' => { + const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg); + + try formatType( + args[arg_to_print], + fmt[specifier_start..specifier_end], + options, + context, + Errors, + output, + default_max_depth, + ); + state = .Start; + start_index = i + 1; + }, + else => { + @compileError("Unexpected character in precision value: " ++ [_]u8{c}); + }, + }, .Pointer => switch (c) { '}' => { - try output(context, @typeName(@typeOf(args[next_arg]).Child)); + const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg); + + try output(context, @typeName(@typeOf(args[arg_to_print]).Child)); try output(context, "@"); - try formatInt(@ptrToInt(args[next_arg]), 16, false, 0, context, Errors, output); - next_arg += 1; - state = State.Start; + try formatInt(@ptrToInt(args[arg_to_print]), 16, false, 0, context, Errors, output); + + state = .Start; start_index = i + 1; }, else => @compileError("Unexpected format character after '*'"), @@ -98,7 +272,13 @@ pub fn format( } } comptime { - if (args.len != next_arg) { + // All arguments must have been printed but we allow mixing positional and fixed to achieve this. 
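+        // Illustration (mirrors the "positional" tests below): for "{1} {} {0} {}" with two
+        // arguments, the bare "{}" placeholders consume sequential indices 0 and 1 via next_arg,
+        // while "{1}" and "{0}" set bits 1 and 0 of used_pos_args directly. The loop below also
+        // marks every index below next_arg as used, so @popCount equals args.len exactly when
+        // no argument was left unused.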
+ var i: usize = 0; + inline while (i < next_arg) : (i += 1) { + used_pos_args |= 1 << i; + } + + if (@popCount(ArgSetType, used_pos_args) != args.len) { @compileError("Unused arguments"); } if (state != State.Start) { @@ -113,6 +293,7 @@ pub fn format( pub fn formatType( value: var, comptime fmt: []const u8, + comptime options: FormatOptions, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, @@ -121,7 +302,7 @@ pub fn formatType( const T = @typeOf(value); switch (@typeInfo(T)) { .ComptimeInt, .Int, .Float => { - return formatValue(value, fmt, context, Errors, output); + return formatValue(value, fmt, options, context, Errors, output); }, .Void => { return output(context, "void"); @@ -131,16 +312,16 @@ pub fn formatType( }, .Optional => { if (value) |payload| { - return formatType(payload, fmt, context, Errors, output, max_depth); + return formatType(payload, fmt, options, context, Errors, output, max_depth); } else { return output(context, "null"); } }, .ErrorUnion => { if (value) |payload| { - return formatType(payload, fmt, context, Errors, output, max_depth); + return formatType(payload, fmt, options, context, Errors, output, max_depth); } else |err| { - return formatType(err, fmt, context, Errors, output, max_depth); + return formatType(err, fmt, options, context, Errors, output, max_depth); } }, .ErrorSet => { @@ -152,16 +333,16 @@ pub fn formatType( }, .Enum => { if (comptime std.meta.trait.hasFn("format")(T)) { - return value.format(fmt, context, Errors, output); + return value.format(fmt, options, context, Errors, output); } try output(context, @typeName(T)); try output(context, "."); - return formatType(@tagName(value), "", context, Errors, output, max_depth); + return formatType(@tagName(value), "", options, context, Errors, output, max_depth); }, .Union => { if (comptime std.meta.trait.hasFn("format")(T)) { - return value.format(fmt, context, Errors, output); + return value.format(fmt, options, context, Errors, output); } try output(context, @typeName(T)); @@ -175,7 +356,7 @@ pub fn formatType( try output(context, " = "); inline for (info.fields) |u_field| { if (@enumToInt(UnionTagType(value)) == u_field.enum_field.?.value) { - try formatType(@field(value, u_field.name), "", context, Errors, output, max_depth - 1); + try formatType(@field(value, u_field.name), "", options, context, Errors, output, max_depth - 1); } } try output(context, " }"); @@ -185,7 +366,7 @@ pub fn formatType( }, .Struct => { if (comptime std.meta.trait.hasFn("format")(T)) { - return value.format(fmt, context, Errors, output); + return value.format(fmt, options, context, Errors, output); } try output(context, @typeName(T)); @@ -201,7 +382,7 @@ pub fn formatType( } try output(context, @memberName(T, field_i)); try output(context, " = "); - try formatType(@field(value, @memberName(T, field_i)), "", context, Errors, output, max_depth - 1); + try formatType(@field(value, @memberName(T, field_i)), "", options, context, Errors, output, max_depth - 1); } try output(context, " }"); }, @@ -209,12 +390,12 @@ pub fn formatType( .One => switch (@typeInfo(ptr_info.child)) { builtin.TypeId.Array => |info| { if (info.child == u8) { - return formatText(value, fmt, context, Errors, output); + return formatText(value, fmt, options, context, Errors, output); } return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)); }, builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => { - return formatType(value.*, fmt, context, Errors, output, 
max_depth); + return formatType(value.*, fmt, options, context, Errors, output, max_depth); }, else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)), }, @@ -222,17 +403,17 @@ pub fn formatType( if (ptr_info.child == u8) { if (fmt.len > 0 and fmt[0] == 's') { const len = mem.len(u8, value); - return formatText(value[0..len], fmt, context, Errors, output); + return formatText(value[0..len], fmt, options, context, Errors, output); } } return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)); }, .Slice => { if (fmt.len > 0 and ((fmt[0] == 'x') or (fmt[0] == 'X'))) { - return formatText(value, fmt, context, Errors, output); + return formatText(value, fmt, options, context, Errors, output); } if (ptr_info.child == u8) { - return formatText(value, fmt, context, Errors, output); + return formatText(value, fmt, options, context, Errors, output); } return format(context, Errors, output, "{}@{x}", @typeName(ptr_info.child), @ptrToInt(value.ptr)); }, @@ -242,7 +423,7 @@ pub fn formatType( }, .Array => |info| { if (info.child == u8) { - return formatText(value, fmt, context, Errors, output); + return formatText(value, fmt, options, context, Errors, output); } return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(&value)); }, @@ -256,28 +437,21 @@ pub fn formatType( fn formatValue( value: var, comptime fmt: []const u8, + comptime options: FormatOptions, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { - if (fmt.len > 0 and fmt[0] == 'B') { - comptime var width: ?usize = null; - if (fmt.len > 1) { - if (fmt[1] == 'i') { - if (fmt.len > 2) { - width = comptime (parseUnsigned(usize, fmt[2..], 10) catch unreachable); - } - return formatBytes(value, width, 1024, context, Errors, output); - } - width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable); - } - return formatBytes(value, width, 1000, context, Errors, output); + if (comptime std.mem.eql(u8, fmt, "B")) { + return formatBytes(value, options.width, 1000, context, Errors, output); + } else if (comptime std.mem.eql(u8, fmt, "Bi")) { + return formatBytes(value, options.width, 1024, context, Errors, output); } const T = @typeOf(value); switch (@typeId(T)) { - .Float => return formatFloatValue(value, fmt, context, Errors, output), - .Int, .ComptimeInt => return formatIntValue(value, fmt, context, Errors, output), + .Float => return formatFloatValue(value, fmt, options, context, Errors, output), + .Int, .ComptimeInt => return formatIntValue(value, fmt, options, context, Errors, output), else => comptime unreachable, } } @@ -285,13 +459,13 @@ fn formatValue( pub fn formatIntValue( value: var, comptime fmt: []const u8, + comptime options: FormatOptions, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { comptime var radix = 10; comptime var uppercase = false; - comptime var width = 0; const int_value = if (@typeOf(value) == comptime_int) blk: { const Int = math.IntFittingRange(value, value); @@ -299,83 +473,69 @@ pub fn formatIntValue( } else value; - if (fmt.len > 0) { - switch (fmt[0]) { - 'c' => { - if (@typeOf(int_value).bit_count <= 8) { - if (fmt.len > 1) - @compileError("Unknown format character: " ++ [_]u8{fmt[1]}); - return formatAsciiChar(u8(int_value), context, Errors, output); - } - }, - 'b' => { - radix = 2; - uppercase = false; - width = 0; - }, - 'd' => { - radix = 10; - uppercase = false; - width = 0; - }, - 'x' => { - 
radix = 16; - uppercase = false; - width = 0; - }, - 'X' => { - radix = 16; - uppercase = true; - width = 0; - }, - else => @compileError("Unknown format character: " ++ [_]u8{fmt[0]}), + if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "d")) { + radix = 10; + uppercase = false; + } else if (comptime std.mem.eql(u8, fmt, "c")) { + if (@typeOf(int_value).bit_count <= 8) { + return formatAsciiChar(u8(int_value), context, Errors, output); + } else { + @compileError("Cannot print integer that is larger than 8 bits as a ascii"); } - if (fmt.len > 1) width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable); + } else if (comptime std.mem.eql(u8, fmt, "b")) { + radix = 2; + uppercase = false; + } else if (comptime std.mem.eql(u8, fmt, "x")) { + radix = 16; + uppercase = false; + } else if (comptime std.mem.eql(u8, fmt, "X")) { + radix = 16; + uppercase = true; + } else { + @compileError("Unknown format string: '" ++ fmt ++ "'"); } - return formatInt(int_value, radix, uppercase, width, context, Errors, output); + + return formatInt(int_value, radix, uppercase, options.width orelse 0, context, Errors, output); } fn formatFloatValue( value: var, comptime fmt: []const u8, + comptime options: FormatOptions, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { - comptime var width: ?usize = null; - comptime var float_fmt = 'e'; - if (fmt.len > 0) { - float_fmt = fmt[0]; - if (fmt.len > 1) width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable); - } - - switch (float_fmt) { - 'e' => try formatFloatScientific(value, width, context, Errors, output), - '.' => try formatFloatDecimal(value, width, context, Errors, output), - else => @compileError("Unknown format character: " ++ [_]u8{float_fmt}), + if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "e")) { + return formatFloatScientific(value, options.precision, context, Errors, output); + } else if (comptime std.mem.eql(u8, fmt, "d")) { + return formatFloatDecimal(value, options.precision, context, Errors, output); + } else { + @compileError("Unknown format string: '" ++ fmt ++ "'"); } } pub fn formatText( bytes: []const u8, comptime fmt: []const u8, + comptime options: FormatOptions, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { - if (fmt.len > 0) { - if (fmt[0] == 's') { - comptime var width = 0; - if (fmt.len > 1) width = comptime (parseUnsigned(usize, fmt[1..], 10) catch unreachable); - return formatBuf(bytes, width, context, Errors, output); - } else if ((fmt[0] == 'x') or (fmt[0] == 'X')) { - for (bytes) |c| { - try formatInt(c, 16, fmt[0] == 'X', 2, context, Errors, output); - } - return; - } else @compileError("Unknown format character: " ++ [_]u8{fmt[0]}); + if (fmt.len == 0) { + return output(context, bytes); + } else if (comptime std.mem.eql(u8, fmt, "s")) { + if (options.width) |w| return formatBuf(bytes, w, context, Errors, output); + return formatBuf(bytes, 0, context, Errors, output); + } else if (comptime (std.mem.eql(u8, fmt, "x") or std.mem.eql(u8, fmt, "X"))) { + for (bytes) |c| { + try formatInt(c, 16, fmt[0] == 'X', 2, context, Errors, output); + } + return; + } else { + @compileError("Unknown format string: '" ++ fmt ++ "'"); } - return output(context, bytes); } pub fn formatAsciiChar( @@ -868,7 +1028,7 @@ test "parseUnsigned" { pub const parseFloat = @import("fmt/parse_float.zig").parseFloat; -test "fmt.parseFloat" { +test "parseFloat" { _ = @import("fmt/parse_float.zig"); } @@ -960,7 
+1120,7 @@ test "parse unsigned comptime" { } } -test "fmt.optional" { +test "optional" { { const value: ?i32 = 1234; try testFmt("optional: 1234\n", "optional: {}\n", value); @@ -971,7 +1131,7 @@ test "fmt.optional" { } } -test "fmt.error" { +test "error" { { const value: anyerror!i32 = 1234; try testFmt("error union: 1234\n", "error union: {}\n", value); @@ -982,14 +1142,14 @@ test "fmt.error" { } } -test "fmt.int.small" { +test "int.small" { { const value: u3 = 0b101; try testFmt("u3: 5\n", "u3: {}\n", value); } } -test "fmt.int.specifier" { +test "int.specifier" { { const value: u8 = 'a'; try testFmt("u8: a\n", "u8: {c}\n", value); @@ -1000,27 +1160,31 @@ test "fmt.int.specifier" { } } -test "fmt.buffer" { +test "int.padded" { + try testFmt("u8: '0001'", "u8: '{:4}'", u8(1)); +} + +test "buffer" { { var buf1: [32]u8 = undefined; var context = BufPrintContext{ .remaining = buf1[0..] }; - try formatType(1234, "", &context, error{BufferTooSmall}, bufPrintWrite, default_max_depth); + try formatType(1234, "", FormatOptions{}, &context, error{BufferTooSmall}, bufPrintWrite, default_max_depth); var res = buf1[0 .. buf1.len - context.remaining.len]; testing.expect(mem.eql(u8, res, "1234")); context = BufPrintContext{ .remaining = buf1[0..] }; - try formatType('a', "c", &context, error{BufferTooSmall}, bufPrintWrite, default_max_depth); + try formatType('a', "c", FormatOptions{}, &context, error{BufferTooSmall}, bufPrintWrite, default_max_depth); res = buf1[0 .. buf1.len - context.remaining.len]; testing.expect(mem.eql(u8, res, "a")); context = BufPrintContext{ .remaining = buf1[0..] }; - try formatType(0b1100, "b", &context, error{BufferTooSmall}, bufPrintWrite, default_max_depth); + try formatType(0b1100, "b", FormatOptions{}, &context, error{BufferTooSmall}, bufPrintWrite, default_max_depth); res = buf1[0 .. 
buf1.len - context.remaining.len]; testing.expect(mem.eql(u8, res, "1100")); } } -test "fmt.array" { +test "array" { { const value: [3]u8 = "abc"; try testFmt("array: abc\n", "array: {}\n", value); @@ -1035,7 +1199,7 @@ test "fmt.array" { } } -test "fmt.slice" { +test "slice" { { const value: []const u8 = "abc"; try testFmt("slice: abc\n", "slice: {}\n", value); @@ -1045,11 +1209,11 @@ test "fmt.slice" { try testFmt("slice: []const u8@deadbeef\n", "slice: {}\n", value); } - try testFmt("buf: Test \n", "buf: {s5}\n", "Test"); + try testFmt("buf: Test \n", "buf: {s:5}\n", "Test"); try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", "Test"); } -test "fmt.pointer" { +test "pointer" { { const value = @intToPtr(*i32, 0xdeadbeef); try testFmt("pointer: i32@deadbeef\n", "pointer: {}\n", value); @@ -1065,17 +1229,17 @@ test "fmt.pointer" { } } -test "fmt.cstr" { +test "cstr" { try testFmt("cstr: Test C\n", "cstr: {s}\n", c"Test C"); - try testFmt("cstr: Test C \n", "cstr: {s10}\n", c"Test C"); + try testFmt("cstr: Test C \n", "cstr: {s:10}\n", c"Test C"); } -test "fmt.filesize" { +test "filesize" { try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024)); - try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024)); + try testFmt("file size: 66.06MB\n", "file size: {B:2}\n", usize(63 * 1024 * 1024)); } -test "fmt.struct" { +test "struct" { { const Struct = struct { field: u8, @@ -1094,7 +1258,7 @@ test "fmt.struct" { } } -test "fmt.enum" { +test "enum" { const Enum = enum { One, Two, @@ -1104,229 +1268,71 @@ test "fmt.enum" { try testFmt("enum: Enum.Two\n", "enum: {}\n", &value); } -test "fmt.float.scientific" { - { - var buf1: [32]u8 = undefined; - const value: f32 = 1.34; - const result = try bufPrint(buf1[0..], "f32: {e}\n", value); - testing.expect(mem.eql(u8, result, "f32: 1.34000003e+00\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f32 = 12.34; - const result = try bufPrint(buf1[0..], "f32: {e}\n", value); - testing.expect(mem.eql(u8, result, "f32: 1.23400001e+01\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = -12.34e10; - const result = try bufPrint(buf1[0..], "f64: {e}\n", value); - testing.expect(mem.eql(u8, result, "f64: -1.234e+11\n")); - } - { - // This fails on release due to a minor rounding difference. - // --release-fast outputs 9.999960000000001e-40 vs. the expected. 
- // TODO fix this, it should be the same in Debug and ReleaseFast - if (builtin.mode == builtin.Mode.Debug) { - var buf1: [32]u8 = undefined; - const value: f64 = 9.999960e-40; - const result = try bufPrint(buf1[0..], "f64: {e}\n", value); - testing.expect(mem.eql(u8, result, "f64: 9.99996e-40\n")); - } - } +test "float.scientific" { + try testFmt("f32: 1.34000003e+00", "f32: {e}", f32(1.34)); + try testFmt("f32: 1.23400001e+01", "f32: {e}", f32(12.34)); + try testFmt("f64: -1.234e+11", "f64: {e}", f64(-12.34e10)); + try testFmt("f64: 9.99996e-40", "f64: {e}", f64(9.999960e-40)); } -test "fmt.float.scientific.precision" { - { - var buf1: [32]u8 = undefined; - const value: f64 = 1.409706e-42; - const result = try bufPrint(buf1[0..], "f64: {e5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 1.40971e-42\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = @bitCast(f32, u32(814313563)); - const result = try bufPrint(buf1[0..], "f64: {e5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 1.00000e-09\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = @bitCast(f32, u32(1006632960)); - const result = try bufPrint(buf1[0..], "f64: {e5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 7.81250e-03\n")); - } - { - // libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05. - // In fact, libc doesn't round a lot of 5 cases up when one past the precision point. - var buf1: [32]u8 = undefined; - const value: f64 = @bitCast(f32, u32(1203982400)); - const result = try bufPrint(buf1[0..], "f64: {e5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 1.00001e+05\n")); - } +test "float.scientific.precision" { + try testFmt("f64: 1.40971e-42", "f64: {e:.5}", f64(1.409706e-42)); + try testFmt("f64: 1.00000e-09", "f64: {e:.5}", f64(@bitCast(f32, u32(814313563)))); + try testFmt("f64: 7.81250e-03", "f64: {e:.5}", f64(@bitCast(f32, u32(1006632960)))); + // libc rounds 1.000005e+05 to 1.00000e+05 but zig does 1.00001e+05. + // In fact, libc doesn't round a lot of 5 cases up when one past the precision point. 
+ try testFmt("f64: 1.00001e+05", "f64: {e:.5}", f64(@bitCast(f32, u32(1203982400)))); } -test "fmt.float.special" { - { - var buf1: [32]u8 = undefined; - const result = try bufPrint(buf1[0..], "f64: {}\n", math.nan_f64); - testing.expect(mem.eql(u8, result, "f64: nan\n")); - } +test "float.special" { + try testFmt("f64: nan", "f64: {}", math.nan_f64); + // negative nan is not defined by IEE 754, + // and ARM thus normalizes it to positive nan if (builtin.arch != builtin.Arch.arm) { - // negative nan is not defined by IEE 754, - // and ARM thus normalizes it to positive nan - var buf1: [32]u8 = undefined; - const result = try bufPrint(buf1[0..], "f64: {}\n", -math.nan_f64); - testing.expect(mem.eql(u8, result, "f64: -nan\n")); - } - { - var buf1: [32]u8 = undefined; - const result = try bufPrint(buf1[0..], "f64: {}\n", math.inf_f64); - testing.expect(mem.eql(u8, result, "f64: inf\n")); - } - { - var buf1: [32]u8 = undefined; - const result = try bufPrint(buf1[0..], "f64: {}\n", -math.inf_f64); - testing.expect(mem.eql(u8, result, "f64: -inf\n")); + try testFmt("f64: -nan", "f64: {}", -math.nan_f64); } + try testFmt("f64: inf", "f64: {}", math.inf_f64); + try testFmt("f64: -inf", "f64: {}", -math.inf_f64); } -test "fmt.float.decimal" { - { - var buf1: [64]u8 = undefined; - const value: f64 = 1.52314e+29; - const result = try bufPrint(buf1[0..], "f64: {.}\n", value); - testing.expect(mem.eql(u8, result, "f64: 152314000000000000000000000000\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f32 = 1.1234; - const result = try bufPrint(buf1[0..], "f32: {.1}\n", value); - testing.expect(mem.eql(u8, result, "f32: 1.1\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f32 = 1234.567; - const result = try bufPrint(buf1[0..], "f32: {.2}\n", value); - testing.expect(mem.eql(u8, result, "f32: 1234.57\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f32 = -11.1234; - const result = try bufPrint(buf1[0..], "f32: {.4}\n", value); - // -11.1234 is converted to f64 -11.12339... internally (errol3() function takes f64). - // -11.12339... 
is rounded back up to -11.1234 - testing.expect(mem.eql(u8, result, "f32: -11.1234\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f32 = 91.12345; - const result = try bufPrint(buf1[0..], "f32: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f32: 91.12345\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 91.12345678901235; - const result = try bufPrint(buf1[0..], "f64: {.10}\n", value); - testing.expect(mem.eql(u8, result, "f64: 91.1234567890\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 0.0; - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.00000\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 5.700; - const result = try bufPrint(buf1[0..], "f64: {.0}\n", value); - testing.expect(mem.eql(u8, result, "f64: 6\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 9.999; - const result = try bufPrint(buf1[0..], "f64: {.1}\n", value); - testing.expect(mem.eql(u8, result, "f64: 10.0\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 1.0; - const result = try bufPrint(buf1[0..], "f64: {.3}\n", value); - testing.expect(mem.eql(u8, result, "f64: 1.000\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 0.0003; - const result = try bufPrint(buf1[0..], "f64: {.8}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.00030000\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 1.40130e-45; - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.00000\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = 9.999960e-40; - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.00000\n")); - } +test "float.decimal" { + try testFmt("f64: 152314000000000000000000000000", "f64: {d}", f64(1.52314e+29)); + try testFmt("f32: 1.1", "f32: {d:.1}", f32(1.1234)); + try testFmt("f32: 1234.57", "f32: {d:.2}", f32(1234.567)); + // -11.1234 is converted to f64 -11.12339... internally (errol3() function takes f64). + // -11.12339... 
is rounded back up to -11.1234 + try testFmt("f32: -11.1234", "f32: {d:.4}", f32(-11.1234)); + try testFmt("f32: 91.12345", "f32: {d:.5}", f32(91.12345)); + try testFmt("f64: 91.1234567890", "f64: {d:.10}", f64(91.12345678901235)); + try testFmt("f64: 0.00000", "f64: {d:.5}", f64(0.0)); + try testFmt("f64: 6", "f64: {d:.0}", f64(5.700)); + try testFmt("f64: 10.0", "f64: {d:.1}", f64(9.999)); + try testFmt("f64: 1.000", "f64: {d:.3}", f64(1.0)); + try testFmt("f64: 0.00030000", "f64: {d:.8}", f64(0.0003)); + try testFmt("f64: 0.00000", "f64: {d:.5}", f64(1.40130e-45)); + try testFmt("f64: 0.00000", "f64: {d:.5}", f64(9.999960e-40)); } -test "fmt.float.libc.sanity" { - { - var buf1: [32]u8 = undefined; - const value: f64 = f64(@bitCast(f32, u32(916964781))); - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.00001\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = f64(@bitCast(f32, u32(925353389))); - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.00001\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = f64(@bitCast(f32, u32(1036831278))); - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.10000\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = f64(@bitCast(f32, u32(1065353133))); - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 1.00000\n")); - } - { - var buf1: [32]u8 = undefined; - const value: f64 = f64(@bitCast(f32, u32(1092616192))); - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 10.00000\n")); - } +test "float.libc.sanity" { + try testFmt("f64: 0.00001", "f64: {d:.5}", f64(@bitCast(f32, u32(916964781)))); + try testFmt("f64: 0.00001", "f64: {d:.5}", f64(@bitCast(f32, u32(925353389)))); + try testFmt("f64: 0.10000", "f64: {d:.5}", f64(@bitCast(f32, u32(1036831278)))); + try testFmt("f64: 1.00000", "f64: {d:.5}", f64(@bitCast(f32, u32(1065353133)))); + try testFmt("f64: 10.00000", "f64: {d:.5}", f64(@bitCast(f32, u32(1092616192)))); + // libc differences - { - var buf1: [32]u8 = undefined; - // This is 0.015625 exactly according to gdb. We thus round down, - // however glibc rounds up for some reason. This occurs for all - // floats of the form x.yyyy25 on a precision point. - const value: f64 = f64(@bitCast(f32, u32(1015021568))); - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 0.01563\n")); - } - // std-windows-x86_64-Debug-bare test case fails - { - // errol3 rounds to ... 630 but libc rounds to ...632. Grisu3 - // also rounds to 630 so I'm inclined to believe libc is not - // optimal here. - var buf1: [32]u8 = undefined; - const value: f64 = f64(@bitCast(f32, u32(1518338049))); - const result = try bufPrint(buf1[0..], "f64: {.5}\n", value); - testing.expect(mem.eql(u8, result, "f64: 18014400656965630.00000\n")); - } + // + // This is 0.015625 exactly according to gdb. We thus round down, + // however glibc rounds up for some reason. This occurs for all + // floats of the form x.yyyy25 on a precision point. + try testFmt("f64: 0.01563", "f64: {d:.5}", f64(@bitCast(f32, u32(1015021568)))); + // errol3 rounds to ... 630 but libc rounds to ...632. Grisu3 + // also rounds to 630 so I'm inclined to believe libc is not + // optimal here. 
+ try testFmt("f64: 18014400656965630.00000", "f64: {d:.5}", f64(@bitCast(f32, u32(1518338049)))); } -test "fmt.custom" { +test "custom" { const Vec2 = struct { const SelfType = @This(); x: f32, @@ -1335,20 +1341,17 @@ test "fmt.custom" { pub fn format( self: SelfType, comptime fmt: []const u8, + comptime options: FormatOptions, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { - switch (fmt.len) { - 0 => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y), - 1 => switch (fmt[0]) { - //point format - 'p' => return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y), - //dimension format - 'd' => return std.fmt.format(context, Errors, output, "{.3}x{.3}", self.x, self.y), - else => unreachable, - }, - else => unreachable, + if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) { + return std.fmt.format(context, Errors, output, "({d:.3},{d:.3})", self.x, self.y); + } else if (comptime std.mem.eql(u8, fmt, "d")) { + return std.fmt.format(context, Errors, output, "{d:.3}x{d:.3}", self.x, self.y); + } else { + @compileError("Unknown format character: '" ++ fmt ++ "'"); } } }; @@ -1366,7 +1369,7 @@ test "fmt.custom" { try testFmt("dim: 10.200x2.220\n", "dim: {d}\n", value); } -test "fmt.struct" { +test "struct" { const S = struct { a: u32, b: anyerror, @@ -1380,7 +1383,7 @@ test "fmt.struct" { try testFmt("S{ .a = 456, .b = error.Unused }", "{}", inst); } -test "fmt.union" { +test "union" { const TU = union(enum) { float: f32, int: u32, @@ -1410,7 +1413,7 @@ test "fmt.union" { testing.expect(mem.eql(u8, uu_result[0..3], "EU@")); } -test "fmt.enum" { +test "enum" { const E = enum { One, Two, @@ -1422,7 +1425,7 @@ test "fmt.enum" { try testFmt("E.Two", "{}", inst); } -test "fmt.struct.self-referential" { +test "struct.self-referential" { const S = struct { const SelfType = @This(); a: ?*SelfType, @@ -1436,7 +1439,7 @@ test "fmt.struct.self-referential" { try testFmt("S{ .a = S{ .a = S{ .a = S{ ... 
} } } }", "{}", inst); } -test "fmt.bytes.hex" { +test "bytes.hex" { const some_bytes = "\xCA\xFE\xBA\xBE"; try testFmt("lowercase: cafebabe\n", "lowercase: {x}\n", some_bytes); try testFmt("uppercase: CAFEBABE\n", "uppercase: {X}\n", some_bytes); @@ -1478,7 +1481,7 @@ pub fn trim(buf: []const u8) []const u8 { return buf[start..end]; } -test "fmt.trim" { +test "trim" { testing.expect(mem.eql(u8, "abc", trim("\n abc \t"))); testing.expect(mem.eql(u8, "", trim(" "))); testing.expect(mem.eql(u8, "", trim(""))); @@ -1505,22 +1508,22 @@ pub fn hexToBytes(out: []u8, input: []const u8) !void { } } -test "fmt.hexToBytes" { +test "hexToBytes" { const test_hex_str = "909A312BB12ED1F819B3521AC4C1E896F2160507FFC1C8381E3B07BB16BD1706"; var pb: [32]u8 = undefined; try hexToBytes(pb[0..], test_hex_str); try testFmt(test_hex_str, "{X}", pb); } -test "fmt.formatIntValue with comptime_int" { +test "formatIntValue with comptime_int" { const value: comptime_int = 123456789123456789; var buf = try std.Buffer.init(std.debug.global_allocator, ""); - try formatIntValue(value, "", &buf, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append); + try formatIntValue(value, "", FormatOptions{}, &buf, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append); assert(mem.eql(u8, buf.toSlice(), "123456789123456789")); } -test "fmt.formatType max_depth" { +test "formatType max_depth" { const Vec2 = struct { const SelfType = @This(); x: f32, @@ -1529,11 +1532,16 @@ test "fmt.formatType max_depth" { pub fn format( self: SelfType, comptime fmt: []const u8, + comptime options: FormatOptions, context: var, comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { - return std.fmt.format(context, Errors, output, "({.3},{.3})", self.x, self.y); + if (fmt.len == 0) { + return std.fmt.format(context, Errors, output, "({d:.3},{d:.3})", self.x, self.y); + } else { + @compileError("Unknown format string: '" ++ fmt ++ "'"); + } } }; const E = enum { @@ -1565,18 +1573,34 @@ test "fmt.formatType max_depth" { inst.tu.ptr = &inst.tu; var buf0 = try std.Buffer.init(std.debug.global_allocator, ""); - try formatType(inst, "", &buf0, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 0); + try formatType(inst, "", FormatOptions{}, &buf0, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 0); assert(mem.eql(u8, buf0.toSlice(), "S{ ... }")); var buf1 = try std.Buffer.init(std.debug.global_allocator, ""); - try formatType(inst, "", &buf1, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 1); + try formatType(inst, "", FormatOptions{}, &buf1, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 1); assert(mem.eql(u8, buf1.toSlice(), "S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }")); var buf2 = try std.Buffer.init(std.debug.global_allocator, ""); - try formatType(inst, "", &buf2, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 2); + try formatType(inst, "", FormatOptions{}, &buf2, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 2); assert(mem.eql(u8, buf2.toSlice(), "S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... 
} }, .e = E.Two, .vec = (10.200,2.220) }")); var buf3 = try std.Buffer.init(std.debug.global_allocator, ""); - try formatType(inst, "", &buf3, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 3); + try formatType(inst, "", FormatOptions{}, &buf3, @typeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 3); assert(mem.eql(u8, buf3.toSlice(), "S{ .a = S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... } }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ .ptr = TU{ ... } } }, .e = E.Two, .vec = (10.200,2.220) }")); } + +test "positional" { + try testFmt("2 1 0", "{2} {1} {0}", usize(0), usize(1), usize(2)); + try testFmt("2 1 0", "{2} {1} {}", usize(0), usize(1), usize(2)); + try testFmt("0 0", "{0} {0}", usize(0)); + try testFmt("0 1", "{} {1}", usize(0), usize(1)); + try testFmt("1 0 0 1", "{1} {} {0} {}", usize(0), usize(1)); +} + +test "positional with specifier" { + try testFmt("10.0", "{0d:.1}", f64(9.999)); +} + +test "positional/alignment/width/precision" { + try testFmt("10.0", "{0d: >3.1}", f64(9.999)); +} diff --git a/std/heap.zig b/std/heap.zig index 648ba46252..d14f34dd6e 100644 --- a/std/heap.zig +++ b/std/heap.zig @@ -8,6 +8,8 @@ const builtin = @import("builtin"); const c = std.c; const maxInt = std.math.maxInt; +pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator; + const Allocator = mem.Allocator; pub const c_allocator = &c_allocator_state; @@ -360,9 +362,9 @@ pub const ArenaAllocator = struct { var it = self.buffer_list.first; while (it) |node| { // this has to occur before the free because the free frees node - it = node.next; - + const next_it = node.next; self.child_allocator.free(node.data); + it = next_it; } } diff --git a/std/heap/logging_allocator.zig b/std/heap/logging_allocator.zig new file mode 100644 index 0000000000..c1f09a1aad --- /dev/null +++ b/std/heap/logging_allocator.zig @@ -0,0 +1,53 @@ +const std = @import("../std.zig"); +const Allocator = std.mem.Allocator; + +const AnyErrorOutStream = std.io.OutStream(anyerror); + +/// This allocator is used in front of another allocator and logs to the provided stream +/// on every call to the allocator. Stream errors are ignored. +/// If https://github.com/ziglang/zig/issues/2586 is implemented, this API can be improved. 
+pub const LoggingAllocator = struct { + allocator: Allocator, + parent_allocator: *Allocator, + out_stream: *AnyErrorOutStream, + + const Self = @This(); + + pub fn init(parent_allocator: *Allocator, out_stream: *AnyErrorOutStream) Self { + return Self{ + .allocator = Allocator{ + .reallocFn = realloc, + .shrinkFn = shrink, + }, + .parent_allocator = parent_allocator, + .out_stream = out_stream, + }; + } + + fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { + const self = @fieldParentPtr(Self, "allocator", allocator); + if (old_mem.len == 0) { + self.out_stream.print("allocation of {} ", new_size) catch {}; + } else { + self.out_stream.print("resize from {} to {} ", old_mem.len, new_size) catch {}; + } + const result = self.parent_allocator.reallocFn(self.parent_allocator, old_mem, old_align, new_size, new_align); + if (result) |buff| { + self.out_stream.print("success!\n") catch {}; + } else |err| { + self.out_stream.print("failure!\n") catch {}; + } + return result; + } + + fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { + const self = @fieldParentPtr(Self, "allocator", allocator); + const result = self.parent_allocator.shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align); + if (new_size == 0) { + self.out_stream.print("free of {} bytes success!\n", old_mem.len) catch {}; + } else { + self.out_stream.print("shrink from {} bytes to {} bytes success!\n", old_mem.len, new_size) catch {}; + } + return result; + } +}; diff --git a/std/http.zig b/std/http.zig new file mode 100644 index 0000000000..acb005a75f --- /dev/null +++ b/std/http.zig @@ -0,0 +1,5 @@ +test "std.http" { + _ = @import("http/headers.zig"); +} + +pub const Headers = @import("http/headers.zig").Headers; diff --git a/std/http/headers.zig b/std/http/headers.zig new file mode 100644 index 0000000000..69ed494f3a --- /dev/null +++ b/std/http/headers.zig @@ -0,0 +1,614 @@ +// HTTP Header data structure/type +// Based on lua-http's http.header module +// +// Design criteria: +// - the same header field is allowed more than once +// - must be able to fetch separate occurrences (important for some headers e.g. Set-Cookie) +// - optionally available as comma separated list +// - http2 adds flag to headers that they should never be indexed +// - header order should be recoverable +// +// Headers are implemented as an array of entries. +// An index of field name => array indices is kept. 
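+// Hedged usage sketch (mirrors the tests at the bottom of this file; `allocator` is any
+// *std.mem.Allocator):
+//
+//     var h = Headers.init(allocator);
+//     defer h.deinit();
+//     try h.append("set-cookie", "x=1", null); // never_index defaults to true for set-cookie
+//     try h.append("set-cookie", "y=2", null);
+//     const joined = (try h.getCommaSeparated(allocator, "set-cookie")).?; // "x=1,y=2"
+//     defer allocator.free(joined);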
+ +const std = @import("../std.zig"); +const debug = std.debug; +const assert = debug.assert; +const testing = std.testing; +const mem = std.mem; +const Allocator = mem.Allocator; + +fn never_index_default(name: []const u8) bool { + if (mem.eql(u8, "authorization", name)) return true; + if (mem.eql(u8, "proxy-authorization", name)) return true; + if (mem.eql(u8, "cookie", name)) return true; + if (mem.eql(u8, "set-cookie", name)) return true; + return false; +} + +const HeaderEntry = struct { + allocator: *Allocator, + pub name: []const u8, + pub value: []u8, + pub never_index: bool, + + const Self = @This(); + + fn init(allocator: *Allocator, name: []const u8, value: []const u8, never_index: ?bool) !Self { + return Self{ + .allocator = allocator, + .name = name, // takes reference + .value = try mem.dupe(allocator, u8, value), + .never_index = never_index orelse never_index_default(name), + }; + } + + fn deinit(self: Self) void { + self.allocator.free(self.value); + } + + pub fn modify(self: *Self, value: []const u8, never_index: ?bool) !void { + const old_len = self.value.len; + if (value.len > old_len) { + self.value = try self.allocator.realloc(self.value, value.len); + } else if (value.len < old_len) { + self.value = self.allocator.shrink(self.value, value.len); + } + mem.copy(u8, self.value, value); + self.never_index = never_index orelse never_index_default(self.name); + } + + fn compare(a: HeaderEntry, b: HeaderEntry) bool { + if (a.name.ptr != b.name.ptr and a.name.len != b.name.len) { + // Things beginning with a colon *must* be before others + const a_is_colon = a.name[0] == ':'; + const b_is_colon = b.name[0] == ':'; + if (a_is_colon and !b_is_colon) { + return true; + } else if (!a_is_colon and b_is_colon) { + return false; + } + + // Sort lexicographically on header name + return mem.compare(u8, a.name, b.name) == mem.Compare.LessThan; + } + + // Sort lexicographically on header value + if (!mem.eql(u8, a.value, b.value)) { + return mem.compare(u8, a.value, b.value) == mem.Compare.LessThan; + } + + // Doesn't matter here; need to pick something for sort consistency + return a.never_index; + } +}; + +var test_memory: [32 * 1024]u8 = undefined; +var test_fba_state = std.heap.FixedBufferAllocator.init(&test_memory); +const test_allocator = &test_fba_state.allocator; + +test "HeaderEntry" { + var e = try HeaderEntry.init(test_allocator, "foo", "bar", null); + defer e.deinit(); + testing.expectEqualSlices(u8, "foo", e.name); + testing.expectEqualSlices(u8, "bar", e.value); + testing.expectEqual(false, e.never_index); + + try e.modify("longer value", null); + testing.expectEqualSlices(u8, "longer value", e.value); + + // shorter value + try e.modify("x", null); + testing.expectEqualSlices(u8, "x", e.value); +} + +const HeaderList = std.ArrayList(HeaderEntry); +const HeaderIndexList = std.ArrayList(usize); +const HeaderIndex = std.AutoHashMap([]const u8, HeaderIndexList); + +pub const Headers = struct { + // the owned header field name is stored in the index as part of the key + allocator: *Allocator, + data: HeaderList, + index: HeaderIndex, + + const Self = @This(); + + pub fn init(allocator: *Allocator) Self { + return Self{ + .allocator = allocator, + .data = HeaderList.init(allocator), + .index = HeaderIndex.init(allocator), + }; + } + + pub fn deinit(self: Self) void { + { + var it = self.index.iterator(); + while (it.next()) |kv| { + var dex = &kv.value; + dex.deinit(); + self.allocator.free(kv.key); + } + self.index.deinit(); + } + { + var it = self.data.iterator(); + while 
(it.next()) |entry| { + entry.deinit(); + } + self.data.deinit(); + } + } + + pub fn clone(self: Self, allocator: *Allocator) !Self { + var other = Headers.init(allocator); + errdefer other.deinit(); + try other.data.ensureCapacity(self.data.count()); + try other.index.initCapacity(self.index.entries.len); + var it = self.data.iterator(); + while (it.next()) |entry| { + try other.append(entry.name, entry.value, entry.never_index); + } + return other; + } + + pub fn count(self: Self) usize { + return self.data.count(); + } + + pub const Iterator = HeaderList.Iterator; + + pub fn iterator(self: Self) Iterator { + return self.data.iterator(); + } + + pub fn append(self: *Self, name: []const u8, value: []const u8, never_index: ?bool) !void { + const n = self.data.count() + 1; + try self.data.ensureCapacity(n); + var entry: HeaderEntry = undefined; + if (self.index.get(name)) |kv| { + entry = try HeaderEntry.init(self.allocator, kv.key, value, never_index); + errdefer entry.deinit(); + var dex = &kv.value; + try dex.append(n - 1); + } else { + const name_dup = try mem.dupe(self.allocator, u8, name); + errdefer self.allocator.free(name_dup); + entry = try HeaderEntry.init(self.allocator, name_dup, value, never_index); + errdefer entry.deinit(); + var dex = HeaderIndexList.init(self.allocator); + try dex.append(n - 1); + errdefer dex.deinit(); + _ = try self.index.put(name, dex); + } + self.data.appendAssumeCapacity(entry); + } + + /// If the header already exists, replace the current value, otherwise append it to the list of headers. + /// If the header has multiple entries then returns an error. + pub fn upsert(self: *Self, name: []const u8, value: []const u8, never_index: ?bool) !void { + if (self.index.get(name)) |kv| { + const dex = kv.value; + if (dex.count() != 1) + return error.CannotUpsertMultiValuedField; + var e = &self.data.at(dex.at(0)); + try e.modify(value, never_index); + } else { + try self.append(name, value, never_index); + } + } + + /// Returns boolean indicating if the field is present. + pub fn contains(self: Self, name: []const u8) bool { + return self.index.contains(name); + } + + /// Returns boolean indicating if something was deleted. + pub fn delete(self: *Self, name: []const u8) bool { + if (self.index.remove(name)) |kv| { + var dex = &kv.value; + // iterate backwards + var i = dex.count(); + while (i > 0) { + i -= 1; + const data_index = dex.at(i); + const removed = self.data.orderedRemove(data_index); + assert(mem.eql(u8, removed.name, name)); + removed.deinit(); + } + dex.deinit(); + self.allocator.free(kv.key); + self.rebuild_index(); + return true; + } else { + return false; + } + } + + /// Removes the element at the specified index. + /// Moves items down to fill the empty space. + pub fn orderedRemove(self: *Self, i: usize) void { + const removed = self.data.orderedRemove(i); + const kv = self.index.get(removed.name).?; + var dex = &kv.value; + if (dex.count() == 1) { + // was last item; delete the index + _ = self.index.remove(kv.key); + dex.deinit(); + removed.deinit(); + self.allocator.free(kv.key); + } else { + dex.shrink(dex.count() - 1); + removed.deinit(); + } + // if it was the last item; no need to rebuild index + if (i != self.data.count()) { + self.rebuild_index(); + } + } + + /// Removes the element at the specified index. + /// The empty slot is filled from the end of the list. 
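+    /// For example (as in the tests below): starting from ("foo", "baz", "cookie"),
+    /// orderedRemove(0) leaves ("baz", "cookie") while swapRemove(0) leaves ("cookie", "baz").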
+ pub fn swapRemove(self: *Self, i: usize) void { + const removed = self.data.swapRemove(i); + const kv = self.index.get(removed.name).?; + var dex = &kv.value; + if (dex.count() == 1) { + // was last item; delete the index + _ = self.index.remove(kv.key); + dex.deinit(); + removed.deinit(); + self.allocator.free(kv.key); + } else { + dex.shrink(dex.count() - 1); + removed.deinit(); + } + // if it was the last item; no need to rebuild index + if (i != self.data.count()) { + self.rebuild_index(); + } + } + + /// Access the header at the specified index. + pub fn at(self: Self, i: usize) HeaderEntry { + return self.data.at(i); + } + + /// Returns a list of indices containing headers with the given name. + /// The returned list should not be modified by the caller. + pub fn getIndices(self: Self, name: []const u8) ?HeaderIndexList { + if (self.index.get(name)) |kv| { + return kv.value; + } else { + return null; + } + } + + /// Returns a slice containing each header with the given name. + pub fn get(self: Self, allocator: *Allocator, name: []const u8) !?[]const HeaderEntry { + const dex = self.getIndices(name) orelse return null; + + const buf = try allocator.alloc(HeaderEntry, dex.count()); + var it = dex.iterator(); + var n: usize = 0; + while (it.next()) |idx| { + buf[n] = self.data.at(idx); + n += 1; + } + return buf; + } + + /// Returns all headers with the given name as a comma seperated string. + /// + /// Useful for HTTP headers that follow RFC-7230 section 3.2.2: + /// A recipient MAY combine multiple header fields with the same field + /// name into one "field-name: field-value" pair, without changing the + /// semantics of the message, by appending each subsequent field value to + /// the combined field value in order, separated by a comma. The order + /// in which header fields with the same field name are received is + /// therefore significant to the interpretation of the combined field + /// value + pub fn getCommaSeparated(self: Self, allocator: *Allocator, name: []const u8) !?[]u8 { + const dex = self.getIndices(name) orelse return null; + + // adapted from mem.join + const total_len = blk: { + var sum: usize = dex.count() - 1; // space for separator(s) + var it = dex.iterator(); + while (it.next()) |idx| + sum += self.data.at(idx).value.len; + break :blk sum; + }; + + const buf = try allocator.alloc(u8, total_len); + errdefer allocator.free(buf); + + const first_value = self.data.at(dex.at(0)).value; + mem.copy(u8, buf, first_value); + var buf_index: usize = first_value.len; + for (dex.toSlice()[1..]) |idx| { + const value = self.data.at(idx).value; + buf[buf_index] = ','; + buf_index += 1; + mem.copy(u8, buf[buf_index..], value); + buf_index += value.len; + } + + // No need for shrink since buf is exactly the correct size. 
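+        // e.g. two "set-cookie" values "x=1" and "y=2" come back as "x=1,y=2" (see the test below).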
+ return buf; + } + + fn rebuild_index(self: *Self) void { + { // clear out the indexes + var it = self.index.iterator(); + while (it.next()) |kv| { + var dex = &kv.value; + dex.len = 0; // keeps capacity available + } + } + { // fill up indexes again; we know capacity is fine from before + var it = self.data.iterator(); + while (it.next()) |entry| { + var dex = &self.index.get(entry.name).?.value; + dex.appendAssumeCapacity(it.count); + } + } + } + + pub fn sort(self: *Self) void { + std.sort.sort(HeaderEntry, self.data.toSlice(), HeaderEntry.compare); + self.rebuild_index(); + } + + pub fn format( + self: Self, + comptime fmt: []const u8, + options: std.fmt.FormatOptions, + context: var, + comptime Errors: type, + output: fn (@typeOf(context), []const u8) Errors!void, + ) Errors!void { + var it = self.iterator(); + while (it.next()) |entry| { + try output(context, entry.name); + try output(context, ": "); + try output(context, entry.value); + try output(context, "\n"); + } + } +}; + +test "Headers.iterator" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("cookie", "somevalue", null); + + var count: i32 = 0; + var it = h.iterator(); + while (it.next()) |e| { + if (count == 0) { + testing.expectEqualSlices(u8, "foo", e.name); + testing.expectEqualSlices(u8, "bar", e.value); + testing.expectEqual(false, e.never_index); + } else if (count == 1) { + testing.expectEqualSlices(u8, "cookie", e.name); + testing.expectEqualSlices(u8, "somevalue", e.value); + testing.expectEqual(true, e.never_index); + } + count += 1; + } + testing.expectEqual(i32(2), count); +} + +test "Headers.contains" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("cookie", "somevalue", null); + + testing.expectEqual(true, h.contains("foo")); + testing.expectEqual(false, h.contains("flooble")); +} + +test "Headers.delete" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("baz", "qux", null); + try h.append("cookie", "somevalue", null); + + testing.expectEqual(false, h.delete("not-present")); + testing.expectEqual(usize(3), h.count()); + + testing.expectEqual(true, h.delete("foo")); + testing.expectEqual(usize(2), h.count()); + { + const e = h.at(0); + testing.expectEqualSlices(u8, "baz", e.name); + testing.expectEqualSlices(u8, "qux", e.value); + testing.expectEqual(false, e.never_index); + } + { + const e = h.at(1); + testing.expectEqualSlices(u8, "cookie", e.name); + testing.expectEqualSlices(u8, "somevalue", e.value); + testing.expectEqual(true, e.never_index); + } + + testing.expectEqual(false, h.delete("foo")); +} + +test "Headers.orderedRemove" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("baz", "qux", null); + try h.append("cookie", "somevalue", null); + + h.orderedRemove(0); + testing.expectEqual(usize(2), h.count()); + { + const e = h.at(0); + testing.expectEqualSlices(u8, "baz", e.name); + testing.expectEqualSlices(u8, "qux", e.value); + testing.expectEqual(false, e.never_index); + } + { + const e = h.at(1); + testing.expectEqualSlices(u8, "cookie", e.name); + testing.expectEqualSlices(u8, "somevalue", e.value); + testing.expectEqual(true, e.never_index); + } +} + +test "Headers.swapRemove" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("baz", "qux", null); + try h.append("cookie", "somevalue", 
null); + + h.swapRemove(0); + testing.expectEqual(usize(2), h.count()); + { + const e = h.at(0); + testing.expectEqualSlices(u8, "cookie", e.name); + testing.expectEqualSlices(u8, "somevalue", e.value); + testing.expectEqual(true, e.never_index); + } + { + const e = h.at(1); + testing.expectEqualSlices(u8, "baz", e.name); + testing.expectEqualSlices(u8, "qux", e.value); + testing.expectEqual(false, e.never_index); + } +} + +test "Headers.at" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("cookie", "somevalue", null); + + { + const e = h.at(0); + testing.expectEqualSlices(u8, "foo", e.name); + testing.expectEqualSlices(u8, "bar", e.value); + testing.expectEqual(false, e.never_index); + } + { + const e = h.at(1); + testing.expectEqualSlices(u8, "cookie", e.name); + testing.expectEqualSlices(u8, "somevalue", e.value); + testing.expectEqual(true, e.never_index); + } +} + +test "Headers.getIndices" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("set-cookie", "x=1", null); + try h.append("set-cookie", "y=2", null); + + testing.expect(null == h.getIndices("not-present")); + testing.expectEqualSlices(usize, [_]usize{0}, h.getIndices("foo").?.toSliceConst()); + testing.expectEqualSlices(usize, [_]usize{ 1, 2 }, h.getIndices("set-cookie").?.toSliceConst()); +} + +test "Headers.get" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("set-cookie", "x=1", null); + try h.append("set-cookie", "y=2", null); + + { + const v = try h.get(test_allocator, "not-present"); + testing.expect(null == v); + } + { + const v = (try h.get(test_allocator, "foo")).?; + defer test_allocator.free(v); + const e = v[0]; + testing.expectEqualSlices(u8, "foo", e.name); + testing.expectEqualSlices(u8, "bar", e.value); + testing.expectEqual(false, e.never_index); + } + { + const v = (try h.get(test_allocator, "set-cookie")).?; + defer test_allocator.free(v); + { + const e = v[0]; + testing.expectEqualSlices(u8, "set-cookie", e.name); + testing.expectEqualSlices(u8, "x=1", e.value); + testing.expectEqual(true, e.never_index); + } + { + const e = v[1]; + testing.expectEqualSlices(u8, "set-cookie", e.name); + testing.expectEqualSlices(u8, "y=2", e.value); + testing.expectEqual(true, e.never_index); + } + } +} + +test "Headers.getCommaSeparated" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("set-cookie", "x=1", null); + try h.append("set-cookie", "y=2", null); + + { + const v = try h.getCommaSeparated(test_allocator, "not-present"); + testing.expect(null == v); + } + { + const v = (try h.getCommaSeparated(test_allocator, "foo")).?; + defer test_allocator.free(v); + testing.expectEqualSlices(u8, "bar", v); + } + { + const v = (try h.getCommaSeparated(test_allocator, "set-cookie")).?; + defer test_allocator.free(v); + testing.expectEqualSlices(u8, "x=1,y=2", v); + } +} + +test "Headers.sort" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("cookie", "somevalue", null); + + h.sort(); + { + const e = h.at(0); + testing.expectEqualSlices(u8, "cookie", e.name); + testing.expectEqualSlices(u8, "somevalue", e.value); + testing.expectEqual(true, e.never_index); + } + { + const e = h.at(1); + testing.expectEqualSlices(u8, "foo", e.name); + testing.expectEqualSlices(u8, "bar", e.value); + testing.expectEqual(false, 
e.never_index); + } +} + +test "Headers.format" { + var h = Headers.init(test_allocator); + defer h.deinit(); + try h.append("foo", "bar", null); + try h.append("cookie", "somevalue", null); + + var buf: [100]u8 = undefined; + testing.expectEqualSlices(u8, + \\foo: bar + \\cookie: somevalue + \\ + , try std.fmt.bufPrint(buf[0..], "{}", h)); +} diff --git a/std/io.zig b/std/io.zig index a02fb56e24..3a8da3ed3e 100644 --- a/std/io.zig +++ b/std/io.zig @@ -164,32 +164,32 @@ pub fn InStream(comptime ReadError: type) type { /// Reads a native-endian integer pub fn readIntNative(self: *Self, comptime T: type) !T { - var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined; + var bytes: [(T.bit_count + 7) / 8]u8 = undefined; try self.readNoEof(bytes[0..]); return mem.readIntNative(T, &bytes); } /// Reads a foreign-endian integer pub fn readIntForeign(self: *Self, comptime T: type) !T { - var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined; + var bytes: [(T.bit_count + 7) / 8]u8 = undefined; try self.readNoEof(bytes[0..]); return mem.readIntForeign(T, &bytes); } pub fn readIntLittle(self: *Self, comptime T: type) !T { - var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined; + var bytes: [(T.bit_count + 7) / 8]u8 = undefined; try self.readNoEof(bytes[0..]); return mem.readIntLittle(T, &bytes); } pub fn readIntBig(self: *Self, comptime T: type) !T { - var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined; + var bytes: [(T.bit_count + 7) / 8]u8 = undefined; try self.readNoEof(bytes[0..]); return mem.readIntBig(T, &bytes); } pub fn readInt(self: *Self, comptime T: type, endian: builtin.Endian) !T { - var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined; + var bytes: [(T.bit_count + 7) / 8]u8 = undefined; try self.readNoEof(bytes[0..]); return mem.readInt(T, &bytes, endian); } @@ -249,32 +249,32 @@ pub fn OutStream(comptime WriteError: type) type { /// Write a native-endian integer. pub fn writeIntNative(self: *Self, comptime T: type, value: T) Error!void { - var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined; + var bytes: [(T.bit_count + 7) / 8]u8 = undefined; mem.writeIntNative(T, &bytes, value); return self.writeFn(self, bytes); } /// Write a foreign-endian integer. 
pub fn writeIntForeign(self: *Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined;
+ var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
 mem.writeIntForeign(T, &bytes, value);
 return self.writeFn(self, bytes);
 }
 
 pub fn writeIntLittle(self: *Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined;
+ var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
 mem.writeIntLittle(T, &bytes, value);
 return self.writeFn(self, bytes);
 }
 
 pub fn writeIntBig(self: *Self, comptime T: type, value: T) Error!void {
- var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined;
+ var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
 mem.writeIntBig(T, &bytes, value);
 return self.writeFn(self, bytes);
 }
 
 pub fn writeInt(self: *Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
- var bytes: [(T.bit_count + 7 )/ 8]u8 = undefined;
+ var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
 mem.writeInt(T, &bytes, value, endian);
 return self.writeFn(self, bytes);
 }
diff --git a/std/io/test.zig b/std/io/test.zig
index 4b25d645fc..40258eab5f 100644
--- a/std/io/test.zig
+++ b/std/io/test.zig
@@ -597,7 +597,10 @@ test "c out stream" {
 const filename = c"tmp_io_test_file.txt";
 const out_file = std.c.fopen(filename, c"w") orelse return error.UnableToOpenTestFile;
- defer fs.deleteFileC(filename) catch {};
+ defer {
+ _ = std.c.fclose(out_file);
+ fs.deleteFileC(filename) catch {};
+ }
 
 const out_stream = &io.COutStream.init(out_file).stream;
 try out_stream.print("hi: {}\n", i32(123));
diff --git a/std/json.zig b/std/json.zig
index 8d42d1bcf0..e135911170 100644
--- a/std/json.zig
+++ b/std/json.zig
@@ -876,8 +876,9 @@ pub const TokenStream = struct {
 
 pub fn next(self: *TokenStream) !?Token {
 if (self.token) |token| {
+ const copy = token;
 self.token = null;
- return token;
+ return copy;
 }
 
 var t1: ?Token = undefined;
diff --git a/std/math/big/int.zig b/std/math/big/int.zig
index 46b1bed9a3..4ad5c92b3f 100644
--- a/std/math/big/int.zig
+++ b/std/math/big/int.zig
@@ -519,6 +519,7 @@ pub const Int = struct {
 pub fn format(
 self: Int,
 comptime fmt: []const u8,
+ comptime options: std.fmt.FormatOptions,
 context: var,
 comptime FmtError: type,
 output: fn (@typeOf(context), []const u8) FmtError!void,
diff --git a/std/mem.zig b/std/mem.zig
index 7ecd483020..ef001d5dab 100644
--- a/std/mem.zig
+++ b/std/mem.zig
@@ -1481,6 +1481,7 @@ test "subArrayPtr" {
 }
 
 /// Round an address up to the nearest aligned address
+/// The alignment must be a power of 2 and greater than 0.
 pub fn alignForward(addr: usize, alignment: usize) usize {
 return alignBackward(addr + (alignment - 1), alignment);
 }
@@ -1500,13 +1501,18 @@ test "alignForward" {
 testing.expect(alignForward(17, 8) == 24);
 }
 
+/// Round an address down to the previous aligned address
+/// The alignment must be a power of 2 and greater than 0.
 pub fn alignBackward(addr: usize, alignment: usize) usize {
+ assert(@popCount(usize, alignment) == 1);
 // 000010000 // example addr
 // 000001111 // subtract 1
 // 111110000 // binary not
 return addr & ~(alignment - 1);
 }
 
+/// Given an address and an alignment, return true if the address is a multiple of the alignment
+/// The alignment must be a power of 2 and greater than 0. 
pub fn isAligned(addr: usize, alignment: usize) bool { return alignBackward(addr, alignment) == addr; } diff --git a/std/net.zig b/std/net.zig index 8c4ab399b6..efcbf7000d 100644 --- a/std/net.zig +++ b/std/net.zig @@ -33,7 +33,6 @@ pub const Address = struct { pub fn initIp6(ip6: *const Ip6Addr, _port: u16) Address { return Address{ - .family = os.AF_INET6, .os_addr = os.sockaddr{ .in6 = os.sockaddr_in6{ .family = os.AF_INET6, diff --git a/std/os/windows.zig b/std/os/windows.zig index d10ab695db..ad5263dc0b 100644 --- a/std/os/windows.zig +++ b/std/os/windows.zig @@ -348,6 +348,7 @@ pub const DeleteFileError = error{ FileNotFound, AccessDenied, NameTooLong, + FileBusy, Unexpected, }; @@ -363,6 +364,7 @@ pub fn DeleteFileW(filename: [*]const u16) DeleteFileError!void { ERROR.ACCESS_DENIED => return error.AccessDenied, ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong, ERROR.INVALID_PARAMETER => return error.NameTooLong, + ERROR.SHARING_VIOLATION => return error.FileBusy, else => |err| return unexpectedError(err), } } diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig index 45ce23c00f..7177f58b8a 100644 --- a/std/special/bootstrap.zig +++ b/std/special/bootstrap.zig @@ -1,7 +1,6 @@ -// This file is in a package which has the root source file exposed as "@root". -// It is included in the compilation unit when exporting an executable. +// This file is included in the compilation unit when exporting an executable. -const root = @import("@root"); +const root = @import("root"); const std = @import("std"); const builtin = @import("builtin"); const assert = std.debug.assert; @@ -114,20 +113,20 @@ extern fn main(c_argc: i32, c_argv: [*][*]u8, c_envp: [*]?[*]u8) i32 { // and we want fewer call frames in stack traces. inline fn callMain() u8 { switch (@typeId(@typeOf(root.main).ReturnType)) { - builtin.TypeId.NoReturn => { + .NoReturn => { root.main(); }, - builtin.TypeId.Void => { + .Void => { root.main(); return 0; }, - builtin.TypeId.Int => { + .Int => { if (@typeOf(root.main).ReturnType.bit_count != 8) { @compileError("expected return type of main to be 'u8', 'noreturn', 'void', or '!void'"); } return root.main(); }, - builtin.TypeId.ErrorUnion => { + .ErrorUnion => { root.main() catch |err| { std.debug.warn("error: {}\n", @errorName(err)); if (builtin.os != builtin.Os.zen) { diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig index e88c3de7bd..d6404d43f5 100644 --- a/std/special/build_runner.zig +++ b/std/special/build_runner.zig @@ -167,7 +167,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void { const allocator = builder.allocator; for (builder.top_level_steps.toSliceConst()) |top_level_step| { - try out_stream.print(" {s22} {}\n", top_level_step.step.name, top_level_step.description); + try out_stream.print(" {s:22} {}\n", top_level_step.step.name, top_level_step.description); } try out_stream.write( @@ -188,7 +188,7 @@ fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void { for (builder.available_options_list.toSliceConst()) |option| { const name = try fmt.allocPrint(allocator, " -D{}=[{}]", option.name, Builder.typeIdName(option.type_id)); defer allocator.free(name); - try out_stream.print("{s24} {}\n", name, option.description); + try out_stream.print("{s:24} {}\n", name, option.description); } } diff --git a/std/special/c.zig b/std/special/c.zig index 456070f609..15cefbd2a0 100644 --- a/std/special/c.zig +++ b/std/special/c.zig @@ -254,19 +254,32 @@ export fn fmod(x: f64, y: f64) f64 { // TODO add 
intrinsics for these (and probably the double version too) // and have the math stuff use the intrinsic. same as @mod and @rem -export fn floorf(x: f32) f32 { - return math.floor(x); -} -export fn ceilf(x: f32) f32 { - return math.ceil(x); -} -export fn floor(x: f64) f64 { - return math.floor(x); -} -export fn ceil(x: f64) f64 { - return math.ceil(x); -} - +export fn floorf(x: f32) f32 {return math.floor(x);} +export fn ceilf(x: f32) f32 {return math.ceil(x);} +export fn floor(x: f64) f64 {return math.floor(x);} +export fn ceil(x: f64) f64 {return math.ceil(x);} +export fn fma(a: f64, b: f64, c: f64) f64 {return math.fma(f64, a, b, c);} +export fn fmaf(a: f32, b: f32, c: f32) f32 {return math.fma(f32, a, b, c);} +export fn sin(a: f64) f64 {return math.sin(a);} +export fn sinf(a: f32) f32 {return math.sin(a);} +export fn cos(a: f64) f64 {return math.cos(a);} +export fn cosf(a: f32) f32 {return math.cos(a);} +export fn exp(a: f64) f64 {return math.exp(a);} +export fn expf(a: f32) f32 {return math.exp(a);} +export fn exp2(a: f64) f64 {return math.exp2(a);} +export fn exp2f(a: f32) f32 {return math.exp2(a);} +export fn log(a: f64) f64 {return math.ln(a);} +export fn logf(a: f32) f32 {return math.ln(a);} +export fn log2(a: f64) f64 {return math.log2(a);} +export fn log2f(a: f32) f32 {return math.log2(a);} +export fn log10(a: f64) f64 {return math.log10(a);} +export fn log10f(a: f32) f32 {return math.log10(a);} +export fn fabs(a: f64) f64 {return math.fabs(a);} +export fn fabsf(a: f32) f32 {return math.fabs(a);} +export fn trunc(a: f64) f64 {return math.trunc(a);} +export fn truncf(a: f32) f32 {return math.trunc(a);} +export fn round(a: f64) f64 {return math.round(a);} +export fn roundf(a: f32) f32 {return math.round(a);} fn generic_fmod(comptime T: type, x: T, y: T) T { @setRuntimeSafety(false); diff --git a/std/special/compiler_rt.zig b/std/special/compiler_rt.zig index 46607a3adf..914f9dcb00 100644 --- a/std/special/compiler_rt.zig +++ b/std/special/compiler_rt.zig @@ -405,15 +405,15 @@ const use_thumb_1 = usesThumb1(builtin.arch); fn usesThumb1(arch: builtin.Arch) bool { return switch (arch) { - .arm => switch (arch.arm) { + .arm => |sub_arch| switch (sub_arch) { .v6m => true, else => false, }, - .armeb => switch (arch.armeb) { + .armeb => |sub_arch| switch (sub_arch) { .v6m => true, else => false, }, - .thumb => switch (arch.thumb) { + .thumb => |sub_arch| switch (sub_arch) { .v5, .v5te, .v4t, @@ -423,7 +423,7 @@ fn usesThumb1(arch: builtin.Arch) bool { => true, else => false, }, - .thumbeb => switch (arch.thumbeb) { + .thumbeb => |sub_arch| switch (sub_arch) { .v5, .v5te, .v4t, @@ -471,6 +471,22 @@ test "usesThumb1" { //etc. 
} +const use_thumb_1_pre_armv6 = usesThumb1PreArmv6(builtin.arch); + +fn usesThumb1PreArmv6(arch: builtin.Arch) bool { + return switch (arch) { + .thumb => |sub_arch| switch (sub_arch) { + .v5, .v5te, .v4t => true, + else => false, + }, + .thumbeb => |sub_arch| switch (sub_arch) { + .v5, .v5te, .v4t => true, + else => false, + }, + else => false, + }; +} + nakedcc fn __aeabi_memcpy() noreturn { @setRuntimeSafety(false); if (use_thumb_1) { @@ -505,7 +521,16 @@ nakedcc fn __aeabi_memmove() noreturn { nakedcc fn __aeabi_memset() noreturn { @setRuntimeSafety(false); - if (use_thumb_1) { + if (use_thumb_1_pre_armv6) { + asm volatile ( + \\ eors r1, r2 + \\ eors r2, r1 + \\ eors r1, r2 + \\ push {r7, lr} + \\ b memset + \\ pop {r7, pc} + ); + } else if (use_thumb_1) { asm volatile ( \\ mov r3, r1 \\ mov r1, r2 @@ -527,7 +552,15 @@ nakedcc fn __aeabi_memset() noreturn { nakedcc fn __aeabi_memclr() noreturn { @setRuntimeSafety(false); - if (use_thumb_1) { + if (use_thumb_1_pre_armv6) { + asm volatile ( + \\ adds r2, r1, #0 + \\ movs r1, #0 + \\ push {r7, lr} + \\ bl memset + \\ pop {r7, pc} + ); + } else if (use_thumb_1) { asm volatile ( \\ mov r2, r1 \\ movs r1, #0 diff --git a/std/special/compiler_rt/comparetf2.zig b/std/special/compiler_rt/comparetf2.zig index 0912b71bd5..aaaba954d6 100644 --- a/std/special/compiler_rt/comparetf2.zig +++ b/std/special/compiler_rt/comparetf2.zig @@ -73,12 +73,15 @@ pub extern fn __getf2(a: f128, b: f128) c_int { if (aAbs > infRep or bAbs > infRep) return GE_UNORDERED; if ((aAbs | bAbs) == 0) return GE_EQUAL; - return if ((aInt & bInt) >= 0) if (aInt < bInt) - GE_LESS - else if (aInt == bInt) - GE_EQUAL - else - GE_GREATER else if (aInt > bInt) + // zig fmt issue here, see https://github.com/ziglang/zig/issues/2661 + return if ((aInt & bInt) >= 0) + if (aInt < bInt) + GE_LESS + else if (aInt == bInt) + GE_EQUAL + else + GE_GREATER + else if (aInt > bInt) GE_LESS else if (aInt == bInt) GE_EQUAL diff --git a/std/special/panic.zig b/std/special/panic.zig index 40b1d5e7fe..92e0d9164c 100644 --- a/std/special/panic.zig +++ b/std/special/panic.zig @@ -9,16 +9,15 @@ const std = @import("std"); pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn { @setCold(true); switch (builtin.os) { - // TODO: fix panic in zen - builtin.Os.freestanding, builtin.Os.zen => { + .freestanding => { while (true) {} }, - builtin.Os.wasi => { + .wasi => { std.debug.warn("{}", msg); _ = std.os.wasi.proc_raise(std.os.wasi.SIGABRT); unreachable; }, - builtin.Os.uefi => { + .uefi => { // TODO look into using the debug info and logging helpful messages std.os.abort(); }, diff --git a/std/std.zig b/std/std.zig index 603cb10929..350f0fb437 100644 --- a/std/std.zig +++ b/std/std.zig @@ -37,6 +37,7 @@ pub const fs = @import("fs.zig"); pub const hash = @import("hash.zig"); pub const hash_map = @import("hash_map.zig"); pub const heap = @import("heap.zig"); +pub const http = @import("http.zig"); pub const io = @import("io.zig"); pub const json = @import("json.zig"); pub const lazyInit = @import("lazy_init.zig").lazyInit; @@ -89,6 +90,7 @@ test "std" { _ = @import("fs.zig"); _ = @import("hash.zig"); _ = @import("heap.zig"); + _ = @import("http.zig"); _ = @import("io.zig"); _ = @import("json.zig"); _ = @import("lazy_init.zig"); diff --git a/std/zig/parse.zig b/std/zig/parse.zig index 7a8287db0c..da258c9237 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -2833,8 +2833,8 @@ fn parseIf(arena: *Allocator, it: *TokenIterator, tree: *Tree, bodyParseFn: Node const else_token 
= eatToken(it, .Keyword_else) orelse return node; const payload = try parsePayload(arena, it, tree); - const else_expr = try expectNode(arena, it, tree, parseExpr, AstError{ - .ExpectedExpr = AstError.ExpectedExpr{ .token = it.index }, + const else_expr = try expectNode(arena, it, tree, bodyParseFn, AstError{ + .InvalidToken = AstError.InvalidToken{ .token = it.index }, }); const else_node = try arena.create(Node.Else); else_node.* = Node.Else{ diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 281f09d57b..f78e666779 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -1,4 +1,4 @@ -// TODO remove `use` keyword eventually +// TODO remove `use` keyword eventually: https://github.com/ziglang/zig/issues/2591 test "zig fmt: change use to usingnamespace" { try testTransform( \\use @import("std"); @@ -1105,7 +1105,7 @@ test "zig fmt: first line comment in struct initializer" { try testCanonical( \\pub async fn acquire(self: *Self) HeldLock { \\ return HeldLock{ - \\ // TODO guaranteed allocation elision + \\ // guaranteed allocation elision \\ .held = await (async self.lock.acquire() catch unreachable), \\ .value = &self.private_data, \\ }; @@ -2234,6 +2234,18 @@ test "zig fmt: multiline string in array" { ); } +test "zig fmt: if type expr" { + try testCanonical( + \\const mycond = true; + \\pub fn foo() if (mycond) i32 else void { + \\ if (mycond) { + \\ return 42; + \\ } + \\} + \\ + ); +} + const std = @import("std"); const mem = std.mem; const warn = std.debug.warn; diff --git a/std/zig/render.zig b/std/zig/render.zig index ef5c8f2346..2e8e4481be 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -939,10 +939,10 @@ fn renderExpression( } switch (container_decl.init_arg_expr) { - ast.Node.ContainerDecl.InitArg.None => { + .None => { try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.Space); // union }, - ast.Node.ContainerDecl.InitArg.Enum => |enum_tag_type| { + .Enum => |enum_tag_type| { try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); @@ -962,7 +962,7 @@ fn renderExpression( try renderToken(tree, stream, tree.nextToken(enum_token), indent, start_col, Space.Space); // ) } }, - ast.Node.ContainerDecl.InitArg.Type => |type_expr| { + .Type => |type_expr| { try renderToken(tree, stream, container_decl.kind_token, indent, start_col, Space.None); // union const lparen = tree.nextToken(container_decl.kind_token); diff --git a/test/compare_output.zig b/test/compare_output.zig index ad15ef47b3..79057f3c54 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -122,7 +122,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ \\pub fn main() void { \\ const stdout = &(io.getStdOut() catch unreachable).outStream().stream; - \\ stdout.print("Hello, world!\n{d4} {x3} {c}\n", u32(12), u16(0x12), u8('a')) catch unreachable; + \\ stdout.print("Hello, world!\n{d:4} {x:3} {c}\n", u32(12), u16(0x12), u8('a')) catch unreachable; \\} , "Hello, world!\n0012 012 a\n"); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index e85b2f3395..94cd152eb7 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2,13 +2,22 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "slice passed as array init type with elems", + \\export fn entry() void { + \\ const x = []u8{1, 2}; + \\} + , + 
"tmp.zig:2:15: error: expected array type or [_], found slice", + ); + cases.add( "slice passed as array init type", \\export fn entry() void { \\ const x = []u8{}; \\} , - "tmp.zig:2:19: error: expected array type or [_], found slice", + "tmp.zig:2:15: error: expected array type or [_], found slice", ); cases.add( @@ -49,16 +58,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\const Foo = struct { \\ a: undefined, \\}; - \\const Bar = union { - \\ a: undefined, - \\}; - \\pub fn main() void { + \\export fn entry1() void { \\ const foo: Foo = undefined; - \\ const bar: Bar = undefined; \\} , "tmp.zig:2:8: error: expected type 'type', found '(undefined)'", - "tmp.zig:5:8: error: expected type 'type', found '(undefined)'", ); cases.add( @@ -461,13 +465,25 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\const G = packed struct { \\ x: Enum, \\}; - \\export fn entry() void { + \\export fn entry1() void { \\ var a: A = undefined; + \\} + \\export fn entry2() void { \\ var b: B = undefined; + \\} + \\export fn entry3() void { \\ var r: C = undefined; + \\} + \\export fn entry4() void { \\ var d: D = undefined; + \\} + \\export fn entry5() void { \\ var e: E = undefined; + \\} + \\export fn entry6() void { \\ var f: F = undefined; + \\} + \\export fn entry7() void { \\ var g: G = undefined; \\} \\const S = struct { @@ -489,7 +505,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "tmp.zig:14:5: error: non-packed, non-extern struct 'U' not allowed in packed struct; no guaranteed in-memory representation", "tmp.zig:17:5: error: type '?anyerror' not allowed in packed struct; no guaranteed in-memory representation", "tmp.zig:20:5: error: type 'Enum' not allowed in packed struct; no guaranteed in-memory representation", - "tmp.zig:38:14: note: enum declaration does not specify an integer tag type", + "tmp.zig:50:14: note: enum declaration does not specify an integer tag type", ); cases.addCase(x: { @@ -721,7 +737,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ var oops = @bitCast(u7, byte); \\} , - "tmp.zig:2:16: error: destination type 'u7' has 7 bits but source type 'u8' has 8 bits", + "tmp.zig:2:25: error: destination type 'u7' has 7 bits but source type 'u8' has 8 bits", ); cases.add( @@ -1381,7 +1397,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ for (xx) |f| {} \\} , - "tmp.zig:7:15: error: variable of type 'Foo' must be const or comptime", + "tmp.zig:7:5: error: values of type 'Foo' must be comptime known, but index value is runtime known", ); cases.add( @@ -2250,6 +2266,9 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\} \\ \\extern fn bar(x: *void) void { } + \\export fn entry2() void { + \\ bar(&{}); + \\} , "tmp.zig:1:30: error: parameter of type '*void' has 0 bits; not allowed in function with calling convention 'ccc'", "tmp.zig:7:18: error: parameter of type '*void' has 0 bits; not allowed in function with calling convention 'ccc'", @@ -2576,7 +2595,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ \\fn b() void {} , - "tmp.zig:3:5: error: unreachable code", + "tmp.zig:3:6: error: unreachable code", ); cases.add( @@ -2596,7 +2615,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\} , "tmp.zig:3:5: error: use of undeclared identifier 'b'", - "tmp.zig:4:5: error: use of undeclared identifier 'c'", ); cases.add( @@ -2662,7 +2680,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ const a: noreturn = {}; \\} , - "tmp.zig:2:14: error: variable of type 
'noreturn' not allowed", + "tmp.zig:2:25: error: expected type 'noreturn', found 'void'", ); cases.add( @@ -2725,9 +2743,13 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ var bad : bool = undefined; \\ bad[bad] = bad[bad]; \\} + \\export fn g() void { + \\ var bad : bool = undefined; + \\ _ = bad[bad]; + \\} , "tmp.zig:3:8: error: array access of non-array type 'bool'", - "tmp.zig:3:19: error: array access of non-array type 'bool'", + "tmp.zig:7:12: error: array access of non-array type 'bool'", ); cases.add( @@ -2737,9 +2759,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ var bad = false; \\ array[bad] = array[bad]; \\} + \\export fn g() void { + \\ var array = "aoeu"; + \\ var bad = false; + \\ _ = array[bad]; + \\} , "tmp.zig:4:11: error: expected type 'usize', found 'bool'", - "tmp.zig:4:24: error: expected type 'usize', found 'bool'", + "tmp.zig:9:15: error: expected type 'usize', found 'bool'", ); cases.add( @@ -2757,12 +2784,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "missing else clause", \\fn f(b: bool) void { \\ const x : i32 = if (b) h: { break :h 1; }; + \\} + \\fn g(b: bool) void { \\ const y = if (b) h: { break :h i32(1); }; \\} - \\export fn entry() void { f(true); } + \\export fn entry() void { f(true); g(true); } , "tmp.zig:2:42: error: integer value 1 cannot be implicitly casted to type 'void'", - "tmp.zig:3:15: error: incompatible types: 'i32' and 'void'", + "tmp.zig:5:15: error: incompatible types: 'i32' and 'void'", ); cases.add( @@ -2773,9 +2802,13 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ a.foo = 1; \\ const y = a.bar; \\} + \\export fn g() void { + \\ var a : A = undefined; + \\ const y = a.bar; + \\} , "tmp.zig:4:6: error: no member named 'foo' in struct 'A'", - "tmp.zig:5:16: error: no member named 'bar' in struct 'A'", + "tmp.zig:9:16: error: no member named 'bar' in struct 'A'", ); cases.add( @@ -2920,7 +2953,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ _ = foo; \\} , - "tmp.zig:1:19: error: type '[3]u16' does not support struct initialization syntax", + "tmp.zig:1:21: error: type '[3]u16' does not support struct initialization syntax", ); cases.add( @@ -3239,7 +3272,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ \\export fn entry() usize { return @sizeOf(@typeOf(Foo)); } , - "tmp.zig:5:25: error: unable to evaluate constant expression", + "tmp.zig:5:18: error: unable to evaluate constant expression", "tmp.zig:2:12: note: called from here", "tmp.zig:2:8: note: called from here", ); @@ -3856,7 +3889,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ return 2; \\} , - "tmp.zig:2:15: error: values of type 'comptime_int' must be comptime known", + "tmp.zig:5:17: error: cannot store runtime value in type 'comptime_int'", ); cases.add( @@ -5108,7 +5141,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ const array = [2]u8{1, 2, 3}; \\} , - "tmp.zig:2:24: error: expected [2]u8 literal, found [3]u8 literal", + "tmp.zig:2:31: error: index 2 outside array of size 2", ); cases.add( @@ -5125,36 +5158,47 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "non-const variables of things that require const variables", - \\const Opaque = @OpaqueType(); - \\ - \\export fn entry(opaque: *Opaque) void { + \\export fn entry1() void { \\ var m2 = &2; - \\ const y: u32 = m2.*; - \\ + \\} + \\export fn entry2() void { \\ var a = undefined; + \\} + \\export fn entry3() void { \\ var b = 1; + \\} + \\export fn entry4() 
void { \\ var c = 1.0; + \\} + \\export fn entry5() void { \\ var d = null; + \\} + \\export fn entry6(opaque: *Opaque) void { \\ var e = opaque.*; + \\} + \\export fn entry7() void { \\ var f = i32; + \\} + \\export fn entry8() void { \\ var h = (Foo {}).bar; - \\ + \\} + \\export fn entry9() void { \\ var z: noreturn = return; \\} - \\ + \\const Opaque = @OpaqueType(); \\const Foo = struct { \\ fn bar(self: *const Foo) void {} \\}; , - "tmp.zig:4:4: error: variable of type '*comptime_int' must be const or comptime", - "tmp.zig:7:4: error: variable of type '(undefined)' must be const or comptime", + "tmp.zig:2:4: error: variable of type '*comptime_int' must be const or comptime", + "tmp.zig:5:4: error: variable of type '(undefined)' must be const or comptime", "tmp.zig:8:4: error: variable of type 'comptime_int' must be const or comptime", - "tmp.zig:9:4: error: variable of type 'comptime_float' must be const or comptime", - "tmp.zig:10:4: error: variable of type '(null)' must be const or comptime", - "tmp.zig:11:4: error: variable of type 'Opaque' not allowed", - "tmp.zig:12:4: error: variable of type 'type' must be const or comptime", - "tmp.zig:13:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime", - "tmp.zig:15:4: error: unreachable code", + "tmp.zig:11:4: error: variable of type 'comptime_float' must be const or comptime", + "tmp.zig:14:4: error: variable of type '(null)' must be const or comptime", + "tmp.zig:17:4: error: variable of type 'Opaque' not allowed", + "tmp.zig:20:4: error: variable of type 'type' must be const or comptime", + "tmp.zig:23:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime", + "tmp.zig:26:4: error: unreachable code", ); cases.add( @@ -5300,7 +5344,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ } \\} , - "tmp.zig:37:16: error: cannot store runtime value in compile time variable", + "tmp.zig:37:29: error: cannot store runtime value in compile time variable", ); cases.add( @@ -5924,7 +5968,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ const foo = Foo { .Bar = x, .Baz = u8 }; \\} , - "tmp.zig:7:30: error: unable to evaluate constant expression", + "tmp.zig:7:23: error: unable to evaluate constant expression", ); cases.add( @@ -5938,7 +5982,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ const foo = Foo { .Bar = x }; \\} , - "tmp.zig:7:30: error: unable to evaluate constant expression", + "tmp.zig:7:23: error: unable to evaluate constant expression", ); cases.addTest( diff --git a/test/stage1/behavior.zig b/test/stage1/behavior.zig index f477bb64ed..5cb04966e3 100644 --- a/test/stage1/behavior.zig +++ b/test/stage1/behavior.zig @@ -69,6 +69,8 @@ comptime { _ = @import("behavior/optional.zig"); _ = @import("behavior/pointers.zig"); _ = @import("behavior/popcount.zig"); + _ = @import("behavior/muladd.zig"); + _ = @import("behavior/floatop.zig"); _ = @import("behavior/ptrcast.zig"); _ = @import("behavior/pub_enum.zig"); _ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig"); diff --git a/test/stage1/behavior/array.zig b/test/stage1/behavior/array.zig index 21d07f75f5..49f9885702 100644 --- a/test/stage1/behavior/array.zig +++ b/test/stage1/behavior/array.zig @@ -172,6 +172,12 @@ fn plusOne(x: u32) u32 { return x + 1; } +test "runtime initialize array elem and then implicit cast to slice" { + var two: i32 = 2; + const x: []const i32 = [_]i32{two}; + expect(x[0] == 2); +} + test "array literal as argument to function" { const S = 
struct { fn entry(two: i32) void { @@ -227,7 +233,7 @@ test "double nested array to const slice cast in array literal" { const cases2 = [_][]const i32{ [_]i32{1}, - [_]i32{ two, 3 }, + &[_]i32{ two, 3 }, }; expect(cases2.len == 2); expect(cases2[0].len == 1); @@ -238,7 +244,7 @@ test "double nested array to const slice cast in array literal" { const cases3 = [_][]const []const i32{ [_][]const i32{[_]i32{1}}, - [_][]const i32{[_]i32{ two, 3 }}, + &[_][]const i32{&[_]i32{ two, 3 }}, [_][]const i32{ [_]i32{4}, [_]i32{ 5, 6, 7 }, diff --git a/test/stage1/behavior/bitcast.zig b/test/stage1/behavior/bitcast.zig index e86c50885e..394ade1a21 100644 --- a/test/stage1/behavior/bitcast.zig +++ b/test/stage1/behavior/bitcast.zig @@ -112,3 +112,16 @@ test "bitcast packed struct to integer and back" { S.doTheTest(); comptime S.doTheTest(); } + +test "implicit cast to error union by returning" { + const S = struct { + fn entry() void { + expect((func(-1) catch unreachable) == maxInt(u64)); + } + pub fn func(sz: i64) anyerror!u64 { + return @bitCast(u64, sz); + } + }; + S.entry(); + comptime S.entry(); +} diff --git a/test/stage1/behavior/cast.zig b/test/stage1/behavior/cast.zig index 0a2ffb6c2f..edb0f4ff17 100644 --- a/test/stage1/behavior/cast.zig +++ b/test/stage1/behavior/cast.zig @@ -482,3 +482,40 @@ test "@intCast to u0 and use the result" { S.doTheTest(0, 1, 0); comptime S.doTheTest(0, 1, 0); } + +test "peer type resolution: unreachable, null, slice" { + const S = struct { + fn doTheTest(num: usize, word: []const u8) void { + const result = switch (num) { + 0 => null, + 1 => word, + else => unreachable, + }; + expect(mem.eql(u8, result.?, "hi")); + } + }; + S.doTheTest(1, "hi"); +} + +test "peer type resolution: unreachable, error set, unreachable" { + const Error = error { + FileDescriptorAlreadyPresentInSet, + OperationCausesCircularLoop, + FileDescriptorNotRegistered, + SystemResources, + UserResourceLimitReached, + FileDescriptorIncompatibleWithEpoll, + Unexpected, + }; + var err = Error.SystemResources; + const transformed_err = switch (err) { + error.FileDescriptorAlreadyPresentInSet => unreachable, + error.OperationCausesCircularLoop => unreachable, + error.FileDescriptorNotRegistered => unreachable, + error.SystemResources => error.SystemResources, + error.UserResourceLimitReached => error.UserResourceLimitReached, + error.FileDescriptorIncompatibleWithEpoll => unreachable, + error.Unexpected => unreachable, + }; + expect(transformed_err == error.SystemResources); +} diff --git a/test/stage1/behavior/defer.zig b/test/stage1/behavior/defer.zig index 0bb9125e7c..6c0e2a432a 100644 --- a/test/stage1/behavior/defer.zig +++ b/test/stage1/behavior/defer.zig @@ -76,3 +76,20 @@ fn testNestedFnErrDefer() anyerror!void { }; return S.baz(); } + +test "return variable while defer expression in scope to modify it" { + const S = struct { + fn doTheTest() void { + expect(notNull().? 
== 1); + } + + fn notNull() ?u8 { + var res: ?u8 = 1; + defer res = null; + return res; + } + }; + + S.doTheTest(); + comptime S.doTheTest(); +} diff --git a/test/stage1/behavior/error.zig b/test/stage1/behavior/error.zig index babefba6f5..264f140c9d 100644 --- a/test/stage1/behavior/error.zig +++ b/test/stage1/behavior/error.zig @@ -335,3 +335,43 @@ test "debug info for optional error set" { const SomeError = error{Hello}; var a_local_variable: ?SomeError = null; } + +test "nested catch" { + const S = struct { + fn entry() void { + expectError(error.Bad, func()); + } + fn fail() anyerror!Foo { + return error.Wrong; + } + fn func() anyerror!Foo { + const x = fail() catch + fail() catch + return error.Bad; + unreachable; + } + const Foo = struct { + field: i32, + }; + }; + S.entry(); + comptime S.entry(); +} + +test "implicit cast to optional to error union to return result loc" { + const S = struct { + fn entry() void { + if (func(undefined)) |opt| { + expect(opt != null); + } else |_| @panic("expected non error"); + } + fn func(f: *Foo) anyerror!?*Foo { + return f; + } + const Foo = struct { + field: i32, + }; + }; + S.entry(); + //comptime S.entry(); TODO +} diff --git a/test/stage1/behavior/eval.zig b/test/stage1/behavior/eval.zig index b575d16086..97d3a269cc 100644 --- a/test/stage1/behavior/eval.zig +++ b/test/stage1/behavior/eval.zig @@ -190,6 +190,17 @@ fn testTryToTrickEvalWithRuntimeIf(b: bool) usize { } } +test "inlined loop has array literal with elided runtime scope on first iteration but not second iteration" { + var runtime = [1]i32{3}; + comptime var i: usize = 0; + inline while (i < 2) : (i += 1) { + const result = if (i == 0) [1]i32{2} else runtime; + } + comptime { + expect(i == 2); + } +} + fn max(comptime T: type, a: T, b: T) T { if (T == bool) { return a or b; @@ -756,8 +767,7 @@ test "comptime bitwise operators" { test "*align(1) u16 is the same as *align(1:0:2) u16" { comptime { expect(*align(1:0:2) u16 == *align(1) u16); - // TODO add parsing support for this syntax - //expect(*align(:0:2) u16 == *u16); + expect(*align(:0:2) u16 == *u16); } } diff --git a/test/stage1/behavior/floatop.zig b/test/stage1/behavior/floatop.zig new file mode 100644 index 0000000000..de2f6815a6 --- /dev/null +++ b/test/stage1/behavior/floatop.zig @@ -0,0 +1,243 @@ +const expect = @import("std").testing.expect; +const pi = @import("std").math.pi; +const e = @import("std").math.e; + +test "@sqrt" { + comptime testSqrt(); + testSqrt(); +} + +fn testSqrt() void { + { + var a: f16 = 4; + expect(@sqrt(f16, a) == 2); + } + { + var a: f32 = 9; + expect(@sqrt(f32, a) == 3); + } + { + var a: f64 = 25; + expect(@sqrt(f64, a) == 5); + } + { + const a: comptime_float = 25.0; + expect(@sqrt(comptime_float, a) == 5.0); + } + // Waiting on a c.zig implementation + //{ + // var a: f128 = 49; + // expect(@sqrt(f128, a) == 7); + //} +} + +test "@sin" { + comptime testSin(); + testSin(); +} + +fn testSin() void { + // TODO - this is actually useful and should be implemented + // (all the trig functions for f16) + // but will probably wait till self-hosted + //{ + // var a: f16 = pi; + // expect(@sin(f16, a/2) == 1); + //} + { + var a: f32 = 0; + expect(@sin(f32, a) == 0); + } + { + var a: f64 = 0; + expect(@sin(f64, a) == 0); + } + // TODO + //{ + // var a: f16 = pi; + // expect(@sqrt(f128, a/2) == 1); + //} +} + +test "@cos" { + comptime testCos(); + testCos(); +} + +fn testCos() void { + { + var a: f32 = 0; + expect(@cos(f32, a) == 1); + } + { + var a: f64 = 0; + expect(@cos(f64, a) == 1); + } +} + +test 
"@exp" { + comptime testExp(); + testExp(); +} + +fn testExp() void { + { + var a: f32 = 0; + expect(@exp(f32, a) == 1); + } + { + var a: f64 = 0; + expect(@exp(f64, a) == 1); + } +} + +test "@exp2" { + comptime testExp2(); + testExp2(); +} + +fn testExp2() void { + { + var a: f32 = 2; + expect(@exp2(f32, a) == 4); + } + { + var a: f64 = 2; + expect(@exp2(f64, a) == 4); + } +} + +test "@ln" { + // Old musl (and glibc?), and our current math.ln implementation do not return 1 + // so also accept those values. + comptime testLn(); + testLn(); +} + +fn testLn() void { + { + var a: f32 = e; + expect(@ln(f32, a) == 1 or @ln(f32, a) == @bitCast(f32, u32(0x3f7fffff))); + } + { + var a: f64 = e; + expect(@ln(f64, a) == 1 or @ln(f64, a) == @bitCast(f64, u64(0x3ff0000000000000))); + } +} + +test "@log2" { + comptime testLog2(); + testLog2(); +} + +fn testLog2() void { + { + var a: f32 = 4; + expect(@log2(f32, a) == 2); + } + { + var a: f64 = 4; + expect(@log2(f64, a) == 2); + } +} + +test "@log10" { + comptime testLog10(); + testLog10(); +} + +fn testLog10() void { + { + var a: f32 = 100; + expect(@log10(f32, a) == 2); + } + { + var a: f64 = 1000; + expect(@log10(f64, a) == 3); + } +} + +test "@fabs" { + comptime testFabs(); + testFabs(); +} + +fn testFabs() void { + { + var a: f32 = -2.5; + var b: f32 = 2.5; + expect(@fabs(f32, a) == 2.5); + expect(@fabs(f32, b) == 2.5); + } + { + var a: f64 = -2.5; + var b: f64 = 2.5; + expect(@fabs(f64, a) == 2.5); + expect(@fabs(f64, b) == 2.5); + } +} + +test "@floor" { + comptime testFloor(); + testFloor(); +} + +fn testFloor() void { + { + var a: f32 = 2.1; + expect(@floor(f32, a) == 2); + } + { + var a: f64 = 3.5; + expect(@floor(f64, a) == 3); + } +} + +test "@ceil" { + comptime testCeil(); + testCeil(); +} + +fn testCeil() void { + { + var a: f32 = 2.1; + expect(@ceil(f32, a) == 3); + } + { + var a: f64 = 3.5; + expect(@ceil(f64, a) == 4); + } +} + +test "@trunc" { + comptime testTrunc(); + testTrunc(); +} + +fn testTrunc() void { + { + var a: f32 = 2.1; + expect(@trunc(f32, a) == 2); + } + { + var a: f64 = -3.5; + expect(@trunc(f64, a) == -3); + } +} + +// This is waiting on library support for the Windows build (not sure why the other's don't need it) +//test "@nearbyInt" { +// comptime testNearbyInt(); +// testNearbyInt(); +//} + +//fn testNearbyInt() void { +// { +// var a: f32 = 2.1; +// expect(@nearbyInt(f32, a) == 2); +// } +// { +// var a: f64 = -3.75; +// expect(@nearbyInt(f64, a) == -4); +// } +//} diff --git a/test/stage1/behavior/fn.zig b/test/stage1/behavior/fn.zig index e01169c67a..d6d670b09b 100644 --- a/test/stage1/behavior/fn.zig +++ b/test/stage1/behavior/fn.zig @@ -205,3 +205,26 @@ test "extern struct with stdcallcc fn pointer" { s.ptr = S.foo; expect(s.ptr() == 1234); } + +test "implicit cast fn call result to optional in field result" { + const S = struct { + fn entry() void { + var x = Foo{ + .field = optionalPtr(), + }; + expect(x.field.?.* == 999); + } + + const glob: i32 = 999; + + fn optionalPtr() *const i32 { + return &glob; + } + + const Foo = struct { + field: ?*const i32, + }; + }; + S.entry(); + comptime S.entry(); +} diff --git a/test/stage1/behavior/for.zig b/test/stage1/behavior/for.zig index f670b04c00..cfa68bd216 100644 --- a/test/stage1/behavior/for.zig +++ b/test/stage1/behavior/for.zig @@ -110,3 +110,35 @@ fn testContinueOuter() void { } expect(counter == array.len); } + +test "2 break statements and an else" { + const S = struct { + fn entry(t: bool, f: bool) void { + var buf: [10]u8 = undefined; + var ok = false; + 
ok = for (buf) |item| { + if (f) break false; + if (t) break true; + } else false; + expect(ok); + } + }; + S.entry(true, false); + comptime S.entry(true, false); +} + +test "for with null and T peer types and inferred result location type" { + const S = struct { + fn doTheTest(slice: []const u8) void { + if (for (slice) |item| { + if (item == 10) { + break item; + } + } else null) |v| { + @panic("fail"); + } + } + }; + S.doTheTest([_]u8{ 1, 2 }); + comptime S.doTheTest([_]u8{ 1, 2 }); +} diff --git a/test/stage1/behavior/if.zig b/test/stage1/behavior/if.zig index a506a1e301..5f92962957 100644 --- a/test/stage1/behavior/if.zig +++ b/test/stage1/behavior/if.zig @@ -52,3 +52,14 @@ test "unwrap mutable global var" { expect(e == error.SomeError); } } + +test "labeled break inside comptime if inside runtime if" { + var answer: i32 = 0; + var c = true; + if (c) { + answer = if (true) blk: { + break :blk i32(42); + }; + } + expect(answer == 42); +} diff --git a/test/stage1/behavior/misc.zig b/test/stage1/behavior/misc.zig index 28df26f9fa..d499df4cb7 100644 --- a/test/stage1/behavior/misc.zig +++ b/test/stage1/behavior/misc.zig @@ -698,3 +698,11 @@ test "unicode escape in character literal" { var a: u24 = '\U01f4a9'; expect(a == 128169); } + +test "result location zero sized array inside struct field implicit cast to slice" { + const E = struct { + entries: []u32, + }; + var foo = E{ .entries = [_]u32{} }; + expect(foo.entries.len == 0); +} diff --git a/test/stage1/behavior/muladd.zig b/test/stage1/behavior/muladd.zig new file mode 100644 index 0000000000..143e6a93e4 --- /dev/null +++ b/test/stage1/behavior/muladd.zig @@ -0,0 +1,34 @@ +const expect = @import("std").testing.expect; + +test "@mulAdd" { + comptime testMulAdd(); + testMulAdd(); +} + +fn testMulAdd() void { + { + var a: f16 = 5.5; + var b: f16 = 2.5; + var c: f16 = 6.25; + expect(@mulAdd(f16, a, b, c) == 20); + } + { + var a: f32 = 5.5; + var b: f32 = 2.5; + var c: f32 = 6.25; + expect(@mulAdd(f32, a, b, c) == 20); + } + { + var a: f64 = 5.5; + var b: f64 = 2.5; + var c: f64 = 6.25; + expect(@mulAdd(f64, a, b, c) == 20); + } + // Awaits implementation in libm.zig + //{ + // var a: f16 = 5.5; + // var b: f128 = 2.5; + // var c: f128 = 6.25; + // expect(@mulAdd(f128, a, b, c) == 20); + //} +} \ No newline at end of file diff --git a/test/stage1/behavior/optional.zig b/test/stage1/behavior/optional.zig index a65bed020c..ee3cb4aef9 100644 --- a/test/stage1/behavior/optional.zig +++ b/test/stage1/behavior/optional.zig @@ -76,6 +76,27 @@ test "unwrap function call with optional pointer return value" { } }; S.entry(); - // TODO https://github.com/ziglang/zig/issues/1901 - //comptime S.entry(); + comptime S.entry(); +} + +test "nested orelse" { + const S = struct { + fn entry() void { + expect(func() == null); + } + fn maybe() ?Foo { + return null; + } + fn func() ?Foo { + const x = maybe() orelse + maybe() orelse + return null; + unreachable; + } + const Foo = struct { + field: i32, + }; + }; + S.entry(); + comptime S.entry(); } diff --git a/test/stage1/behavior/struct.zig b/test/stage1/behavior/struct.zig index 0ebd0654d0..b86b171daf 100644 --- a/test/stage1/behavior/struct.zig +++ b/test/stage1/behavior/struct.zig @@ -578,3 +578,24 @@ test "default struct initialization fields" { }; expectEqual(1239, x.a + x.b); } + +test "extern fn returns struct by value" { + const S = struct { + fn entry() void { + var x = makeBar(10); + expectEqual(i32(10), x.handle); + } + + const ExternBar = extern struct { + handle: i32, + }; + + extern fn 
makeBar(t: i32) ExternBar { + return ExternBar{ + .handle = t, + }; + } + }; + S.entry(); + comptime S.entry(); +} diff --git a/test/stage1/behavior/switch.zig b/test/stage1/behavior/switch.zig index c3e259c625..12e026d0ba 100644 --- a/test/stage1/behavior/switch.zig +++ b/test/stage1/behavior/switch.zig @@ -360,3 +360,34 @@ test "switch prongs with error set cases make a new error set type for capture v S.doTheTest(); comptime S.doTheTest(); } + +test "return result loc and then switch with range implicit casted to error union" { + const S = struct { + fn doTheTest() void { + expect((func(0xb) catch unreachable) == 0xb); + } + fn func(d: u8) anyerror!u8 { + return switch (d) { + 0xa...0xf => d, + else => unreachable, + }; + } + }; + S.doTheTest(); + comptime S.doTheTest(); +} + +test "switch with null and T peer types and inferred result location type" { + const S = struct { + fn doTheTest(c: u8) void { + if (switch (c) { + 0 => true, + else => null, + }) |v| { + @panic("fail"); + } + } + }; + S.doTheTest(1); + comptime S.doTheTest(1); +} diff --git a/test/stage1/behavior/union.zig b/test/stage1/behavior/union.zig index 0e8e9f4eda..410b7e9615 100644 --- a/test/stage1/behavior/union.zig +++ b/test/stage1/behavior/union.zig @@ -402,3 +402,23 @@ test "comptime union field value equality" { expect(a0 != a1); expect(b0 != b1); } + +test "return union init with void payload" { + const S = struct { + fn entry() void { + expect(func().state == State.one); + } + const Outer = union(enum) { + state: State, + }; + const State = union(enum) { + one: void, + two: u32, + }; + fn func() Outer { + return Outer{ .state = State{ .one = {} }}; + } + }; + S.entry(); + comptime S.entry(); +} diff --git a/test/stage1/behavior/vector.zig b/test/stage1/behavior/vector.zig index 21bbe3160d..70b47c4590 100644 --- a/test/stage1/behavior/vector.zig +++ b/test/stage1/behavior/vector.zig @@ -61,3 +61,16 @@ test "vector bit operators" { S.doTheTest(); comptime S.doTheTest(); } + +test "implicit cast vector to array" { + const S = struct { + fn doTheTest() void { + var a: @Vector(4, i32) = [_]i32{ 1, 2, 3, 4 }; + var result_array: [4]i32 = a; + result_array = a; + expect(mem.eql(i32, result_array, [4]i32{ 1, 2, 3, 4 })); + } + }; + S.doTheTest(); + comptime S.doTheTest(); +} diff --git a/test/stage1/behavior/while.zig b/test/stage1/behavior/while.zig index 29ad90ed17..58ff713c23 100644 --- a/test/stage1/behavior/while.zig +++ b/test/stage1/behavior/while.zig @@ -226,3 +226,48 @@ fn returnFalse() bool { fn returnTrue() bool { return true; } + +test "while bool 2 break statements and an else" { + const S = struct { + fn entry(t: bool, f: bool) void { + var ok = false; + ok = while (t) { + if (f) break false; + if (t) break true; + } else false; + expect(ok); + } + }; + S.entry(true, false); + comptime S.entry(true, false); +} + +test "while optional 2 break statements and an else" { + const S = struct { + fn entry(opt_t: ?bool, f: bool) void { + var ok = false; + ok = while (opt_t) |t| { + if (f) break false; + if (t) break true; + } else false; + expect(ok); + } + }; + S.entry(true, false); + comptime S.entry(true, false); +} + +test "while error 2 break statements and an else" { + const S = struct { + fn entry(opt_t: anyerror!bool, f: bool) void { + var ok = false; + ok = while (opt_t) |t| { + if (f) break false; + if (t) break true; + } else |_| false; + expect(ok); + } + }; + S.entry(true, false); + comptime S.entry(true, false); +} diff --git a/test/tests.zig b/test/tests.zig index 76bef9ca60..411f16d92b 100644 --- 
a/test/tests.zig +++ b/test/tests.zig @@ -811,23 +811,21 @@ pub const CompileErrorContext = struct { pub fn addCase(self: *CompileErrorContext, case: *const TestCase) void { const b = self.b; - for (self.modes) |mode| { - const annotated_case_name = fmt.allocPrint(self.b.allocator, "compile-error {} ({})", case.name, @tagName(mode)) catch unreachable; - if (self.test_filter) |filter| { - if (mem.indexOf(u8, annotated_case_name, filter) == null) continue; - } + const annotated_case_name = fmt.allocPrint(self.b.allocator, "compile-error {}", case.name) catch unreachable; + if (self.test_filter) |filter| { + if (mem.indexOf(u8, annotated_case_name, filter) == null) return; + } - const compile_and_cmp_errors = CompileCmpOutputStep.create(self, annotated_case_name, case, mode); - self.step.dependOn(&compile_and_cmp_errors.step); + const compile_and_cmp_errors = CompileCmpOutputStep.create(self, annotated_case_name, case, .Debug); + self.step.dependOn(&compile_and_cmp_errors.step); - for (case.sources.toSliceConst()) |src_file| { - const expanded_src_path = fs.path.join( - b.allocator, - [_][]const u8{ b.cache_root, src_file.filename }, - ) catch unreachable; - const write_src = b.addWriteFile(expanded_src_path, src_file.source); - compile_and_cmp_errors.step.dependOn(&write_src.step); - } + for (case.sources.toSliceConst()) |src_file| { + const expanded_src_path = fs.path.join( + b.allocator, + [_][]const u8{ b.cache_root, src_file.filename }, + ) catch unreachable; + const write_src = b.addWriteFile(expanded_src_path, src_file.source); + compile_and_cmp_errors.step.dependOn(&write_src.step); } } };
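
As a quick illustration of the std.mem alignment helpers whose doc comments are extended in the std/mem.zig hunk above, here is a minimal usage sketch (not part of the patch itself). It assumes only the alignForward, alignBackward, and isAligned functions as shown in that hunk; the concrete addresses and alignments are illustrative.

const std = @import("std");
const testing = std.testing;

test "alignment helpers sketch" {
    // alignForward rounds 17 up to the next multiple of 8 (the alignment must be a power of 2).
    testing.expect(std.mem.alignForward(17, 8) == 24);
    // alignBackward rounds 17 down to the previous multiple of 8.
    testing.expect(std.mem.alignBackward(17, 8) == 16);
    // isAligned reports whether an address is already a multiple of the alignment.
    testing.expect(std.mem.isAligned(16, 8));
    testing.expect(!std.mem.isAligned(17, 8));
}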