From af10b0fec213172b0403abfd8ff6e53c88f8c3c6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Feb 2018 23:19:59 -0500 Subject: [PATCH 01/56] add async, await, suspend, resume, cancel keywords See #727 --- src/tokenizer.cpp | 10 ++++++++++ src/tokenizer.hpp | 5 +++++ 2 files changed, 15 insertions(+) diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp index dd60815b7f..dc17829c0f 100644 --- a/src/tokenizer.cpp +++ b/src/tokenizer.cpp @@ -110,7 +110,10 @@ static const struct ZigKeyword zig_keywords[] = { {"align", TokenIdKeywordAlign}, {"and", TokenIdKeywordAnd}, {"asm", TokenIdKeywordAsm}, + {"async", TokenIdKeywordAsync}, + {"await", TokenIdKeywordAwait}, {"break", TokenIdKeywordBreak}, + {"cancel", TokenIdKeywordCancel}, {"catch", TokenIdKeywordCatch}, {"comptime", TokenIdKeywordCompTime}, {"const", TokenIdKeywordConst}, @@ -133,10 +136,12 @@ static const struct ZigKeyword zig_keywords[] = { {"or", TokenIdKeywordOr}, {"packed", TokenIdKeywordPacked}, {"pub", TokenIdKeywordPub}, + {"resume", TokenIdKeywordResume}, {"return", TokenIdKeywordReturn}, {"section", TokenIdKeywordSection}, {"stdcallcc", TokenIdKeywordStdcallCC}, {"struct", TokenIdKeywordStruct}, + {"suspend", TokenIdKeywordSuspend}, {"switch", TokenIdKeywordSwitch}, {"test", TokenIdKeywordTest}, {"this", TokenIdKeywordThis}, @@ -1523,6 +1528,11 @@ const char * token_name(TokenId id) { case TokenIdFatArrow: return "=>"; case TokenIdFloatLiteral: return "FloatLiteral"; case TokenIdIntLiteral: return "IntLiteral"; + case TokenIdKeywordAsync: return "async"; + case TokenIdKeywordAwait: return "await"; + case TokenIdKeywordResume: return "resume"; + case TokenIdKeywordSuspend: return "suspend"; + case TokenIdKeywordCancel: return "cancel"; case TokenIdKeywordAlign: return "align"; case TokenIdKeywordAnd: return "and"; case TokenIdKeywordAsm: return "asm"; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index 225b75d844..2d71427997 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -51,7 
+51,10 @@ enum TokenId { TokenIdKeywordAlign, TokenIdKeywordAnd, TokenIdKeywordAsm, + TokenIdKeywordAsync, + TokenIdKeywordAwait, TokenIdKeywordBreak, + TokenIdKeywordCancel, TokenIdKeywordCatch, TokenIdKeywordCompTime, TokenIdKeywordConst, @@ -74,10 +77,12 @@ enum TokenId { TokenIdKeywordOr, TokenIdKeywordPacked, TokenIdKeywordPub, + TokenIdKeywordResume, TokenIdKeywordReturn, TokenIdKeywordSection, TokenIdKeywordStdcallCC, TokenIdKeywordStruct, + TokenIdKeywordSuspend, TokenIdKeywordSwitch, TokenIdKeywordTest, TokenIdKeywordThis, From 3d58d7232ab6d1fd54523182beb99c31512bc4b9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Feb 2018 00:05:38 -0500 Subject: [PATCH 02/56] parse async fn calls and cancel expressions --- doc/langref.html.in | 6 +++-- src/all_types.hpp | 16 ++++++++++++ src/analyze.cpp | 1 + src/ast_render.cpp | 8 ++++++ src/codegen.cpp | 16 ++++++++++++ src/ir.cpp | 46 ++++++++++++++++++++++++++++++++++ src/ir_print.cpp | 8 ++++++ src/parser.cpp | 61 +++++++++++++++++++++++++++++++++++++++++---- std/mem.zig | 16 ++++++++++++ 9 files changed, 171 insertions(+), 7 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 2b09ca81bd..9123b1df74 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5663,7 +5663,7 @@ ErrorSetExpr = (PrefixOpExpression "!" 
PrefixOpExpression) | PrefixOpExpression BlockOrExpression = Block | Expression -Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression +Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression | CancelExpression AsmExpression = "asm" option("volatile") "(" String option(AsmOutput) ")" @@ -5707,6 +5707,8 @@ TryExpression = "try" Expression BreakExpression = "break" option(":" Symbol) option(Expression) +CancelExpression = "cancel" Expression; + Defer(body) = ("defer" | "deferror") body IfExpression(body) = "if" "(" Expression ")" body option("else" BlockExpression(body)) @@ -5745,7 +5747,7 @@ MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%" PrefixOpExpression = PrefixOp ErrorSetExpr | SuffixOpExpression -SuffixOpExpression = PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression) +SuffixOpExpression = ("async" option("(" Expression ")") PrimaryExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression) FieldAccessExpression = "." 
Symbol diff --git a/src/all_types.hpp b/src/all_types.hpp index 04b781a598..694a2f64d9 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -393,6 +393,7 @@ enum NodeType { NodeTypeIfErrorExpr, NodeTypeTestExpr, NodeTypeErrorSetDecl, + NodeTypeCancel, }; struct AstNodeRoot { @@ -567,6 +568,8 @@ struct AstNodeFnCallExpr { AstNode *fn_ref_expr; ZigList params; bool is_builtin; + bool is_async; + AstNode *async_allocator; }; struct AstNodeArrayAccessExpr { @@ -829,6 +832,10 @@ struct AstNodeBreakExpr { AstNode *expr; // may be null }; +struct AstNodeCancelExpr { + AstNode *expr; +}; + struct AstNodeContinueExpr { Buf *name; }; @@ -900,6 +907,7 @@ struct AstNode { AstNodeErrorType error_type; AstNodeVarLiteral var_literal; AstNodeErrorSetDecl err_set_decl; + AstNodeCancelExpr cancel_expr; } data; }; @@ -1495,6 +1503,7 @@ struct CodeGen { TypeTableEntry *entry_var; TypeTableEntry *entry_global_error_set; TypeTableEntry *entry_arg_tuple; + TypeTableEntry *entry_promise; } builtin_types; EmitFileType emit_file_type; @@ -1939,6 +1948,7 @@ enum IrInstructionId { IrInstructionIdExport, IrInstructionIdErrorReturnTrace, IrInstructionIdErrorUnion, + IrInstructionIdCancel, }; struct IrInstruction { @@ -2776,6 +2786,12 @@ struct IrInstructionErrorUnion { IrInstruction *payload; }; +struct IrInstructionCancel { + IrInstruction base; + + IrInstruction *target; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/analyze.cpp b/src/analyze.cpp index c16a5d462a..4545db9d78 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3117,6 +3117,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeIfErrorExpr: case NodeTypeTestExpr: case NodeTypeErrorSetDecl: + case NodeTypeCancel: zig_unreachable(); } } diff --git a/src/ast_render.cpp b/src/ast_render.cpp index aed4b3e6db..eec4b996a0 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -244,6 +244,8 @@ static const char 
*node_type_str(NodeType node_type) { return "TestExpr"; case NodeTypeErrorSetDecl: return "ErrorSetDecl"; + case NodeTypeCancel: + return "Cancel"; } zig_unreachable(); } @@ -1037,6 +1039,12 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, "}"); break; } + case NodeTypeCancel: + { + fprintf(ar->f, "cancel "); + render_node_grouped(ar, node->data.cancel_expr.expr); + break; + } case NodeTypeFnDecl: case NodeTypeParamDecl: case NodeTypeTestDecl: diff --git a/src/codegen.cpp b/src/codegen.cpp index 15648cbdec..4d9b0279d0 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3088,6 +3088,10 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu return g->cur_err_ret_trace_val; } +static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) { + zig_panic("TODO ir_render_cancel"); +} + static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { switch (atomic_order) { case AtomicOrderUnordered: return LLVMAtomicOrderingUnordered; @@ -3862,6 +3866,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction); case IrInstructionIdErrorReturnTrace: return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction); + case IrInstructionIdCancel: + return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction); } zig_unreachable(); } @@ -5271,6 +5277,16 @@ static void define_builtin_types(CodeGen *g) { g->primitive_type_table.put(&entry->name, entry); } + { + TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdVoid); + entry->type_ref = u8_ptr_type->type_ref; + entry->zero_bits = false; + buf_init_from_str(&entry->name, "promise"); + entry->di_type = u8_ptr_type->di_type; + 
g->builtin_types.entry_promise = entry; + g->primitive_type_table.put(&entry->name, entry); + } } diff --git a/src/ir.cpp b/src/ir.cpp index 7eac9e4d23..d220efde79 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -637,6 +637,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorUnion *) { return IrInstructionIdErrorUnion; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) { + return IrInstructionIdCancel; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -2396,6 +2400,17 @@ static IrInstruction *ir_build_error_union(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *target) +{ + IrInstructionCancel *instruction = ir_build_instruction(irb, scope, source_node); + instruction->target = target; + + ir_ref_instruction(target, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -3873,6 +3888,10 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node return args[i]; } + if (node->data.fn_call_expr.is_async) { + zig_panic("TODO ir_gen_fn_call for async fn calls"); + } + return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto); } @@ -5598,6 +5617,16 @@ static IrInstruction *ir_gen_fn_proto(IrBuilder *irb, Scope *parent_scope, AstNo return ir_build_fn_proto(irb, parent_scope, node, param_types, align_value, return_type, is_var_args); } +static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *parent_scope, AstNode *node) { + assert(node->type == NodeTypeCancel); + + IrInstruction *target_inst = ir_gen_node(irb, node->data.cancel_expr.expr, parent_scope); + if (target_inst == 
irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + return ir_build_cancel(irb, parent_scope, node, target_inst); +} + static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval) { @@ -5694,6 +5723,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_lval_wrap(irb, scope, ir_gen_fn_proto(irb, scope, node), lval); case NodeTypeErrorSetDecl: return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval); + case NodeTypeCancel: + return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval); } zig_unreachable(); } @@ -16459,6 +16490,17 @@ static TypeTableEntry *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruc } } +static TypeTableEntry *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { + IrInstruction *casted_target = ir_implicit_cast(ira, instruction->target->other, ira->codegen->builtin_types.entry_promise); + if (type_is_invalid(casted_target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_target); + result->value.type = casted_target->value.type; + ir_link_new_instruction(result, &instruction->base); + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -16661,6 +16703,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction); case IrInstructionIdErrorUnion: return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction); + case IrInstructionIdCancel: + return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction); } zig_unreachable(); } @@ -16774,7 +16818,9 
@@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdPtrTypeOf: case IrInstructionIdSetAlignStack: case IrInstructionIdExport: + case IrInstructionIdCancel: return true; + case IrInstructionIdPhi: case IrInstructionIdUnOp: case IrInstructionIdBinOp: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index f2c0d6a5b4..5c0c3bab0d 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1010,6 +1010,11 @@ static void ir_print_error_union(IrPrint *irp, IrInstructionErrorUnion *instruct ir_print_other_instruction(irp, instruction->payload); } +static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { + fprintf(irp->f, "cancel "); + ir_print_other_instruction(irp, instruction->target); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1330,6 +1335,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdErrorUnion: ir_print_error_union(irp, (IrInstructionErrorUnion *)instruction); break; + case IrInstructionIdCancel: + ir_print_cancel(irp, (IrInstructionCancel *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/src/parser.cpp b/src/parser.cpp index 6ce9e25221..fc682ee62a 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -920,7 +920,7 @@ static AstNode *ast_parse_curly_suffix_expr(ParseContext *pc, size_t *token_inde } /* -SuffixOpExpression = PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression) +SuffixOpExpression = ("async" option("(" Expression ")") PrimaryExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression) FnCallExpression : token(LParen) list(Expression, token(Comma)) token(RParen) ArrayAccessExpression : token(LBracket) Expression token(RBracket) SliceExpression = "[" Expression ".." 
option(Expression) "]" @@ -928,9 +928,34 @@ FieldAccessExpression : token(Dot) token(Symbol) StructLiteralField : token(Dot) token(Symbol) token(Eq) Expression */ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) { - AstNode *primary_expr = ast_parse_primary_expr(pc, token_index, mandatory); - if (!primary_expr) - return nullptr; + AstNode *primary_expr; + + Token *async_token = &pc->tokens->at(*token_index); + if (async_token->id == TokenIdKeywordAsync) { + *token_index += 1; + + AstNode *allocator_expr_node = nullptr; + Token *async_lparen_tok = &pc->tokens->at(*token_index); + if (async_lparen_tok->id == TokenIdLParen) { + *token_index += 1; + allocator_expr_node = ast_parse_expression(pc, token_index, true); + ast_eat_token(pc, token_index, TokenIdRParen); + } + + AstNode *fn_ref_expr_node = ast_parse_primary_expr(pc, token_index, true); + Token *lparen_tok = ast_eat_token(pc, token_index, TokenIdLParen); + AstNode *node = ast_create_node(pc, NodeTypeFnCallExpr, lparen_tok); + node->data.fn_call_expr.is_async = true; + node->data.fn_call_expr.async_allocator = allocator_expr_node; + node->data.fn_call_expr.fn_ref_expr = fn_ref_expr_node; + ast_parse_fn_call_param_list(pc, token_index, &node->data.fn_call_expr.params); + + primary_expr = node; + } else { + primary_expr = ast_parse_primary_expr(pc, token_index, mandatory); + if (!primary_expr) + return nullptr; + } while (true) { Token *first_token = &pc->tokens->at(*token_index); @@ -1535,6 +1560,24 @@ static AstNode *ast_parse_break_expr(ParseContext *pc, size_t *token_index) { return node; } +/* +CancelExpression = "cancel" Expression; +*/ +static AstNode *ast_parse_cancel_expr(ParseContext *pc, size_t *token_index) { + Token *token = &pc->tokens->at(*token_index); + + if (token->id != TokenIdKeywordCancel) { + return nullptr; + } + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypeCancel, token); + + node->data.cancel_expr.expr = 
ast_parse_expression(pc, token_index, false); + + return node; +} + /* Defer(body) = ("defer" | "errdefer") body */ @@ -2159,7 +2202,7 @@ static AstNode *ast_parse_block_or_expression(ParseContext *pc, size_t *token_in } /* -Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression +Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression | CancelExpression */ static AstNode *ast_parse_expression(ParseContext *pc, size_t *token_index, bool mandatory) { Token *token = &pc->tokens->at(*token_index); @@ -2176,6 +2219,10 @@ static AstNode *ast_parse_expression(ParseContext *pc, size_t *token_index, bool if (break_expr) return break_expr; + AstNode *cancel_expr = ast_parse_cancel_expr(pc, token_index); + if (cancel_expr) + return cancel_expr; + AstNode *ass_expr = ast_parse_ass_expr(pc, token_index, false); if (ass_expr) return ass_expr; @@ -2809,6 +2856,7 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeFnCallExpr: visit_field(&node->data.fn_call_expr.fn_ref_expr, visit, context); visit_node_list(&node->data.fn_call_expr.params, visit, context); + visit_field(&node->data.fn_call_expr.async_allocator, visit, context); break; case NodeTypeArrayAccessExpr: visit_field(&node->data.array_access_expr.array_ref_expr, visit, context); @@ -2931,5 +2979,8 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeErrorSetDecl: visit_node_list(&node->data.err_set_decl.decls, visit, context); break; + case NodeTypeCancel: + visit_field(&node->data.cancel_expr.expr, visit, context); + break; } } diff --git a/std/mem.zig b/std/mem.zig index 07521bfcb8..1dfef86a8f 100644 --- a/std/mem.zig +++ b/std/mem.zig @@ -116,6 +116,22 @@ pub const Allocator = struct { const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr)); self.freeFn(self, non_const_ptr[0..bytes.len]); } + + pub const AsyncAllocator = struct { + allocator: &Allocator, + + fn 
alloc(self: &const AsyncAllocator, byte_count: usize, alignment: u29) Error![]u8 { + return self.allocator.allocFn(self.allocator, byte_count, alignment); + } + + fn free(self: &const AsyncAllocator, old_mem: []u8) { + return self.allocator.freeFn(self.allocator, old_mem); + } + }; + + fn toAsync(self: &Allocator) AsyncAllocator { + return AsyncAllocator { .allocator = self }; + } }; /// Copy all of source into dest at position 0. From a06f3c74fdc0d8bf0f42427a261ab32351753fcb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Feb 2018 00:31:52 -0500 Subject: [PATCH 03/56] parse async fn definitions See #727 --- doc/langref.html.in | 2 +- src/all_types.hpp | 3 +++ src/analyze.cpp | 2 ++ src/codegen.cpp | 2 ++ src/ir.cpp | 49 ++++++++++++++++++++++++++++++++++++--------- src/parser.cpp | 6 +++++- std/mem.zig | 2 +- 7 files changed, 53 insertions(+), 13 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 9123b1df74..05e804d78a 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5645,7 +5645,7 @@ UseDecl = "use" Expression ";" ExternDecl = "extern" option(String) (FnProto | VariableDeclaration) ";" -FnProto = option("nakedcc" | "stdcallcc" | "extern") "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr +FnProto = option("nakedcc" | "stdcallcc" | "extern" | "async") "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr FnDef = option("inline" | "export") FnProto Block diff --git a/src/all_types.hpp b/src/all_types.hpp index 694a2f64d9..b87899c526 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -406,6 +406,7 @@ enum CallingConvention { CallingConventionCold, CallingConventionNaked, CallingConventionStdcall, + CallingConventionAsync, }; struct AstNodeFnProto { @@ -2152,6 +2153,8 @@ struct IrInstructionCall { bool is_comptime; LLVMValueRef tmp_ptr; FnInline fn_inline; + bool 
is_async; + IrInstruction *async_allocator; }; struct IrInstructionConst { diff --git a/src/analyze.cpp b/src/analyze.cpp index 4545db9d78..4d6b218d0e 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -884,6 +884,7 @@ static const char *calling_convention_name(CallingConvention cc) { case CallingConventionCold: return "coldcc"; case CallingConventionNaked: return "nakedcc"; case CallingConventionStdcall: return "stdcallcc"; + case CallingConventionAsync: return "async"; } zig_unreachable(); } @@ -895,6 +896,7 @@ static const char *calling_convention_fn_type_str(CallingConvention cc) { case CallingConventionCold: return "coldcc "; case CallingConventionNaked: return "nakedcc "; case CallingConventionStdcall: return "stdcallcc "; + case CallingConventionAsync: return "async "; } zig_unreachable(); } diff --git a/src/codegen.cpp b/src/codegen.cpp index 4d9b0279d0..7c80c33754 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -381,6 +381,8 @@ static LLVMCallConv get_llvm_cc(CodeGen *g, CallingConvention cc) { } else { return LLVMCCallConv; } + case CallingConventionAsync: + return LLVMFastCallConv; } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index d220efde79..7e95d98427 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -986,7 +986,7 @@ static IrInstruction *ir_build_union_field_ptr_from(IrBuilder *irb, IrInstructio static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) { IrInstructionCall *call_instruction = ir_build_instruction(irb, scope, source_node); call_instruction->fn_entry = fn_entry; @@ -995,21 +995,25 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc call_instruction->fn_inline = fn_inline; call_instruction->args = args; call_instruction->arg_count = arg_count; + 
call_instruction->is_async = is_async; + call_instruction->async_allocator = async_allocator; if (fn_ref) ir_ref_instruction(fn_ref, irb->current_basic_block); for (size_t i = 0; i < arg_count; i += 1) ir_ref_instruction(args[i], irb->current_basic_block); + if (async_allocator) + ir_ref_instruction(async_allocator, irb->current_basic_block); return &call_instruction->base; } static IrInstruction *ir_build_call_from(IrBuilder *irb, IrInstruction *old_instruction, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) { IrInstruction *new_instruction = ir_build_call(irb, old_instruction->scope, - old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline); + old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator); ir_link_new_instruction(new_instruction, old_instruction); return new_instruction; } @@ -3754,7 +3758,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? 
FnInlineAlways : FnInlineNever; - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline); + return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr); } case BuiltinFnIdTypeId: { @@ -3888,11 +3892,17 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node return args[i]; } - if (node->data.fn_call_expr.is_async) { - zig_panic("TODO ir_gen_fn_call for async fn calls"); + bool is_async = node->data.fn_call_expr.is_async; + IrInstruction *async_allocator = nullptr; + if (is_async) { + if (node->data.fn_call_expr.async_allocator) { + async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope); + if (async_allocator == irb->codegen->invalid_instruction) + return async_allocator; + } } - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto); + return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator); } static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -10584,6 +10594,11 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi buf_sprintf("exported function must specify calling convention")); add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here")); } break; + case CallingConventionAsync: { + ErrorMsg *msg = ir_add_error(ira, target, + buf_sprintf("exported function cannot be async")); + add_error_note(ira->codegen, msg, fn_entry->proto_node, buf_sprintf("declared here")); + } break; case CallingConventionC: case CallingConventionNaked: case CallingConventionCold: @@ -10963,6 +10978,13 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal } return ira->codegen->builtin_types.entry_invalid; } + if (fn_type_id->cc == CallingConventionAsync && !call_instruction->is_async) { + ErrorMsg *msg = ir_add_error(ira, 
fn_ref, buf_sprintf("must use async keyword to call async function")); + if (fn_proto_node) { + add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here")); + } + return ira->codegen->builtin_types.entry_invalid; + } if (fn_type_id->is_var_args) { @@ -11258,7 +11280,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal size_t impl_param_count = impl_fn->type_entry->data.fn.fn_type_id.param_count; IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, - impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline); + impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, false, nullptr); TypeTableEntry *return_type = impl_fn->type_entry->data.fn.fn_type_id.return_type; ir_add_alloca(ira, new_call_instruction, return_type); @@ -11324,12 +11346,16 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal assert(next_arg_index == call_param_count); + if (call_instruction->is_async) { + zig_panic("TODO handle async fn call"); + } + TypeTableEntry *return_type = fn_type_id->return_type; if (type_is_invalid(return_type)) return ira->codegen->builtin_types.entry_invalid; IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, - fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline); + fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr); ir_add_alloca(ira, new_call_instruction, return_type); return ir_finish_anal(ira, return_type); @@ -16491,7 +16517,10 @@ static TypeTableEntry *ir_analyze_instruction_tag_type(IrAnalyze *ira, IrInstruc } static TypeTableEntry *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructionCancel *instruction) { - IrInstruction *casted_target = ir_implicit_cast(ira, instruction->target->other, ira->codegen->builtin_types.entry_promise); + IrInstruction *target_inst = instruction->target->other; + if 
(type_is_invalid(target_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *casted_target = ir_implicit_cast(ira, target_inst, ira->codegen->builtin_types.entry_promise); if (type_is_invalid(casted_target->value.type)) return ira->codegen->builtin_types.entry_invalid; diff --git a/src/parser.cpp b/src/parser.cpp index fc682ee62a..ee7141a910 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -2333,7 +2333,7 @@ static AstNode *ast_parse_block(ParseContext *pc, size_t *token_index, bool mand } /* -FnProto = option("nakedcc" | "stdcallcc" | "extern") "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr +FnProto = option("nakedcc" | "stdcallcc" | "extern" | "async") "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr */ static AstNode *ast_parse_fn_proto(ParseContext *pc, size_t *token_index, bool mandatory, VisibMod visib_mod) { Token *first_token = &pc->tokens->at(*token_index); @@ -2345,6 +2345,10 @@ static AstNode *ast_parse_fn_proto(ParseContext *pc, size_t *token_index, bool m *token_index += 1; fn_token = ast_eat_token(pc, token_index, TokenIdKeywordFn); cc = CallingConventionNaked; + } else if (first_token->id == TokenIdKeywordAsync) { + *token_index += 1; + fn_token = ast_eat_token(pc, token_index, TokenIdKeywordFn); + cc = CallingConventionAsync; } else if (first_token->id == TokenIdKeywordStdcallCC) { *token_index += 1; fn_token = ast_eat_token(pc, token_index, TokenIdKeywordFn); diff --git a/std/mem.zig b/std/mem.zig index 1dfef86a8f..2adb647ef6 100644 --- a/std/mem.zig +++ b/std/mem.zig @@ -124,7 +124,7 @@ pub const Allocator = struct { return self.allocator.allocFn(self.allocator, byte_count, alignment); } - fn free(self: &const AsyncAllocator, old_mem: []u8) { + fn free(self: &const AsyncAllocator, old_mem: []u8) void { return self.allocator.freeFn(self.allocator, 
old_mem); } }; From 65a51b401cfe17daee0c64404c8f564b0f282224 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Feb 2018 16:42:14 -0500 Subject: [PATCH 04/56] add promise type See #727 --- src/all_types.hpp | 8 ++++++++ src/analyze.cpp | 52 ++++++++++++++++++++++++++++++++++++++++++++++- src/analyze.hpp | 1 + src/codegen.cpp | 13 ++++++------ src/ir.cpp | 14 +++++++++++++ 5 files changed, 80 insertions(+), 8 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index b87899c526..dbd8e0aa11 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1096,6 +1096,11 @@ struct TypeTableEntryBoundFn { TypeTableEntry *fn_type; }; +struct TypeTableEntryPromise { + // null if `promise` instead of `promise->T` + TypeTableEntry *result_type; +}; + enum TypeTableEntryId { TypeTableEntryIdInvalid, TypeTableEntryIdVar, @@ -1123,6 +1128,7 @@ enum TypeTableEntryId { TypeTableEntryIdBoundFn, TypeTableEntryIdArgTuple, TypeTableEntryIdOpaque, + TypeTableEntryIdPromise, }; struct TypeTableEntry { @@ -1149,11 +1155,13 @@ struct TypeTableEntry { TypeTableEntryUnion unionation; TypeTableEntryFn fn; TypeTableEntryBoundFn bound_fn; + TypeTableEntryPromise promise; } data; // use these fields to make sure we don't duplicate type table entries for the same type TypeTableEntry *pointer_parent[2]; // [0 - mut, 1 - const] TypeTableEntry *maybe_parent; + TypeTableEntry *promise_parent; // If we generate a constant name value for this type, we memoize it here. 
// The type of this is array ConstExprValue *cached_const_name_val; diff --git a/src/analyze.cpp b/src/analyze.cpp index 4d6b218d0e..e28a2d20f0 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -230,6 +230,7 @@ bool type_is_complete(TypeTableEntry *type_entry) { case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: + case TypeTableEntryIdPromise: return true; } zig_unreachable(); @@ -267,6 +268,7 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) { case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: return true; } zig_unreachable(); @@ -339,6 +341,32 @@ TypeTableEntry *get_smallest_unsigned_int_type(CodeGen *g, uint64_t x) { return get_int_type(g, false, bits_needed_for_unsigned(x)); } +TypeTableEntry *get_promise_type(CodeGen *g, TypeTableEntry *result_type) { + if (result_type != nullptr && result_type->promise_parent != nullptr) { + return result_type->promise_parent; + } else if (result_type == nullptr && g->builtin_types.entry_promise != nullptr) { + return g->builtin_types.entry_promise; + } + + TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdPromise); + entry->type_ref = u8_ptr_type->type_ref; + entry->zero_bits = false; + entry->data.promise.result_type = result_type; + buf_init_from_str(&entry->name, "promise"); + if (result_type != nullptr) { + buf_appendf(&entry->name, "->%s", buf_ptr(&result_type->name)); + } + entry->di_type = u8_ptr_type->di_type; + + if (result_type != nullptr) { + result_type->promise_parent = entry; + } else if (result_type == nullptr) { + g->builtin_types.entry_promise = entry; + } + return entry; +} + TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type, bool is_const, bool is_volatile, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count) { @@ -1203,6 +1231,7 @@ 
static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: return false; case TypeTableEntryIdVoid: case TypeTableEntryIdBool: @@ -1243,6 +1272,7 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: + case TypeTableEntryIdPromise: return false; case TypeTableEntryIdOpaque: case TypeTableEntryIdUnreachable: @@ -1383,7 +1413,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdBoundFn: case TypeTableEntryIdMetaType: add_node_error(g, param_node->data.param_decl.type, - buf_sprintf("parameter of type '%s' must be declared inline", + buf_sprintf("parameter of type '%s' must be declared comptime", buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; case TypeTableEntryIdVoid: @@ -1399,6 +1429,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdEnum: case TypeTableEntryIdUnion: case TypeTableEntryIdFn: + case TypeTableEntryIdPromise: ensure_complete_type(g, type_entry); if (fn_type_id.cc == CallingConventionUnspecified && !type_is_copyable(g, type_entry)) { add_node_error(g, param_node->data.param_decl.type, @@ -1480,6 +1511,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdEnum: case TypeTableEntryIdUnion: case TypeTableEntryIdFn: + case TypeTableEntryIdPromise: break; } @@ -3175,6 +3207,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt case TypeTableEntryIdUnion: case TypeTableEntryIdFn: case TypeTableEntryIdBoundFn: + case TypeTableEntryIdPromise: return type_entry; } zig_unreachable(); @@ -3553,6 +3586,7 @@ static bool is_container(TypeTableEntry *type_entry) { case TypeTableEntryIdBoundFn: case 
TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: return false; } zig_unreachable(); @@ -3603,6 +3637,7 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdVar: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: zig_unreachable(); } } @@ -4095,6 +4130,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: case TypeTableEntryIdEnum: + case TypeTableEntryIdPromise: return false; case TypeTableEntryIdArray: case TypeTableEntryIdStruct: @@ -4342,6 +4378,9 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { } zig_unreachable(); } + case TypeTableEntryIdPromise: + // TODO better hashing algorithm + return 223048345; case TypeTableEntryIdUndefLit: return 162837799; case TypeTableEntryIdNullLit: @@ -4501,6 +4540,7 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { case TypeTableEntryIdPointer: case TypeTableEntryIdVoid: case TypeTableEntryIdUnreachable: + case TypeTableEntryIdPromise: return false; } zig_unreachable(); @@ -4970,6 +5010,7 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { case TypeTableEntryIdInvalid: case TypeTableEntryIdUnreachable: case TypeTableEntryIdVar: + case TypeTableEntryIdPromise: zig_unreachable(); } zig_unreachable(); @@ -5244,6 +5285,8 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { buf_appendf(buf, "(args value)"); return; } + case TypeTableEntryIdPromise: + zig_unreachable(); } zig_unreachable(); } @@ -5305,6 +5348,7 @@ uint32_t type_id_hash(TypeId x) { case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: + case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdErrorUnion: return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type); @@ -5342,6 +5386,7 @@ bool type_id_eql(TypeId a, TypeId b) { case 
TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: + case TypeTableEntryIdPromise: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: case TypeTableEntryIdUnion: @@ -5469,6 +5514,7 @@ static const TypeTableEntryId all_type_ids[] = { TypeTableEntryIdBoundFn, TypeTableEntryIdArgTuple, TypeTableEntryIdOpaque, + TypeTableEntryIdPromise, }; TypeTableEntryId type_id_at_index(size_t index) { @@ -5533,6 +5579,8 @@ size_t type_id_index(TypeTableEntryId id) { return 22; case TypeTableEntryIdOpaque: return 23; + case TypeTableEntryIdPromise: + return 24; } zig_unreachable(); } @@ -5590,6 +5638,8 @@ const char *type_id_name(TypeTableEntryId id) { return "ArgTuple"; case TypeTableEntryIdOpaque: return "Opaque"; + case TypeTableEntryIdPromise: + return "Promise"; } zig_unreachable(); } diff --git a/src/analyze.hpp b/src/analyze.hpp index c0c89cf36b..34339c3688 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -35,6 +35,7 @@ TypeTableEntry *get_bound_fn_type(CodeGen *g, FnTableEntry *fn_entry); TypeTableEntry *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, const char *name); TypeTableEntry *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], TypeTableEntry *field_types[], size_t field_count); +TypeTableEntry *get_promise_type(CodeGen *g, TypeTableEntry *result_type); TypeTableEntry *get_test_fn_type(CodeGen *g); bool handle_is_ptr(TypeTableEntry *type_entry); void find_libc_include_path(CodeGen *g); diff --git a/src/codegen.cpp b/src/codegen.cpp index 7c80c33754..291db7017b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4017,6 +4017,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con case TypeTableEntryIdPointer: case TypeTableEntryIdFn: case TypeTableEntryIdMaybe: + case TypeTableEntryIdPromise: { LLVMValueRef ptr_val = gen_const_val(g, const_val, ""); LLVMValueRef ptr_size_int_val = LLVMConstPtrToInt(ptr_val, g->builtin_types.entry_usize->type_ref); 
@@ -4434,6 +4435,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case TypeTableEntryIdVar: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: zig_unreachable(); } @@ -5280,13 +5282,7 @@ static void define_builtin_types(CodeGen *g) { g->primitive_type_table.put(&entry->name, entry); } { - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdVoid); - entry->type_ref = u8_ptr_type->type_ref; - entry->zero_bits = false; - buf_init_from_str(&entry->name, "promise"); - entry->di_type = u8_ptr_type->di_type; - g->builtin_types.entry_promise = entry; + TypeTableEntry *entry = get_promise_type(g, nullptr); g->primitive_type_table.put(&entry->name, entry); } @@ -5916,6 +5912,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry case TypeTableEntryIdArgTuple: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: + case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdVoid: case TypeTableEntryIdUnreachable: @@ -6102,6 +6099,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf case TypeTableEntryIdNullLit: case TypeTableEntryIdVar: case TypeTableEntryIdArgTuple: + case TypeTableEntryIdPromise: zig_unreachable(); } } @@ -6262,6 +6260,7 @@ static void gen_h_file(CodeGen *g) { case TypeTableEntryIdArgTuple: case TypeTableEntryIdMaybe: case TypeTableEntryIdFn: + case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdEnum: assert(type_entry->data.enumeration.layout == ContainerLayoutExtern); diff --git a/src/ir.cpp b/src/ir.cpp index 7e95d98427..8d03a2276a 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -9589,6 +9589,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: + case 
TypeTableEntryIdPromise: if (!is_equality_cmp) { ir_add_error_node(ira, source_node, buf_sprintf("operator not allowed for type '%s'", buf_ptr(&resolved_type->name))); @@ -10418,6 +10419,7 @@ static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) { case TypeTableEntryIdVoid: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: + case TypeTableEntryIdPromise: return VarClassRequiredAny; case TypeTableEntryIdNumLitFloat: case TypeTableEntryIdNumLitInt: @@ -10688,6 +10690,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: ir_add_error(ira, target, buf_sprintf("invalid export target '%s'", buf_ptr(&type_value->name))); break; @@ -10712,6 +10715,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: ir_add_error(ira, target, buf_sprintf("invalid export target type '%s'", buf_ptr(&target->value.type->name))); break; @@ -11481,6 +11485,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: + case TypeTableEntryIdPromise: { ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base); out_val->data.x_type = get_maybe_type(ira->codegen, type_entry); @@ -12710,6 +12715,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi case TypeTableEntryIdFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: { ConstExprValue *out_val = ir_build_const_from(ira, &typeof_instruction->base); out_val->data.x_type = type_entry; @@ -12977,6 +12983,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, case TypeTableEntryIdFn: case 
TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: + case TypeTableEntryIdPromise: { type_ensure_zero_bits_known(ira->codegen, child_type); TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, child_type, @@ -13085,6 +13092,7 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira, case TypeTableEntryIdFn: case TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: + case TypeTableEntryIdPromise: { TypeTableEntry *result_type = get_array_type(ira->codegen, child_type, size); ConstExprValue *out_val = ir_build_const_from(ira, &array_type_instruction->base); @@ -13136,6 +13144,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira, case TypeTableEntryIdEnum: case TypeTableEntryIdUnion: case TypeTableEntryIdFn: + case TypeTableEntryIdPromise: { uint64_t size_in_bytes = type_size(ira->codegen, type_entry); ConstExprValue *out_val = ir_build_const_from(ira, &size_of_instruction->base); @@ -13465,6 +13474,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, case TypeTableEntryIdNumLitFloat: case TypeTableEntryIdNumLitInt: case TypeTableEntryIdPointer: + case TypeTableEntryIdPromise: case TypeTableEntryIdFn: case TypeTableEntryIdNamespace: case TypeTableEntryIdErrorSet: @@ -14053,6 +14063,7 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_ case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: case TypeTableEntryIdPointer: + case TypeTableEntryIdPromise: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: case TypeTableEntryIdNumLitFloat: @@ -15313,6 +15324,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc case TypeTableEntryIdInt: case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: + case TypeTableEntryIdPromise: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: case TypeTableEntryIdMaybe: @@ -16008,6 +16020,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, 
ConstExprValue case TypeTableEntryIdNumLitInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: + case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdVoid: return; @@ -16075,6 +16088,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdNumLitInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: + case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdVoid: return; From 236bbe1183575d7644f943b59096f2eb275ffa3a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 21 Feb 2018 00:52:20 -0500 Subject: [PATCH 05/56] implement IR analysis for async function calls See #727 --- doc/langref.html.in | 2 +- src/all_types.hpp | 7 ++ src/analyze.cpp | 36 ++++++- src/analyze.hpp | 1 - src/codegen.cpp | 11 ++ src/ir.cpp | 243 +++++++++++++++++++++++++++++++++++--------- src/ir_print.cpp | 16 +++ src/parser.cpp | 10 +- 8 files changed, 272 insertions(+), 54 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 05e804d78a..9c33f9e607 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5645,7 +5645,7 @@ UseDecl = "use" Expression ";" ExternDecl = "extern" option(String) (FnProto | VariableDeclaration) ";" -FnProto = option("nakedcc" | "stdcallcc" | "extern" | "async") "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr +FnProto = option("nakedcc" | "stdcallcc" | "extern" | ("async" option("(" Expression ")"))) "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr FnDef = option("inline" | "export") FnProto Block diff --git a/src/all_types.hpp b/src/all_types.hpp index dbd8e0aa11..c4792c7921 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -428,6 +428,7 @@ struct AstNodeFnProto { AstNode *section_expr; bool auto_err_set; + AstNode *async_allocator_type; }; struct AstNodeFnDef { @@ -935,6 
+936,7 @@ struct FnTypeId { bool is_var_args; CallingConvention cc; uint32_t alignment; + TypeTableEntry *async_allocator_type; }; uint32_t fn_type_id_hash(FnTypeId*); @@ -1958,6 +1960,7 @@ enum IrInstructionId { IrInstructionIdErrorReturnTrace, IrInstructionIdErrorUnion, IrInstructionIdCancel, + IrInstructionIdGetImplicitAllocator, }; struct IrInstruction { @@ -2803,6 +2806,10 @@ struct IrInstructionCancel { IrInstruction *target; }; +struct IrInstructionGetImplicitAllocator { + IrInstruction base; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/analyze.cpp b/src/analyze.cpp index e28a2d20f0..d83f195a85 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -954,8 +954,13 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { // populate the name of the type buf_resize(&fn_type->name, 0); - const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc); - buf_appendf(&fn_type->name, "%sfn(", cc_str); + if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) { + buf_appendf(&fn_type->name, "async(%s) ", buf_ptr(&fn_type_id->async_allocator_type->name)); + } else { + const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc); + buf_appendf(&fn_type->name, "%s", cc_str); + } + buf_appendf(&fn_type->name, "fn("); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { FnTypeParamInfo *param_info = &fn_type_id->param_info[i]; @@ -1126,7 +1131,16 @@ TypeTableEntry *analyze_type_expr(CodeGen *g, Scope *scope, AstNode *node) { TypeTableEntry *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id) { TypeTableEntry *fn_type = new_type_table_entry(TypeTableEntryIdFn); fn_type->is_copyable = false; - buf_init_from_str(&fn_type->name, "fn("); + buf_resize(&fn_type->name, 0); + if (fn_type->data.fn.fn_type_id.cc == CallingConventionAsync) { + const char *async_allocator_type_str = (fn_type->data.fn.fn_type_id.async_allocator_type == nullptr) ? 
+ "var" : buf_ptr(&fn_type_id->async_allocator_type->name); + buf_appendf(&fn_type->name, "async(%s) ", async_allocator_type_str); + } else { + const char *cc_str = calling_convention_fn_type_str(fn_type->data.fn.fn_type_id.cc); + buf_appendf(&fn_type->name, "%s", cc_str); + } + buf_appendf(&fn_type->name, "fn("); size_t i = 0; for (; i < fn_type_id->next_param_index; i += 1) { const char *comma_str = (i == 0) ? "" : ","; @@ -1515,6 +1529,16 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c break; } + if (fn_type_id.cc == CallingConventionAsync) { + if (fn_proto->async_allocator_type == nullptr) { + return get_generic_fn_type(g, &fn_type_id); + } + fn_type_id.async_allocator_type = analyze_type_expr(g, child_scope, fn_proto->async_allocator_type); + if (type_is_invalid(fn_type_id.async_allocator_type)) { + return g->builtin_types.entry_invalid; + } + } + return get_fn_type(g, &fn_type_id); } @@ -3676,7 +3700,7 @@ AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index) { return nullptr; } -void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars) { +static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars) { TypeTableEntry *fn_type = fn_table_entry->type_entry; assert(!fn_type->data.fn.is_generic); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; @@ -4242,6 +4266,7 @@ uint32_t fn_type_id_hash(FnTypeId *id) { result += ((uint32_t)(id->cc)) * (uint32_t)3349388391; result += id->is_var_args ? 
(uint32_t)1931444534 : 0; result += hash_ptr(id->return_type); + result += hash_ptr(id->async_allocator_type); result += id->alignment * 0xd3b3f3e2; for (size_t i = 0; i < id->param_count; i += 1) { FnTypeParamInfo *info = &id->param_info[i]; @@ -4256,7 +4281,8 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) { a->return_type != b->return_type || a->is_var_args != b->is_var_args || a->param_count != b->param_count || - a->alignment != b->alignment) + a->alignment != b->alignment || + a->async_allocator_type != b->async_allocator_type) { return false; } diff --git a/src/analyze.hpp b/src/analyze.hpp index 34339c3688..a4036c597c 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -93,7 +93,6 @@ void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue * void eval_min_max_value_int(CodeGen *g, TypeTableEntry *int_type, BigInt *bigint, bool is_max); void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val); -void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars); void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_type_node); ScopeBlock *create_block_scope(AstNode *node, Scope *parent); diff --git a/src/codegen.cpp b/src/codegen.cpp index 291db7017b..d52cc2f9e2 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2521,6 +2521,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; + if (fn_type_id->cc == CallingConventionAsync) { + zig_panic("TODO codegen async function call"); + } + TypeTableEntry *src_return_type = fn_type_id->return_type; bool ret_has_bits = type_has_bits(src_return_type); bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type); @@ -3094,6 +3098,10 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns zig_panic("TODO ir_render_cancel"); } +static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, 
IrExecutable *executable, IrInstructionGetImplicitAllocator *instruction) { + zig_panic("TODO ir_render_get_implicit_allocator"); +} + static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { switch (atomic_order) { case AtomicOrderUnordered: return LLVMAtomicOrderingUnordered; @@ -3752,6 +3760,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdExport: case IrInstructionIdErrorUnion: zig_unreachable(); + case IrInstructionIdReturn: return ir_render_return(g, executable, (IrInstructionReturn *)instruction); case IrInstructionIdDeclVar: @@ -3870,6 +3879,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction); case IrInstructionIdCancel: return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction); + case IrInstructionIdGetImplicitAllocator: + return ir_render_get_implicit_allocator(g, executable, (IrInstructionGetImplicitAllocator *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 8d03a2276a..a82e168986 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -65,6 +65,7 @@ enum ConstCastResultId { ConstCastResultIdFnArgNoAlias, ConstCastResultIdType, ConstCastResultIdUnresolvedInferredErrSet, + ConstCastResultIdAsyncAllocatorType, }; struct ConstCastErrSetMismatch { @@ -92,6 +93,7 @@ struct ConstCastOnly { ConstCastOnly *error_union_payload; ConstCastOnly *error_union_error_set; ConstCastOnly *return_type; + ConstCastOnly *async_allocator_type; ConstCastArg fn_arg; ConstCastArgNoAlias arg_no_alias; } data; @@ -104,6 +106,8 @@ static TypeTableEntry *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *ins static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, TypeTableEntry *expected_type); static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr); static ErrorMsg 
*exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg); +static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, + IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type); ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) { assert(const_val->type->id == TypeTableEntryIdPointer); @@ -641,6 +645,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCancel *) { return IrInstructionIdCancel; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionGetImplicitAllocator *) { + return IrInstructionIdGetImplicitAllocator; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -954,15 +962,6 @@ static IrInstruction *ir_build_struct_field_ptr(IrBuilder *irb, Scope *scope, As return &instruction->base; } -static IrInstruction *ir_build_struct_field_ptr_from(IrBuilder *irb, IrInstruction *old_instruction, - IrInstruction *struct_ptr, TypeStructField *type_struct_field) -{ - IrInstruction *new_instruction = ir_build_struct_field_ptr(irb, old_instruction->scope, - old_instruction->source_node, struct_ptr, type_struct_field); - ir_link_new_instruction(new_instruction, old_instruction); - return new_instruction; -} - static IrInstruction *ir_build_union_field_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *union_ptr, TypeUnionField *field) { @@ -2415,6 +2414,12 @@ static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *sou return &instruction->base; } +static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node) { + IrInstructionGetImplicitAllocator *instruction = ir_build_instruction(irb, scope, source_node); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { 
results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -6740,6 +6745,12 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry return result; } + if (expected_type == ira->codegen->builtin_types.entry_promise && + actual_type->id == TypeTableEntryIdPromise) + { + return result; + } + // fn if (expected_type->id == TypeTableEntryIdFn && actual_type->id == TypeTableEntryIdFn) @@ -6771,6 +6782,16 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry return result; } } + if (!expected_type->data.fn.is_generic && expected_type->data.fn.fn_type_id.cc == CallingConventionAsync) { + ConstCastOnly child = types_match_const_cast_only(ira, actual_type->data.fn.fn_type_id.async_allocator_type, + expected_type->data.fn.fn_type_id.async_allocator_type, source_node); + if (child.id != ConstCastResultIdOk) { + result.id = ConstCastResultIdAsyncAllocatorType; + result.data.async_allocator_type = allocate_nonzero(1); + *result.data.async_allocator_type = child; + return result; + } + } if (expected_type->data.fn.fn_type_id.param_count != actual_type->data.fn.fn_type_id.param_count) { result.id = ConstCastResultIdFnArgCount; return result; @@ -10768,6 +10789,58 @@ static TypeTableEntry *ir_analyze_instruction_error_union(IrAnalyze *ira, return ira->codegen->builtin_types.entry_type; } +IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, FnTableEntry *parent_fn_entry) { + FnTypeId *parent_fn_type = &parent_fn_entry->type_entry->data.fn.fn_type_id; + if (parent_fn_type->cc != CallingConventionAsync) { + ir_add_error(ira, source_instr, buf_sprintf("async function call from non-async caller requires allocator parameter")); + return ira->codegen->invalid_instruction; + } + + assert(parent_fn_type->async_allocator_type != nullptr); + IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, source_instr->source_node); + result->value.type = 
parent_fn_type->async_allocator_type; + return result; +} + +static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *call_instruction, FnTableEntry *fn_entry, TypeTableEntry *fn_type, + IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst) +{ + Buf *alloc_field_name = buf_create_from_str("allocFn"); + //Buf *free_field_name = buf_create_from_str("freeFn"); + assert(async_allocator_inst->value.type->id == TypeTableEntryIdPointer); + TypeTableEntry *container_type = async_allocator_inst->value.type->data.pointer.child_type; + IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, alloc_field_name, &call_instruction->base, + async_allocator_inst, container_type); + if (type_is_invalid(field_ptr_inst->value.type)) { + return ira->codegen->invalid_instruction; + } + TypeTableEntry *ptr_to_alloc_fn_type = field_ptr_inst->value.type; + assert(ptr_to_alloc_fn_type->id == TypeTableEntryIdPointer); + + TypeTableEntry *alloc_fn_type = ptr_to_alloc_fn_type->data.pointer.child_type; + if (alloc_fn_type->id != TypeTableEntryIdFn) { + ir_add_error(ira, &call_instruction->base, + buf_sprintf("expected allocation function, found '%s'", buf_ptr(&alloc_fn_type->name))); + return ira->codegen->invalid_instruction; + } + + TypeTableEntry *alloc_fn_return_type = alloc_fn_type->data.fn.fn_type_id.return_type; + if (alloc_fn_return_type->id != TypeTableEntryIdErrorUnion) { + ir_add_error(ira, fn_ref, + buf_sprintf("expected allocation function to return error union, but it returns '%s'", buf_ptr(&alloc_fn_return_type->name))); + return ira->codegen->invalid_instruction; + } + TypeTableEntry *alloc_fn_error_set_type = alloc_fn_return_type->data.error_union.err_set_type; + TypeTableEntry *return_type = fn_type->data.fn.fn_type_id.return_type; + TypeTableEntry *promise_type = get_promise_type(ira->codegen, return_type); + TypeTableEntry *async_return_type = get_error_union_type(ira->codegen, 
alloc_fn_error_set_type, promise_type); + + IrInstruction *result = ir_build_call(&ira->new_irb, call_instruction->base.scope, call_instruction->base.source_node, + fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst); + result->value.type = async_return_type; + return result; +} + static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node, IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i) { @@ -10989,6 +11062,13 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal } return ira->codegen->builtin_types.entry_invalid; } + if (fn_type_id->cc != CallingConventionAsync && call_instruction->is_async) { + ErrorMsg *msg = ir_add_error(ira, fn_ref, buf_sprintf("cannot use async keyword to call non-async function")); + if (fn_proto_node) { + add_error_note(ira->codegen, msg, fn_proto_node, buf_sprintf("declared here")); + } + return ira->codegen->builtin_types.entry_invalid; + } if (fn_type_id->is_var_args) { @@ -11115,6 +11195,11 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal buf_sprintf("calling a generic function requires compile-time known function value")); return ira->codegen->builtin_types.entry_invalid; } + if (call_instruction->is_async && fn_type_id->is_var_args) { + ir_add_error(ira, call_instruction->fn_ref, + buf_sprintf("compiler bug: TODO: implement var args async functions. 
https://github.com/zig-lang/zig/issues/557")); + return ira->codegen->builtin_types.entry_invalid; + } // Count the arguments of the function type id we are creating size_t new_fn_arg_count = first_arg_1_or_0; @@ -11263,6 +11348,35 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, FnInlineAuto); } } + IrInstruction *async_allocator_inst = nullptr; + if (call_instruction->is_async) { + AstNode *async_allocator_type_node = fn_proto_node->data.fn_proto.async_allocator_type; + if (async_allocator_type_node != nullptr) { + TypeTableEntry *async_allocator_type = analyze_type_expr(ira->codegen, impl_fn->child_scope, async_allocator_type_node); + if (type_is_invalid(async_allocator_type)) + return ira->codegen->builtin_types.entry_invalid; + inst_fn_type_id.async_allocator_type = async_allocator_type; + } + IrInstruction *uncasted_async_allocator_inst; + if (call_instruction->async_allocator == nullptr) { + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, parent_fn_entry); + if (type_is_invalid(uncasted_async_allocator_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } else { + uncasted_async_allocator_inst = call_instruction->async_allocator->other; + if (type_is_invalid(uncasted_async_allocator_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } + if (inst_fn_type_id.async_allocator_type == nullptr) { + IrInstruction *casted_inst = ir_implicit_byval_const_ref_cast(ira, uncasted_async_allocator_inst); + if (type_is_invalid(casted_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + inst_fn_type_id.async_allocator_type = casted_inst->value.type; + } + async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, inst_fn_type_id.async_allocator_type); + if (type_is_invalid(async_allocator_inst->value.type)) + return 
ira->codegen->builtin_types.entry_invalid; + } auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn); if (existing_entry) { @@ -11282,17 +11396,24 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal ira->codegen->fn_defs.append(impl_fn); } - size_t impl_param_count = impl_fn->type_entry->data.fn.fn_type_id.param_count; - IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, - impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, false, nullptr); - TypeTableEntry *return_type = impl_fn->type_entry->data.fn.fn_type_id.return_type; - ir_add_alloca(ira, new_call_instruction, return_type); - if (return_type->id == TypeTableEntryIdErrorSet || return_type->id == TypeTableEntryIdErrorUnion) { parent_fn_entry->calls_errorable_function = true; } + size_t impl_param_count = impl_fn->type_entry->data.fn.fn_type_id.param_count; + if (call_instruction->is_async) { + IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, fn_ref, casted_args, impl_param_count, async_allocator_inst); + ir_link_new_instruction(result, &call_instruction->base); + return ir_finish_anal(ira, result->value.type); + } + + IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, + impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, + call_instruction->is_async, async_allocator_inst); + + ir_add_alloca(ira, new_call_instruction, return_type); + return ir_finish_anal(ira, return_type); } @@ -11350,14 +11471,31 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal assert(next_arg_index == call_param_count); - if (call_instruction->is_async) { - zig_panic("TODO handle async fn call"); - } - TypeTableEntry *return_type = fn_type_id->return_type; if (type_is_invalid(return_type)) return ira->codegen->builtin_types.entry_invalid; + if (call_instruction->is_async) { + 
IrInstruction *uncasted_async_allocator_inst; + if (call_instruction->async_allocator == nullptr) { + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, parent_fn_entry); + if (type_is_invalid(uncasted_async_allocator_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } else { + uncasted_async_allocator_inst = call_instruction->async_allocator->other; + if (type_is_invalid(uncasted_async_allocator_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } + IrInstruction *async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, fn_type_id->async_allocator_type); + if (type_is_invalid(async_allocator_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, casted_args, call_param_count, async_allocator_inst); + ir_link_new_instruction(result, &call_instruction->base); + return ir_finish_anal(ira, result->value.type); + } + + IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr); @@ -12054,8 +12192,8 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc return return_type; } -static TypeTableEntry *ir_analyze_container_member_access_inner(IrAnalyze *ira, - TypeTableEntry *bare_struct_type, Buf *field_name, IrInstructionFieldPtr *field_ptr_instruction, +static IrInstruction *ir_analyze_container_member_access_inner(IrAnalyze *ira, + TypeTableEntry *bare_struct_type, Buf *field_name, IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type) { if (!is_slice(bare_struct_type)) { @@ -12063,17 +12201,17 @@ static TypeTableEntry *ir_analyze_container_member_access_inner(IrAnalyze *ira, auto entry = container_scope->decl_table.maybe_get(field_name); Tld *tld = entry ? 
entry->value : nullptr; if (tld && tld->id == TldIdFn) { - resolve_top_level_decl(ira->codegen, tld, false, field_ptr_instruction->base.source_node); + resolve_top_level_decl(ira->codegen, tld, false, source_instr->source_node); if (tld->resolution == TldResolutionInvalid) - return ira->codegen->builtin_types.entry_invalid; + return ira->codegen->invalid_instruction; TldFn *tld_fn = (TldFn *)tld; FnTableEntry *fn_entry = tld_fn->fn_entry; if (type_is_invalid(fn_entry->type_entry)) - return ira->codegen->builtin_types.entry_invalid; + return ira->codegen->invalid_instruction; - IrInstruction *bound_fn_value = ir_build_const_bound_fn(&ira->new_irb, field_ptr_instruction->base.scope, - field_ptr_instruction->base.source_node, fn_entry, container_ptr); - return ir_analyze_ref(ira, &field_ptr_instruction->base, bound_fn_value, true, false); + IrInstruction *bound_fn_value = ir_build_const_bound_fn(&ira->new_irb, source_instr->scope, + source_instr->source_node, fn_entry, container_ptr); + return ir_get_ref(ira, source_instr, bound_fn_value, true, false); } } const char *prefix_name; @@ -12088,19 +12226,19 @@ static TypeTableEntry *ir_analyze_container_member_access_inner(IrAnalyze *ira, } else { prefix_name = ""; } - ir_add_error_node(ira, field_ptr_instruction->base.source_node, + ir_add_error_node(ira, source_instr->source_node, buf_sprintf("no member named '%s' in %s'%s'", buf_ptr(field_name), prefix_name, buf_ptr(&bare_struct_type->name))); - return ira->codegen->builtin_types.entry_invalid; + return ira->codegen->invalid_instruction; } -static TypeTableEntry *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, - IrInstructionFieldPtr *field_ptr_instruction, IrInstruction *container_ptr, TypeTableEntry *container_type) +static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, + IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type) { TypeTableEntry *bare_type = 
container_ref_type(container_type); ensure_complete_type(ira->codegen, bare_type); if (type_is_invalid(bare_type)) - return ira->codegen->builtin_types.entry_invalid; + return ira->codegen->invalid_instruction; assert(container_ptr->value.type->id == TypeTableEntryIdPointer); bool is_const = container_ptr->value.type->data.pointer.is_const; @@ -12117,46 +12255,51 @@ static TypeTableEntry *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field if (instr_is_comptime(container_ptr)) { ConstExprValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad); if (!ptr_val) - return ira->codegen->builtin_types.entry_invalid; + return ira->codegen->invalid_instruction; if (ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr) { ConstExprValue *struct_val = const_ptr_pointee(ira->codegen, ptr_val); if (type_is_invalid(struct_val->type)) - return ira->codegen->builtin_types.entry_invalid; + return ira->codegen->invalid_instruction; ConstExprValue *field_val = &struct_val->data.x_struct.fields[field->src_index]; TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_val->type, is_const, is_volatile, align_bytes, (uint32_t)(ptr_bit_offset + field->packed_bits_offset), (uint32_t)unaligned_bit_count_for_result_type); - ConstExprValue *const_val = ir_build_const_from(ira, &field_ptr_instruction->base); + IrInstruction *result = ir_get_const(ira, source_instr); + ConstExprValue *const_val = &result->value; const_val->data.x_ptr.special = ConstPtrSpecialBaseStruct; const_val->data.x_ptr.mut = container_ptr->value.data.x_ptr.mut; const_val->data.x_ptr.data.base_struct.struct_val = struct_val; const_val->data.x_ptr.data.base_struct.field_index = field->src_index; - return ptr_type; + const_val->type = ptr_type; + return result; } } - ir_build_struct_field_ptr_from(&ira->new_irb, &field_ptr_instruction->base, container_ptr, field); - return get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, + IrInstruction *result = 
ir_build_struct_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, + container_ptr, field); + result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, align_bytes, (uint32_t)(ptr_bit_offset + field->packed_bits_offset), (uint32_t)unaligned_bit_count_for_result_type); + return result; } else { return ir_analyze_container_member_access_inner(ira, bare_type, field_name, - field_ptr_instruction, container_ptr, container_type); + source_instr, container_ptr, container_type); } } else if (bare_type->id == TypeTableEntryIdEnum) { return ir_analyze_container_member_access_inner(ira, bare_type, field_name, - field_ptr_instruction, container_ptr, container_type); + source_instr, container_ptr, container_type); } else if (bare_type->id == TypeTableEntryIdUnion) { TypeUnionField *field = find_union_type_field(bare_type, field_name); if (field) { - ir_build_union_field_ptr_from(&ira->new_irb, &field_ptr_instruction->base, container_ptr, field); - return get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, + IrInstruction *result = ir_build_union_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, container_ptr, field); + result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, get_abi_alignment(ira->codegen, field->type_entry), 0, 0); + return result; } else { return ir_analyze_container_member_access_inner(ira, bare_type, field_name, - field_ptr_instruction, container_ptr, container_type); + source_instr, container_ptr, container_type); } } else { zig_unreachable(); @@ -12266,9 +12409,13 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru if (container_type->id == TypeTableEntryIdPointer) { TypeTableEntry *bare_type = container_ref_type(container_type); IrInstruction *container_child = ir_get_deref(ira, &field_ptr_instruction->base, container_ptr); - return 
ir_analyze_container_field_ptr(ira, field_name, field_ptr_instruction, container_child, bare_type); + IrInstruction *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base, container_child, bare_type); + ir_link_new_instruction(result, &field_ptr_instruction->base); + return result->value.type; } else { - return ir_analyze_container_field_ptr(ira, field_name, field_ptr_instruction, container_ptr, container_type); + IrInstruction *result = ir_analyze_container_field_ptr(ira, field_name, &field_ptr_instruction->base, container_ptr, container_type); + ir_link_new_instruction(result, &field_ptr_instruction->base); + return result->value.type; } } else if (container_type->id == TypeTableEntryIdArray) { if (buf_eql_str(field_name, "len")) { @@ -16539,7 +16686,8 @@ static TypeTableEntry *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructi return ira->codegen->builtin_types.entry_invalid; IrInstruction *result = ir_build_cancel(&ira->new_irb, instruction->base.scope, instruction->base.source_node, casted_target); - result->value.type = casted_target->value.type; + result->value.type = ira->codegen->builtin_types.entry_void; + result->value.special = ConstValSpecialStatic; ir_link_new_instruction(result, &instruction->base); return result->value.type; } @@ -16559,6 +16707,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi case IrInstructionIdErrWrapCode: case IrInstructionIdErrWrapPayload: case IrInstructionIdCast: + case IrInstructionIdGetImplicitAllocator: zig_unreachable(); case IrInstructionIdReturn: return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction); @@ -16936,7 +17085,9 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdTagType: case IrInstructionIdErrorReturnTrace: case IrInstructionIdErrorUnion: + case IrInstructionIdGetImplicitAllocator: return false; + case IrInstructionIdAsm: { IrInstructionAsm *asm_instruction = (IrInstructionAsm 
*)instruction; diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 5c0c3bab0d..32eb0d0a9f 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -198,6 +198,15 @@ static void ir_print_cast(IrPrint *irp, IrInstructionCast *cast_instruction) { } static void ir_print_call(IrPrint *irp, IrInstructionCall *call_instruction) { + if (call_instruction->is_async) { + fprintf(irp->f, "async"); + if (call_instruction->async_allocator != nullptr) { + fprintf(irp->f, "("); + ir_print_other_instruction(irp, call_instruction->async_allocator); + fprintf(irp->f, ")"); + } + fprintf(irp->f, " "); + } if (call_instruction->fn_entry) { fprintf(irp->f, "%s", buf_ptr(&call_instruction->fn_entry->symbol_name)); } else { @@ -1015,6 +1024,10 @@ static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { ir_print_other_instruction(irp, instruction->target); } +static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) { + fprintf(irp->f, "@getImplicitAllocator()"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1338,6 +1351,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCancel: ir_print_cancel(irp, (IrInstructionCancel *)instruction); break; + case IrInstructionIdGetImplicitAllocator: + ir_print_get_implicit_allocator(irp, (IrInstructionGetImplicitAllocator *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/src/parser.cpp b/src/parser.cpp index ee7141a910..e64c569e2f 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -2333,7 +2333,7 @@ static AstNode *ast_parse_block(ParseContext *pc, size_t *token_index, bool mand } /* -FnProto = option("nakedcc" | "stdcallcc" | "extern" | "async") "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr +FnProto = option("nakedcc" | "stdcallcc" | 
"extern" | ("async" option("(" Expression ")"))) "fn" option(Symbol) ParamDeclList option("align" "(" Expression ")") option("section" "(" Expression ")") option("!") TypeExpr */ static AstNode *ast_parse_fn_proto(ParseContext *pc, size_t *token_index, bool mandatory, VisibMod visib_mod) { Token *first_token = &pc->tokens->at(*token_index); @@ -2341,12 +2341,18 @@ static AstNode *ast_parse_fn_proto(ParseContext *pc, size_t *token_index, bool m CallingConvention cc; bool is_extern = false; + AstNode *async_allocator_type_node = nullptr; if (first_token->id == TokenIdKeywordNakedCC) { *token_index += 1; fn_token = ast_eat_token(pc, token_index, TokenIdKeywordFn); cc = CallingConventionNaked; } else if (first_token->id == TokenIdKeywordAsync) { *token_index += 1; + Token *next_token = &pc->tokens->at(*token_index); + if (next_token->id == TokenIdLParen) { + async_allocator_type_node = ast_parse_type_expr(pc, token_index, true); + ast_eat_token(pc, token_index, TokenIdRParen); + } fn_token = ast_eat_token(pc, token_index, TokenIdKeywordFn); cc = CallingConventionAsync; } else if (first_token->id == TokenIdKeywordStdcallCC) { @@ -2383,6 +2389,7 @@ static AstNode *ast_parse_fn_proto(ParseContext *pc, size_t *token_index, bool m node->data.fn_proto.visib_mod = visib_mod; node->data.fn_proto.cc = cc; node->data.fn_proto.is_extern = is_extern; + node->data.fn_proto.async_allocator_type = async_allocator_type_node; Token *fn_name = &pc->tokens->at(*token_index); @@ -2798,6 +2805,7 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont visit_node_list(&node->data.fn_proto.params, visit, context); visit_field(&node->data.fn_proto.align_expr, visit, context); visit_field(&node->data.fn_proto.section_expr, visit, context); + visit_field(&node->data.fn_proto.async_allocator_type, visit, context); break; case NodeTypeFnDef: visit_field(&node->data.fn_def.fn_proto, visit, context); From b261da067248c4003b0008f6aaeefde43290a061 Mon Sep 17 00:00:00 2001 
From: Andrew Kelley Date: Wed, 21 Feb 2018 23:28:35 -0500 Subject: [PATCH 06/56] add coroutine startup IR to async functions See #727 --- src/all_types.hpp | 26 +++++++++ src/codegen.cpp | 26 +++++++++ src/ir.cpp | 138 +++++++++++++++++++++++++++++++++++++++++++++- src/ir_print.cpp | 34 ++++++++++++ 4 files changed, 223 insertions(+), 1 deletion(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index c4792c7921..b2d073f698 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -56,6 +56,7 @@ struct IrExecutable { IrAnalyze *analysis; Scope *begin_scope; ZigList tld_list; + IrInstruction *coro_handle; }; enum OutType { @@ -1961,6 +1962,10 @@ enum IrInstructionId { IrInstructionIdErrorUnion, IrInstructionIdCancel, IrInstructionIdGetImplicitAllocator, + IrInstructionIdCoroId, + IrInstructionIdCoroAlloc, + IrInstructionIdCoroSize, + IrInstructionIdCoroBegin, }; struct IrInstruction { @@ -2810,6 +2815,27 @@ struct IrInstructionGetImplicitAllocator { IrInstruction base; }; +struct IrInstructionCoroId { + IrInstruction base; +}; + +struct IrInstructionCoroAlloc { + IrInstruction base; + + IrInstruction *coro_id; +}; + +struct IrInstructionCoroSize { + IrInstruction base; +}; + +struct IrInstructionCoroBegin { + IrInstruction base; + + IrInstruction *coro_id; + IrInstruction *coro_mem_ptr; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/codegen.cpp b/src/codegen.cpp index d52cc2f9e2..e304d3cc24 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3696,6 +3696,23 @@ static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInst return nullptr; } +static LLVMValueRef ir_render_coro_id(CodeGen *g, IrExecutable *executable, IrInstructionCoroId *instruction) { + zig_panic("TODO ir_render_coro_id"); +} + +static LLVMValueRef ir_render_coro_alloc(CodeGen *g, IrExecutable *executable, IrInstructionCoroAlloc *instruction) { + zig_panic("TODO ir_render_coro_alloc"); +} + +static LLVMValueRef 
ir_render_coro_size(CodeGen *g, IrExecutable *executable, IrInstructionCoroSize *instruction) { + zig_panic("TODO ir_render_coro_size"); +} + +static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, IrInstructionCoroBegin *instruction) { + zig_panic("TODO ir_render_coro_begin"); +} + + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -3881,12 +3898,21 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_cancel(g, executable, (IrInstructionCancel *)instruction); case IrInstructionIdGetImplicitAllocator: return ir_render_get_implicit_allocator(g, executable, (IrInstructionGetImplicitAllocator *)instruction); + case IrInstructionIdCoroId: + return ir_render_coro_id(g, executable, (IrInstructionCoroId *)instruction); + case IrInstructionIdCoroAlloc: + return ir_render_coro_alloc(g, executable, (IrInstructionCoroAlloc *)instruction); + case IrInstructionIdCoroSize: + return ir_render_coro_size(g, executable, (IrInstructionCoroSize *)instruction); + case IrInstructionIdCoroBegin: + return ir_render_coro_begin(g, executable, (IrInstructionCoroBegin *)instruction); } zig_unreachable(); } static void ir_render(CodeGen *g, FnTableEntry *fn_entry) { assert(fn_entry); + IrExecutable *executable = &fn_entry->analyzed_executable; assert(executable->basic_block_list.length > 0); for (size_t block_i = 0; block_i < executable->basic_block_list.length; block_i += 1) { diff --git a/src/ir.cpp b/src/ir.cpp index a82e168986..3c0d9d652f 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -45,6 +45,9 @@ static LVal make_lval_addr(bool is_const, bool is_volatile) { return { true, is_const, is_volatile }; } +static const char * ASYNC_ALLOC_FIELD_NAME = "allocFn"; +//static const char * ASYNC_FREE_FIELD_NAME = "freeFn"; + enum ConstCastResultId { ConstCastResultIdOk, ConstCastResultIdErrSet, @@ -649,6 +652,22 @@ static 
constexpr IrInstructionId ir_instruction_id(IrInstructionGetImplicitAlloc return IrInstructionIdGetImplicitAllocator; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroId *) { + return IrInstructionIdCoroId; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAlloc *) { + return IrInstructionIdCoroAlloc; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSize *) { + return IrInstructionIdCoroSize; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroBegin *) { + return IrInstructionIdCoroBegin; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -2420,6 +2439,38 @@ static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *sco return &instruction->base; } +static IrInstruction *ir_build_coro_id(IrBuilder *irb, Scope *scope, AstNode *source_node) { + IrInstructionCoroId *instruction = ir_build_instruction(irb, scope, source_node); + + return &instruction->base; +} + +static IrInstruction *ir_build_coro_alloc(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id) { + IrInstructionCoroAlloc *instruction = ir_build_instruction(irb, scope, source_node); + instruction->coro_id = coro_id; + + ir_ref_instruction(coro_id, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_coro_size(IrBuilder *irb, Scope *scope, AstNode *source_node) { + IrInstructionCoroSize *instruction = ir_build_instruction(irb, scope, source_node); + + return &instruction->base; +} + +static IrInstruction *ir_build_coro_begin(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *coro_id, IrInstruction *coro_mem_ptr) { + IrInstructionCoroBegin *instruction = ir_build_instruction(irb, scope, source_node); + instruction->coro_id = coro_id; + instruction->coro_mem_ptr = coro_mem_ptr; + + ir_ref_instruction(coro_id, 
irb->current_basic_block); + ir_ref_instruction(coro_mem_ptr, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -5782,6 +5833,63 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec // Entry block gets a reference because we enter it to begin. ir_ref_bb(irb->current_basic_block); + FnTableEntry *fn_entry = exec_fn_entry(irb->exec); + bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; + if (is_async) { + IrInstruction *is_comptime_false = ir_build_const_bool(irb, scope, node, false); + IrInstruction *coro_id = ir_build_coro_id(irb, scope, node); + IrInstruction *need_dyn_alloc = ir_build_coro_alloc(irb, scope, node, coro_id); + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + IrInstruction *u8_ptr_type = ir_build_const_type(irb, scope, node, + get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); + IrInstruction *null_ptr = ir_build_int_to_ptr(irb, scope, node, u8_ptr_type, zero); + + IrBasicBlock *dyn_alloc_block = ir_create_basic_block(irb, scope, "DynAlloc"); + IrBasicBlock *coro_begin_block = ir_create_basic_block(irb, scope, "CoroBegin"); + ir_build_cond_br(irb, scope, node, need_dyn_alloc, dyn_alloc_block, coro_begin_block, is_comptime_false); + + ir_set_cursor_at_end_and_append_block(irb, dyn_alloc_block); + IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); + IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node); + Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); + IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, alloc_field_name); + IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); + IrInstruction 
*implicit_allocator = ir_build_load_ptr(irb, scope, node, implicit_allocator_ptr); + IrInstruction *alignment = ir_build_const_usize(irb, scope, node, irb->codegen->pointer_size_bytes * 2); + size_t arg_count = 3; + IrInstruction **args = allocate(arg_count); + args[0] = implicit_allocator; // self + args[1] = coro_size; // byte_count + args[2] = alignment; // alignment + IrInstruction *alloc_result = ir_build_call(irb, scope, node, nullptr, alloc_fn, arg_count, args, false, FnInlineAuto, false, nullptr); + IrInstruction *alloc_result_ptr = ir_build_ref(irb, scope, node, alloc_result, true, false); + IrInstruction *alloc_result_is_err = ir_build_test_err(irb, scope, node, alloc_result_ptr); + IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, scope, "AllocError"); + IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, scope, "AllocOk"); + ir_build_cond_br(irb, scope, node, alloc_result_is_err, alloc_err_block, alloc_ok_block, is_comptime_false); + + ir_set_cursor_at_end_and_append_block(irb, alloc_err_block); + IrInstruction *err_val = ir_build_unwrap_err_code(irb, scope, node, alloc_result_ptr); + ir_build_return(irb, scope, node, err_val); + + ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); + IrInstruction *unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); + Buf *ptr_field_name = buf_create_from_str("ptr"); + IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, unwrapped_mem_ptr, ptr_field_name); + IrInstruction *coro_mem_ptr = ir_build_load_ptr(irb, scope, node, coro_mem_ptr_field); + ir_build_br(irb, scope, node, coro_begin_block, is_comptime_false); + + ir_set_cursor_at_end_and_append_block(irb, coro_begin_block); + IrBasicBlock **incoming_blocks = allocate(2); + IrInstruction **incoming_values = allocate(2); + incoming_blocks[0] = entry_block; + incoming_values[0] = null_ptr; + incoming_blocks[1] = dyn_alloc_block; + incoming_values[1] = coro_mem_ptr; + IrInstruction 
*coro_mem = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem); + } + IrInstruction *result = ir_gen_node_extra(irb, node, scope, LVAL_NONE); assert(result); if (irb->exec->invalid) @@ -10805,7 +10913,7 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *call_instruction, FnTableEntry *fn_entry, TypeTableEntry *fn_type, IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst) { - Buf *alloc_field_name = buf_create_from_str("allocFn"); + Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); //Buf *free_field_name = buf_create_from_str("freeFn"); assert(async_allocator_inst->value.type->id == TypeTableEntryIdPointer); TypeTableEntry *container_type = async_allocator_inst->value.type->data.pointer.child_type; @@ -16692,6 +16800,22 @@ static TypeTableEntry *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructi return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_coro_id(IrAnalyze *ira, IrInstructionCoroId *instruction) { + zig_panic("TODO ir_analyze_instruction_coro_id"); +} + +static TypeTableEntry *ir_analyze_instruction_coro_alloc(IrAnalyze *ira, IrInstructionCoroAlloc *instruction) { + zig_panic("TODO ir_analyze_instruction_coro_alloc"); +} + +static TypeTableEntry *ir_analyze_instruction_coro_size(IrAnalyze *ira, IrInstructionCoroSize *instruction) { + zig_panic("TODO ir_analyze_instruction_coro_size"); +} + +static TypeTableEntry *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstructionCoroBegin *instruction) { + zig_panic("TODO ir_analyze_instruction_coro_begin"); +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -16897,6 +17021,14 @@ static 
TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_error_union(ira, (IrInstructionErrorUnion *)instruction); case IrInstructionIdCancel: return ir_analyze_instruction_cancel(ira, (IrInstructionCancel *)instruction); + case IrInstructionIdCoroId: + return ir_analyze_instruction_coro_id(ira, (IrInstructionCoroId *)instruction); + case IrInstructionIdCoroAlloc: + return ir_analyze_instruction_coro_alloc(ira, (IrInstructionCoroAlloc *)instruction); + case IrInstructionIdCoroSize: + return ir_analyze_instruction_coro_size(ira, (IrInstructionCoroSize *)instruction); + case IrInstructionIdCoroBegin: + return ir_analyze_instruction_coro_begin(ira, (IrInstructionCoroBegin *)instruction); } zig_unreachable(); } @@ -17011,6 +17143,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdSetAlignStack: case IrInstructionIdExport: case IrInstructionIdCancel: + case IrInstructionIdCoroId: + case IrInstructionIdCoroBegin: return true; case IrInstructionIdPhi: @@ -17086,6 +17220,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdErrorReturnTrace: case IrInstructionIdErrorUnion: case IrInstructionIdGetImplicitAllocator: + case IrInstructionIdCoroAlloc: + case IrInstructionIdCoroSize: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 32eb0d0a9f..2fcdb75a18 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1028,6 +1028,28 @@ static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplic fprintf(irp->f, "@getImplicitAllocator()"); } +static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) { + fprintf(irp->f, "@coroId()"); +} + +static void ir_print_coro_alloc(IrPrint *irp, IrInstructionCoroAlloc *instruction) { + fprintf(irp->f, "@coroAlloc("); + ir_print_other_instruction(irp, instruction->coro_id); + fprintf(irp->f, ")"); +} + +static void ir_print_coro_size(IrPrint *irp, 
IrInstructionCoroSize *instruction) { + fprintf(irp->f, "@coroSize()"); +} + +static void ir_print_coro_begin(IrPrint *irp, IrInstructionCoroBegin *instruction) { + fprintf(irp->f, "@coroBegin("); + ir_print_other_instruction(irp, instruction->coro_id); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->coro_mem_ptr); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1354,6 +1376,18 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdGetImplicitAllocator: ir_print_get_implicit_allocator(irp, (IrInstructionGetImplicitAllocator *)instruction); break; + case IrInstructionIdCoroId: + ir_print_coro_id(irp, (IrInstructionCoroId *)instruction); + break; + case IrInstructionIdCoroAlloc: + ir_print_coro_alloc(irp, (IrInstructionCoroAlloc *)instruction); + break; + case IrInstructionIdCoroSize: + ir_print_coro_size(irp, (IrInstructionCoroSize *)instruction); + break; + case IrInstructionIdCoroBegin: + ir_print_coro_begin(irp, (IrInstructionCoroBegin *)instruction); + break; } fprintf(irp->f, "\n"); } From 37c07d4f3f52f2227e86943cf84aebdc729684b7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 22 Feb 2018 09:30:55 -0500 Subject: [PATCH 07/56] coroutines: analyze get_implicit_allocator instruction see #727 --- src/ir.cpp | 22 +++++++++++++++++----- src/ir_print.cpp | 2 ++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 3c0d9d652f..2bac97849f 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -5884,7 +5884,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction **incoming_values = allocate(2); incoming_blocks[0] = entry_block; incoming_values[0] = null_ptr; - incoming_blocks[1] = dyn_alloc_block; + incoming_blocks[1] = alloc_ok_block; incoming_values[1] = coro_mem_ptr; IrInstruction *coro_mem = 
ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem); @@ -10897,7 +10897,13 @@ static TypeTableEntry *ir_analyze_instruction_error_union(IrAnalyze *ira, return ira->codegen->builtin_types.entry_type; } -IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, FnTableEntry *parent_fn_entry) { +IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr) { + FnTableEntry *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); + if (parent_fn_entry == nullptr) { + ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available")); + return ira->codegen->invalid_instruction; + } + FnTypeId *parent_fn_type = &parent_fn_entry->type_entry->data.fn.fn_type_id; if (parent_fn_type->cc != CallingConventionAsync) { ir_add_error(ira, source_instr, buf_sprintf("async function call from non-async caller requires allocator parameter")); @@ -11467,7 +11473,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal } IrInstruction *uncasted_async_allocator_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, parent_fn_entry); + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; } else { @@ -11586,7 +11592,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (call_instruction->is_async) { IrInstruction *uncasted_async_allocator_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, parent_fn_entry); + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); if 
(type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; } else { @@ -16816,6 +16822,11 @@ static TypeTableEntry *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstr zig_panic("TODO ir_analyze_instruction_coro_begin"); } +static TypeTableEntry *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) { + IrInstruction *result = ir_get_implicit_allocator(ira, &instruction->base); + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -16831,7 +16842,6 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi case IrInstructionIdErrWrapCode: case IrInstructionIdErrWrapPayload: case IrInstructionIdCast: - case IrInstructionIdGetImplicitAllocator: zig_unreachable(); case IrInstructionIdReturn: return ir_analyze_instruction_return(ira, (IrInstructionReturn *)instruction); @@ -17029,6 +17039,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_coro_size(ira, (IrInstructionCoroSize *)instruction); case IrInstructionIdCoroBegin: return ir_analyze_instruction_coro_begin(ira, (IrInstructionCoroBegin *)instruction); + case IrInstructionIdGetImplicitAllocator: + return ir_analyze_instruction_get_implicit_allocator(ira, (IrInstructionGetImplicitAllocator *)instruction); } zig_unreachable(); } diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 2fcdb75a18..186e2711a4 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -839,6 +839,8 @@ static void ir_print_ptr_to_int(IrPrint *irp, IrInstructionPtrToInt *instruction static void ir_print_int_to_ptr(IrPrint *irp, IrInstructionIntToPtr *instruction) { fprintf(irp->f, "@intToPtr("); + ir_print_other_instruction(irp, instruction->dest_type); + fprintf(irp->f, ","); ir_print_other_instruction(irp, 
instruction->target); fprintf(irp->f, ")"); } From 88e7b9bf80ef261be23ab5a427f6f10604225be1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 22 Feb 2018 09:36:58 -0500 Subject: [PATCH 08/56] ir analysis for coro_id and coro_alloc See #727 --- src/ir.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 2bac97849f..7235ad72f2 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -16807,11 +16807,22 @@ static TypeTableEntry *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructi } static TypeTableEntry *ir_analyze_instruction_coro_id(IrAnalyze *ira, IrInstructionCoroId *instruction) { - zig_panic("TODO ir_analyze_instruction_coro_id"); + IrInstruction *result = ir_build_coro_id(&ira->new_irb, instruction->base.scope, instruction->base.source_node); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_usize; + return result->value.type; } static TypeTableEntry *ir_analyze_instruction_coro_alloc(IrAnalyze *ira, IrInstructionCoroAlloc *instruction) { - zig_panic("TODO ir_analyze_instruction_coro_alloc"); + IrInstruction *coro_id = instruction->coro_id->other; + if (type_is_invalid(coro_id->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_coro_alloc(&ira->new_irb, instruction->base.scope, instruction->base.source_node, + coro_id); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_bool; + return result->value.type; } static TypeTableEntry *ir_analyze_instruction_coro_size(IrAnalyze *ira, IrInstructionCoroSize *instruction) { From ca1b77b2d51408589659f652b1b1dbe2a25e149f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 22 Feb 2018 11:54:27 -0500 Subject: [PATCH 09/56] IR analysis for coro.begin See #727 --- src/all_types.hpp | 8 ++++++ src/codegen.cpp | 6 +++++ src/ir.cpp | 68 ++++++++++++++++++++++++++++++++++++++++++----- 
src/ir_print.cpp | 9 +++++++ 4 files changed, 84 insertions(+), 7 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index b2d073f698..9575f6cdad 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1490,6 +1490,7 @@ struct CodeGen { TypeTableEntry *entry_u8; TypeTableEntry *entry_u16; TypeTableEntry *entry_u32; + TypeTableEntry *entry_u29; TypeTableEntry *entry_u64; TypeTableEntry *entry_u128; TypeTableEntry *entry_i8; @@ -1966,6 +1967,7 @@ enum IrInstructionId { IrInstructionIdCoroAlloc, IrInstructionIdCoroSize, IrInstructionIdCoroBegin, + IrInstructionIdCoroAllocFail, }; struct IrInstruction { @@ -2836,6 +2838,12 @@ struct IrInstructionCoroBegin { IrInstruction *coro_mem_ptr; }; +struct IrInstructionCoroAllocFail { + IrInstruction base; + + IrInstruction *err_val; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/codegen.cpp b/src/codegen.cpp index e304d3cc24..cb3a98bb88 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3712,6 +3712,9 @@ static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, I zig_panic("TODO ir_render_coro_begin"); } +static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable, IrInstructionCoroAllocFail *instruction) { + zig_panic("TODO ir_render_coro_alloc_fail"); +} static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; @@ -3906,6 +3909,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_size(g, executable, (IrInstructionCoroSize *)instruction); case IrInstructionIdCoroBegin: return ir_render_coro_begin(g, executable, (IrInstructionCoroBegin *)instruction); + case IrInstructionIdCoroAllocFail: + return ir_render_coro_alloc_fail(g, executable, (IrInstructionCoroAllocFail *)instruction); } zig_unreachable(); } @@ -5282,6 +5287,7 @@ static void define_builtin_types(CodeGen *g) { 
g->builtin_types.entry_u8 = get_int_type(g, false, 8); g->builtin_types.entry_u16 = get_int_type(g, false, 16); + g->builtin_types.entry_u29 = get_int_type(g, false, 29); g->builtin_types.entry_u32 = get_int_type(g, false, 32); g->builtin_types.entry_u64 = get_int_type(g, false, 64); g->builtin_types.entry_u128 = get_int_type(g, false, 128); diff --git a/src/ir.cpp b/src/ir.cpp index 7235ad72f2..68d31a7712 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -668,6 +668,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroBegin *) { return IrInstructionIdCoroBegin; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocFail *) { + return IrInstructionIdCoroAllocFail; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -810,6 +814,14 @@ static IrInstruction *ir_build_const_usize(IrBuilder *irb, Scope *scope, AstNode return &const_instruction->base; } +static IrInstruction *ir_build_const_u29(IrBuilder *irb, Scope *scope, AstNode *source_node, uint32_t value) { + IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); + const_instruction->base.value.type = irb->codegen->builtin_types.entry_u29; + const_instruction->base.value.special = ConstValSpecialStatic; + bigint_init_unsigned(&const_instruction->base.value.data.x_bigint, value); + return &const_instruction->base; +} + static IrInstruction *ir_create_const_type(IrBuilder *irb, Scope *scope, AstNode *source_node, TypeTableEntry *type_entry) { @@ -2471,6 +2483,17 @@ static IrInstruction *ir_build_coro_begin(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_coro_alloc_fail(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *err_val) { + IrInstructionCoroAllocFail *instruction = ir_build_instruction(irb, scope, source_node); + instruction->base.value.type = irb->codegen->builtin_types.entry_unreachable; 
+ instruction->base.value.special = ConstValSpecialStatic; + instruction->err_val = err_val; + + ir_ref_instruction(err_val, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -5854,23 +5877,22 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, alloc_field_name); IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); - IrInstruction *implicit_allocator = ir_build_load_ptr(irb, scope, node, implicit_allocator_ptr); - IrInstruction *alignment = ir_build_const_usize(irb, scope, node, irb->codegen->pointer_size_bytes * 2); + IrInstruction *alignment = ir_build_const_u29(irb, scope, node, irb->codegen->pointer_size_bytes * 2); size_t arg_count = 3; IrInstruction **args = allocate(arg_count); - args[0] = implicit_allocator; // self + args[0] = implicit_allocator_ptr; // self args[1] = coro_size; // byte_count args[2] = alignment; // alignment IrInstruction *alloc_result = ir_build_call(irb, scope, node, nullptr, alloc_fn, arg_count, args, false, FnInlineAuto, false, nullptr); IrInstruction *alloc_result_ptr = ir_build_ref(irb, scope, node, alloc_result, true, false); - IrInstruction *alloc_result_is_err = ir_build_test_err(irb, scope, node, alloc_result_ptr); + IrInstruction *alloc_result_is_err = ir_build_test_err(irb, scope, node, alloc_result); IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, scope, "AllocError"); IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, scope, "AllocOk"); ir_build_cond_br(irb, scope, node, alloc_result_is_err, alloc_err_block, alloc_ok_block, is_comptime_false); ir_set_cursor_at_end_and_append_block(irb, alloc_err_block); 
IrInstruction *err_val = ir_build_unwrap_err_code(irb, scope, node, alloc_result_ptr); - ir_build_return(irb, scope, node, err_val); + ir_build_coro_alloc_fail(irb, scope, node, err_val); ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); IrInstruction *unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); @@ -16826,18 +16848,47 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc(IrAnalyze *ira, IrInstr } static TypeTableEntry *ir_analyze_instruction_coro_size(IrAnalyze *ira, IrInstructionCoroSize *instruction) { - zig_panic("TODO ir_analyze_instruction_coro_size"); + IrInstruction *result = ir_build_coro_size(&ira->new_irb, instruction->base.scope, instruction->base.source_node); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_usize; + return result->value.type; } static TypeTableEntry *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstructionCoroBegin *instruction) { - zig_panic("TODO ir_analyze_instruction_coro_begin"); + IrInstruction *coro_id = instruction->coro_id->other; + if (type_is_invalid(coro_id->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *coro_mem_ptr = instruction->coro_mem_ptr->other; + if (type_is_invalid(coro_mem_ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + FnTableEntry *fn_entry = exec_fn_entry(ira->new_irb.exec); + assert(fn_entry != nullptr); + IrInstruction *result = ir_build_coro_begin(&ira->new_irb, instruction->base.scope, instruction->base.source_node, + coro_id, coro_mem_ptr); + ir_link_new_instruction(result, &instruction->base); + result->value.type = get_promise_type(ira->codegen, fn_entry->type_entry->data.fn.fn_type_id.return_type); + return result->value.type; } static TypeTableEntry *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) { IrInstruction *result = 
ir_get_implicit_allocator(ira, &instruction->base); + ir_link_new_instruction(result, &instruction->base); return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_coro_alloc_fail(IrAnalyze *ira, IrInstructionCoroAllocFail *instruction) { + IrInstruction *err_val = instruction->err_val->other; + if (type_is_invalid(err_val->value.type)) + return ir_unreach_error(ira); + + IrInstruction *result = ir_build_coro_alloc_fail(&ira->new_irb, instruction->base.scope, instruction->base.source_node, err_val); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_unreachable; + return ir_finish_anal(ira, result->value.type); +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -17052,6 +17103,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_coro_begin(ira, (IrInstructionCoroBegin *)instruction); case IrInstructionIdGetImplicitAllocator: return ir_analyze_instruction_get_implicit_allocator(ira, (IrInstructionGetImplicitAllocator *)instruction); + case IrInstructionIdCoroAllocFail: + return ir_analyze_instruction_coro_alloc_fail(ira, (IrInstructionCoroAllocFail *)instruction); } zig_unreachable(); } @@ -17168,6 +17221,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCancel: case IrInstructionIdCoroId: case IrInstructionIdCoroBegin: + case IrInstructionIdCoroAllocFail: return true; case IrInstructionIdPhi: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 186e2711a4..bb49273d5c 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1052,6 +1052,12 @@ static void ir_print_coro_begin(IrPrint *irp, IrInstructionCoroBegin *instructio fprintf(irp->f, ")"); } +static void ir_print_coro_alloc_fail(IrPrint *irp, IrInstructionCoroAllocFail *instruction) { + fprintf(irp->f, "@coroAllocFail("); + 
ir_print_other_instruction(irp, instruction->err_val); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1390,6 +1396,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCoroBegin: ir_print_coro_begin(irp, (IrInstructionCoroBegin *)instruction); break; + case IrInstructionIdCoroAllocFail: + ir_print_coro_alloc_fail(irp, (IrInstructionCoroAllocFail *)instruction); + break; } fprintf(irp->f, "\n"); } From 99985ad6fc0ff4ff09c0284c40023a9c826f8108 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 23 Feb 2018 03:03:06 -0500 Subject: [PATCH 10/56] implement Zig IR for async functions See #727 --- src/all_types.hpp | 36 +++++ src/analyze.cpp | 10 +- src/analyze.hpp | 1 + src/codegen.cpp | 46 ++++-- src/ir.cpp | 382 +++++++++++++++++++++++++++++++++++++++++----- src/ir_print.cpp | 46 +++++- 6 files changed, 467 insertions(+), 54 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 9575f6cdad..002a2d4a4c 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -56,7 +56,13 @@ struct IrExecutable { IrAnalyze *analysis; Scope *begin_scope; ZigList tld_list; + IrInstruction *coro_handle; + IrInstruction *coro_awaiter_field_ptr; + IrInstruction *coro_result_ptr_field_ptr; + IrInstruction *implicit_allocator_ptr; + IrBasicBlock *coro_early_final; + IrBasicBlock *coro_normal_final; }; enum OutType { @@ -1968,6 +1974,10 @@ enum IrInstructionId { IrInstructionIdCoroSize, IrInstructionIdCoroBegin, IrInstructionIdCoroAllocFail, + IrInstructionIdCoroSuspend, + IrInstructionIdCoroEnd, + IrInstructionIdCoroFree, + IrInstructionIdCoroResume, }; struct IrInstruction { @@ -2819,6 +2829,8 @@ struct IrInstructionGetImplicitAllocator { struct IrInstructionCoroId { IrInstruction base; + + IrInstruction *promise_ptr; }; struct IrInstructionCoroAlloc { @@ -2844,6 +2856,30 @@ struct 
IrInstructionCoroAllocFail { IrInstruction *err_val; }; +struct IrInstructionCoroSuspend { + IrInstruction base; + + IrInstruction *save_point; + IrInstruction *is_final; +}; + +struct IrInstructionCoroEnd { + IrInstruction base; +}; + +struct IrInstructionCoroFree { + IrInstruction base; + + IrInstruction *coro_id; + IrInstruction *coro_handle; +}; + +struct IrInstructionCoroResume { + IrInstruction base; + + IrInstruction *awaiter_handle; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/analyze.cpp b/src/analyze.cpp index d83f195a85..c00014d8ca 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -475,9 +475,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { if (child_type->zero_bits) { entry->type_ref = LLVMInt1Type(); entry->di_type = g->builtin_types.entry_bool->di_type; - } else if (child_type->id == TypeTableEntryIdPointer || - child_type->id == TypeTableEntryIdFn) - { + } else if (type_is_codegen_pointer(child_type)) { // this is an optimization but also is necessary for calling C // functions where all pointers are maybe pointers // function types are technically pointers @@ -1262,7 +1260,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { case TypeTableEntryIdMaybe: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; - return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn; + return type_is_codegen_pointer(child_type); } case TypeTableEntryIdEnum: return type_entry->data.enumeration.decl_node->data.container_decl.init_arg_expr != nullptr; @@ -1673,6 +1671,8 @@ TypeTableEntry *get_struct_type(CodeGen *g, const char *type_name, const char *f field->src_index = i; field->gen_index = i; + assert(type_has_bits(field->type_entry)); + auto prev_entry = struct_type->data.structure.fields_by_name.put_unique(field->name, field); assert(prev_entry == nullptr); } @@ -3669,9 +3669,11 @@ void 
resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) { TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type) { if (type->id == TypeTableEntryIdPointer) return type; if (type->id == TypeTableEntryIdFn) return type; + if (type->id == TypeTableEntryIdPromise) return type; if (type->id == TypeTableEntryIdMaybe) { if (type->data.maybe.child_type->id == TypeTableEntryIdPointer) return type->data.maybe.child_type; if (type->data.maybe.child_type->id == TypeTableEntryIdFn) return type->data.maybe.child_type; + if (type->data.maybe.child_type->id == TypeTableEntryIdPromise) return type->data.maybe.child_type; } return nullptr; } diff --git a/src/analyze.hpp b/src/analyze.hpp index a4036c597c..2fe41f6572 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -51,6 +51,7 @@ VariableTableEntry *find_variable(CodeGen *g, Scope *orig_context, Buf *name); Tld *find_decl(CodeGen *g, Scope *scope, Buf *name); void resolve_top_level_decl(CodeGen *g, Tld *tld, bool pointer_only, AstNode *source_node); bool type_is_codegen_pointer(TypeTableEntry *type); + TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type); uint32_t get_ptr_align(TypeTableEntry *type); TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEntry *type_entry); diff --git a/src/codegen.cpp b/src/codegen.cpp index cb3a98bb88..6b1a2513c7 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -542,7 +542,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { if (!type_has_bits(return_type)) { // nothing to do - } else if (return_type->id == TypeTableEntryIdPointer || return_type->id == TypeTableEntryIdFn) { + } else if (type_is_codegen_pointer(return_type)) { addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull"); } else if (handle_is_ptr(return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc)) @@ -2789,7 +2789,7 @@ static LLVMValueRef gen_non_null_bit(CodeGen *g, TypeTableEntry *maybe_type, LLV if (child_type->zero_bits) 
{ return maybe_handle; } else { - bool maybe_is_ptr = (child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn); + bool maybe_is_ptr = type_is_codegen_pointer(child_type); if (maybe_is_ptr) { return LLVMBuildICmp(g->builder, LLVMIntNE, maybe_handle, LLVMConstNull(maybe_type->type_ref), ""); } else { @@ -2829,7 +2829,7 @@ static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable, if (child_type->zero_bits) { return nullptr; } else { - bool maybe_is_ptr = (child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn); + bool maybe_is_ptr = type_is_codegen_pointer(child_type); if (maybe_is_ptr) { return maybe_ptr; } else { @@ -3052,6 +3052,10 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I { align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment; ptr_val = target_val; + } else if (target_type->id == TypeTableEntryIdMaybe && + target_type->data.maybe.child_type->id == TypeTableEntryIdPromise) + { + zig_panic("TODO audit this function"); } else if (target_type->id == TypeTableEntryIdStruct && target_type->data.structure.is_slice) { TypeTableEntry *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry; align_bytes = slice_ptr_type->data.pointer.alignment; @@ -3522,9 +3526,7 @@ static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, I } LLVMValueRef payload_val = ir_llvm_value(g, instruction->value); - if (child_type->id == TypeTableEntryIdPointer || - child_type->id == TypeTableEntryIdFn) - { + if (type_is_codegen_pointer(child_type)) { return payload_val; } @@ -3716,6 +3718,22 @@ static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executab zig_panic("TODO ir_render_coro_alloc_fail"); } +static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, IrInstructionCoroSuspend *instruction) { + zig_panic("TODO ir_render_coro_suspend"); +} + +static LLVMValueRef 
ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrInstructionCoroEnd *instruction) { + zig_panic("TODO ir_render_coro_end"); +} + +static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, IrInstructionCoroFree *instruction) { + zig_panic("TODO ir_render_coro_free"); +} + +static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { + zig_panic("TODO ir_render_coro_resume"); +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -3911,6 +3929,14 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_begin(g, executable, (IrInstructionCoroBegin *)instruction); case IrInstructionIdCoroAllocFail: return ir_render_coro_alloc_fail(g, executable, (IrInstructionCoroAllocFail *)instruction); + case IrInstructionIdCoroSuspend: + return ir_render_coro_suspend(g, executable, (IrInstructionCoroSuspend *)instruction); + case IrInstructionIdCoroEnd: + return ir_render_coro_end(g, executable, (IrInstructionCoroEnd *)instruction); + case IrInstructionIdCoroFree: + return ir_render_coro_free(g, executable, (IrInstructionCoroFree *)instruction); + case IrInstructionIdCoroResume: + return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); } zig_unreachable(); } @@ -4155,9 +4181,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c TypeTableEntry *child_type = type_entry->data.maybe.child_type; if (child_type->zero_bits) { return LLVMConstInt(LLVMInt1Type(), const_val->data.x_maybe ? 
1 : 0, false); - } else if (child_type->id == TypeTableEntryIdPointer || - child_type->id == TypeTableEntryIdFn) - { + } else if (type_is_codegen_pointer(child_type)) { if (const_val->data.x_maybe) { return gen_const_val(g, const_val->data.x_maybe, ""); } else { @@ -6085,9 +6109,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf if (child_type->zero_bits) { buf_init_from_str(out_buf, "bool"); return; - } else if (child_type->id == TypeTableEntryIdPointer || - child_type->id == TypeTableEntryIdFn) - { + } else if (type_is_codegen_pointer(child_type)) { return get_c_type(g, gen_h, child_type, out_buf); } else { zig_unreachable(); diff --git a/src/ir.cpp b/src/ir.cpp index 68d31a7712..183543b0fe 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -46,7 +46,10 @@ static LVal make_lval_addr(bool is_const, bool is_volatile) { } static const char * ASYNC_ALLOC_FIELD_NAME = "allocFn"; -//static const char * ASYNC_FREE_FIELD_NAME = "freeFn"; +static const char * ASYNC_FREE_FIELD_NAME = "freeFn"; +static const char * AWAITER_HANDLE_FIELD_NAME = "awaiter_handle"; +static const char * RESULT_FIELD_NAME = "result"; +static const char * RESULT_PTR_FIELD_NAME = "result_ptr"; enum ConstCastResultId { ConstCastResultIdOk, @@ -672,6 +675,22 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocFail *) return IrInstructionIdCoroAllocFail; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSuspend *) { + return IrInstructionIdCoroSuspend; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroEnd *) { + return IrInstructionIdCoroEnd; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroFree *) { + return IrInstructionIdCoroFree; +} + +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { + return IrInstructionIdCoroResume; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = 
allocate(1); @@ -743,14 +762,6 @@ static IrInstruction *ir_build_return(IrBuilder *irb, Scope *scope, AstNode *sou return &return_instruction->base; } -static IrInstruction *ir_build_return_from(IrBuilder *irb, IrInstruction *old_instruction, - IrInstruction *return_value) -{ - IrInstruction *new_instruction = ir_build_return(irb, old_instruction->scope, old_instruction->source_node, return_value); - ir_link_new_instruction(new_instruction, old_instruction); - return new_instruction; -} - static IrInstruction *ir_create_const(IrBuilder *irb, Scope *scope, AstNode *source_node, TypeTableEntry *type_entry) { @@ -822,6 +833,14 @@ static IrInstruction *ir_build_const_u29(IrBuilder *irb, Scope *scope, AstNode * return &const_instruction->base; } +static IrInstruction *ir_build_const_u8(IrBuilder *irb, Scope *scope, AstNode *source_node, uint8_t value) { + IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); + const_instruction->base.value.type = irb->codegen->builtin_types.entry_u8; + const_instruction->base.value.special = ConstValSpecialStatic; + bigint_init_unsigned(&const_instruction->base.value.data.x_bigint, value); + return &const_instruction->base; +} + static IrInstruction *ir_create_const_type(IrBuilder *irb, Scope *scope, AstNode *source_node, TypeTableEntry *type_entry) { @@ -909,6 +928,33 @@ static IrInstruction *ir_build_const_c_str_lit(IrBuilder *irb, Scope *scope, Ast return &const_instruction->base; } +static IrInstruction *ir_build_const_promise_init(IrBuilder *irb, Scope *scope, AstNode *source_node, + TypeTableEntry *return_type) +{ + TypeTableEntry *awaiter_handle_type = get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise); + TypeTableEntry *result_ptr_type = get_pointer_to_type(irb->codegen, return_type, false); + const char *field_names[] = {AWAITER_HANDLE_FIELD_NAME, RESULT_FIELD_NAME, RESULT_PTR_FIELD_NAME}; + TypeTableEntry *field_types[] = {awaiter_handle_type, return_type, result_ptr_type}; 
+ size_t field_count = type_has_bits(result_ptr_type) ? 3 : 1; + TypeTableEntry *struct_type = get_struct_type(irb->codegen, "AsyncFramePromise", field_names, field_types, + field_count); + + IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); + const_instruction->base.value.type = struct_type; + const_instruction->base.value.special = ConstValSpecialStatic; + const_instruction->base.value.data.x_struct.fields = allocate(2); + const_instruction->base.value.data.x_struct.fields[0].type = awaiter_handle_type; + const_instruction->base.value.data.x_struct.fields[0].special = ConstValSpecialStatic; + const_instruction->base.value.data.x_struct.fields[0].data.x_maybe = nullptr; + if (field_count == 3) { + const_instruction->base.value.data.x_struct.fields[1].type = return_type; + const_instruction->base.value.data.x_struct.fields[1].special = ConstValSpecialUndef; + const_instruction->base.value.data.x_struct.fields[2].type = result_ptr_type; + const_instruction->base.value.data.x_struct.fields[2].special = ConstValSpecialUndef; + } + return &const_instruction->base; +} + static IrInstruction *ir_build_bin_op(IrBuilder *irb, Scope *scope, AstNode *source_node, IrBinOp op_id, IrInstruction *op1, IrInstruction *op2, bool safety_check_on) { @@ -2451,8 +2497,11 @@ static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *sco return &instruction->base; } -static IrInstruction *ir_build_coro_id(IrBuilder *irb, Scope *scope, AstNode *source_node) { +static IrInstruction *ir_build_coro_id(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *promise_ptr) { IrInstructionCoroId *instruction = ir_build_instruction(irb, scope, source_node); + instruction->promise_ptr = promise_ptr; + + ir_ref_instruction(promise_ptr, irb->current_basic_block); return &instruction->base; } @@ -2494,6 +2543,48 @@ static IrInstruction *ir_build_coro_alloc_fail(IrBuilder *irb, Scope *scope, Ast return &instruction->base; } +static 
IrInstruction *ir_build_coro_suspend(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *save_point, IrInstruction *is_final) +{ + IrInstructionCoroSuspend *instruction = ir_build_instruction(irb, scope, source_node); + instruction->save_point = save_point; + instruction->is_final = is_final; + + if (save_point != nullptr) ir_ref_instruction(save_point, irb->current_basic_block); + ir_ref_instruction(is_final, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_coro_end(IrBuilder *irb, Scope *scope, AstNode *source_node) { + IrInstructionCoroEnd *instruction = ir_build_instruction(irb, scope, source_node); + return &instruction->base; +} + +static IrInstruction *ir_build_coro_free(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *coro_id, IrInstruction *coro_handle) +{ + IrInstructionCoroFree *instruction = ir_build_instruction(irb, scope, source_node); + instruction->coro_id = coro_id; + instruction->coro_handle = coro_handle; + + ir_ref_instruction(coro_id, irb->current_basic_block); + ir_ref_instruction(coro_handle, irb->current_basic_block); + + return &instruction->base; +} + +static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *awaiter_handle) +{ + IrInstructionCoroResume *instruction = ir_build_instruction(irb, scope, source_node); + instruction->awaiter_handle = awaiter_handle; + + ir_ref_instruction(awaiter_handle, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -2566,6 +2657,29 @@ static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) { return nullptr; } +static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode *node, IrInstruction *return_value, + bool is_generated_code) +{ + FnTableEntry *fn_entry = 
exec_fn_entry(irb->exec); + bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; + if (!is_async) { + IrInstruction *return_inst = ir_build_return(irb, scope, node, return_value); + return_inst->is_gen = is_generated_code; + return return_inst; + } + + if (irb->exec->coro_result_ptr_field_ptr) { + IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr); + ir_build_store_ptr(irb, scope, node, result_ptr, return_value); + } + IrInstruction *maybe_await_handle = ir_build_load_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr); + IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle); + IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); + return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final, + is_comptime); + // the above blocks are rendered by ir_gen after the rest of codegen +} + static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { assert(node->type == NodeTypeReturnExpr); @@ -2615,18 +2729,22 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, } ir_mark_gen(ir_build_cond_br(irb, scope, node, is_err, err_block, ok_block, is_comptime)); + IrBasicBlock *ret_stmt_block = ir_create_basic_block(irb, scope, "RetStmt"); ir_set_cursor_at_end_and_append_block(irb, err_block); ir_gen_defers_for_block(irb, scope, outer_scope, true); - ir_build_return(irb, scope, node, return_value); + ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); ir_set_cursor_at_end_and_append_block(irb, ok_block); ir_gen_defers_for_block(irb, scope, outer_scope, false); - return ir_build_return(irb, scope, node, return_value); + ir_build_br(irb, scope, node, ret_stmt_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, ret_stmt_block); + return ir_gen_async_return(irb, scope, node, 
return_value, false); } else { // generate unconditional defers ir_gen_defers_for_block(irb, scope, outer_scope, false); - return ir_build_return(irb, scope, node, return_value); + return ir_gen_async_return(irb, scope, node, return_value, false); } } case ReturnKindError: @@ -2646,7 +2764,7 @@ static IrInstruction *ir_gen_return(IrBuilder *irb, Scope *scope, AstNode *node, ir_set_cursor_at_end_and_append_block(irb, return_block); ir_gen_defers_for_block(irb, scope, outer_scope, true); IrInstruction *err_val = ir_build_unwrap_err_code(irb, scope, node, err_union_ptr); - ir_build_return(irb, scope, node, err_val); + ir_gen_async_return(irb, scope, node, err_val, false); ir_set_cursor_at_end_and_append_block(irb, continue_block); IrInstruction *unwrapped_ptr = ir_build_unwrap_err_payload(irb, scope, node, err_union_ptr, false); @@ -5842,6 +5960,7 @@ static void invalidate_exec(IrExecutable *exec) { invalidate_exec(exec->source_exec); } + bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_executable) { assert(node->owner); @@ -5858,48 +5977,81 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec FnTableEntry *fn_entry = exec_fn_entry(irb->exec); bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; + IrInstruction *u8_ptr_type; + IrInstruction *const_bool_false; + IrInstruction *coro_unwrapped_mem_ptr; + IrInstruction *coro_id; + IrInstruction *coro_promise_ptr; + IrInstruction *coro_result_field_ptr; + TypeTableEntry *return_type; + Buf *result_ptr_field_name; if (is_async) { - IrInstruction *is_comptime_false = ir_build_const_bool(irb, scope, node, false); - IrInstruction *coro_id = ir_build_coro_id(irb, scope, node); + // create the coro promise + const_bool_false = ir_build_const_bool(irb, scope, node, false); + VariableTableEntry *promise_var = ir_create_var(irb, node, scope, nullptr, false, false, true, const_bool_false); + //scope = 
promise_var->child_scope; + + return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type; + IrInstruction *promise_init = ir_build_const_promise_init(irb, scope, node, return_type); + ir_build_var_decl(irb, scope, node, promise_var, nullptr, nullptr, promise_init); + + coro_promise_ptr = ir_build_var_ptr(irb, scope, node, promise_var, false, false); + Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); + irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + awaiter_handle_field_name); + if (type_has_bits(return_type)) { + Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); + coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); + result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); + irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + result_ptr_field_name); + ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, coro_result_field_ptr); + } + + u8_ptr_type = ir_build_const_type(irb, scope, node, + get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); + IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_promise_ptr); + coro_id = ir_build_coro_id(irb, scope, node, promise_as_u8_ptr); IrInstruction *need_dyn_alloc = ir_build_coro_alloc(irb, scope, node, coro_id); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - IrInstruction *u8_ptr_type = ir_build_const_type(irb, scope, node, - get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); IrInstruction *null_ptr = ir_build_int_to_ptr(irb, scope, node, u8_ptr_type, zero); IrBasicBlock *dyn_alloc_block = ir_create_basic_block(irb, scope, "DynAlloc"); IrBasicBlock *coro_begin_block = ir_create_basic_block(irb, scope, "CoroBegin"); - ir_build_cond_br(irb, scope, node, need_dyn_alloc, dyn_alloc_block, 
coro_begin_block, is_comptime_false); + ir_build_cond_br(irb, scope, node, need_dyn_alloc, dyn_alloc_block, coro_begin_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, dyn_alloc_block); IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); - IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node); + irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node); Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); - IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, alloc_field_name); + IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, + alloc_field_name); IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); IrInstruction *alignment = ir_build_const_u29(irb, scope, node, irb->codegen->pointer_size_bytes * 2); size_t arg_count = 3; IrInstruction **args = allocate(arg_count); - args[0] = implicit_allocator_ptr; // self + args[0] = irb->exec->implicit_allocator_ptr; // self args[1] = coro_size; // byte_count args[2] = alignment; // alignment - IrInstruction *alloc_result = ir_build_call(irb, scope, node, nullptr, alloc_fn, arg_count, args, false, FnInlineAuto, false, nullptr); + IrInstruction *alloc_result = ir_build_call(irb, scope, node, nullptr, alloc_fn, arg_count, args, false, + FnInlineAuto, false, nullptr); IrInstruction *alloc_result_ptr = ir_build_ref(irb, scope, node, alloc_result, true, false); IrInstruction *alloc_result_is_err = ir_build_test_err(irb, scope, node, alloc_result); IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, scope, "AllocError"); IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, scope, "AllocOk"); - ir_build_cond_br(irb, scope, node, alloc_result_is_err, alloc_err_block, alloc_ok_block, is_comptime_false); + ir_build_cond_br(irb, scope, node, alloc_result_is_err, alloc_err_block, alloc_ok_block, 
const_bool_false); ir_set_cursor_at_end_and_append_block(irb, alloc_err_block); IrInstruction *err_val = ir_build_unwrap_err_code(irb, scope, node, alloc_result_ptr); ir_build_coro_alloc_fail(irb, scope, node, err_val); ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); - IrInstruction *unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); + coro_unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); Buf *ptr_field_name = buf_create_from_str("ptr"); - IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, unwrapped_mem_ptr, ptr_field_name); + IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, coro_unwrapped_mem_ptr, + ptr_field_name); IrInstruction *coro_mem_ptr = ir_build_load_ptr(irb, scope, node, coro_mem_ptr_field); - ir_build_br(irb, scope, node, coro_begin_block, is_comptime_false); + ir_build_br(irb, scope, node, coro_begin_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, coro_begin_block); IrBasicBlock **incoming_blocks = allocate(2); @@ -5910,6 +6062,8 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec incoming_values[1] = coro_mem_ptr; IrInstruction *coro_mem = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem); + irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal"); + irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal"); } IrInstruction *result = ir_gen_node_extra(irb, node, scope, LVAL_NONE); @@ -5918,7 +6072,84 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec return false; if (!instr_is_unreachable(result)) { - ir_mark_gen(ir_build_return(irb, scope, result->source_node, result)); + ir_gen_async_return(irb, scope, result->source_node, result, true); + } + + if (is_async) { + IrBasicBlock 
*invalid_resume_block = ir_create_basic_block(irb, scope, "InvalidResume"); + IrBasicBlock *final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup"); + IrBasicBlock *suspend_block = ir_create_basic_block(irb, scope, "Suspend"); + IrBasicBlock *check_free_block = ir_create_basic_block(irb, scope, "CheckFree"); + + ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_early_final); + IrInstruction *const_bool_true = ir_build_const_bool(irb, scope, node, true); + IrInstruction *suspend_code = ir_build_coro_suspend(irb, scope, node, nullptr, const_bool_true); + IrInstructionSwitchBrCase *cases = allocate(2); + cases[0].value = ir_build_const_u8(irb, scope, node, 0); + cases[0].block = invalid_resume_block; + cases[1].value = ir_build_const_u8(irb, scope, node, 1); + cases[1].block = final_cleanup_block; + ir_build_switch_br(irb, scope, node, suspend_code, suspend_block, 2, cases, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, suspend_block); + ir_build_coro_end(irb, scope, node); + ir_build_return(irb, scope, node, irb->exec->coro_handle); + + ir_set_cursor_at_end_and_append_block(irb, invalid_resume_block); + ir_build_unreachable(irb, scope, node); + + ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final); + ir_build_br(irb, scope, node, check_free_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, final_cleanup_block); + if (type_has_bits(return_type)) { + IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr); + IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, result_ptr); + IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, + coro_result_field_ptr); + IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node, + fn_entry->type_entry->data.fn.fn_type_id.return_type); + IrInstruction *size_of_ret_val = ir_build_size_of(irb, scope, node, 
return_type_inst); + ir_build_memcpy(irb, scope, node, result_ptr_as_u8_ptr, return_value_ptr_as_u8_ptr, size_of_ret_val); + } + ir_build_br(irb, scope, node, check_free_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, check_free_block); + IrBasicBlock **incoming_blocks = allocate(2); + IrInstruction **incoming_values = allocate(2); + incoming_blocks[0] = final_cleanup_block; + incoming_values[0] = const_bool_false; + incoming_blocks[1] = irb->exec->coro_normal_final; + incoming_values[1] = const_bool_true; + IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + IrInstruction *mem_to_free = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle); + IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, mem_to_free); + IrBasicBlock *dyn_free_block = ir_create_basic_block(irb, scope, "DynFree"); + IrBasicBlock *end_free_block = ir_create_basic_block(irb, scope, "EndFree"); + ir_build_cond_br(irb, scope, node, is_non_null, dyn_free_block, end_free_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, dyn_free_block); + Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); + IrInstruction *free_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, + free_field_name); + IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); + size_t arg_count = 2; + IrInstruction **args = allocate(arg_count); + args[0] = irb->exec->implicit_allocator_ptr; // self + args[1] = ir_build_load_ptr(irb, scope, node, coro_unwrapped_mem_ptr); // old_mem + ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); + ir_build_br(irb, scope, node, end_free_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, end_free_block); + IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); + ir_build_cond_br(irb, scope, node, resume_awaiter, 
resume_block, suspend_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, resume_block); + IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, + irb->exec->coro_awaiter_field_ptr, false); + IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); + ir_build_coro_resume(irb, scope, node, awaiter_handle); + ir_build_br(irb, scope, node, suspend_block, const_bool_false); } return true; @@ -9514,8 +9745,11 @@ static TypeTableEntry *ir_analyze_instruction_return(IrAnalyze *ira, ir_add_error(ira, casted_value, buf_sprintf("function returns address of local variable")); return ir_unreach_error(ira); } - ir_build_return_from(&ira->new_irb, &return_instruction->base, casted_value); - return ir_finish_anal(ira, ira->codegen->builtin_types.entry_unreachable); + IrInstruction *result = ir_build_return(&ira->new_irb, return_instruction->base.scope, + return_instruction->base.source_node, casted_value); + result->value.type = ira->codegen->builtin_types.entry_unreachable; + ir_link_new_instruction(result, &return_instruction->base); + return ir_finish_anal(ira, result->value.type); } static TypeTableEntry *ir_analyze_instruction_const(IrAnalyze *ira, IrInstructionConst *const_instruction) { @@ -16624,11 +16858,8 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize; - if (!(target->value.type->id == TypeTableEntryIdPointer || - target->value.type->id == TypeTableEntryIdFn || - (target->value.type->id == TypeTableEntryIdMaybe && - (target->value.type->data.maybe.child_type->id == TypeTableEntryIdPointer || - target->value.type->data.maybe.child_type->id == TypeTableEntryIdFn)))) + if (!(type_is_codegen_pointer(target->value.type) || (target->value.type->id == TypeTableEntryIdMaybe && + type_is_codegen_pointer(target->value.type->data.maybe.child_type)))) { ir_add_error(ira, target, 
buf_sprintf("expected pointer, found '%s'", buf_ptr(&target->value.type->name))); @@ -16829,7 +17060,12 @@ static TypeTableEntry *ir_analyze_instruction_cancel(IrAnalyze *ira, IrInstructi } static TypeTableEntry *ir_analyze_instruction_coro_id(IrAnalyze *ira, IrInstructionCoroId *instruction) { - IrInstruction *result = ir_build_coro_id(&ira->new_irb, instruction->base.scope, instruction->base.source_node); + IrInstruction *promise_ptr = instruction->promise_ptr->other; + if (type_is_invalid(promise_ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_coro_id(&ira->new_irb, instruction->base.scope, instruction->base.source_node, + promise_ptr); ir_link_new_instruction(result, &instruction->base); result->value.type = ira->codegen->builtin_types.entry_usize; return result->value.type; @@ -16889,6 +17125,63 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc_fail(IrAnalyze *ira, Ir return ir_finish_anal(ira, result->value.type); } +static TypeTableEntry *ir_analyze_instruction_coro_suspend(IrAnalyze *ira, IrInstructionCoroSuspend *instruction) { + IrInstruction *save_point = nullptr; + if (instruction->save_point != nullptr) { + save_point = instruction->save_point->other; + if (type_is_invalid(save_point->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *is_final = instruction->is_final->other; + if (type_is_invalid(is_final->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_coro_suspend(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, save_point, is_final); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_u8; + return result->value.type; +} + +static TypeTableEntry *ir_analyze_instruction_coro_end(IrAnalyze *ira, IrInstructionCoroEnd *instruction) { + IrInstruction *result = ir_build_coro_end(&ira->new_irb, 
instruction->base.scope, + instruction->base.source_node); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_void; + return result->value.type; +} + +static TypeTableEntry *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstructionCoroFree *instruction) { + IrInstruction *coro_id = instruction->coro_id->other; + if (type_is_invalid(coro_id->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *coro_handle = instruction->coro_handle->other; + if (type_is_invalid(coro_handle->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_coro_free(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, coro_id, coro_handle); + ir_link_new_instruction(result, &instruction->base); + TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); + result->value.type = get_maybe_type(ira->codegen, ptr_type); + return result->value.type; +} + +static TypeTableEntry *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInstructionCoroResume *instruction) { + IrInstruction *awaiter_handle = instruction->awaiter_handle->other; + if (type_is_invalid(awaiter_handle->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_coro_resume(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, awaiter_handle); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_void; + return result->value.type; +} + + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { case IrInstructionIdInvalid: @@ -17105,6 +17398,14 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_get_implicit_allocator(ira, (IrInstructionGetImplicitAllocator *)instruction); case 
IrInstructionIdCoroAllocFail: return ir_analyze_instruction_coro_alloc_fail(ira, (IrInstructionCoroAllocFail *)instruction); + case IrInstructionIdCoroSuspend: + return ir_analyze_instruction_coro_suspend(ira, (IrInstructionCoroSuspend *)instruction); + case IrInstructionIdCoroEnd: + return ir_analyze_instruction_coro_end(ira, (IrInstructionCoroEnd *)instruction); + case IrInstructionIdCoroFree: + return ir_analyze_instruction_coro_free(ira, (IrInstructionCoroFree *)instruction); + case IrInstructionIdCoroResume: + return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); } zig_unreachable(); } @@ -17134,7 +17435,10 @@ TypeTableEntry *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutabl IrAnalyze *ira = allocate(1); old_exec->analysis = ira; ira->codegen = codegen; - ira->explicit_return_type = expected_type; + + FnTableEntry *fn_entry = exec_fn_entry(old_exec); + bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; + ira->explicit_return_type = is_async ? 
get_promise_type(codegen, expected_type) : expected_type; ira->old_irb.codegen = codegen; ira->old_irb.exec = old_exec; @@ -17222,6 +17526,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroId: case IrInstructionIdCoroBegin: case IrInstructionIdCoroAllocFail: + case IrInstructionIdCoroEnd: + case IrInstructionIdCoroResume: return true; case IrInstructionIdPhi: @@ -17299,6 +17605,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdGetImplicitAllocator: case IrInstructionIdCoroAlloc: case IrInstructionIdCoroSize: + case IrInstructionIdCoroSuspend: + case IrInstructionIdCoroFree: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index bb49273d5c..ca7eb25879 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1031,7 +1031,9 @@ static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplic } static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) { - fprintf(irp->f, "@coroId()"); + fprintf(irp->f, "@coroId("); + ir_print_other_instruction(irp, instruction->promise_ptr); + fprintf(irp->f, ")"); } static void ir_print_coro_alloc(IrPrint *irp, IrInstructionCoroAlloc *instruction) { @@ -1058,6 +1060,36 @@ static void ir_print_coro_alloc_fail(IrPrint *irp, IrInstructionCoroAllocFail *i fprintf(irp->f, ")"); } +static void ir_print_coro_suspend(IrPrint *irp, IrInstructionCoroSuspend *instruction) { + fprintf(irp->f, "@coroSuspend("); + if (instruction->save_point != nullptr) { + ir_print_other_instruction(irp, instruction->save_point); + } else { + fprintf(irp->f, "null"); + } + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->is_final); + fprintf(irp->f, ")"); +} + +static void ir_print_coro_end(IrPrint *irp, IrInstructionCoroEnd *instruction) { + fprintf(irp->f, "@coroEnd()"); +} + +static void ir_print_coro_free(IrPrint *irp, IrInstructionCoroFree *instruction) { + fprintf(irp->f, "@coroFree("); + 
ir_print_other_instruction(irp, instruction->coro_id); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->coro_handle); + fprintf(irp->f, ")"); +} + +static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruction) { + fprintf(irp->f, "@coroResume("); + ir_print_other_instruction(irp, instruction->awaiter_handle); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1399,6 +1431,18 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCoroAllocFail: ir_print_coro_alloc_fail(irp, (IrInstructionCoroAllocFail *)instruction); break; + case IrInstructionIdCoroSuspend: + ir_print_coro_suspend(irp, (IrInstructionCoroSuspend *)instruction); + break; + case IrInstructionIdCoroEnd: + ir_print_coro_end(irp, (IrInstructionCoroEnd *)instruction); + break; + case IrInstructionIdCoroFree: + ir_print_coro_free(irp, (IrInstructionCoroFree *)instruction); + break; + case IrInstructionIdCoroResume: + ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); + break; } fprintf(irp->f, "\n"); } From f11b9480192dea44acb92cb9edd7a91c7c73cd2f Mon Sep 17 00:00:00 2001 From: Ben Noordhuis Date: Fri, 23 Feb 2018 15:25:42 +0100 Subject: [PATCH 11/56] allow implicit cast from `S` to `?&const S` Allow implicit casts from container types to nullable const pointers to said container type. That is: fn f() void { const s = S {}; g(s); // Works. g(&s); // So does this. } fn g(_: ?&const S) void { // Nullable const pointer. } Fixes #731. 
--- src/ir.cpp | 17 +++++++++-- test/cases/cast.zig | 71 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 2 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index b276abff33..e79235830c 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -8817,16 +8817,29 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // explicit cast from child type of maybe type to maybe type if (wanted_type->id == TypeTableEntryIdMaybe) { - if (types_match_const_cast_only(ira, wanted_type->data.maybe.child_type, actual_type, source_node).id == ConstCastResultIdOk) { + TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type; + if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) { return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type); } else if (actual_type->id == TypeTableEntryIdNumLitInt || actual_type->id == TypeTableEntryIdNumLitFloat) { - if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.maybe.child_type, true)) { + if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) { return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type); } else { return ira->codegen->invalid_instruction; } + } else if (wanted_child_type->id == TypeTableEntryIdPointer && + wanted_child_type->data.pointer.is_const && + is_container(actual_type)) { + IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_child_type, value); + if (type_is_invalid(cast1->value.type)) + return ira->codegen->invalid_instruction; + + IrInstruction *cast2 = ir_analyze_cast(ira, source_instr, wanted_type, cast1); + if (type_is_invalid(cast2->value.type)) + return ira->codegen->invalid_instruction; + + return cast2; } } diff --git a/test/cases/cast.zig b/test/cases/cast.zig index 6ffb558174..dabf97a799 100644 --- a/test/cases/cast.zig +++ b/test/cases/cast.zig @@ -32,6 +32,77 @@ fn funcWithConstPtrPtr(x: &const &i32) void { **x += 1; } +test 
"implicitly cast a container to a const pointer of it" { + const z = Struct(void) { .x = void{} }; + assert(0 == @sizeOf(@typeOf(z))); + assert(void{} == Struct(void).pointer(z).x); + assert(void{} == Struct(void).pointer(&z).x); + assert(void{} == Struct(void).maybePointer(z).x); + assert(void{} == Struct(void).maybePointer(&z).x); + assert(void{} == Struct(void).maybePointer(null).x); + const s = Struct(u8) { .x = 42 }; + assert(0 != @sizeOf(@typeOf(s))); + assert(42 == Struct(u8).pointer(s).x); + assert(42 == Struct(u8).pointer(&s).x); + assert(42 == Struct(u8).maybePointer(s).x); + assert(42 == Struct(u8).maybePointer(&s).x); + assert(0 == Struct(u8).maybePointer(null).x); + const u = Union { .x = 42 }; + assert(42 == Union.pointer(u).x); + assert(42 == Union.pointer(&u).x); + assert(42 == Union.maybePointer(u).x); + assert(42 == Union.maybePointer(&u).x); + assert(0 == Union.maybePointer(null).x); + const e = Enum.Some; + assert(Enum.Some == Enum.pointer(e)); + assert(Enum.Some == Enum.pointer(&e)); + assert(Enum.Some == Enum.maybePointer(e)); + assert(Enum.Some == Enum.maybePointer(&e)); + assert(Enum.None == Enum.maybePointer(null)); +} + +fn Struct(comptime T: type) type { + return struct { + const Self = this; + x: T, + + fn pointer(self: &const Self) Self { + return *self; + } + + fn maybePointer(self: ?&const Self) Self { + const none = Self { .x = if (T == void) void{} else 0 }; + return *(self ?? &none); + } + }; +} + +const Union = union { + x: u8, + + fn pointer(self: &const Union) Union { + return *self; + } + + fn maybePointer(self: ?&const Union) Union { + const none = Union { .x = 0 }; + return *(self ?? &none); + } +}; + +const Enum = enum { + None, + Some, + + fn pointer(self: &const Enum) Enum { + return *self; + } + + fn maybePointer(self: ?&const Enum) Enum { + return *(self ?? 
&Enum.None); + } +}; + test "explicit cast from integer to error type" { testCastIntToErr(error.ItBroke); comptime testCastIntToErr(error.ItBroke); From 40dbcd09da27a271c5d1b0990e712bd2b2bfe68d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 23 Feb 2018 12:49:21 -0500 Subject: [PATCH 12/56] fix type_is_codegen_pointer being used incorrectly The names of these functions should probably change, but at least the semantics are correct now: * type_is_codegen_pointer - the type is either a fn, ptr, or promise * get_codegen_ptr_type - - ?&T and &T returns &T - ?promise and promise returns promise - ?fn()void and fn()void returns fn()void - otherwise returns nullptr --- src/analyze.cpp | 6 ++++-- src/ir.cpp | 18 ++++++++---------- std/hash_map.zig | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index c00014d8ca..cf5d9e0eab 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3679,7 +3679,7 @@ TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type) { } bool type_is_codegen_pointer(TypeTableEntry *type) { - return get_codegen_ptr_type(type) != nullptr; + return get_codegen_ptr_type(type) == type; } uint32_t get_ptr_align(TypeTableEntry *type) { @@ -3688,6 +3688,8 @@ uint32_t get_ptr_align(TypeTableEntry *type) { return ptr_type->data.pointer.alignment; } else if (ptr_type->id == TypeTableEntryIdFn) { return (ptr_type->data.fn.fn_type_id.alignment == 0) ? 
1 : ptr_type->data.fn.fn_type_id.alignment; + } else if (ptr_type->id == TypeTableEntryIdPromise) { + return 1; } else { zig_unreachable(); } @@ -3723,7 +3725,7 @@ static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entr TypeTableEntry *param_type = param_info->type; bool is_noalias = param_info->is_noalias; - if (is_noalias && !type_is_codegen_pointer(param_type)) { + if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) { add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter")); } diff --git a/src/ir.cpp b/src/ir.cpp index 183543b0fe..cc57e20c62 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -16470,12 +16470,12 @@ static TypeTableEntry *ir_analyze_instruction_ptr_cast(IrAnalyze *ira, IrInstruc if (type_is_invalid(src_type)) return ira->codegen->builtin_types.entry_invalid; - if (!type_is_codegen_pointer(src_type)) { + if (get_codegen_ptr_type(src_type) == nullptr) { ir_add_error(ira, ptr, buf_sprintf("expected pointer, found '%s'", buf_ptr(&src_type->name))); return ira->codegen->builtin_types.entry_invalid; } - if (!type_is_codegen_pointer(dest_type)) { + if (get_codegen_ptr_type(dest_type) == nullptr) { ir_add_error(ira, dest_type_value, buf_sprintf("expected pointer, found '%s'", buf_ptr(&dest_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -16662,9 +16662,9 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc ensure_complete_type(ira->codegen, dest_type); ensure_complete_type(ira->codegen, src_type); - if (type_is_codegen_pointer(src_type)) { + if (get_codegen_ptr_type(src_type) != nullptr) { ir_add_error(ira, value, - buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&src_type->name))); + buf_sprintf("unable to @bitCast from pointer type '%s'", buf_ptr(&src_type->name))); return ira->codegen->builtin_types.entry_invalid; } @@ -16689,9 +16689,9 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc break; } - if 
(type_is_codegen_pointer(dest_type)) { + if (get_codegen_ptr_type(dest_type) != nullptr) { ir_add_error(ira, dest_type_value, - buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name))); + buf_sprintf("unable to @bitCast to pointer type '%s'", buf_ptr(&dest_type->name))); return ira->codegen->builtin_types.entry_invalid; } @@ -16752,7 +16752,7 @@ static TypeTableEntry *ir_analyze_instruction_int_to_ptr(IrAnalyze *ira, IrInstr if (type_is_invalid(dest_type)) return ira->codegen->builtin_types.entry_invalid; - if (!type_is_codegen_pointer(dest_type)) { + if (get_codegen_ptr_type(dest_type) == nullptr) { ir_add_error(ira, dest_type_value, buf_sprintf("expected pointer, found '%s'", buf_ptr(&dest_type->name))); return ira->codegen->builtin_types.entry_invalid; } @@ -16858,9 +16858,7 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize; - if (!(type_is_codegen_pointer(target->value.type) || (target->value.type->id == TypeTableEntryIdMaybe && - type_is_codegen_pointer(target->value.type->data.maybe.child_type)))) - { + if (get_codegen_ptr_type(target->value.type) == nullptr) { ir_add_error(ira, target, buf_sprintf("expected pointer, found '%s'", buf_ptr(&target->value.type->name))); return ira->codegen->builtin_types.entry_invalid; diff --git a/std/hash_map.zig b/std/hash_map.zig index 659783bc84..becced64ff 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -235,7 +235,7 @@ pub fn HashMap(comptime K: type, comptime V: type, }; } -test "basicHashMapTest" { +test "basic hash map usage" { var map = HashMap(i32, i32, hash_i32, eql_i32).init(debug.global_allocator); defer map.deinit(); From 8db7a1420f892deb4a2a3c63b1a3227b65aadccf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 23 Feb 2018 20:43:47 -0500 Subject: [PATCH 13/56] update errors section of docs closes #768 --- doc/langref.html.in | 103 ++++++++++++++++++++++++++++++++++++-------- 1 file 
changed, 86 insertions(+), 17 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 2b09ca81bd..abbebaa6fb 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -2782,30 +2782,96 @@ test "fn reflection" { {#header_close#} {#header_close#} {#header_open|Errors#} + {#header_open|Error Set Type#}

- One of the distinguishing features of Zig is its exception handling strategy. + An error set is like an {#link|enum#}. + However, each error name across the entire compilation gets assigned an unsigned integer + greater than 0. You are allowed to declare the same error name more than once, and if you do, it + gets assigned the same integer value.

- TODO rewrite the errors section to take into account error sets + The number of unique error values across the entire compilation should determine the size of the error set type. + However, right now it is hard-coded to be a u16. See #768.

- These error values are assigned an unsigned integer value greater than 0 at - compile time. You are allowed to declare the same error value more than once, - and if you do, it gets assigned the same integer value. + You can implicitly cast an error from a subset to its superset: +

+ {#code_begin|test#} +const std = @import("std"); + +const FileOpenError = error { + AccessDenied, + OutOfMemory, + FileNotFound, +}; + +const AllocationError = error { + OutOfMemory, +}; + +test "implicit cast subset to superset" { + const err = foo(AllocationError.OutOfMemory); + std.debug.assert(err == FileOpenError.OutOfMemory); +} + +fn foo(err: AllocationError) FileOpenError { + return err; +} + {#code_end#} +

+ But you cannot implicitly cast an error from a superset to a subset: +

+ {#code_begin|test_err|not a member of destination error set#} +const FileOpenError = error { + AccessDenied, + OutOfMemory, + FileNotFound, +}; + +const AllocationError = error { + OutOfMemory, +}; + +test "implicit cast superset to subset" { + foo(FileOpenError.OutOfMemory) catch {}; +} + +fn foo(err: FileOpenError) AllocationError { + return err; +} + {#code_end#} +

+ There is a shortcut for declaring an error set with only 1 value, and then getting that value: +

+ {#code_begin|syntax#} +const err = error.FileNotFound; + {#code_end#} +

This is equivalent to:

+ {#code_begin|syntax#} +const err = (error {FileNotFound}).FileNotFound; + {#code_end#} +

+ This becomes useful when using {#link|Inferred Error Sets#}. +

+ {#header_open|The Global Error Set#} +

error refers to the global error set. + This is the error set that contains all errors in the entire compilation unit. + It is a superset of all other error sets and a subset of none of them.

- You can refer to these error values with the error namespace such as - error.FileNotFound. + You can implicitly cast any error set to the global one, and you can explicitly + cast an error of global error set to a non-global one. This inserts a language-level + assert to make sure the error value is in fact in the destination error set.

- Each error value across the entire compilation unit gets a unique integer, - and this determines the size of the error set type. + The global error set should generally be avoided when possible, because it prevents + the compiler from knowing what errors are possible at compile-time. Knowing + the error set at compile-time is better for generated documentation and for + helpful error messages such as forgetting a possible error value in a {#link|switch#}.

-

- The error set type is one of the error values, and in the same way that pointers - cannot be null, a error set instance is always an error. -

- {#code_begin|syntax#}const pure_error = error.FileNotFound;{#code_end#} + {#header_close#} + {#header_close#} + {#header_open|Error Union Type#}

Most of the time you will not find yourself using an error set type. Instead, likely you will be using the error union type. This is when you take an error set @@ -2918,7 +2984,6 @@ fn doAThing(str: []u8) !void { a panic in Debug and ReleaseSafe modes and undefined behavior in ReleaseFast mode. So, while we're debugging the application, if there was a surprise error here, the application would crash appropriately. - TODO: mention error return traces

Finally, you may want to take a different action for every situation. For that, we combine @@ -2986,7 +3051,7 @@ fn createFoo(param: i32) !Foo { {#see_also|defer|if|switch#} - {#header_open|Error Union Type#} +

An error union is created with the ! binary operator. You can use compile-time reflection to access the child type of an error union:

{#code_begin|test#} @@ -3008,8 +3073,12 @@ test "error union" { comptime assert(@typeOf(foo).ErrorSet == error); } {#code_end#} +

TODO the || operator for error sets

+ {#header_open|Inferred Error Sets#} +

TODO

{#header_close#} - {#header_open|Error Set Type#} + {#header_close#} + {#header_open|Error Return Traces#}

TODO

{#header_close#} {#header_close#} From 08d595b4724217f0b2b10cd3b9e31a71698cf5c1 Mon Sep 17 00:00:00 2001 From: Marc Tiehuis Date: Fri, 23 Feb 2018 21:20:15 +1300 Subject: [PATCH 14/56] Add utf8 string view --- std/unicode.zig | 149 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 140 insertions(+), 9 deletions(-) diff --git a/std/unicode.zig b/std/unicode.zig index df62e9162f..81bbc2aab6 100644 --- a/std/unicode.zig +++ b/std/unicode.zig @@ -1,4 +1,5 @@ const std = @import("./index.zig"); +const debug = std.debug; /// Given the first byte of a UTF-8 codepoint, /// returns a number 1-4 indicating the total length of the codepoint in bytes. @@ -25,8 +26,8 @@ pub fn utf8Decode(bytes: []const u8) !u32 { }; } pub fn utf8Decode2(bytes: []const u8) !u32 { - std.debug.assert(bytes.len == 2); - std.debug.assert(bytes[0] & 0b11100000 == 0b11000000); + debug.assert(bytes.len == 2); + debug.assert(bytes[0] & 0b11100000 == 0b11000000); var value: u32 = bytes[0] & 0b00011111; if (bytes[1] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; @@ -38,8 +39,8 @@ pub fn utf8Decode2(bytes: []const u8) !u32 { return value; } pub fn utf8Decode3(bytes: []const u8) !u32 { - std.debug.assert(bytes.len == 3); - std.debug.assert(bytes[0] & 0b11110000 == 0b11100000); + debug.assert(bytes.len == 3); + debug.assert(bytes[0] & 0b11110000 == 0b11100000); var value: u32 = bytes[0] & 0b00001111; if (bytes[1] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; @@ -56,8 +57,8 @@ pub fn utf8Decode3(bytes: []const u8) !u32 { return value; } pub fn utf8Decode4(bytes: []const u8) !u32 { - std.debug.assert(bytes.len == 4); - std.debug.assert(bytes[0] & 0b11111000 == 0b11110000); + debug.assert(bytes.len == 4); + debug.assert(bytes[0] & 0b11111000 == 0b11110000); var value: u32 = bytes[0] & 0b00000111; if (bytes[1] & 0b11000000 != 0b10000000) return error.Utf8ExpectedContinuation; @@ -78,6 +79,136 @@ pub fn utf8Decode4(bytes: []const u8) !u32 { return value; } 
+pub fn utf8ValidateSlice(s: []const u8) bool { + var i: usize = 0; + while (i < s.len) { + if (utf8ByteSequenceLength(s[i])) |cp_len| { + if (i + cp_len > s.len) { + return false; + } + + if (utf8Decode(s[i..i+cp_len])) |_| {} else |_| { return false; } + i += cp_len; + } else |err| { + return false; + } + } + return true; +} + +const Utf8View = struct { + bytes: []const u8, + + pub fn init(s: []const u8) !Utf8View { + if (!utf8ValidateSlice(s)) { + return error.InvalidUtf8; + } + + return initUnchecked(s); + } + + pub fn initUnchecked(s: []const u8) Utf8View { + return Utf8View { + .bytes = s, + }; + } + + pub fn initComptime(comptime s: []const u8) Utf8View { + if (comptime init(s)) |r| { + return r; + } else |err| switch (err) { + error.InvalidUtf8 => { + @compileError("invalid utf8"); + unreachable; + } + } + } + + pub fn Iterator(s: &const Utf8View) Utf8Iterator { + return Utf8Iterator { + .bytes = s.bytes, + .i = 0, + }; + } +}; + +const Utf8Iterator = struct { + bytes: []const u8, + i: usize, + + pub fn nextCodepointSlice(it: &Utf8Iterator) ?[]const u8 { + if (it.i >= it.bytes.len) { + return null; + } + + const cp_len = utf8ByteSequenceLength(it.bytes[it.i]) catch unreachable; + + it.i += cp_len; + return it.bytes[it.i-cp_len..it.i]; + } + + pub fn nextCodepoint(it: &Utf8Iterator) ?u32 { + const slice = it.nextCodepointSlice() ?? 
return null; + + const r = switch (slice.len) { + 1 => u32(slice[0]), + 2 => utf8Decode2(slice), + 3 => utf8Decode3(slice), + 4 => utf8Decode4(slice), + else => unreachable, + }; + + return r catch unreachable; + } +}; + +test "utf8 iterator on ascii" { + const s = Utf8View.initComptime("abc"); + + var it1 = s.Iterator(); + debug.assert(std.mem.eql(u8, "a", ??it1.nextCodepointSlice())); + debug.assert(std.mem.eql(u8, "b", ??it1.nextCodepointSlice())); + debug.assert(std.mem.eql(u8, "c", ??it1.nextCodepointSlice())); + debug.assert(it1.nextCodepointSlice() == null); + + var it2 = s.Iterator(); + debug.assert(??it2.nextCodepoint() == 'a'); + debug.assert(??it2.nextCodepoint() == 'b'); + debug.assert(??it2.nextCodepoint() == 'c'); + debug.assert(it2.nextCodepoint() == null); +} + +test "utf8 view bad" { + // Compile-time error. + // const s3 = Utf8View.initComptime("\xfe\xf2"); + + const s = Utf8View.init("hel\xadlo"); + if (s) |_| { unreachable; } else |err| { debug.assert(err == error.InvalidUtf8); } +} + +test "utf8 view ok" { + const s = Utf8View.initComptime("東京市"); + + var it1 = s.Iterator(); + debug.assert(std.mem.eql(u8, "東", ??it1.nextCodepointSlice())); + debug.assert(std.mem.eql(u8, "京", ??it1.nextCodepointSlice())); + debug.assert(std.mem.eql(u8, "市", ??it1.nextCodepointSlice())); + debug.assert(it1.nextCodepointSlice() == null); + + var it2 = s.Iterator(); + debug.assert(??it2.nextCodepoint() == 0x6771); + debug.assert(??it2.nextCodepoint() == 0x4eac); + debug.assert(??it2.nextCodepoint() == 0x5e02); + debug.assert(it2.nextCodepoint() == null); +} + +test "bad utf8 slice" { + debug.assert(utf8ValidateSlice("abc")); + debug.assert(!utf8ValidateSlice("abc\xc0")); + debug.assert(!utf8ValidateSlice("abc\xc0abc")); + debug.assert(utf8ValidateSlice("abc\xdf\xbf")); +} + test "valid utf8" { testValid("\x00", 0x0); testValid("\x20", 0x20); @@ -145,17 +276,17 @@ fn testError(bytes: []const u8, expected_err: error) void { if (testDecode(bytes)) |_| { unreachable; } 
else |err| { - std.debug.assert(err == expected_err); + debug.assert(err == expected_err); } } fn testValid(bytes: []const u8, expected_codepoint: u32) void { - std.debug.assert((testDecode(bytes) catch unreachable) == expected_codepoint); + debug.assert((testDecode(bytes) catch unreachable) == expected_codepoint); } fn testDecode(bytes: []const u8) !u32 { const length = try utf8ByteSequenceLength(bytes[0]); if (bytes.len < length) return error.UnexpectedEof; - std.debug.assert(bytes.len == length); + debug.assert(bytes.len == length); return utf8Decode(bytes); } From 05bf666eb690d1a1328234cc408960133dba9563 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 02:47:31 -0500 Subject: [PATCH 15/56] codegen for calling an async function See #727 --- src/analyze.cpp | 34 +++++++++++++++++++++++++++++----- src/codegen.cpp | 22 ++++++++++++++++++---- src/ir.cpp | 2 ++ 3 files changed, 49 insertions(+), 9 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index cf5d9e0eab..2126f5ba07 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -986,20 +986,25 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { if (!skip_debug_info) { bool first_arg_return = calling_convention_does_first_arg_return(fn_type_id->cc) && handle_is_ptr(fn_type_id->return_type); + bool is_async = fn_type_id->cc == CallingConventionAsync; bool prefix_arg_error_return_trace = g->have_err_ret_tracing && (fn_type_id->return_type->id == TypeTableEntryIdErrorUnion || fn_type_id->return_type->id == TypeTableEntryIdErrorSet); // +1 for maybe making the first argument the return value - // +1 for maybe last argument the error return trace - LLVMTypeRef *gen_param_types = allocate(2 + fn_type_id->param_count); + // +1 for maybe first argument the error return trace + // +2 for maybe arguments async allocator and error code pointer + LLVMTypeRef *gen_param_types = allocate(4 + fn_type_id->param_count); // +1 because 0 is the return type and // +1 for maybe making first arg 
ret val and - // +1 for maybe last argument the error return trace - ZigLLVMDIType **param_di_types = allocate(3 + fn_type_id->param_count); + // +1 for maybe first argument the error return trace + // +2 for maybe arguments async allocator and error code pointer + ZigLLVMDIType **param_di_types = allocate(5 + fn_type_id->param_count); param_di_types[0] = fn_type_id->return_type->di_type; size_t gen_param_index = 0; TypeTableEntry *gen_return_type; - if (!type_has_bits(fn_type_id->return_type)) { + if (is_async) { + gen_return_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); + } else if (!type_has_bits(fn_type_id->return_type)) { gen_return_type = g->builtin_types.entry_void; } else if (first_arg_return) { TypeTableEntry *gen_type = get_pointer_to_type(g, fn_type_id->return_type, false); @@ -1020,6 +1025,25 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { // after the gen_param_index += 1 because 0 is the return type param_di_types[gen_param_index] = gen_type->di_type; } + if (is_async) { + { + // async allocator param + TypeTableEntry *gen_type = fn_type_id->async_allocator_type; + gen_param_types[gen_param_index] = gen_type->type_ref; + gen_param_index += 1; + // after the gen_param_index += 1 because 0 is the return type + param_di_types[gen_param_index] = gen_type->di_type; + } + + { + // error code pointer + TypeTableEntry *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false); + gen_param_types[gen_param_index] = gen_type->type_ref; + gen_param_index += 1; + // after the gen_param_index += 1 because 0 is the return type + param_di_types[gen_param_index] = gen_type->di_type; + } + } fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { diff --git a/src/codegen.cpp b/src/codegen.cpp index 6b1a2513c7..ec14d85064 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2521,13 +2521,12 @@ static LLVMValueRef ir_render_call(CodeGen 
*g, IrExecutable *executable, IrInstr } FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; - if (fn_type_id->cc == CallingConventionAsync) { - zig_panic("TODO codegen async function call"); - } TypeTableEntry *src_return_type = fn_type_id->return_type; bool ret_has_bits = type_has_bits(src_return_type); - bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type); + + bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type) && + calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc); bool prefix_arg_err_ret_stack = g->have_err_ret_tracing && (src_return_type->id == TypeTableEntryIdErrorUnion || src_return_type->id == TypeTableEntryIdErrorSet); size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0) + (prefix_arg_err_ret_stack ? 1 : 0); bool is_var_args = fn_type_id->is_var_args; @@ -2541,6 +2540,15 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr gen_param_values[gen_param_index] = g->cur_err_ret_trace_val; gen_param_index += 1; } + if (instruction->is_async) { + gen_param_values[gen_param_index] = ir_llvm_value(g, instruction->async_allocator); + gen_param_index += 1; + + LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_err_index, ""); + LLVMBuildStore(g->builder, LLVMConstNull(g->builtin_types.entry_global_error_set->type_ref), err_val_ptr); + gen_param_values[gen_param_index] = err_val_ptr; + gen_param_index += 1; + } for (size_t call_i = 0; call_i < instruction->arg_count; call_i += 1) { IrInstruction *param_instruction = instruction->args[call_i]; TypeTableEntry *param_type = param_instruction->value.type; @@ -2578,6 +2586,12 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } } + if (instruction->is_async) { + LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_payload_index, ""); + LLVMBuildStore(g->builder, result, payload_ptr); + return 
instruction->tmp_ptr; + } + if (src_return_type->id == TypeTableEntryIdUnreachable) { return LLVMBuildUnreachable(g->builder); } else if (!ret_has_bits) { diff --git a/src/ir.cpp b/src/ir.cpp index cc57e20c62..f484f32e01 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -11775,6 +11775,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (call_instruction->is_async) { IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, fn_ref, casted_args, impl_param_count, async_allocator_inst); ir_link_new_instruction(result, &call_instruction->base); + ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result->value.type); } @@ -11862,6 +11863,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, casted_args, call_param_count, async_allocator_inst); ir_link_new_instruction(result, &call_instruction->base); + ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result->value.type); } From 7567448b91fd7012bf61d3f5532bfd86304899ae Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 14:47:58 -0500 Subject: [PATCH 16/56] codegen for cancel See #727 --- src/all_types.hpp | 1 + src/codegen.cpp | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 002a2d4a4c..e9b4561eba 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1609,6 +1609,7 @@ struct CodeGen { LLVMValueRef trap_fn_val; LLVMValueRef return_address_fn_val; LLVMValueRef frame_address_fn_val; + LLVMValueRef coro_destroy_fn_val; bool error_during_imports; const char **clang_argv; diff --git a/src/codegen.cpp b/src/codegen.cpp index ec14d85064..783f5fd8fe 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -927,6 +927,21 @@ static LLVMValueRef get_memcpy_fn_val(CodeGen *g) { return 
g->memcpy_fn_val; } +static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) { + if (g->coro_destroy_fn_val) + return g->coro_destroy_fn_val; + + LLVMTypeRef param_types[] = { + LLVMPointerType(LLVMInt8Type(), 0), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false); + Buf *name = buf_sprintf("llvm.coro.destroy"); + g->coro_destroy_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_destroy_fn_val)); + + return g->coro_destroy_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3113,7 +3128,9 @@ static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *execu } static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrInstructionCancel *instruction) { - zig_panic("TODO ir_render_cancel"); + LLVMValueRef target_handle = ir_llvm_value(g, instruction->target); + LLVMBuildCall(g->builder, get_coro_destroy_fn_val(g), &target_handle, 1, ""); + return nullptr; } static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable, IrInstructionGetImplicitAllocator *instruction) { From 9f6c5a20de03a59bfcaead703fe9490a6d622f84 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 15:10:29 -0500 Subject: [PATCH 17/56] codegen for coro_id instruction See #727 --- src/all_types.hpp | 1 + src/analyze.cpp | 4 ++++ src/analyze.hpp | 2 ++ src/codegen.cpp | 30 +++++++++++++++++++++++++++++- src/ir.cpp | 3 ++- src/zig_llvm.cpp | 3 +++ src/zig_llvm.h | 2 ++ 7 files changed, 43 insertions(+), 2 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index e9b4561eba..8e362cd64e 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1610,6 +1610,7 @@ struct CodeGen { LLVMValueRef return_address_fn_val; LLVMValueRef frame_address_fn_val; LLVMValueRef coro_destroy_fn_val; + LLVMValueRef coro_id_fn_val; bool error_during_imports; const char 
**clang_argv; diff --git a/src/analyze.cpp b/src/analyze.cpp index 2126f5ba07..2787906c64 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5776,3 +5776,7 @@ bool type_is_global_error_set(TypeTableEntry *err_set_type) { assert(err_set_type->data.error_set.infer_fn == nullptr); return err_set_type->data.error_set.err_count == UINT32_MAX; } + +uint32_t get_coro_frame_align_bytes(CodeGen *g) { + return g->pointer_size_bytes * 2; +} diff --git a/src/analyze.hpp b/src/analyze.hpp index 2fe41f6572..926793c58a 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -191,4 +191,6 @@ void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry); TypeTableEntry *get_auto_err_set_type(CodeGen *g, FnTableEntry *fn_entry); +uint32_t get_coro_frame_align_bytes(CodeGen *g); + #endif diff --git a/src/codegen.cpp b/src/codegen.cpp index 783f5fd8fe..21ad715977 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -942,6 +942,24 @@ static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) { return g->coro_destroy_fn_val; } +static LLVMValueRef get_coro_id_fn_val(CodeGen *g) { + if (g->coro_id_fn_val) + return g->coro_id_fn_val; + + LLVMTypeRef param_types[] = { + LLVMInt32Type(), + LLVMPointerType(LLVMInt8Type(), 0), + LLVMPointerType(LLVMInt8Type(), 0), + LLVMPointerType(LLVMInt8Type(), 0), + }; + LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 4, false); + Buf *name = buf_sprintf("llvm.coro.id"); + g->coro_id_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_id_fn_val)); + + return g->coro_id_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3730,7 +3748,17 @@ static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInst } static LLVMValueRef ir_render_coro_id(CodeGen *g, IrExecutable *executable, IrInstructionCoroId *instruction) { - zig_panic("TODO ir_render_coro_id"); + 
LLVMValueRef promise_ptr = ir_llvm_value(g, instruction->promise_ptr); + LLVMValueRef align_val = LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false); + LLVMValueRef null = LLVMConstIntToPtr(LLVMConstNull(g->builtin_types.entry_usize->type_ref), + LLVMPointerType(LLVMInt8Type(), 0)); + LLVMValueRef params[] = { + align_val, + promise_ptr, + null, + null, + }; + return LLVMBuildCall(g->builder, get_coro_id_fn_val(g), params, 4, ""); } static LLVMValueRef ir_render_coro_alloc(CodeGen *g, IrExecutable *executable, IrInstructionCoroAlloc *instruction) { diff --git a/src/ir.cpp b/src/ir.cpp index f484f32e01..5ab2b149d6 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -6027,7 +6027,8 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, alloc_field_name); IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); - IrInstruction *alignment = ir_build_const_u29(irb, scope, node, irb->codegen->pointer_size_bytes * 2); + IrInstruction *alignment = ir_build_const_u29(irb, scope, node, + get_coro_frame_align_bytes(irb->codegen)); size_t arg_count = 3; IrInstruction **args = allocate(arg_count); args[0] = irb->exec->implicit_allocator_ptr; // self diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 97c07ab820..34defc6dc6 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -182,6 +182,9 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM return false; } +ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) { + return wrap(Type::getTokenTy(*unwrap(context_ref))); +} LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args, unsigned NumArgs, unsigned CC, ZigLLVM_FnInline fn_inline, const char *Name) diff --git a/src/zig_llvm.h b/src/zig_llvm.h index 4ae25ef6fd..01a78a6af4 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -54,6 
+54,8 @@ enum ZigLLVM_EmitOutputType { ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMModuleRef module_ref, const char *filename, enum ZigLLVM_EmitOutputType output_type, char **error_message, bool is_debug); +ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref); + enum ZigLLVM_FnInline { ZigLLVM_FnInlineAuto, ZigLLVM_FnInlineAlways, From 93cbd4eeb97f30062311d6e083dd056b8fb8b021 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 15:20:31 -0500 Subject: [PATCH 18/56] codegen for coro_alloc and coro_size instructions See #727 --- src/all_types.hpp | 2 ++ src/codegen.cpp | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 8e362cd64e..f95cb8425a 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1611,6 +1611,8 @@ struct CodeGen { LLVMValueRef frame_address_fn_val; LLVMValueRef coro_destroy_fn_val; LLVMValueRef coro_id_fn_val; + LLVMValueRef coro_alloc_fn_val; + LLVMValueRef coro_size_fn_val; bool error_during_imports; const char **clang_argv; diff --git a/src/codegen.cpp b/src/codegen.cpp index 21ad715977..c7b6ce1f7c 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -960,6 +960,33 @@ static LLVMValueRef get_coro_id_fn_val(CodeGen *g) { return g->coro_id_fn_val; } +static LLVMValueRef get_coro_alloc_fn_val(CodeGen *g) { + if (g->coro_alloc_fn_val) + return g->coro_alloc_fn_val; + + LLVMTypeRef param_types[] = { + ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 1, false); + Buf *name = buf_sprintf("llvm.coro.alloc"); + g->coro_alloc_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_alloc_fn_val)); + + return g->coro_alloc_fn_val; +} + +static LLVMValueRef get_coro_size_fn_val(CodeGen *g) { + if (g->coro_size_fn_val) + return g->coro_size_fn_val; + + 
LLVMTypeRef fn_type = LLVMFunctionType(g->builtin_types.entry_usize->type_ref, nullptr, 0, false); + Buf *name = buf_sprintf("llvm.coro.size.i%d", g->pointer_size_bytes * 8); + g->coro_size_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_size_fn_val)); + + return g->coro_size_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3762,11 +3789,12 @@ static LLVMValueRef ir_render_coro_id(CodeGen *g, IrExecutable *executable, IrIn } static LLVMValueRef ir_render_coro_alloc(CodeGen *g, IrExecutable *executable, IrInstructionCoroAlloc *instruction) { - zig_panic("TODO ir_render_coro_alloc"); + LLVMValueRef token = ir_llvm_value(g, instruction->coro_id); + return LLVMBuildCall(g->builder, get_coro_alloc_fn_val(g), &token, 1, ""); } static LLVMValueRef ir_render_coro_size(CodeGen *g, IrExecutable *executable, IrInstructionCoroSize *instruction) { - zig_panic("TODO ir_render_coro_size"); + return LLVMBuildCall(g->builder, get_coro_size_fn_val(g), nullptr, 0, ""); } static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, IrInstructionCoroBegin *instruction) { From bced3fb64cbb9006886bc237d959ce0f2ca3c3f7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 16:05:10 -0500 Subject: [PATCH 19/56] codegen for get_implicit_allocator instruction See #727 --- src/codegen.cpp | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index c7b6ce1f7c..0c8e601bc0 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2568,6 +2568,11 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI } } +static bool get_prefix_arg_err_ret_stack(CodeGen *g, TypeTableEntry *src_return_type) { + return g->have_err_ret_tracing && + (src_return_type->id == TypeTableEntryIdErrorUnion || src_return_type->id == TypeTableEntryIdErrorSet); +} + 
static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) { LLVMValueRef fn_val; TypeTableEntry *fn_type; @@ -2587,7 +2592,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc); - bool prefix_arg_err_ret_stack = g->have_err_ret_tracing && (src_return_type->id == TypeTableEntryIdErrorUnion || src_return_type->id == TypeTableEntryIdErrorSet); + bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0) + (prefix_arg_err_ret_stack ? 1 : 0); bool is_var_args = fn_type_id->is_var_args; LLVMValueRef *gen_param_values = allocate(actual_param_count); @@ -3178,8 +3183,13 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns return nullptr; } -static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable, IrInstructionGetImplicitAllocator *instruction) { - zig_panic("TODO ir_render_get_implicit_allocator"); +static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable, + IrInstructionGetImplicitAllocator *instruction) +{ + TypeTableEntry *src_return_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); + size_t allocator_arg_index = prefix_arg_err_ret_stack ? 
1 : 0; + return LLVMGetParam(g->cur_fn_val, allocator_arg_index); } static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { From 79f1ff574b3badf7cf0a0cd7632c529801b4c611 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 16:15:14 -0500 Subject: [PATCH 20/56] codegen for coro_alloc_fail instruction See #727 --- src/codegen.cpp | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 0c8e601bc0..60667ac465 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3811,8 +3811,24 @@ static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, I zig_panic("TODO ir_render_coro_begin"); } -static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable, IrInstructionCoroAllocFail *instruction) { - zig_panic("TODO ir_render_coro_alloc_fail"); +static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable, + IrInstructionCoroAllocFail *instruction) +{ + TypeTableEntry *src_return_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; + bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); + size_t err_code_ptr_arg_index = prefix_arg_err_ret_stack ? 
2 : 1; + LLVMValueRef err_code_ptr_val = LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index); + LLVMValueRef err_code = ir_llvm_value(g, instruction->err_val); + LLVMBuildStore(g->builder, err_code, err_code_ptr_val); + + LLVMValueRef return_value; + if (ir_want_runtime_safety(g, &instruction->base)) { + return_value = LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)); + } else { + return_value = LLVMGetUndef(LLVMPointerType(LLVMInt8Type(), 0)); + } + LLVMBuildRet(g->builder, return_value); + return nullptr; } static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, IrInstructionCoroSuspend *instruction) { From d0f2eca106f556fe5826d96b08ffdf1be1293915 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 16:22:19 -0500 Subject: [PATCH 21/56] codegen for coro_begin instruction See #727 --- src/all_types.hpp | 1 + src/codegen.cpp | 24 +++++++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index f95cb8425a..d2bfd25446 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1613,6 +1613,7 @@ struct CodeGen { LLVMValueRef coro_id_fn_val; LLVMValueRef coro_alloc_fn_val; LLVMValueRef coro_size_fn_val; + LLVMValueRef coro_begin_fn_val; bool error_during_imports; const char **clang_argv; diff --git a/src/codegen.cpp b/src/codegen.cpp index 60667ac465..c1d6346253 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -987,6 +987,22 @@ static LLVMValueRef get_coro_size_fn_val(CodeGen *g) { return g->coro_size_fn_val; } +static LLVMValueRef get_coro_begin_fn_val(CodeGen *g) { + if (g->coro_begin_fn_val) + return g->coro_begin_fn_val; + + LLVMTypeRef param_types[] = { + ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), + LLVMPointerType(LLVMInt8Type(), 0), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false); + Buf *name = buf_sprintf("llvm.coro.begin"); + g->coro_begin_fn_val = LLVMAddFunction(g->module, 
buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_begin_fn_val)); + + return g->coro_begin_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3808,7 +3824,13 @@ static LLVMValueRef ir_render_coro_size(CodeGen *g, IrExecutable *executable, Ir } static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, IrInstructionCoroBegin *instruction) { - zig_panic("TODO ir_render_coro_begin"); + LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id); + LLVMValueRef coro_mem_ptr = ir_llvm_value(g, instruction->coro_mem_ptr); + LLVMValueRef params[] = { + coro_id, + coro_mem_ptr, + }; + return LLVMBuildCall(g->builder, get_coro_begin_fn_val(g), params, 2, ""); } static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable, From 0cf327eb17360cddcdfab623db049aca5afd7010 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 16:29:07 -0500 Subject: [PATCH 22/56] codegen for coro_suspend instruction See #727 --- src/all_types.hpp | 1 + src/codegen.cpp | 29 ++++++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index d2bfd25446..7efede5333 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1614,6 +1614,7 @@ struct CodeGen { LLVMValueRef coro_alloc_fn_val; LLVMValueRef coro_size_fn_val; LLVMValueRef coro_begin_fn_val; + LLVMValueRef coro_suspend_fn_val; bool error_during_imports; const char **clang_argv; diff --git a/src/codegen.cpp b/src/codegen.cpp index c1d6346253..5643333f99 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1003,6 +1003,22 @@ static LLVMValueRef get_coro_begin_fn_val(CodeGen *g) { return g->coro_begin_fn_val; } +static LLVMValueRef get_coro_suspend_fn_val(CodeGen *g) { + if (g->coro_suspend_fn_val) + return g->coro_suspend_fn_val; + + LLVMTypeRef param_types[] = { + ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), + 
LLVMInt1Type(), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt8Type(), param_types, 2, false); + Buf *name = buf_sprintf("llvm.coro.suspend"); + g->coro_suspend_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_suspend_fn_val)); + + return g->coro_suspend_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3854,7 +3870,18 @@ static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executab } static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, IrInstructionCoroSuspend *instruction) { - zig_panic("TODO ir_render_coro_suspend"); + LLVMValueRef save_point; + if (instruction->save_point == nullptr) { + save_point = LLVMConstNull(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext())); + } else { + save_point = ir_llvm_value(g, instruction->save_point); + } + LLVMValueRef is_final = ir_llvm_value(g, instruction->is_final); + LLVMValueRef params[] = { + save_point, + is_final, + }; + return LLVMBuildCall(g->builder, get_coro_suspend_fn_val(g), params, 2, ""); } static LLVMValueRef ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrInstructionCoroEnd *instruction) { From d2d2ba10e9688e6760e75290a29dc055d04a0296 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 16:40:00 -0500 Subject: [PATCH 23/56] codegen for coro_end instruction See #727 --- src/all_types.hpp | 1 + src/codegen.cpp | 22 +++++++++++++++++++++- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 7efede5333..b7a0625926 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1615,6 +1615,7 @@ struct CodeGen { LLVMValueRef coro_size_fn_val; LLVMValueRef coro_begin_fn_val; LLVMValueRef coro_suspend_fn_val; + LLVMValueRef coro_end_fn_val; bool error_during_imports; const char **clang_argv; diff --git a/src/codegen.cpp b/src/codegen.cpp index 
5643333f99..4cc9880fea 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1019,6 +1019,22 @@ static LLVMValueRef get_coro_suspend_fn_val(CodeGen *g) { return g->coro_suspend_fn_val; } +static LLVMValueRef get_coro_end_fn_val(CodeGen *g) { + if (g->coro_end_fn_val) + return g->coro_end_fn_val; + + LLVMTypeRef param_types[] = { + LLVMPointerType(LLVMInt8Type(), 0), + LLVMInt1Type(), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMInt1Type(), param_types, 2, false); + Buf *name = buf_sprintf("llvm.coro.end"); + g->coro_end_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_end_fn_val)); + + return g->coro_end_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3885,7 +3901,11 @@ static LLVMValueRef ir_render_coro_suspend(CodeGen *g, IrExecutable *executable, } static LLVMValueRef ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrInstructionCoroEnd *instruction) { - zig_panic("TODO ir_render_coro_end"); + LLVMValueRef params[] = { + LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0)), + LLVMConstNull(LLVMInt1Type()), + }; + return LLVMBuildCall(g->builder, get_coro_end_fn_val(g), params, 2, ""); } static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, IrInstructionCoroFree *instruction) { From 4eac75914bcdf9648518d1837f48e07e35744dc1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 16:46:01 -0500 Subject: [PATCH 24/56] codegen for coro_free instruction See #727 --- src/all_types.hpp | 1 + src/codegen.cpp | 24 +++++++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index b7a0625926..d4ec5ac427 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1616,6 +1616,7 @@ struct CodeGen { LLVMValueRef coro_begin_fn_val; LLVMValueRef coro_suspend_fn_val; LLVMValueRef coro_end_fn_val; + LLVMValueRef coro_free_fn_val; bool 
error_during_imports; const char **clang_argv; diff --git a/src/codegen.cpp b/src/codegen.cpp index 4cc9880fea..163e9d804b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1035,6 +1035,22 @@ static LLVMValueRef get_coro_end_fn_val(CodeGen *g) { return g->coro_end_fn_val; } +static LLVMValueRef get_coro_free_fn_val(CodeGen *g) { + if (g->coro_free_fn_val) + return g->coro_free_fn_val; + + LLVMTypeRef param_types[] = { + ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), + LLVMPointerType(LLVMInt8Type(), 0), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 2, false); + Buf *name = buf_sprintf("llvm.coro.free"); + g->coro_free_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_free_fn_val)); + + return g->coro_free_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3909,7 +3925,13 @@ static LLVMValueRef ir_render_coro_end(CodeGen *g, IrExecutable *executable, IrI } static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, IrInstructionCoroFree *instruction) { - zig_panic("TODO ir_render_coro_free"); + LLVMValueRef coro_id = ir_llvm_value(g, instruction->coro_id); + LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle); + LLVMValueRef params[] = { + coro_id, + coro_handle, + }; + return LLVMBuildCall(g->builder, get_coro_free_fn_val(g), params, 2, ""); } static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { From 83f89064490350991806aea02ea6ba4b948c0376 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 17:34:05 -0500 Subject: [PATCH 25/56] codegen for coro_resume instruction See #727 --- src/all_types.hpp | 1 + src/codegen.cpp | 18 +++++++++++++++++- src/ir.cpp | 9 +++++++-- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/src/all_types.hpp 
b/src/all_types.hpp index d4ec5ac427..3cf5676dfe 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1617,6 +1617,7 @@ struct CodeGen { LLVMValueRef coro_suspend_fn_val; LLVMValueRef coro_end_fn_val; LLVMValueRef coro_free_fn_val; + LLVMValueRef coro_resume_fn_val; bool error_during_imports; const char **clang_argv; diff --git a/src/codegen.cpp b/src/codegen.cpp index 163e9d804b..0c4f66daa4 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1051,6 +1051,21 @@ static LLVMValueRef get_coro_free_fn_val(CodeGen *g) { return g->coro_free_fn_val; } +static LLVMValueRef get_coro_resume_fn_val(CodeGen *g) { + if (g->coro_resume_fn_val) + return g->coro_resume_fn_val; + + LLVMTypeRef param_types[] = { + LLVMPointerType(LLVMInt8Type(), 0), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 1, false); + Buf *name = buf_sprintf("llvm.coro.resume"); + g->coro_resume_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_resume_fn_val)); + + return g->coro_resume_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3935,7 +3950,8 @@ static LLVMValueRef ir_render_coro_free(CodeGen *g, IrExecutable *executable, Ir } static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, IrInstructionCoroResume *instruction) { - zig_panic("TODO ir_render_coro_resume"); + LLVMValueRef awaiter_handle = ir_llvm_value(g, instruction->awaiter_handle); + return LLVMBuildCall(g->builder, get_coro_resume_fn_val(g), &awaiter_handle, 1, ""); } static void set_debug_location(CodeGen *g, IrInstruction *instruction) { diff --git a/src/ir.cpp b/src/ir.cpp index 5ab2b149d6..81bde2e793 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -6143,14 +6143,19 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_set_cursor_at_end_and_append_block(irb, end_free_block); IrBasicBlock *resume_block = 
ir_create_basic_block(irb, scope, "Resume"); - ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, suspend_block, const_bool_false); + IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "Return"); + ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, return_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, resume_block); IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, irb->exec->coro_awaiter_field_ptr, false); IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); ir_build_coro_resume(irb, scope, node, awaiter_handle); - ir_build_br(irb, scope, node, suspend_block, const_bool_false); + ir_build_br(irb, scope, node, return_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, return_block); + IrInstruction *undef = ir_build_const_undefined(irb, scope, node); + ir_build_return(irb, scope, node, undef); } return true; From 704a8acb5997c7fdab4e29737fb398c022f876b7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 17:34:18 -0500 Subject: [PATCH 26/56] fix handle_is_ptr for promise type --- src/analyze.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 2787906c64..a4bb98cad6 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4191,8 +4191,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { return type_has_bits(type_entry->data.error_union.payload_type); case TypeTableEntryIdMaybe: return type_has_bits(type_entry->data.maybe.child_type) && - type_entry->data.maybe.child_type->id != TypeTableEntryIdPointer && - type_entry->data.maybe.child_type->id != TypeTableEntryIdFn; + !type_is_codegen_pointer(type_entry->data.maybe.child_type); case TypeTableEntryIdUnion: assert(type_entry->data.unionation.complete); if (type_entry->data.unionation.gen_field_count == 0) From fe354ebb5c71aa6c5600251d2c3db53b8c5dc8eb Mon Sep 17 00:00:00 2001 From: 
Andrew Kelley Date: Sun, 25 Feb 2018 17:57:05 -0500 Subject: [PATCH 27/56] coroutines: fix llvm error of instruction not dominating uses See #727 --- src/ir.cpp | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 81bde2e793..b3f53f824d 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -5983,6 +5983,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *coro_id; IrInstruction *coro_promise_ptr; IrInstruction *coro_result_field_ptr; + IrInstruction *coro_need_dyn_alloc; TypeTableEntry *return_type; Buf *result_ptr_field_name; if (is_async) { @@ -6012,13 +6013,13 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_promise_ptr); coro_id = ir_build_coro_id(irb, scope, node, promise_as_u8_ptr); - IrInstruction *need_dyn_alloc = ir_build_coro_alloc(irb, scope, node, coro_id); + coro_need_dyn_alloc = ir_build_coro_alloc(irb, scope, node, coro_id); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); IrInstruction *null_ptr = ir_build_int_to_ptr(irb, scope, node, u8_ptr_type, zero); IrBasicBlock *dyn_alloc_block = ir_create_basic_block(irb, scope, "DynAlloc"); IrBasicBlock *coro_begin_block = ir_create_basic_block(irb, scope, "CoroBegin"); - ir_build_cond_br(irb, scope, node, need_dyn_alloc, dyn_alloc_block, coro_begin_block, const_bool_false); + ir_build_cond_br(irb, scope, node, coro_need_dyn_alloc, dyn_alloc_block, coro_begin_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, dyn_alloc_block); IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); @@ -6047,22 +6048,34 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_build_coro_alloc_fail(irb, scope, node, err_val); 
ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); - coro_unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); + IrInstruction *unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); Buf *ptr_field_name = buf_create_from_str("ptr"); - IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, coro_unwrapped_mem_ptr, + IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, unwrapped_mem_ptr, ptr_field_name); IrInstruction *coro_mem_ptr = ir_build_load_ptr(irb, scope, node, coro_mem_ptr_field); ir_build_br(irb, scope, node, coro_begin_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, coro_begin_block); - IrBasicBlock **incoming_blocks = allocate(2); - IrInstruction **incoming_values = allocate(2); - incoming_blocks[0] = entry_block; - incoming_values[0] = null_ptr; - incoming_blocks[1] = alloc_ok_block; - incoming_values[1] = coro_mem_ptr; - IrInstruction *coro_mem = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + + IrBasicBlock **coro_mem_incoming_blocks = allocate(2); + IrInstruction **coro_mem_incoming_values = allocate(2); + coro_mem_incoming_blocks[0] = entry_block; + coro_mem_incoming_values[0] = null_ptr; + coro_mem_incoming_blocks[1] = alloc_ok_block; + coro_mem_incoming_values[1] = coro_mem_ptr; + IrInstruction *coro_mem = ir_build_phi(irb, scope, node, 2, coro_mem_incoming_blocks, coro_mem_incoming_values); + + IrBasicBlock **unwrapped_mem_ptr_incoming_blocks = allocate(2); + IrInstruction **unwrapped_mem_ptr_incoming_values = allocate(2); + unwrapped_mem_ptr_incoming_blocks[0] = entry_block; + unwrapped_mem_ptr_incoming_values[0] = ir_build_const_undefined(irb, scope, node); + unwrapped_mem_ptr_incoming_blocks[1] = alloc_ok_block; + unwrapped_mem_ptr_incoming_values[1] = unwrapped_mem_ptr; + coro_unwrapped_mem_ptr = ir_build_phi(irb, scope, node, 2, + unwrapped_mem_ptr_incoming_blocks, 
unwrapped_mem_ptr_incoming_values); + irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem); + irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal"); irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal"); } @@ -6123,11 +6136,9 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec incoming_blocks[1] = irb->exec->coro_normal_final; incoming_values[1] = const_bool_true; IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); - IrInstruction *mem_to_free = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle); - IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, mem_to_free); IrBasicBlock *dyn_free_block = ir_create_basic_block(irb, scope, "DynFree"); IrBasicBlock *end_free_block = ir_create_basic_block(irb, scope, "EndFree"); - ir_build_cond_br(irb, scope, node, is_non_null, dyn_free_block, end_free_block, const_bool_false); + ir_build_cond_br(irb, scope, node, coro_need_dyn_alloc, dyn_free_block, end_free_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, dyn_free_block); Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); From b018c64ca2c92f5d6a4d3e5dcdfdc0cf70680027 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 18:09:39 -0500 Subject: [PATCH 28/56] add coroutine LLVM passes --- src/zig_llvm.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 34defc6dc6..16a087cee4 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -129,6 +130,8 @@ bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machine_ref, LLVMM PMBuilder->Inliner = createFunctionInliningPass(PMBuilder->OptLevel, PMBuilder->SizeLevel, false); } + addCoroutinePassesToExtensionPoints(*PMBuilder); + // Set up the per-function 
pass manager. legacy::FunctionPassManager FPM = legacy::FunctionPassManager(module); auto tliwp = new(std::nothrow) TargetLibraryInfoWrapperPass(tlii); From 6cbea99ed6c0a87b4e4b779975f7f26738241280 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 20:27:53 -0500 Subject: [PATCH 29/56] async functions are allowed to accept zig types --- src/analyze.cpp | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index a4bb98cad6..69b6fe4790 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -927,6 +927,20 @@ static const char *calling_convention_fn_type_str(CallingConvention cc) { zig_unreachable(); } +static bool calling_convention_allows_zig_types(CallingConvention cc) { + switch (cc) { + case CallingConventionUnspecified: + case CallingConventionAsync: + return true; + case CallingConventionC: + case CallingConventionCold: + case CallingConventionNaked: + case CallingConventionStdcall: + return false; + } + zig_unreachable(); +} + TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g) { if (g->stack_trace_type == nullptr) { ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace"); @@ -1380,7 +1394,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c bool param_is_var_args = param_node->data.param_decl.is_var_args; if (param_is_comptime) { - if (fn_type_id.cc != CallingConventionUnspecified) { + if (!calling_convention_allows_zig_types(fn_type_id.cc)) { add_node_error(g, param_node, buf_sprintf("comptime parameter not allowed in function with calling convention '%s'", calling_convention_name(fn_type_id.cc))); @@ -1391,7 +1405,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c if (fn_type_id.cc == CallingConventionC) { fn_type_id.param_count = fn_type_id.next_param_index; continue; - } else if (fn_type_id.cc == CallingConventionUnspecified) { + } else if 
(calling_convention_allows_zig_types(fn_type_id.cc)) { return get_generic_fn_type(g, &fn_type_id); } else { add_node_error(g, param_node, @@ -1405,7 +1419,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c if (type_is_invalid(type_entry)) { return g->builtin_types.entry_invalid; } - if (fn_type_id.cc != CallingConventionUnspecified) { + if (!calling_convention_allows_zig_types(fn_type_id.cc)) { type_ensure_zero_bits_known(g, type_entry); if (!type_has_bits(type_entry)) { add_node_error(g, param_node->data.param_decl.type, @@ -1415,7 +1429,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c } } - if (fn_type_id.cc != CallingConventionUnspecified && !type_allowed_in_extern(g, type_entry)) { + if (!calling_convention_allows_zig_types(fn_type_id.cc) && !type_allowed_in_extern(g, type_entry)) { add_node_error(g, param_node->data.param_decl.type, buf_sprintf("parameter of type '%s' not allowed in function with calling convention '%s'", buf_ptr(&type_entry->name), @@ -1435,7 +1449,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c buf_sprintf("parameter of type '%s' not allowed", buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; case TypeTableEntryIdVar: - if (fn_type_id.cc != CallingConventionUnspecified) { + if (!calling_convention_allows_zig_types(fn_type_id.cc)) { add_node_error(g, param_node->data.param_decl.type, buf_sprintf("parameter of type 'var' not allowed in function with calling convention '%s'", calling_convention_name(fn_type_id.cc))); @@ -1467,7 +1481,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdFn: case TypeTableEntryIdPromise: ensure_complete_type(g, type_entry); - if (fn_type_id.cc == CallingConventionUnspecified && !type_is_copyable(g, type_entry)) { + if (calling_convention_allows_zig_types(fn_type_id.cc) && !type_is_copyable(g, type_entry)) { add_node_error(g, 
param_node->data.param_decl.type, buf_sprintf("type '%s' is not copyable; cannot pass by value", buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; @@ -1498,7 +1512,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c fn_type_id.return_type = specified_return_type; } - if (fn_type_id.cc != CallingConventionUnspecified && !type_allowed_in_extern(g, fn_type_id.return_type)) { + if (!calling_convention_allows_zig_types(fn_type_id.cc) && !type_allowed_in_extern(g, fn_type_id.return_type)) { add_node_error(g, fn_proto->return_type, buf_sprintf("return type '%s' not allowed in function with calling convention '%s'", buf_ptr(&fn_type_id.return_type->name), @@ -1525,7 +1539,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdBoundFn: case TypeTableEntryIdVar: case TypeTableEntryIdMetaType: - if (fn_type_id.cc != CallingConventionUnspecified) { + if (!calling_convention_allows_zig_types(fn_type_id.cc)) { add_node_error(g, fn_proto->return_type, buf_sprintf("return type '%s' not allowed in function with calling convention '%s'", buf_ptr(&fn_type_id.return_type->name), From 6b436146a8065b3e19c11359df85a44dac269730 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 20:28:44 -0500 Subject: [PATCH 30/56] fix invalid memory write in coroutines implementation --- src/ir.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ir.cpp b/src/ir.cpp index b3f53f824d..5c5bfc3cd1 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -942,7 +942,7 @@ static IrInstruction *ir_build_const_promise_init(IrBuilder *irb, Scope *scope, IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); const_instruction->base.value.type = struct_type; const_instruction->base.value.special = ConstValSpecialStatic; - const_instruction->base.value.data.x_struct.fields = allocate(2); + const_instruction->base.value.data.x_struct.fields = 
allocate(field_count); const_instruction->base.value.data.x_struct.fields[0].type = awaiter_handle_type; const_instruction->base.value.data.x_struct.fields[0].special = ConstValSpecialStatic; const_instruction->base.value.data.x_struct.fields[0].data.x_maybe = nullptr; From 6fef7406c80d6db26513a6ae53f9c44444bef1f2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 25 Feb 2018 20:29:14 -0500 Subject: [PATCH 31/56] move coroutine init code to after coro.begin --- src/ir.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index 5c5bfc3cd1..70a099f7c4 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -5995,19 +5995,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type; IrInstruction *promise_init = ir_build_const_promise_init(irb, scope, node, return_type); ir_build_var_decl(irb, scope, node, promise_var, nullptr, nullptr, promise_init); - coro_promise_ptr = ir_build_var_ptr(irb, scope, node, promise_var, false, false); - Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); - irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - awaiter_handle_field_name); - if (type_has_bits(return_type)) { - Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); - coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); - result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); - irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - result_ptr_field_name); - ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, coro_result_field_ptr); - } u8_ptr_type = ir_build_const_type(irb, scope, node, get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); @@ -6076,6 +6064,19 @@ bool ir_gen(CodeGen *codegen, AstNode 
*node, Scope *scope, IrExecutable *ir_exec irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem); + Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); + irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + awaiter_handle_field_name); + if (type_has_bits(return_type)) { + Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); + coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); + result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); + irb->exec->coro_result_ptr_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, + result_ptr_field_name); + ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr, coro_result_field_ptr); + } + + irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal"); irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal"); } From c60496a297a0d4c53ad0e22850ad62b0b4a2d841 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2018 00:04:11 -0500 Subject: [PATCH 32/56] parse await and suspend syntax See #727 --- doc/langref.html.in | 10 ++++-- src/all_types.hpp | 13 ++++++++ src/analyze.cpp | 2 ++ src/ast_render.cpp | 21 ++++++++++++ src/ir.cpp | 20 ++++++++++++ src/parser.cpp | 79 +++++++++++++++++++++++++++++++++++++++++++-- 6 files changed, 139 insertions(+), 6 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 9c33f9e607..2d4bead65e 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5687,7 +5687,7 @@ AssignmentExpression = UnwrapExpression AssignmentOperator UnwrapExpression | Un AssignmentOperator = "=" | "*=" | "/=" | "%=" | "+=" | "-=" | "<<=" | ">>=" | "&=" | "^=" | "|=" | "*%=" | "+%=" | "-%=" -BlockExpression(body) = Block | IfExpression(body) | IfErrorExpression(body) | TestExpression(body) | WhileExpression(body) | ForExpression(body) | 
SwitchExpression | CompTimeExpression(body) +BlockExpression(body) = Block | IfExpression(body) | IfErrorExpression(body) | TestExpression(body) | WhileExpression(body) | ForExpression(body) | SwitchExpression | CompTimeExpression(body) | SuspendExpression(body) CompTimeExpression(body) = "comptime" body @@ -5705,6 +5705,8 @@ ReturnExpression = "return" option(Expression) TryExpression = "try" Expression +AwaitExpression = "await" Expression + BreakExpression = "break" option(":" Symbol) option(Expression) CancelExpression = "cancel" Expression; @@ -5713,6 +5715,8 @@ Defer(body) = ("defer" | "deferror") body IfExpression(body) = "if" "(" Expression ")" body option("else" BlockExpression(body)) +SuspendExpression(body) = "suspend" option(("|" Symbol "|" body)) + IfErrorExpression(body) = "if" "(" Expression ")" option("|" option("*") Symbol "|") body "else" "|" Symbol "|" BlockExpression(body) TestExpression(body) = "if" "(" Expression ")" option("|" option("*") Symbol "|") body option("else" BlockExpression(body)) @@ -5763,7 +5767,7 @@ ContainerInitBody = list(StructLiteralField, ",") | list(Expression, ",") StructLiteralField = "." Symbol "=" Expression -PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" +PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" 
| "-%" | "try" | "await" PrimaryExpression = Integer | Float | String | CharLiteral | KeywordLiteral | GroupedExpression | BlockExpression(BlockOrExpression) | Symbol | ("@" Symbol FnCallExpression) | ArrayType | FnProto | AsmExpression | ContainerDecl | ("continue" option(":" Symbol)) | ErrorSetDecl @@ -5771,7 +5775,7 @@ ArrayType : "[" option(Expression) "]" option("align" "(" Expression option(":" GroupedExpression = "(" Expression ")" -KeywordLiteral = "true" | "false" | "null" | "undefined" | "error" | "this" | "unreachable" +KeywordLiteral = "true" | "false" | "null" | "undefined" | "error" | "this" | "unreachable" | "suspend" ErrorSetDecl = "error" "{" list(Symbol, ",") "}" diff --git a/src/all_types.hpp b/src/all_types.hpp index 3cf5676dfe..d2705d8ec6 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -401,6 +401,8 @@ enum NodeType { NodeTypeTestExpr, NodeTypeErrorSetDecl, NodeTypeCancel, + NodeTypeAwaitExpr, + NodeTypeSuspend, }; struct AstNodeRoot { @@ -859,6 +861,15 @@ struct AstNodeErrorType { struct AstNodeVarLiteral { }; +struct AstNodeAwaitExpr { + AstNode *expr; +}; + +struct AstNodeSuspend { + AstNode *block; + AstNode *promise_symbol; +}; + struct AstNode { enum NodeType type; size_t line; @@ -917,6 +928,8 @@ struct AstNode { AstNodeVarLiteral var_literal; AstNodeErrorSetDecl err_set_decl; AstNodeCancelExpr cancel_expr; + AstNodeAwaitExpr await_expr; + AstNodeSuspend suspend; } data; }; diff --git a/src/analyze.cpp b/src/analyze.cpp index 69b6fe4790..ce9e99f8fa 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3214,6 +3214,8 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeTestExpr: case NodeTypeErrorSetDecl: case NodeTypeCancel: + case NodeTypeAwaitExpr: + case NodeTypeSuspend: zig_unreachable(); } } diff --git a/src/ast_render.cpp b/src/ast_render.cpp index eec4b996a0..5f3e1998fd 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -246,6 +246,10 @@ static const char *node_type_str(NodeType 
node_type) { return "ErrorSetDecl"; case NodeTypeCancel: return "Cancel"; + case NodeTypeAwaitExpr: + return "AwaitExpr"; + case NodeTypeSuspend: + return "Suspend"; } zig_unreachable(); } @@ -1045,6 +1049,23 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { render_node_grouped(ar, node->data.cancel_expr.expr); break; } + case NodeTypeAwaitExpr: + { + fprintf(ar->f, "await "); + render_node_grouped(ar, node->data.await_expr.expr); + break; + } + case NodeTypeSuspend: + { + fprintf(ar->f, "suspend"); + if (node->data.suspend.block != nullptr) { + fprintf(ar->f, " |"); + render_node_grouped(ar, node->data.suspend.promise_symbol); + fprintf(ar->f, "| "); + render_node_grouped(ar, node->data.suspend.block); + } + break; + } case NodeTypeFnDecl: case NodeTypeParamDecl: case NodeTypeTestDecl: diff --git a/src/ir.cpp b/src/ir.cpp index 70a099f7c4..7ed66b92bd 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -5834,6 +5834,22 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *parent_scope, AstNode return ir_build_cancel(irb, parent_scope, node, target_inst); } +static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, AstNode *node) { + assert(node->type == NodeTypeAwaitExpr); + + IrInstruction *target_inst = ir_gen_node(irb, node->data.await_expr.expr, parent_scope); + if (target_inst == irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + zig_panic("TODO: generate await expr"); +} + +static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) { + assert(node->type == NodeTypeSuspend); + + zig_panic("TODO: generate suspend"); +} + static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, LVal lval) { @@ -5932,6 +5948,10 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval); case NodeTypeCancel: return ir_lval_wrap(irb, scope, 
ir_gen_cancel(irb, scope, node), lval); + case NodeTypeAwaitExpr: + return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval); + case NodeTypeSuspend: + return ir_lval_wrap(irb, scope, ir_gen_suspend(irb, scope, node), lval); } zig_unreachable(); } diff --git a/src/parser.cpp b/src/parser.cpp index e64c569e2f..763273fd0a 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -221,6 +221,7 @@ static AstNode *ast_parse_grouped_expr(ParseContext *pc, size_t *token_index, bo static AstNode *ast_parse_container_decl(ParseContext *pc, size_t *token_index, bool mandatory); static AstNode *ast_parse_primary_expr(ParseContext *pc, size_t *token_index, bool mandatory); static AstNode *ast_parse_try_expr(ParseContext *pc, size_t *token_index); +static AstNode *ast_parse_await_expr(ParseContext *pc, size_t *token_index); static AstNode *ast_parse_symbol(ParseContext *pc, size_t *token_index); static void ast_expect_token(ParseContext *pc, Token *token, TokenId token_id) { @@ -650,6 +651,41 @@ static AstNode *ast_parse_asm_expr(ParseContext *pc, size_t *token_index, bool m return node; } +/* +SuspendExpression(body) = "suspend" "|" Symbol "|" body +*/ +static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, bool mandatory) { + size_t orig_token_index = *token_index; + + Token *suspend_token = &pc->tokens->at(*token_index); + if (suspend_token->id == TokenIdKeywordSuspend) { + *token_index += 1; + } else if (mandatory) { + ast_expect_token(pc, suspend_token, TokenIdKeywordSuspend); + zig_unreachable(); + } else { + return nullptr; + } + + Token *bar_token = &pc->tokens->at(*token_index); + if (bar_token->id == TokenIdBinOr) { + *token_index += 1; + } else if (mandatory) { + ast_expect_token(pc, suspend_token, TokenIdBinOr); + zig_unreachable(); + } else { + *token_index = orig_token_index; + return nullptr; + } + + AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token); + node->data.suspend.promise_symbol = ast_parse_symbol(pc, 
token_index); + ast_eat_token(pc, token_index, TokenIdBinOr); + node->data.suspend.block = ast_parse_block(pc, token_index, true); + + return node; +} + /* CompTimeExpression(body) = "comptime" body */ @@ -674,7 +710,7 @@ static AstNode *ast_parse_comptime_expr(ParseContext *pc, size_t *token_index, b /* PrimaryExpression = Integer | Float | String | CharLiteral | KeywordLiteral | GroupedExpression | BlockExpression(BlockOrExpression) | Symbol | ("@" Symbol FnCallExpression) | ArrayType | FnProto | AsmExpression | ContainerDecl | ("continue" option(":" Symbol)) | ErrorSetDecl -KeywordLiteral = "true" | "false" | "null" | "undefined" | "error" | "this" | "unreachable" +KeywordLiteral = "true" | "false" | "null" | "undefined" | "error" | "this" | "unreachable" | "suspend" ErrorSetDecl = "error" "{" list(Symbol, ",") "}" */ static AstNode *ast_parse_primary_expr(ParseContext *pc, size_t *token_index, bool mandatory) { @@ -738,6 +774,10 @@ static AstNode *ast_parse_primary_expr(ParseContext *pc, size_t *token_index, bo AstNode *node = ast_create_node(pc, NodeTypeUnreachable, token); *token_index += 1; return node; + } else if (token->id == TokenIdKeywordSuspend) { + AstNode *node = ast_create_node(pc, NodeTypeSuspend, token); + *token_index += 1; + return node; } else if (token->id == TokenIdKeywordError) { Token *next_token = &pc->tokens->at(*token_index + 1); if (next_token->id == TokenIdLBrace) { @@ -1067,7 +1107,7 @@ static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) { /* PrefixOpExpression = PrefixOp ErrorSetExpr | SuffixOpExpression -PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" +PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" 
| "-%" | "try" | "await" */ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) { Token *token = &pc->tokens->at(*token_index); @@ -1077,6 +1117,9 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, if (token->id == TokenIdKeywordTry) { return ast_parse_try_expr(pc, token_index); } + if (token->id == TokenIdKeywordAwait) { + return ast_parse_await_expr(pc, token_index); + } PrefixOp prefix_op = tok_to_prefix_op(token); if (prefix_op == PrefixOpInvalid) { return ast_parse_suffix_op_expr(pc, token_index, mandatory); @@ -1535,6 +1578,23 @@ static AstNode *ast_parse_try_expr(ParseContext *pc, size_t *token_index) { return node; } +/* +AwaitExpression : "await" Expression +*/ +static AstNode *ast_parse_await_expr(ParseContext *pc, size_t *token_index) { + Token *token = &pc->tokens->at(*token_index); + + if (token->id != TokenIdKeywordAwait) { + return nullptr; + } + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypeAwaitExpr, token); + node->data.await_expr.expr = ast_parse_expression(pc, token_index, true); + + return node; +} + /* BreakExpression = "break" option(":" Symbol) option(Expression) */ @@ -2044,7 +2104,7 @@ static AstNode *ast_parse_switch_expr(ParseContext *pc, size_t *token_index, boo } /* -BlockExpression(body) = Block | IfExpression(body) | TryExpression(body) | TestExpression(body) | WhileExpression(body) | ForExpression(body) | SwitchExpression | CompTimeExpression(body) +BlockExpression(body) = Block | IfExpression(body) | IfErrorExpression(body) | TestExpression(body) | WhileExpression(body) | ForExpression(body) | SwitchExpression | CompTimeExpression(body) | SuspendExpression(body) */ static AstNode *ast_parse_block_expr(ParseContext *pc, size_t *token_index, bool mandatory) { Token *token = &pc->tokens->at(*token_index); @@ -2073,6 +2133,10 @@ static AstNode *ast_parse_block_expr(ParseContext *pc, size_t *token_index, bool if (comptime_node) return 
comptime_node; + AstNode *suspend_node = ast_parse_suspend_block(pc, token_index, false); + if (suspend_node) + return suspend_node; + if (mandatory) ast_invalid_token_error(pc, token); @@ -2255,6 +2319,8 @@ static bool statement_terminates_without_semicolon(AstNode *node) { return node->data.comptime_expr.expr->type == NodeTypeBlock; case NodeTypeDefer: return node->data.defer.expr->type == NodeTypeBlock; + case NodeTypeSuspend: + return node->data.suspend.block != nullptr && node->data.suspend.block->type == NodeTypeBlock; case NodeTypeSwitchExpr: case NodeTypeBlock: return true; @@ -2994,5 +3060,12 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeCancel: visit_field(&node->data.cancel_expr.expr, visit, context); break; + case NodeTypeAwaitExpr: + visit_field(&node->data.await_expr.expr, visit, context); + break; + case NodeTypeSuspend: + visit_field(&node->data.suspend.promise_symbol, visit, context); + visit_field(&node->data.suspend.block, visit, context); + break; } } From 3e86fb500dc918618a2ccaa5d942de98bd5fea47 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2018 02:46:21 -0500 Subject: [PATCH 33/56] implement coroutine suspend see #727 --- src/all_types.hpp | 10 +++++ src/codegen.cpp | 22 ++++++++++ src/ir.cpp | 106 ++++++++++++++++++++++++++++++++++++++++++---- src/ir_print.cpp | 9 ++++ 4 files changed, 139 insertions(+), 8 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index d2705d8ec6..63292dd8ec 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -63,6 +63,8 @@ struct IrExecutable { IrInstruction *implicit_allocator_ptr; IrBasicBlock *coro_early_final; IrBasicBlock *coro_normal_final; + IrBasicBlock *coro_suspend_block; + IrBasicBlock *coro_final_cleanup_block; }; enum OutType { @@ -1631,6 +1633,7 @@ struct CodeGen { LLVMValueRef coro_end_fn_val; LLVMValueRef coro_free_fn_val; LLVMValueRef coro_resume_fn_val; + LLVMValueRef coro_save_fn_val; bool error_during_imports; 
const char **clang_argv; @@ -2000,6 +2003,7 @@ enum IrInstructionId { IrInstructionIdCoroEnd, IrInstructionIdCoroFree, IrInstructionIdCoroResume, + IrInstructionIdCoroSave, }; struct IrInstruction { @@ -2902,6 +2906,12 @@ struct IrInstructionCoroResume { IrInstruction *awaiter_handle; }; +struct IrInstructionCoroSave { + IrInstruction base; + + IrInstruction *coro_handle; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/codegen.cpp b/src/codegen.cpp index 0c4f66daa4..f82c686b85 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1066,6 +1066,21 @@ static LLVMValueRef get_coro_resume_fn_val(CodeGen *g) { return g->coro_resume_fn_val; } +static LLVMValueRef get_coro_save_fn_val(CodeGen *g) { + if (g->coro_save_fn_val) + return g->coro_save_fn_val; + + LLVMTypeRef param_types[] = { + LLVMPointerType(LLVMInt8Type(), 0), + }; + LLVMTypeRef fn_type = LLVMFunctionType(ZigLLVMTokenTypeInContext(LLVMGetGlobalContext()), param_types, 1, false); + Buf *name = buf_sprintf("llvm.coro.save"); + g->coro_save_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_save_fn_val)); + + return g->coro_save_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -3954,6 +3969,11 @@ static LLVMValueRef ir_render_coro_resume(CodeGen *g, IrExecutable *executable, return LLVMBuildCall(g->builder, get_coro_resume_fn_val(g), &awaiter_handle, 1, ""); } +static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, IrInstructionCoroSave *instruction) { + LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle); + return LLVMBuildCall(g->builder, get_coro_save_fn_val(g), &coro_handle, 1, ""); +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -4157,6 +4177,8 @@ static 
LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_free(g, executable, (IrInstructionCoroFree *)instruction); case IrInstructionIdCoroResume: return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); + case IrInstructionIdCoroSave: + return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 7ed66b92bd..2600f5e948 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -691,6 +691,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroResume *) { return IrInstructionIdCoroResume; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSave *) { + return IrInstructionIdCoroSave; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -2585,6 +2589,17 @@ static IrInstruction *ir_build_coro_resume(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_coro_save(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *coro_handle) +{ + IrInstructionCoroSave *instruction = ir_build_instruction(irb, scope, source_node); + instruction->coro_handle = coro_handle; + + ir_ref_instruction(coro_handle, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -5847,7 +5862,67 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) { assert(node->type == NodeTypeSuspend); - zig_panic("TODO: generate suspend"); + FnTableEntry *fn_entry = exec_fn_entry(irb->exec); + if (!fn_entry) { + add_node_error(irb->codegen, node, buf_sprintf("suspend outside function definition")); + 
return irb->codegen->invalid_instruction; + } + if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) { + add_node_error(irb->codegen, node, buf_sprintf("suspend in non-async function")); + return irb->codegen->invalid_instruction; + } + + ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); + if (scope_defer_expr) { + if (!scope_defer_expr->reported_err) { + add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); + scope_defer_expr->reported_err = true; + } + return irb->codegen->invalid_instruction; + } + + Scope *outer_scope = irb->exec->begin_scope; + + + IrInstruction *suspend_code; + IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false); + if (node->data.suspend.block == nullptr) { + suspend_code = ir_build_coro_suspend(irb, parent_scope, node, nullptr, const_bool_false); + } else { + assert(node->data.suspend.promise_symbol != nullptr); + assert(node->data.suspend.promise_symbol->type == NodeTypeSymbol); + Buf *promise_symbol_name = node->data.suspend.promise_symbol->data.symbol_expr.symbol; + Scope *child_scope; + if (!buf_eql_str(promise_symbol_name, "_")) { + VariableTableEntry *promise_var = ir_create_var(irb, node, parent_scope, promise_symbol_name, + true, true, false, const_bool_false); + ir_build_var_decl(irb, parent_scope, node, promise_var, nullptr, nullptr, irb->exec->coro_handle); + child_scope = promise_var->child_scope; + } else { + child_scope = parent_scope; + } + IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle); + ir_gen_node(irb, node->data.suspend.block, child_scope); + suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false); + } + + IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); + IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); + + IrInstructionSwitchBrCase *cases = 
allocate(2); + cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0); + cases[0].block = resume_block; + cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1); + cases[1].block = cleanup_block; + ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block, + 2, cases, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, cleanup_block); + ir_gen_defers_for_block(irb, parent_scope, outer_scope, true); + ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, resume_block); + return ir_build_const_void(irb, parent_scope, node); } static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, @@ -6099,6 +6174,8 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec irb->exec->coro_early_final = ir_create_basic_block(irb, scope, "CoroEarlyFinal"); irb->exec->coro_normal_final = ir_create_basic_block(irb, scope, "CoroNormalFinal"); + irb->exec->coro_suspend_block = ir_create_basic_block(irb, scope, "Suspend"); + irb->exec->coro_final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup"); } IrInstruction *result = ir_gen_node_extra(irb, node, scope, LVAL_NONE); @@ -6112,8 +6189,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec if (is_async) { IrBasicBlock *invalid_resume_block = ir_create_basic_block(irb, scope, "InvalidResume"); - IrBasicBlock *final_cleanup_block = ir_create_basic_block(irb, scope, "FinalCleanup"); - IrBasicBlock *suspend_block = ir_create_basic_block(irb, scope, "Suspend"); IrBasicBlock *check_free_block = ir_create_basic_block(irb, scope, "CheckFree"); ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_early_final); @@ -6123,10 +6198,10 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec cases[0].value = ir_build_const_u8(irb, scope, node, 0); cases[0].block = 
invalid_resume_block; cases[1].value = ir_build_const_u8(irb, scope, node, 1); - cases[1].block = final_cleanup_block; - ir_build_switch_br(irb, scope, node, suspend_code, suspend_block, 2, cases, const_bool_false); + cases[1].block = irb->exec->coro_final_cleanup_block; + ir_build_switch_br(irb, scope, node, suspend_code, irb->exec->coro_suspend_block, 2, cases, const_bool_false); - ir_set_cursor_at_end_and_append_block(irb, suspend_block); + ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_suspend_block); ir_build_coro_end(irb, scope, node); ir_build_return(irb, scope, node, irb->exec->coro_handle); @@ -6136,7 +6211,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final); ir_build_br(irb, scope, node, check_free_block, const_bool_false); - ir_set_cursor_at_end_and_append_block(irb, final_cleanup_block); + ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block); if (type_has_bits(return_type)) { IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr); IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, result_ptr); @@ -6152,7 +6227,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_set_cursor_at_end_and_append_block(irb, check_free_block); IrBasicBlock **incoming_blocks = allocate(2); IrInstruction **incoming_values = allocate(2); - incoming_blocks[0] = final_cleanup_block; + incoming_blocks[0] = irb->exec->coro_final_cleanup_block; incoming_values[0] = const_bool_false; incoming_blocks[1] = irb->exec->coro_normal_final; incoming_values[1] = const_bool_true; @@ -17219,6 +17294,18 @@ static TypeTableEntry *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInst return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_coro_save(IrAnalyze *ira, IrInstructionCoroSave *instruction) { + 
IrInstruction *coro_handle = instruction->coro_handle->other; + if (type_is_invalid(coro_handle->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_coro_save(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, coro_handle); + ir_link_new_instruction(result, &instruction->base); + result->value.type = ira->codegen->builtin_types.entry_usize; + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { @@ -17444,6 +17531,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_coro_free(ira, (IrInstructionCoroFree *)instruction); case IrInstructionIdCoroResume: return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); + case IrInstructionIdCoroSave: + return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction); } zig_unreachable(); } @@ -17566,6 +17655,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroAllocFail: case IrInstructionIdCoroEnd: case IrInstructionIdCoroResume: + case IrInstructionIdCoroSave: return true; case IrInstructionIdPhi: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index ca7eb25879..2e367672a5 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1090,6 +1090,12 @@ static void ir_print_coro_resume(IrPrint *irp, IrInstructionCoroResume *instruct fprintf(irp->f, ")"); } +static void ir_print_coro_save(IrPrint *irp, IrInstructionCoroSave *instruction) { + fprintf(irp->f, "@coroSave("); + ir_print_other_instruction(irp, instruction->coro_handle); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1443,6 +1449,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case 
IrInstructionIdCoroResume: ir_print_coro_resume(irp, (IrInstructionCoroResume *)instruction); break; + case IrInstructionIdCoroSave: + ir_print_coro_save(irp, (IrInstructionCoroSave *)instruction); + break; } fprintf(irp->f, "\n"); } From 9aa65c0e8e6e4135dcc04bcb388d1fa38c6d10f6 Mon Sep 17 00:00:00 2001 From: Ben Noordhuis Date: Mon, 26 Feb 2018 18:40:33 +0100 Subject: [PATCH 34/56] allow implicit cast from &const to ?&const &const Allow implicit casts from n-th degree const pointers to nullable const pointers of degree n+1. That is: fn f() void { const s = S {}; const p = &s; g(p); // Works. g(&p); // So does this. } fn g(_: ?&const &const S) void { // Nullable 2nd degree const ptr. } Fixes #731 some more. --- src/ir.cpp | 3 ++- test/cases/cast.zig | 31 +++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/ir.cpp b/src/ir.cpp index e79235830c..b1fd7104ea 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -8830,7 +8830,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } else if (wanted_child_type->id == TypeTableEntryIdPointer && wanted_child_type->data.pointer.is_const && - is_container(actual_type)) { + (actual_type->id == TypeTableEntryIdPointer || is_container(actual_type))) + { IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_child_type, value); if (type_is_invalid(cast1->value.type)) return ira->codegen->invalid_instruction; diff --git a/test/cases/cast.zig b/test/cases/cast.zig index dabf97a799..d2671680c8 100644 --- a/test/cases/cast.zig +++ b/test/cases/cast.zig @@ -103,6 +103,37 @@ const Enum = enum { } }; +test "implicitly cast indirect pointer to maybe-indirect pointer" { + const S = struct { + const Self = this; + x: u8, + fn constConst(p: &const &const Self) u8 { + return (*p).x; + } + fn maybeConstConst(p: ?&const &const Self) u8 { + return (*??p).x; + } + fn constConstConst(p: &const &const &const Self) u8 { + return (**p).x; + } + fn maybeConstConstConst(p: 
?&const &const &const Self) u8 { + return (**??p).x; + } + }; + const s = S { .x = 42 }; + const p = &s; + const q = &p; + const r = &q; + assert(42 == S.constConst(p)); + assert(42 == S.constConst(q)); + assert(42 == S.maybeConstConst(p)); + assert(42 == S.maybeConstConst(q)); + assert(42 == S.constConstConst(q)); + assert(42 == S.constConstConst(r)); + assert(42 == S.maybeConstConstConst(q)); + assert(42 == S.maybeConstConstConst(r)); +} + test "explicit cast from integer to error type" { testCastIntToErr(error.ItBroke); comptime testCastIntToErr(error.ItBroke); From 4ac6c4d6bfb8f7ada2799ddb5ce3a9797be0518d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2018 21:14:15 -0500 Subject: [PATCH 35/56] workaround llvm coro transformations by making alloc and free functions be parameters to async functions instead of using getelementptr in the DynAlloc block See #727 --- src/all_types.hpp | 11 ++++ src/analyze.cpp | 34 ++++++++++-- src/codegen.cpp | 37 +++++++++++-- src/ir.cpp | 136 ++++++++++++++++++++++++++++++++-------------- src/ir_print.cpp | 14 ++++- std/mem.zig | 16 ------ 6 files changed, 179 insertions(+), 69 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 63292dd8ec..4220523126 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2208,7 +2208,10 @@ struct IrInstructionCall { LLVMValueRef tmp_ptr; FnInline fn_inline; bool is_async; + IrInstruction *async_allocator; + IrInstruction *alloc_fn; + IrInstruction *free_fn; }; struct IrInstructionConst { @@ -2849,8 +2852,16 @@ struct IrInstructionCancel { IrInstruction *target; }; +enum ImplicitAllocatorId { + ImplicitAllocatorIdContext, + ImplicitAllocatorIdAlloc, + ImplicitAllocatorIdFree, +}; + struct IrInstructionGetImplicitAllocator { IrInstruction base; + + ImplicitAllocatorId id; }; struct IrInstructionCoroId { diff --git a/src/analyze.cpp b/src/analyze.cpp index ce9e99f8fa..26924cc7db 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1006,13 +1006,13 @@ 
TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { fn_type_id->return_type->id == TypeTableEntryIdErrorSet); // +1 for maybe making the first argument the return value // +1 for maybe first argument the error return trace - // +2 for maybe arguments async allocator and error code pointer - LLVMTypeRef *gen_param_types = allocate(4 + fn_type_id->param_count); + // +4 for maybe arguments async allocator and error code pointer + LLVMTypeRef *gen_param_types = allocate(6 + fn_type_id->param_count); // +1 because 0 is the return type and // +1 for maybe making first arg ret val and // +1 for maybe first argument the error return trace - // +2 for maybe arguments async allocator and error code pointer - ZigLLVMDIType **param_di_types = allocate(5 + fn_type_id->param_count); + // +4 for maybe arguments async allocator and error code pointer + ZigLLVMDIType **param_di_types = allocate(7 + fn_type_id->param_count); param_di_types[0] = fn_type_id->return_type->di_type; size_t gen_param_index = 0; TypeTableEntry *gen_return_type; @@ -1049,6 +1049,32 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { param_di_types[gen_param_index] = gen_type->di_type; } + { + // async alloc fn param + assert(fn_type_id->async_allocator_type->id == TypeTableEntryIdPointer); + TypeTableEntry *struct_type = fn_type_id->async_allocator_type->data.pointer.child_type; + TypeStructField *alloc_fn_field = find_struct_type_field(struct_type, buf_create_from_str("allocFn")); + assert(alloc_fn_field->type_entry->id == TypeTableEntryIdFn); + TypeTableEntry *gen_type = alloc_fn_field->type_entry; + gen_param_types[gen_param_index] = gen_type->type_ref; + gen_param_index += 1; + // after the gen_param_index += 1 because 0 is the return type + param_di_types[gen_param_index] = gen_type->di_type; + } + + { + // async free fn param + assert(fn_type_id->async_allocator_type->id == TypeTableEntryIdPointer); + TypeTableEntry *struct_type = 
fn_type_id->async_allocator_type->data.pointer.child_type; + TypeStructField *free_fn_field = find_struct_type_field(struct_type, buf_create_from_str("freeFn")); + assert(free_fn_field->type_entry->id == TypeTableEntryIdFn); + TypeTableEntry *gen_type = free_fn_field->type_entry; + gen_param_types[gen_param_index] = gen_type->type_ref; + gen_param_index += 1; + // after the gen_param_index += 1 because 0 is the return type + param_di_types[gen_param_index] = gen_type->di_type; + } + { // error code pointer TypeTableEntry *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false); diff --git a/src/codegen.cpp b/src/codegen.cpp index f82c686b85..21fc28e4af 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2667,6 +2667,18 @@ static bool get_prefix_arg_err_ret_stack(CodeGen *g, TypeTableEntry *src_return_ (src_return_type->id == TypeTableEntryIdErrorUnion || src_return_type->id == TypeTableEntryIdErrorSet); } +static size_t get_async_allocator_arg_index(CodeGen *g, TypeTableEntry *src_return_type) { + // 0 1 2 3 4 5 + // err_ret_stack allocator_ptr alloc free err_code other_args... + return get_prefix_arg_err_ret_stack(g, src_return_type) ? 1 : 0; +} + +static size_t get_async_err_code_arg_index(CodeGen *g, TypeTableEntry *src_return_type) { + // 0 1 2 3 4 5 + // err_ret_stack allocator_ptr alloc free err_code other_args... + return 3 + get_async_allocator_arg_index(g, src_return_type); +} + static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) { LLVMValueRef fn_val; TypeTableEntry *fn_type; @@ -2687,7 +2699,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc); bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); - size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 
1 : 0) + (prefix_arg_err_ret_stack ? 1 : 0); + // +4 for the async args + size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0) + (prefix_arg_err_ret_stack ? 1 : 0) + 4; bool is_var_args = fn_type_id->is_var_args; LLVMValueRef *gen_param_values = allocate(actual_param_count); size_t gen_param_index = 0; @@ -2703,6 +2716,12 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr gen_param_values[gen_param_index] = ir_llvm_value(g, instruction->async_allocator); gen_param_index += 1; + gen_param_values[gen_param_index] = ir_llvm_value(g, instruction->alloc_fn); + gen_param_index += 1; + + gen_param_values[gen_param_index] = ir_llvm_value(g, instruction->free_fn); + gen_param_index += 1; + LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_err_index, ""); LLVMBuildStore(g->builder, LLVMConstNull(g->builtin_types.entry_global_error_set->type_ref), err_val_ptr); gen_param_values[gen_param_index] = err_val_ptr; @@ -3281,9 +3300,16 @@ static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *e IrInstructionGetImplicitAllocator *instruction) { TypeTableEntry *src_return_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); - size_t allocator_arg_index = prefix_arg_err_ret_stack ? 
1 : 0; - return LLVMGetParam(g->cur_fn_val, allocator_arg_index); + size_t allocator_arg_index = get_async_allocator_arg_index(g, src_return_type); + switch (instruction->id) { + case ImplicitAllocatorIdContext: + return LLVMGetParam(g->cur_fn_val, allocator_arg_index + 0); + case ImplicitAllocatorIdAlloc: + return LLVMGetParam(g->cur_fn_val, allocator_arg_index + 1); + case ImplicitAllocatorIdFree: + return LLVMGetParam(g->cur_fn_val, allocator_arg_index + 2); + } + zig_unreachable(); } static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { @@ -3915,8 +3941,7 @@ static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executab IrInstructionCoroAllocFail *instruction) { TypeTableEntry *src_return_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); - size_t err_code_ptr_arg_index = prefix_arg_err_ret_stack ? 2 : 1; + size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, src_return_type); LLVMValueRef err_code_ptr_val = LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index); LLVMValueRef err_code = ir_llvm_value(g, instruction->err_val); LLVMBuildStore(g->builder, err_code, err_code_ptr_val); diff --git a/src/ir.cpp b/src/ir.cpp index 2600f5e948..bca133b8e5 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1066,7 +1066,7 @@ static IrInstruction *ir_build_union_field_ptr_from(IrBuilder *irb, IrInstructio static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *alloc_fn, IrInstruction *free_fn) { IrInstructionCall *call_instruction = ir_build_instruction(irb, scope, source_node); call_instruction->fn_entry = fn_entry; @@ -1077,6 +1077,8 
@@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc call_instruction->arg_count = arg_count; call_instruction->is_async = is_async; call_instruction->async_allocator = async_allocator; + call_instruction->alloc_fn = alloc_fn; + call_instruction->free_fn = free_fn; if (fn_ref) ir_ref_instruction(fn_ref, irb->current_basic_block); @@ -1084,16 +1086,20 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc ir_ref_instruction(args[i], irb->current_basic_block); if (async_allocator) ir_ref_instruction(async_allocator, irb->current_basic_block); + if (alloc_fn) + ir_ref_instruction(alloc_fn, irb->current_basic_block); + if (free_fn) + ir_ref_instruction(free_fn, irb->current_basic_block); return &call_instruction->base; } static IrInstruction *ir_build_call_from(IrBuilder *irb, IrInstruction *old_instruction, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *alloc_fn, IrInstruction *free_fn) { IrInstruction *new_instruction = ir_build_call(irb, old_instruction->scope, - old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator); + old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator, alloc_fn, free_fn); ir_link_new_instruction(new_instruction, old_instruction); return new_instruction; } @@ -2495,8 +2501,9 @@ static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *sou return &instruction->base; } -static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node) { +static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node, ImplicitAllocatorId id) { 
IrInstructionGetImplicitAllocator *instruction = ir_build_instruction(irb, scope, source_node); + instruction->id = id; return &instruction->base; } @@ -3970,7 +3977,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever; - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr); + return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr, nullptr, nullptr); } case BuiltinFnIdTypeId: { @@ -4106,15 +4113,25 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node bool is_async = node->data.fn_call_expr.is_async; IrInstruction *async_allocator = nullptr; + IrInstruction *alloc_fn = nullptr; + IrInstruction *free_fn = nullptr; if (is_async) { if (node->data.fn_call_expr.async_allocator) { async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope); if (async_allocator == irb->codegen->invalid_instruction) return async_allocator; + + Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); + IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, async_allocator, alloc_field_name); + alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); + + Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); + IrInstruction *free_fn_ptr = ir_build_field_ptr(irb, scope, node, async_allocator, free_field_name); + free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); } } - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator); + return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator, alloc_fn, free_fn); } static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -6106,20 +6123,16 @@ bool ir_gen(CodeGen 
*codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_set_cursor_at_end_and_append_block(irb, dyn_alloc_block); IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); - irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node); - Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); - IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, - alloc_field_name); - IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); - IrInstruction *alignment = ir_build_const_u29(irb, scope, node, - get_coro_frame_align_bytes(irb->codegen)); + irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdContext); + IrInstruction *alloc_fn = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdAlloc); + IrInstruction *alignment = ir_build_const_u29(irb, scope, node, get_coro_frame_align_bytes(irb->codegen)); size_t arg_count = 3; IrInstruction **args = allocate(arg_count); args[0] = irb->exec->implicit_allocator_ptr; // self args[1] = coro_size; // byte_count args[2] = alignment; // alignment IrInstruction *alloc_result = ir_build_call(irb, scope, node, nullptr, alloc_fn, arg_count, args, false, - FnInlineAuto, false, nullptr); + FnInlineAuto, false, nullptr, nullptr, nullptr); IrInstruction *alloc_result_ptr = ir_build_ref(irb, scope, node, alloc_result, true, false); IrInstruction *alloc_result_is_err = ir_build_test_err(irb, scope, node, alloc_result); IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, scope, "AllocError"); @@ -6237,15 +6250,12 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_build_cond_br(irb, scope, node, coro_need_dyn_alloc, dyn_free_block, end_free_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, dyn_free_block); - Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); - IrInstruction 
*free_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, - free_field_name); - IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); + IrInstruction *free_fn = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdFree); size_t arg_count = 2; IrInstruction **args = allocate(arg_count); args[0] = irb->exec->implicit_allocator_ptr; // self args[1] = ir_build_load_ptr(irb, scope, node, coro_unwrapped_mem_ptr); // old_mem - ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); + ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr, nullptr); ir_build_br(irb, scope, node, end_free_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, end_free_block); @@ -11266,7 +11276,7 @@ static TypeTableEntry *ir_analyze_instruction_error_union(IrAnalyze *ira, return ira->codegen->builtin_types.entry_type; } -IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr) { +IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, ImplicitAllocatorId id) { FnTableEntry *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); if (parent_fn_entry == nullptr) { ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available")); @@ -11280,27 +11290,39 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i } assert(parent_fn_type->async_allocator_type != nullptr); - IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, source_instr->source_node); - result->value.type = parent_fn_type->async_allocator_type; + IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, source_instr->source_node, id); + switch (id) { + case ImplicitAllocatorIdContext: + result->value.type = parent_fn_type->async_allocator_type; + break; + case 
ImplicitAllocatorIdAlloc: + { + assert(parent_fn_type->async_allocator_type->id == TypeTableEntryIdPointer); + TypeTableEntry *struct_type = parent_fn_type->async_allocator_type->data.pointer.child_type; + TypeStructField *alloc_fn_field = find_struct_type_field(struct_type, buf_create_from_str(ASYNC_ALLOC_FIELD_NAME)); + assert(alloc_fn_field->type_entry->id == TypeTableEntryIdFn); + result->value.type = alloc_fn_field->type_entry; + break; + } + case ImplicitAllocatorIdFree: + { + assert(parent_fn_type->async_allocator_type->id == TypeTableEntryIdPointer); + TypeTableEntry *struct_type = parent_fn_type->async_allocator_type->data.pointer.child_type; + TypeStructField *free_fn_field = find_struct_type_field(struct_type, buf_create_from_str(ASYNC_FREE_FIELD_NAME)); + assert(free_fn_field->type_entry->id == TypeTableEntryIdFn); + result->value.type = free_fn_field->type_entry; + break; + } + } return result; } static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *call_instruction, FnTableEntry *fn_entry, TypeTableEntry *fn_type, - IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst) + IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst, IrInstruction *alloc_fn, IrInstruction *free_fn) { - Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); - //Buf *free_field_name = buf_create_from_str("freeFn"); assert(async_allocator_inst->value.type->id == TypeTableEntryIdPointer); - TypeTableEntry *container_type = async_allocator_inst->value.type->data.pointer.child_type; - IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, alloc_field_name, &call_instruction->base, - async_allocator_inst, container_type); - if (type_is_invalid(field_ptr_inst->value.type)) { - return ira->codegen->invalid_instruction; - } - TypeTableEntry *ptr_to_alloc_fn_type = field_ptr_inst->value.type; - assert(ptr_to_alloc_fn_type->id == 
TypeTableEntryIdPointer); - TypeTableEntry *alloc_fn_type = ptr_to_alloc_fn_type->data.pointer.child_type; + TypeTableEntry *alloc_fn_type = alloc_fn->value.type; if (alloc_fn_type->id != TypeTableEntryIdFn) { ir_add_error(ira, &call_instruction->base, buf_sprintf("expected allocation function, found '%s'", buf_ptr(&alloc_fn_type->name))); @@ -11319,7 +11341,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *c TypeTableEntry *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type); IrInstruction *result = ir_build_call(&ira->new_irb, call_instruction->base.scope, call_instruction->base.source_node, - fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst); + fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst, alloc_fn, free_fn); result->value.type = async_return_type; return result; } @@ -11842,7 +11864,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal } IrInstruction *uncasted_async_allocator_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, ImplicitAllocatorIdContext); if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; } else { @@ -11886,15 +11908,25 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal size_t impl_param_count = impl_fn->type_entry->data.fn.fn_type_id.param_count; if (call_instruction->is_async) { - IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, fn_ref, casted_args, impl_param_count, async_allocator_inst); + IrInstruction *alloc_fn = call_instruction->alloc_fn->other; + if (type_is_invalid(alloc_fn->value.type)) + return 
ira->codegen->builtin_types.entry_invalid; + + IrInstruction *free_fn = call_instruction->free_fn->other; + if (type_is_invalid(free_fn->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, fn_ref, casted_args, impl_param_count, + async_allocator_inst, alloc_fn, free_fn); ir_link_new_instruction(result, &call_instruction->base); ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result->value.type); } + assert(async_allocator_inst == nullptr); IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, - call_instruction->is_async, async_allocator_inst); + call_instruction->is_async, nullptr, nullptr, nullptr); ir_add_alloca(ira, new_call_instruction, return_type); @@ -11961,20 +11993,40 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (call_instruction->is_async) { IrInstruction *uncasted_async_allocator_inst; + IrInstruction *uncasted_alloc_fn_inst; + IrInstruction *uncasted_free_fn_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, ImplicitAllocatorIdContext); if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; + + uncasted_alloc_fn_inst = ir_get_implicit_allocator(ira, &call_instruction->base, ImplicitAllocatorIdAlloc); + if (type_is_invalid(uncasted_alloc_fn_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + uncasted_free_fn_inst = ir_get_implicit_allocator(ira, &call_instruction->base, ImplicitAllocatorIdFree); + if (type_is_invalid(uncasted_free_fn_inst->value.type)) + return 
ira->codegen->builtin_types.entry_invalid; } else { uncasted_async_allocator_inst = call_instruction->async_allocator->other; if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; + + uncasted_alloc_fn_inst = call_instruction->alloc_fn->other; + if (type_is_invalid(uncasted_alloc_fn_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + uncasted_free_fn_inst = call_instruction->free_fn->other; + if (type_is_invalid(uncasted_free_fn_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } IrInstruction *async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, fn_type_id->async_allocator_type); if (type_is_invalid(async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; - IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, casted_args, call_param_count, async_allocator_inst); + IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, casted_args, call_param_count, + async_allocator_inst, uncasted_alloc_fn_inst, uncasted_free_fn_inst); ir_link_new_instruction(result, &call_instruction->base); ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result->value.type); @@ -11982,7 +12034,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, - fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr); + fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr, nullptr, nullptr); ir_add_alloca(ira, new_call_instruction, return_type); return ir_finish_anal(ira, return_type); @@ -17222,7 +17274,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstr } static TypeTableEntry 
*ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) { - IrInstruction *result = ir_get_implicit_allocator(ira, &instruction->base); + IrInstruction *result = ir_get_implicit_allocator(ira, &instruction->base, instruction->id); ir_link_new_instruction(result, &instruction->base); return result->value.type; } diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 2e367672a5..1314123110 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1027,7 +1027,19 @@ static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { } static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) { - fprintf(irp->f, "@getImplicitAllocator()"); + fprintf(irp->f, "@getImplicitAllocator("); + switch (instruction->id) { + case ImplicitAllocatorIdContext: + fprintf(irp->f, "Context"); + break; + case ImplicitAllocatorIdAlloc: + fprintf(irp->f, "Alloc"); + break; + case ImplicitAllocatorIdFree: + fprintf(irp->f, "Free"); + break; + } + fprintf(irp->f, ")"); } static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) { diff --git a/std/mem.zig b/std/mem.zig index 2adb647ef6..07521bfcb8 100644 --- a/std/mem.zig +++ b/std/mem.zig @@ -116,22 +116,6 @@ pub const Allocator = struct { const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr)); self.freeFn(self, non_const_ptr[0..bytes.len]); } - - pub const AsyncAllocator = struct { - allocator: &Allocator, - - fn alloc(self: &const AsyncAllocator, byte_count: usize, alignment: u29) Error![]u8 { - return self.allocator.allocFn(self.allocator, byte_count, alignment); - } - - fn free(self: &const AsyncAllocator, old_mem: []u8) void { - return self.allocator.freeFn(self.allocator, old_mem); - } - }; - - fn toAsync(self: &Allocator) AsyncAllocator { - return AsyncAllocator { .allocator = self }; - } }; /// Copy all of source into dest at position 0. 
From 4e43bde924694a640cfec13141df1f3f611ffe0f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 26 Feb 2018 11:52:29 -0500 Subject: [PATCH 36/56] workaround for llvm: delete coroutine allocation elision maybe this can be reverted, but it seems to be related to llvm's coro transformations crashing. See #727 --- src/ir.cpp | 42 +++--------------------------------------- 1 file changed, 3 insertions(+), 39 deletions(-) diff --git a/src/ir.cpp b/src/ir.cpp index bca133b8e5..00a7233897 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -6095,7 +6095,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *coro_id; IrInstruction *coro_promise_ptr; IrInstruction *coro_result_field_ptr; - IrInstruction *coro_need_dyn_alloc; TypeTableEntry *return_type; Buf *result_ptr_field_name; if (is_async) { @@ -6113,15 +6112,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_promise_ptr); coro_id = ir_build_coro_id(irb, scope, node, promise_as_u8_ptr); - coro_need_dyn_alloc = ir_build_coro_alloc(irb, scope, node, coro_id); - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); - IrInstruction *null_ptr = ir_build_int_to_ptr(irb, scope, node, u8_ptr_type, zero); - - IrBasicBlock *dyn_alloc_block = ir_create_basic_block(irb, scope, "DynAlloc"); - IrBasicBlock *coro_begin_block = ir_create_basic_block(irb, scope, "CoroBegin"); - ir_build_cond_br(irb, scope, node, coro_need_dyn_alloc, dyn_alloc_block, coro_begin_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, dyn_alloc_block); IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdContext); IrInstruction *alloc_fn = 
ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdAlloc); @@ -6144,31 +6134,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_build_coro_alloc_fail(irb, scope, node, err_val); ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); - IrInstruction *unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); + coro_unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); Buf *ptr_field_name = buf_create_from_str("ptr"); - IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, unwrapped_mem_ptr, + IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, coro_unwrapped_mem_ptr, ptr_field_name); - IrInstruction *coro_mem_ptr = ir_build_load_ptr(irb, scope, node, coro_mem_ptr_field); - ir_build_br(irb, scope, node, coro_begin_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, coro_begin_block); - - IrBasicBlock **coro_mem_incoming_blocks = allocate(2); - IrInstruction **coro_mem_incoming_values = allocate(2); - coro_mem_incoming_blocks[0] = entry_block; - coro_mem_incoming_values[0] = null_ptr; - coro_mem_incoming_blocks[1] = alloc_ok_block; - coro_mem_incoming_values[1] = coro_mem_ptr; - IrInstruction *coro_mem = ir_build_phi(irb, scope, node, 2, coro_mem_incoming_blocks, coro_mem_incoming_values); - - IrBasicBlock **unwrapped_mem_ptr_incoming_blocks = allocate(2); - IrInstruction **unwrapped_mem_ptr_incoming_values = allocate(2); - unwrapped_mem_ptr_incoming_blocks[0] = entry_block; - unwrapped_mem_ptr_incoming_values[0] = ir_build_const_undefined(irb, scope, node); - unwrapped_mem_ptr_incoming_blocks[1] = alloc_ok_block; - unwrapped_mem_ptr_incoming_values[1] = unwrapped_mem_ptr; - coro_unwrapped_mem_ptr = ir_build_phi(irb, scope, node, 2, - unwrapped_mem_ptr_incoming_blocks, unwrapped_mem_ptr_incoming_values); + IrInstruction *coro_mem = ir_build_load_ptr(irb, scope, node, 
coro_mem_ptr_field); irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem); @@ -6245,20 +6215,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec incoming_blocks[1] = irb->exec->coro_normal_final; incoming_values[1] = const_bool_true; IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); - IrBasicBlock *dyn_free_block = ir_create_basic_block(irb, scope, "DynFree"); - IrBasicBlock *end_free_block = ir_create_basic_block(irb, scope, "EndFree"); - ir_build_cond_br(irb, scope, node, coro_need_dyn_alloc, dyn_free_block, end_free_block, const_bool_false); - ir_set_cursor_at_end_and_append_block(irb, dyn_free_block); IrInstruction *free_fn = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdFree); size_t arg_count = 2; IrInstruction **args = allocate(arg_count); args[0] = irb->exec->implicit_allocator_ptr; // self args[1] = ir_build_load_ptr(irb, scope, node, coro_unwrapped_mem_ptr); // old_mem ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr, nullptr); - ir_build_br(irb, scope, node, end_free_block, const_bool_false); - ir_set_cursor_at_end_and_append_block(irb, end_free_block); IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "Return"); ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, return_block, const_bool_false); From 439621e44a68b436f958a84fcdb0bdac83613aea Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 27 Feb 2018 11:14:14 -0500 Subject: [PATCH 37/56] remove signal hanlding stuff from std.os.ChildProcess --- std/os/child_process.zig | 50 ---------------------------------------- 1 file changed, 50 deletions(-) diff --git a/std/os/child_process.zig b/std/os/child_process.zig index 27a91c1619..06802e657c 100644 --- a/std/os/child_process.zig +++ 
b/std/os/child_process.zig @@ -32,9 +32,6 @@ pub const ChildProcess = struct { pub argv: []const []const u8, - /// Possibly called from a signal handler. Must set this before calling `spawn`. - pub onTerm: ?fn(&ChildProcess)void, - /// Leave as null to use the current env map using the supplied allocator. pub env_map: ?&const BufMap, @@ -102,7 +99,6 @@ pub const ChildProcess = struct { .err_pipe = undefined, .llnode = undefined, .term = null, - .onTerm = null, .env_map = null, .cwd = null, .uid = if (is_windows) {} else null, @@ -124,7 +120,6 @@ pub const ChildProcess = struct { self.gid = user_info.gid; } - /// onTerm can be called before `spawn` returns. /// On success must call `kill` or `wait`. pub fn spawn(self: &ChildProcess) !void { if (is_windows) { @@ -165,9 +160,6 @@ pub const ChildProcess = struct { } pub fn killPosix(self: &ChildProcess) !Term { - block_SIGCHLD(); - defer restore_SIGCHLD(); - if (self.term) |term| { self.cleanupStreams(); return term; @@ -246,9 +238,6 @@ pub const ChildProcess = struct { } fn waitPosix(self: &ChildProcess) !Term { - block_SIGCHLD(); - defer restore_SIGCHLD(); - if (self.term) |term| { self.cleanupStreams(); return term; @@ -298,10 +287,6 @@ pub const ChildProcess = struct { fn handleWaitResult(self: &ChildProcess, status: i32) void { self.term = self.cleanupAfterWait(status); - - if (self.onTerm) |onTerm| { - onTerm(self); - } } fn cleanupStreams(self: &ChildProcess) void { @@ -347,9 +332,6 @@ pub const ChildProcess = struct { } fn spawnPosix(self: &ChildProcess) !void { - // TODO atomically set a flag saying that we already did this - install_SIGCHLD_handler(); - const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try makePipe() else undefined; errdefer if (self.stdin_behavior == StdIo.Pipe) { destroyPipe(stdin_pipe); }; @@ -387,11 +369,9 @@ pub const ChildProcess = struct { const err_pipe = try makePipe(); errdefer destroyPipe(err_pipe); - block_SIGCHLD(); const pid_result = posix.fork(); const pid_err = 
posix.getErrno(pid_result); if (pid_err > 0) { - restore_SIGCHLD(); return switch (pid_err) { posix.EAGAIN, posix.ENOMEM, posix.ENOSYS => error.SystemResources, else => os.unexpectedErrorPosix(pid_err), @@ -399,7 +379,6 @@ pub const ChildProcess = struct { } if (pid_result == 0) { // we are the child - restore_SIGCHLD(); setUpChildIo(self.stdin_behavior, stdin_pipe[0], posix.STDIN_FILENO, dev_null_fd) catch |err| forkChildErrReport(err_pipe[1], err); @@ -451,8 +430,6 @@ pub const ChildProcess = struct { // TODO make this atomic so it works even with threads children_nodes.prepend(&self.llnode); - restore_SIGCHLD(); - if (self.stdin_behavior == StdIo.Pipe) { os.close(stdin_pipe[0]); } if (self.stdout_behavior == StdIo.Pipe) { os.close(stdout_pipe[1]); } if (self.stderr_behavior == StdIo.Pipe) { os.close(stderr_pipe[1]); } @@ -824,30 +801,3 @@ fn handleTerm(pid: i32, status: i32) void { } } } - -const sigchld_set = x: { - var signal_set = posix.empty_sigset; - posix.sigaddset(&signal_set, posix.SIGCHLD); - break :x signal_set; -}; - -fn block_SIGCHLD() void { - const err = posix.getErrno(posix.sigprocmask(posix.SIG_BLOCK, &sigchld_set, null)); - assert(err == 0); -} - -fn restore_SIGCHLD() void { - const err = posix.getErrno(posix.sigprocmask(posix.SIG_UNBLOCK, &sigchld_set, null)); - assert(err == 0); -} - -const sigchld_action = posix.Sigaction { - .handler = sigchld_handler, - .mask = posix.empty_sigset, - .flags = posix.SA_RESTART | posix.SA_NOCLDSTOP, -}; - -fn install_SIGCHLD_handler() void { - const err = posix.getErrno(posix.sigaction(posix.SIGCHLD, &sigchld_action, null)); - assert(err == 0); -} From c2f5634fb3df51622cf74f23b4ae0d4a7d2bbbe9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 27 Feb 2018 10:00:07 -0500 Subject: [PATCH 38/56] another llvm workaround for getelementptr --- src/all_types.hpp | 11 +++++++- src/analyze.cpp | 8 ++++++ src/codegen.cpp | 68 +++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 81 insertions(+), 6 
deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 4220523126..52be88496b 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1461,6 +1461,14 @@ struct LinkLib { bool provided_explicitly; }; +struct WorkaroundStructGEPId { + LLVMTypeRef struct_ptr_type; + uint32_t index; +}; + +uint32_t workaround_struct_gep_hash(WorkaroundStructGEPId x); +bool workaround_struct_gep_eq(WorkaroundStructGEPId a, WorkaroundStructGEPId b); + struct CodeGen { LLVMModuleRef module; ZigList errors; @@ -1491,7 +1499,7 @@ struct CodeGen { HashMap exported_symbol_names; HashMap external_prototypes; HashMap string_literals_table; - + HashMap workaround_struct_gep_table; ZigList import_queue; size_t import_queue_index; @@ -1603,6 +1611,7 @@ struct CodeGen { LLVMValueRef cur_ret_ptr; LLVMValueRef cur_fn_val; LLVMValueRef cur_err_ret_trace_val; + bool cur_workaround_gep_on; bool c_want_stdint; bool c_want_stdbool; AstNode *root_export_decl; diff --git a/src/analyze.cpp b/src/analyze.cpp index 26924cc7db..9c5e4ffdde 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5821,3 +5821,11 @@ bool type_is_global_error_set(TypeTableEntry *err_set_type) { uint32_t get_coro_frame_align_bytes(CodeGen *g) { return g->pointer_size_bytes * 2; } + +uint32_t workaround_struct_gep_hash(WorkaroundStructGEPId x) { + return ptr_hash(x.struct_ptr_type) ^ x.index; +} + +bool workaround_struct_gep_eq(WorkaroundStructGEPId a, WorkaroundStructGEPId b) { + return a.struct_ptr_type == b.struct_ptr_type && a.index == b.index; +} diff --git a/src/codegen.cpp b/src/codegen.cpp index 21fc28e4af..54ad1fe311 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -88,6 +88,7 @@ CodeGen *codegen_create(Buf *root_src_path, const ZigTarget *target, OutType out g->exported_symbol_names.init(8); g->external_prototypes.init(8); g->string_literals_table.init(16); + g->workaround_struct_gep_table.init(8); g->is_test_build = false; g->want_h_file = (out_type == OutTypeObj || out_type == OutTypeLib); 
buf_resize(&g->global_asm, 0); @@ -2781,6 +2782,52 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } } +static LLVMValueRef get_workaround_struct_gep_fn_val(CodeGen *g, LLVMTypeRef struct_ptr_type, uint32_t index) { + WorkaroundStructGEPId hash_id = {struct_ptr_type, index}; + auto existing_entry = g->workaround_struct_gep_table.maybe_get(hash_id); + if (existing_entry) + return existing_entry->value; + + LLVMTypeRef arg_types[] = { + struct_ptr_type, + }; + LLVMTypeRef result_type = LLVMStructGetTypeAtIndex(LLVMGetElementType(struct_ptr_type), index); + LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(result_type, 0), arg_types, 1, false); + + Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_workaround_llvm_struct_gep"), false); + LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); + LLVMSetLinkage(fn_val, LLVMInternalLinkage); + LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); + addLLVMFnAttr(fn_val, "nounwind"); + addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); + + LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); + LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); + LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); + LLVMPositionBuilderAtEnd(g->builder, entry_block); + ZigLLVMClearCurrentDebugLocation(g->builder); + + LLVMValueRef result = LLVMBuildStructGEP(g->builder, LLVMGetParam(fn_val, 0), index, ""); + LLVMBuildRet(g->builder, result); + + LLVMPositionBuilderAtEnd(g->builder, prev_block); + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + + g->workaround_struct_gep_table.put(hash_id, fn_val); + return fn_val; +} + +static LLVMValueRef gen_workaround_struct_gep(CodeGen *g, LLVMValueRef struct_ptr, uint32_t field_index) { + if (g->cur_workaround_gep_on) { + // We need to generate a normal StructGEP but due to llvm bugs we have to workaround it by + // putting the GEP 
in a function call + LLVMValueRef fn_val = get_workaround_struct_gep_fn_val(g, LLVMTypeOf(struct_ptr), field_index); + return LLVMBuildCall(g->builder, fn_val, &struct_ptr, 1, ""); + } else { + return LLVMBuildStructGEP(g->builder, struct_ptr, field_index, ""); + } +} + static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executable, IrInstructionStructFieldPtr *instruction) { @@ -2799,7 +2846,7 @@ static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executa } assert(field->gen_index != SIZE_MAX); - return LLVMBuildStructGEP(g->builder, struct_ptr, (unsigned)field->gen_index, ""); + return gen_workaround_struct_gep(g, struct_ptr, field->gen_index); } static LLVMValueRef ir_render_union_field_ptr(CodeGen *g, IrExecutable *executable, @@ -3655,7 +3702,7 @@ static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrI LLVMValueRef err_val; if (type_has_bits(payload_type)) { - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); + LLVMValueRef err_val_ptr = gen_workaround_struct_gep(g, err_union_handle, err_union_err_index); err_val = gen_load_untyped(g, err_val_ptr, 0, false, ""); } else { err_val = err_union_handle; @@ -3674,7 +3721,12 @@ static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executab LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); if (type_has_bits(payload_type)) { - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); + LLVMValueRef err_val_ptr; + if (g->cur_workaround_gep_on) { + err_val_ptr = gen_workaround_struct_gep(g, err_union_handle, err_union_err_index); + } else { + err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); + } return gen_load_untyped(g, err_val_ptr, 0, false, ""); } else { return err_union_handle; @@ -3696,7 +3748,7 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen 
*g, IrExecutable *execu if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on && g->errors_by_index.length > 1) { LLVMValueRef err_val; if (type_has_bits(payload_type)) { - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); + LLVMValueRef err_val_ptr = gen_workaround_struct_gep(g, err_union_handle, err_union_err_index); err_val = gen_load_untyped(g, err_val_ptr, 0, false, ""); } else { err_val = err_union_handle; @@ -3714,7 +3766,11 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu } if (type_has_bits(payload_type)) { - return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_payload_index, ""); + if (g->cur_workaround_gep_on) { + return gen_workaround_struct_gep(g, err_union_handle, err_union_payload_index); + } else { + return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_payload_index, ""); + } } else { return nullptr; } @@ -3934,6 +3990,7 @@ static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, I coro_id, coro_mem_ptr, }; + g->cur_workaround_gep_on = false; return LLVMBuildCall(g->builder, get_coro_begin_fn_val(g), params, 2, ""); } @@ -5073,6 +5130,7 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef fn = fn_llvm_value(g, fn_table_entry); g->cur_fn = fn_table_entry; g->cur_fn_val = fn; + g->cur_workaround_gep_on = fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; TypeTableEntry *return_type = fn_table_entry->type_entry->data.fn.fn_type_id.return_type; if (handle_is_ptr(return_type)) { g->cur_ret_ptr = LLVMGetParam(fn, 0); From 6e2a67724c576420c1c0f005ccebdc6b0e708724 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 27 Feb 2018 14:58:02 -0500 Subject: [PATCH 39/56] Revert "another llvm workaround for getelementptr" This reverts commit c2f5634fb3df51622cf74f23b4ae0d4a7d2bbbe9. It doesn't work. 
With this, LLVM moves the allocate fn call to after llvm.coro.begin --- src/all_types.hpp | 11 +------- src/analyze.cpp | 8 ------ src/codegen.cpp | 68 ++++------------------------------------------- 3 files changed, 6 insertions(+), 81 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 52be88496b..4220523126 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1461,14 +1461,6 @@ struct LinkLib { bool provided_explicitly; }; -struct WorkaroundStructGEPId { - LLVMTypeRef struct_ptr_type; - uint32_t index; -}; - -uint32_t workaround_struct_gep_hash(WorkaroundStructGEPId x); -bool workaround_struct_gep_eq(WorkaroundStructGEPId a, WorkaroundStructGEPId b); - struct CodeGen { LLVMModuleRef module; ZigList errors; @@ -1499,7 +1491,7 @@ struct CodeGen { HashMap exported_symbol_names; HashMap external_prototypes; HashMap string_literals_table; - HashMap workaround_struct_gep_table; + ZigList import_queue; size_t import_queue_index; @@ -1611,7 +1603,6 @@ struct CodeGen { LLVMValueRef cur_ret_ptr; LLVMValueRef cur_fn_val; LLVMValueRef cur_err_ret_trace_val; - bool cur_workaround_gep_on; bool c_want_stdint; bool c_want_stdbool; AstNode *root_export_decl; diff --git a/src/analyze.cpp b/src/analyze.cpp index 9c5e4ffdde..26924cc7db 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5821,11 +5821,3 @@ bool type_is_global_error_set(TypeTableEntry *err_set_type) { uint32_t get_coro_frame_align_bytes(CodeGen *g) { return g->pointer_size_bytes * 2; } - -uint32_t workaround_struct_gep_hash(WorkaroundStructGEPId x) { - return ptr_hash(x.struct_ptr_type) ^ x.index; -} - -bool workaround_struct_gep_eq(WorkaroundStructGEPId a, WorkaroundStructGEPId b) { - return a.struct_ptr_type == b.struct_ptr_type && a.index == b.index; -} diff --git a/src/codegen.cpp b/src/codegen.cpp index 54ad1fe311..21fc28e4af 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -88,7 +88,6 @@ CodeGen *codegen_create(Buf *root_src_path, const ZigTarget *target, OutType out 
g->exported_symbol_names.init(8); g->external_prototypes.init(8); g->string_literals_table.init(16); - g->workaround_struct_gep_table.init(8); g->is_test_build = false; g->want_h_file = (out_type == OutTypeObj || out_type == OutTypeLib); buf_resize(&g->global_asm, 0); @@ -2782,52 +2781,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } } -static LLVMValueRef get_workaround_struct_gep_fn_val(CodeGen *g, LLVMTypeRef struct_ptr_type, uint32_t index) { - WorkaroundStructGEPId hash_id = {struct_ptr_type, index}; - auto existing_entry = g->workaround_struct_gep_table.maybe_get(hash_id); - if (existing_entry) - return existing_entry->value; - - LLVMTypeRef arg_types[] = { - struct_ptr_type, - }; - LLVMTypeRef result_type = LLVMStructGetTypeAtIndex(LLVMGetElementType(struct_ptr_type), index); - LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(result_type, 0), arg_types, 1, false); - - Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_workaround_llvm_struct_gep"), false); - LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); - LLVMSetLinkage(fn_val, LLVMInternalLinkage); - LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); - addLLVMFnAttr(fn_val, "nounwind"); - addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); - - LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); - LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); - LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); - LLVMPositionBuilderAtEnd(g->builder, entry_block); - ZigLLVMClearCurrentDebugLocation(g->builder); - - LLVMValueRef result = LLVMBuildStructGEP(g->builder, LLVMGetParam(fn_val, 0), index, ""); - LLVMBuildRet(g->builder, result); - - LLVMPositionBuilderAtEnd(g->builder, prev_block); - LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); - - g->workaround_struct_gep_table.put(hash_id, fn_val); - return fn_val; -} - -static 
LLVMValueRef gen_workaround_struct_gep(CodeGen *g, LLVMValueRef struct_ptr, uint32_t field_index) { - if (g->cur_workaround_gep_on) { - // We need to generate a normal StructGEP but due to llvm bugs we have to workaround it by - // putting the GEP in a function call - LLVMValueRef fn_val = get_workaround_struct_gep_fn_val(g, LLVMTypeOf(struct_ptr), field_index); - return LLVMBuildCall(g->builder, fn_val, &struct_ptr, 1, ""); - } else { - return LLVMBuildStructGEP(g->builder, struct_ptr, field_index, ""); - } -} - static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executable, IrInstructionStructFieldPtr *instruction) { @@ -2846,7 +2799,7 @@ static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executa } assert(field->gen_index != SIZE_MAX); - return gen_workaround_struct_gep(g, struct_ptr, field->gen_index); + return LLVMBuildStructGEP(g->builder, struct_ptr, (unsigned)field->gen_index, ""); } static LLVMValueRef ir_render_union_field_ptr(CodeGen *g, IrExecutable *executable, @@ -3702,7 +3655,7 @@ static LLVMValueRef ir_render_test_err(CodeGen *g, IrExecutable *executable, IrI LLVMValueRef err_val; if (type_has_bits(payload_type)) { - LLVMValueRef err_val_ptr = gen_workaround_struct_gep(g, err_union_handle, err_union_err_index); + LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); err_val = gen_load_untyped(g, err_val_ptr, 0, false, ""); } else { err_val = err_union_handle; @@ -3721,12 +3674,7 @@ static LLVMValueRef ir_render_unwrap_err_code(CodeGen *g, IrExecutable *executab LLVMValueRef err_union_handle = get_handle_value(g, err_union_ptr, err_union_type, ptr_type); if (type_has_bits(payload_type)) { - LLVMValueRef err_val_ptr; - if (g->cur_workaround_gep_on) { - err_val_ptr = gen_workaround_struct_gep(g, err_union_handle, err_union_err_index); - } else { - err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); - } + LLVMValueRef 
err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); return gen_load_untyped(g, err_val_ptr, 0, false, ""); } else { return err_union_handle; @@ -3748,7 +3696,7 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on && g->errors_by_index.length > 1) { LLVMValueRef err_val; if (type_has_bits(payload_type)) { - LLVMValueRef err_val_ptr = gen_workaround_struct_gep(g, err_union_handle, err_union_err_index); + LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, err_union_handle, err_union_err_index, ""); err_val = gen_load_untyped(g, err_val_ptr, 0, false, ""); } else { err_val = err_union_handle; @@ -3766,11 +3714,7 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu } if (type_has_bits(payload_type)) { - if (g->cur_workaround_gep_on) { - return gen_workaround_struct_gep(g, err_union_handle, err_union_payload_index); - } else { - return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_payload_index, ""); - } + return LLVMBuildStructGEP(g->builder, err_union_handle, err_union_payload_index, ""); } else { return nullptr; } @@ -3990,7 +3934,6 @@ static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, I coro_id, coro_mem_ptr, }; - g->cur_workaround_gep_on = false; return LLVMBuildCall(g->builder, get_coro_begin_fn_val(g), params, 2, ""); } @@ -5130,7 +5073,6 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef fn = fn_llvm_value(g, fn_table_entry); g->cur_fn = fn_table_entry; g->cur_fn_val = fn; - g->cur_workaround_gep_on = fn_table_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; TypeTableEntry *return_type = fn_table_entry->type_entry->data.fn.fn_type_id.return_type; if (handle_is_ptr(return_type)) { g->cur_ret_ptr = LLVMGetParam(fn, 0); From 132e604aa399a3bcb91996e550cf8972bd88422c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 
27 Feb 2018 17:12:53 -0500 Subject: [PATCH 40/56] llvm coroutine workaround: sret functions return sret pointer --- src/analyze.cpp | 5 ++++- src/codegen.cpp | 13 ++++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 26924cc7db..09c5566c5f 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1026,7 +1026,10 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { gen_param_index += 1; // after the gen_param_index += 1 because 0 is the return type param_di_types[gen_param_index] = gen_type->di_type; - gen_return_type = g->builtin_types.entry_void; + + // as a workaround for LLVM coroutines not understanding instruction dependencies, + // we return the sret pointer argument instead of returning void + gen_return_type = gen_type; } else { gen_return_type = fn_type_id->return_type; } diff --git a/src/codegen.cpp b/src/codegen.cpp index 21fc28e4af..9f69f29b80 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -547,7 +547,10 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { } else if (handle_is_ptr(return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc)) { - addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret"); + // We do not add the sret attribute, because it would require the return type to be void, + // and we want the return value to return the sret pointer, to work around LLVM Coroutine + // transformation passes not understanding the data dependency. 
+ //addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret"); addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull"); } @@ -1616,7 +1619,9 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns if (calling_convention_does_first_arg_return(g->cur_fn->type_entry->data.fn.fn_type_id.cc)) { assert(g->cur_ret_ptr); gen_assign_raw(g, g->cur_ret_ptr, get_pointer_to_type(g, return_type, false), value); - LLVMBuildRetVoid(g->builder); + // as a workaround for LLVM coroutines not understanding instruction dependencies, + // we return the sret pointer argument instead of returning void + LLVMBuildRet(g->builder, g->cur_ret_ptr); } else { LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, ""); LLVMBuildRet(g->builder, by_val_value); @@ -2775,7 +2780,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } else if (!ret_has_bits) { return nullptr; } else if (first_arg_ret) { - return instruction->tmp_ptr; + // instead of returning instruction->tmp_ptr here, we trust that the function returned the first arg. 
+ // this is a workaround for llvm coroutines not understanding the data dependency + return result; } else { return result; } From 138d6f909321fb4fddeaa172357336c384c64eda Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 27 Feb 2018 17:46:13 -0500 Subject: [PATCH 41/56] revert workaround for alloc and free as coro params reverts 4ac6c4d6bfb8f7ada2799ddb5ce3a9797be0518d the workaround didn't work --- src/all_types.hpp | 10 ---- src/analyze.cpp | 34 ++---------- src/codegen.cpp | 30 +++-------- src/ir.cpp | 132 +++++++++++++++------------------------------- src/ir_print.cpp | 14 +---- 5 files changed, 55 insertions(+), 165 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 4220523126..5073ffaceb 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -2210,8 +2210,6 @@ struct IrInstructionCall { bool is_async; IrInstruction *async_allocator; - IrInstruction *alloc_fn; - IrInstruction *free_fn; }; struct IrInstructionConst { @@ -2852,16 +2850,8 @@ struct IrInstructionCancel { IrInstruction *target; }; -enum ImplicitAllocatorId { - ImplicitAllocatorIdContext, - ImplicitAllocatorIdAlloc, - ImplicitAllocatorIdFree, -}; - struct IrInstructionGetImplicitAllocator { IrInstruction base; - - ImplicitAllocatorId id; }; struct IrInstructionCoroId { diff --git a/src/analyze.cpp b/src/analyze.cpp index 09c5566c5f..f95c9396cb 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1006,13 +1006,13 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { fn_type_id->return_type->id == TypeTableEntryIdErrorSet); // +1 for maybe making the first argument the return value // +1 for maybe first argument the error return trace - // +4 for maybe arguments async allocator and error code pointer - LLVMTypeRef *gen_param_types = allocate(6 + fn_type_id->param_count); + // +2 for maybe arguments async allocator and error code pointer + LLVMTypeRef *gen_param_types = allocate(4 + fn_type_id->param_count); // +1 because 0 is the return type and // +1 for 
maybe making first arg ret val and // +1 for maybe first argument the error return trace - // +4 for maybe arguments async allocator and error code pointer - ZigLLVMDIType **param_di_types = allocate(7 + fn_type_id->param_count); + // +2 for maybe arguments async allocator and error code pointer + ZigLLVMDIType **param_di_types = allocate(5 + fn_type_id->param_count); param_di_types[0] = fn_type_id->return_type->di_type; size_t gen_param_index = 0; TypeTableEntry *gen_return_type; @@ -1052,32 +1052,6 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { param_di_types[gen_param_index] = gen_type->di_type; } - { - // async alloc fn param - assert(fn_type_id->async_allocator_type->id == TypeTableEntryIdPointer); - TypeTableEntry *struct_type = fn_type_id->async_allocator_type->data.pointer.child_type; - TypeStructField *alloc_fn_field = find_struct_type_field(struct_type, buf_create_from_str("allocFn")); - assert(alloc_fn_field->type_entry->id == TypeTableEntryIdFn); - TypeTableEntry *gen_type = alloc_fn_field->type_entry; - gen_param_types[gen_param_index] = gen_type->type_ref; - gen_param_index += 1; - // after the gen_param_index += 1 because 0 is the return type - param_di_types[gen_param_index] = gen_type->di_type; - } - - { - // async free fn param - assert(fn_type_id->async_allocator_type->id == TypeTableEntryIdPointer); - TypeTableEntry *struct_type = fn_type_id->async_allocator_type->data.pointer.child_type; - TypeStructField *free_fn_field = find_struct_type_field(struct_type, buf_create_from_str("freeFn")); - assert(free_fn_field->type_entry->id == TypeTableEntryIdFn); - TypeTableEntry *gen_type = free_fn_field->type_entry; - gen_param_types[gen_param_index] = gen_type->type_ref; - gen_param_index += 1; - // after the gen_param_index += 1 because 0 is the return type - param_di_types[gen_param_index] = gen_type->di_type; - } - { // error code pointer TypeTableEntry *gen_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, 
false); diff --git a/src/codegen.cpp b/src/codegen.cpp index 9f69f29b80..7fda1eaa3f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2673,15 +2673,15 @@ static bool get_prefix_arg_err_ret_stack(CodeGen *g, TypeTableEntry *src_return_ } static size_t get_async_allocator_arg_index(CodeGen *g, TypeTableEntry *src_return_type) { - // 0 1 2 3 4 5 - // err_ret_stack allocator_ptr alloc free err_code other_args... + // 0 1 2 3 + // err_ret_stack allocator_ptr err_code other_args... return get_prefix_arg_err_ret_stack(g, src_return_type) ? 1 : 0; } static size_t get_async_err_code_arg_index(CodeGen *g, TypeTableEntry *src_return_type) { - // 0 1 2 3 4 5 - // err_ret_stack allocator_ptr alloc free err_code other_args... - return 3 + get_async_allocator_arg_index(g, src_return_type); + // 0 1 2 3 + // err_ret_stack allocator_ptr err_code other_args... + return 1 + get_async_allocator_arg_index(g, src_return_type); } static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) { @@ -2704,8 +2704,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc); bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); - // +4 for the async args - size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0) + (prefix_arg_err_ret_stack ? 1 : 0) + 4; + // +2 for the async args + size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0) + (prefix_arg_err_ret_stack ? 
1 : 0) + 2; bool is_var_args = fn_type_id->is_var_args; LLVMValueRef *gen_param_values = allocate(actual_param_count); size_t gen_param_index = 0; @@ -2721,12 +2721,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr gen_param_values[gen_param_index] = ir_llvm_value(g, instruction->async_allocator); gen_param_index += 1; - gen_param_values[gen_param_index] = ir_llvm_value(g, instruction->alloc_fn); - gen_param_index += 1; - - gen_param_values[gen_param_index] = ir_llvm_value(g, instruction->free_fn); - gen_param_index += 1; - LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_err_index, ""); LLVMBuildStore(g->builder, LLVMConstNull(g->builtin_types.entry_global_error_set->type_ref), err_val_ptr); gen_param_values[gen_param_index] = err_val_ptr; @@ -3308,15 +3302,7 @@ static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *e { TypeTableEntry *src_return_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; size_t allocator_arg_index = get_async_allocator_arg_index(g, src_return_type); - switch (instruction->id) { - case ImplicitAllocatorIdContext: - return LLVMGetParam(g->cur_fn_val, allocator_arg_index + 0); - case ImplicitAllocatorIdAlloc: - return LLVMGetParam(g->cur_fn_val, allocator_arg_index + 1); - case ImplicitAllocatorIdFree: - return LLVMGetParam(g->cur_fn_val, allocator_arg_index + 2); - } - zig_unreachable(); + return LLVMGetParam(g->cur_fn_val, allocator_arg_index); } static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { diff --git a/src/ir.cpp b/src/ir.cpp index 00a7233897..9d213d1ddb 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1066,7 +1066,7 @@ static IrInstruction *ir_build_union_field_ptr_from(IrBuilder *irb, IrInstructio static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline 
fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *alloc_fn, IrInstruction *free_fn) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) { IrInstructionCall *call_instruction = ir_build_instruction(irb, scope, source_node); call_instruction->fn_entry = fn_entry; @@ -1077,8 +1077,6 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc call_instruction->arg_count = arg_count; call_instruction->is_async = is_async; call_instruction->async_allocator = async_allocator; - call_instruction->alloc_fn = alloc_fn; - call_instruction->free_fn = free_fn; if (fn_ref) ir_ref_instruction(fn_ref, irb->current_basic_block); @@ -1086,20 +1084,16 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc ir_ref_instruction(args[i], irb->current_basic_block); if (async_allocator) ir_ref_instruction(async_allocator, irb->current_basic_block); - if (alloc_fn) - ir_ref_instruction(alloc_fn, irb->current_basic_block); - if (free_fn) - ir_ref_instruction(free_fn, irb->current_basic_block); return &call_instruction->base; } static IrInstruction *ir_build_call_from(IrBuilder *irb, IrInstruction *old_instruction, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, IrInstruction *alloc_fn, IrInstruction *free_fn) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) { IrInstruction *new_instruction = ir_build_call(irb, old_instruction->scope, - old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator, alloc_fn, free_fn); + old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator); ir_link_new_instruction(new_instruction, old_instruction); return new_instruction; } @@ -2501,9 +2495,8 @@ static IrInstruction 
*ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *sou return &instruction->base; } -static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node, ImplicitAllocatorId id) { +static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node) { IrInstructionGetImplicitAllocator *instruction = ir_build_instruction(irb, scope, source_node); - instruction->id = id; return &instruction->base; } @@ -3977,7 +3970,7 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? FnInlineAlways : FnInlineNever; - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr, nullptr, nullptr); + return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr); } case BuiltinFnIdTypeId: { @@ -4113,25 +4106,15 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node bool is_async = node->data.fn_call_expr.is_async; IrInstruction *async_allocator = nullptr; - IrInstruction *alloc_fn = nullptr; - IrInstruction *free_fn = nullptr; if (is_async) { if (node->data.fn_call_expr.async_allocator) { async_allocator = ir_gen_node(irb, node->data.fn_call_expr.async_allocator, scope); if (async_allocator == irb->codegen->invalid_instruction) return async_allocator; - - Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); - IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, async_allocator, alloc_field_name); - alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); - - Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); - IrInstruction *free_fn_ptr = ir_build_field_ptr(irb, scope, node, async_allocator, free_field_name); - free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); } } - return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, 
args, false, FnInlineAuto, is_async, async_allocator, alloc_fn, free_fn); + return ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator); } static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node) { @@ -6113,16 +6096,20 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_promise_ptr); coro_id = ir_build_coro_id(irb, scope, node, promise_as_u8_ptr); IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); - irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdContext); - IrInstruction *alloc_fn = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdAlloc); - IrInstruction *alignment = ir_build_const_u29(irb, scope, node, get_coro_frame_align_bytes(irb->codegen)); + irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node); + Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); + IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, + alloc_field_name); + IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); + IrInstruction *alignment = ir_build_const_u29(irb, scope, node, + get_coro_frame_align_bytes(irb->codegen)); size_t arg_count = 3; IrInstruction **args = allocate(arg_count); args[0] = irb->exec->implicit_allocator_ptr; // self args[1] = coro_size; // byte_count args[2] = alignment; // alignment IrInstruction *alloc_result = ir_build_call(irb, scope, node, nullptr, alloc_fn, arg_count, args, false, - FnInlineAuto, false, nullptr, nullptr, nullptr); + FnInlineAuto, false, nullptr); IrInstruction *alloc_result_ptr = ir_build_ref(irb, scope, node, alloc_result, true, false); IrInstruction *alloc_result_is_err = ir_build_test_err(irb, scope, node, alloc_result); 
IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, scope, "AllocError"); @@ -6216,12 +6203,15 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec incoming_values[1] = const_bool_true; IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); - IrInstruction *free_fn = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdFree); + Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); + IrInstruction *free_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, + free_field_name); + IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); size_t arg_count = 2; IrInstruction **args = allocate(arg_count); args[0] = irb->exec->implicit_allocator_ptr; // self args[1] = ir_build_load_ptr(irb, scope, node, coro_unwrapped_mem_ptr); // old_mem - ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr, nullptr); + ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "Return"); @@ -11240,7 +11230,7 @@ static TypeTableEntry *ir_analyze_instruction_error_union(IrAnalyze *ira, return ira->codegen->builtin_types.entry_type; } -IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, ImplicitAllocatorId id) { +IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr) { FnTableEntry *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); if (parent_fn_entry == nullptr) { ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available")); @@ -11254,39 +11244,27 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i } assert(parent_fn_type->async_allocator_type != nullptr); - IrInstruction *result = 
ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, source_instr->source_node, id); - switch (id) { - case ImplicitAllocatorIdContext: - result->value.type = parent_fn_type->async_allocator_type; - break; - case ImplicitAllocatorIdAlloc: - { - assert(parent_fn_type->async_allocator_type->id == TypeTableEntryIdPointer); - TypeTableEntry *struct_type = parent_fn_type->async_allocator_type->data.pointer.child_type; - TypeStructField *alloc_fn_field = find_struct_type_field(struct_type, buf_create_from_str(ASYNC_ALLOC_FIELD_NAME)); - assert(alloc_fn_field->type_entry->id == TypeTableEntryIdFn); - result->value.type = alloc_fn_field->type_entry; - break; - } - case ImplicitAllocatorIdFree: - { - assert(parent_fn_type->async_allocator_type->id == TypeTableEntryIdPointer); - TypeTableEntry *struct_type = parent_fn_type->async_allocator_type->data.pointer.child_type; - TypeStructField *free_fn_field = find_struct_type_field(struct_type, buf_create_from_str(ASYNC_FREE_FIELD_NAME)); - assert(free_fn_field->type_entry->id == TypeTableEntryIdFn); - result->value.type = free_fn_field->type_entry; - break; - } - } + IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, source_instr->source_node); + result->value.type = parent_fn_type->async_allocator_type; return result; } static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *call_instruction, FnTableEntry *fn_entry, TypeTableEntry *fn_type, - IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst, IrInstruction *alloc_fn, IrInstruction *free_fn) + IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst) { + Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); + //Buf *free_field_name = buf_create_from_str("freeFn"); assert(async_allocator_inst->value.type->id == TypeTableEntryIdPointer); + TypeTableEntry *container_type = 
async_allocator_inst->value.type->data.pointer.child_type; + IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, alloc_field_name, &call_instruction->base, + async_allocator_inst, container_type); + if (type_is_invalid(field_ptr_inst->value.type)) { + return ira->codegen->invalid_instruction; + } + TypeTableEntry *ptr_to_alloc_fn_type = field_ptr_inst->value.type; + assert(ptr_to_alloc_fn_type->id == TypeTableEntryIdPointer); - TypeTableEntry *alloc_fn_type = alloc_fn->value.type; + TypeTableEntry *alloc_fn_type = ptr_to_alloc_fn_type->data.pointer.child_type; if (alloc_fn_type->id != TypeTableEntryIdFn) { ir_add_error(ira, &call_instruction->base, buf_sprintf("expected allocation function, found '%s'", buf_ptr(&alloc_fn_type->name))); @@ -11305,7 +11283,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *c TypeTableEntry *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type); IrInstruction *result = ir_build_call(&ira->new_irb, call_instruction->base.scope, call_instruction->base.source_node, - fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst, alloc_fn, free_fn); + fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst); result->value.type = async_return_type; return result; } @@ -11828,7 +11806,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal } IrInstruction *uncasted_async_allocator_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, ImplicitAllocatorIdContext); + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; } else { @@ -11872,16 +11850,8 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall 
*cal size_t impl_param_count = impl_fn->type_entry->data.fn.fn_type_id.param_count; if (call_instruction->is_async) { - IrInstruction *alloc_fn = call_instruction->alloc_fn->other; - if (type_is_invalid(alloc_fn->value.type)) - return ira->codegen->builtin_types.entry_invalid; - - IrInstruction *free_fn = call_instruction->free_fn->other; - if (type_is_invalid(free_fn->value.type)) - return ira->codegen->builtin_types.entry_invalid; - IrInstruction *result = ir_analyze_async_call(ira, call_instruction, impl_fn, impl_fn->type_entry, fn_ref, casted_args, impl_param_count, - async_allocator_inst, alloc_fn, free_fn); + async_allocator_inst); ir_link_new_instruction(result, &call_instruction->base); ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result->value.type); @@ -11890,7 +11860,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal assert(async_allocator_inst == nullptr); IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, - call_instruction->is_async, nullptr, nullptr, nullptr); + call_instruction->is_async, nullptr); ir_add_alloca(ira, new_call_instruction, return_type); @@ -11957,40 +11927,22 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (call_instruction->is_async) { IrInstruction *uncasted_async_allocator_inst; - IrInstruction *uncasted_alloc_fn_inst; - IrInstruction *uncasted_free_fn_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, ImplicitAllocatorIdContext); + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; - - uncasted_alloc_fn_inst = ir_get_implicit_allocator(ira, &call_instruction->base, 
ImplicitAllocatorIdAlloc); - if (type_is_invalid(uncasted_alloc_fn_inst->value.type)) - return ira->codegen->builtin_types.entry_invalid; - - uncasted_free_fn_inst = ir_get_implicit_allocator(ira, &call_instruction->base, ImplicitAllocatorIdFree); - if (type_is_invalid(uncasted_free_fn_inst->value.type)) - return ira->codegen->builtin_types.entry_invalid; } else { uncasted_async_allocator_inst = call_instruction->async_allocator->other; if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; - uncasted_alloc_fn_inst = call_instruction->alloc_fn->other; - if (type_is_invalid(uncasted_alloc_fn_inst->value.type)) - return ira->codegen->builtin_types.entry_invalid; - - uncasted_free_fn_inst = call_instruction->free_fn->other; - if (type_is_invalid(uncasted_free_fn_inst->value.type)) - return ira->codegen->builtin_types.entry_invalid; - } IrInstruction *async_allocator_inst = ir_implicit_cast(ira, uncasted_async_allocator_inst, fn_type_id->async_allocator_type); if (type_is_invalid(async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; IrInstruction *result = ir_analyze_async_call(ira, call_instruction, fn_entry, fn_type, fn_ref, casted_args, call_param_count, - async_allocator_inst, uncasted_alloc_fn_inst, uncasted_free_fn_inst); + async_allocator_inst); ir_link_new_instruction(result, &call_instruction->base); ir_add_alloca(ira, result, result->value.type); return ir_finish_anal(ira, result->value.type); @@ -11998,7 +11950,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, - fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr, nullptr, nullptr); + fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr); ir_add_alloca(ira, new_call_instruction, return_type); return ir_finish_anal(ira, 
return_type); @@ -17238,7 +17190,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstr } static TypeTableEntry *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) { - IrInstruction *result = ir_get_implicit_allocator(ira, &instruction->base, instruction->id); + IrInstruction *result = ir_get_implicit_allocator(ira, &instruction->base); ir_link_new_instruction(result, &instruction->base); return result->value.type; } diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 1314123110..2e367672a5 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1027,19 +1027,7 @@ static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { } static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) { - fprintf(irp->f, "@getImplicitAllocator("); - switch (instruction->id) { - case ImplicitAllocatorIdContext: - fprintf(irp->f, "Context"); - break; - case ImplicitAllocatorIdAlloc: - fprintf(irp->f, "Alloc"); - break; - case ImplicitAllocatorIdFree: - fprintf(irp->f, "Free"); - break; - } - fprintf(irp->f, ")"); + fprintf(irp->f, "@getImplicitAllocator()"); } static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) { From d24345386274e3abcbcc676fe65bda127c06ce8e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 27 Feb 2018 17:47:18 -0500 Subject: [PATCH 42/56] Revert "llvm coroutine workaround: sret functions return sret pointer" This reverts commit 132e604aa399a3bcb91996e550cf8972bd88422c. 
this workaround didn't work either --- src/analyze.cpp | 5 +---- src/codegen.cpp | 13 +++---------- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index f95c9396cb..ce9e99f8fa 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1026,10 +1026,7 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { gen_param_index += 1; // after the gen_param_index += 1 because 0 is the return type param_di_types[gen_param_index] = gen_type->di_type; - - // as a workaround for LLVM coroutines not understanding instruction dependencies, - // we return the sret pointer argument instead of returning void - gen_return_type = gen_type; + gen_return_type = g->builtin_types.entry_void; } else { gen_return_type = fn_type_id->return_type; } diff --git a/src/codegen.cpp b/src/codegen.cpp index 7fda1eaa3f..d650daa6cb 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -547,10 +547,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { } else if (handle_is_ptr(return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc)) { - // We do not add the sret attribute, because it would require the return type to be void, - // and we want the return value to return the sret pointer, to work around LLVM Coroutine - // transformation passes not understanding the data dependency. 
- //addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret"); + addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret"); addLLVMArgAttr(fn_table_entry->llvm_value, 0, "nonnull"); } @@ -1619,9 +1616,7 @@ static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrIns if (calling_convention_does_first_arg_return(g->cur_fn->type_entry->data.fn.fn_type_id.cc)) { assert(g->cur_ret_ptr); gen_assign_raw(g, g->cur_ret_ptr, get_pointer_to_type(g, return_type, false), value); - // as a workaround for LLVM coroutines not understanding instruction dependencies, - // we return the sret pointer argument instead of returning void - LLVMBuildRet(g->builder, g->cur_ret_ptr); + LLVMBuildRetVoid(g->builder); } else { LLVMValueRef by_val_value = gen_load_untyped(g, value, 0, false, ""); LLVMBuildRet(g->builder, by_val_value); @@ -2774,9 +2769,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } else if (!ret_has_bits) { return nullptr; } else if (first_arg_ret) { - // instead of returning instruction->tmp_ptr here, we trust that the function returned the first arg. - // this is a workaround for llvm coroutines not understanding the data dependency - return result; + return instruction->tmp_ptr; } else { return result; } From 90598b4631e3b68565c7d62102a9e4615514a721 Mon Sep 17 00:00:00 2001 From: Ben Noordhuis Date: Wed, 28 Feb 2018 00:51:22 +0100 Subject: [PATCH 43/56] fix assert on self-referencing function ptr field The construct `struct S { f: fn(S) void }` is not legal because structs are not copyable but it should not result in an ICE. Fixes #795. 
--- src/analyze.cpp | 4 +++- test/compile_errors.zig | 12 ++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index c16a5d462a..9d5e7d77af 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1670,6 +1670,9 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { if (struct_type->data.structure.is_invalid) return; + if (struct_type->data.structure.zero_bits_loop_flag) + return; + AstNode *decl_node = struct_type->data.structure.decl_node; if (struct_type->data.structure.embedded_in_current) { @@ -1682,7 +1685,6 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { return; } - assert(!struct_type->data.structure.zero_bits_loop_flag); assert(struct_type->data.structure.fields); assert(decl_node->type == NodeTypeContainerDecl); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 940125711b..a3ac4e2344 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -3090,4 +3090,16 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { , ".tmp_source.zig:11:20: error: runtime cast to union 'Value' which has non-void fields", ".tmp_source.zig:3:5: note: field 'A' has type 'i32'"); + + cases.add("self-referencing function pointer field", + \\const S = struct { + \\ f: fn(_: S) void, + \\}; + \\fn f(_: S) void { + \\} + \\export fn entry() void { + \\ var _ = S { .f = f }; + \\} + , + ".tmp_source.zig:4:9: error: type 'S' is not copyable; cannot pass by value"); } From 556f22a751e47d572404992befe15c09c0f2eb0b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 00:28:26 -0500 Subject: [PATCH 44/56] different way of fixing previous commit get_fn_type doesn't need the complete parameter type, it can just ensure zero bits known. 
--- src/analyze.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/analyze.cpp b/src/analyze.cpp index 9d5e7d77af..45e6780791 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -997,7 +997,7 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { gen_param_info->src_index = i; gen_param_info->gen_index = SIZE_MAX; - ensure_complete_type(g, type_entry); + type_ensure_zero_bits_known(g, type_entry); if (type_has_bits(type_entry)) { TypeTableEntry *gen_type; if (handle_is_ptr(type_entry)) { @@ -1670,9 +1670,6 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { if (struct_type->data.structure.is_invalid) return; - if (struct_type->data.structure.zero_bits_loop_flag) - return; - AstNode *decl_node = struct_type->data.structure.decl_node; if (struct_type->data.structure.embedded_in_current) { @@ -1685,6 +1682,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { return; } + assert(!struct_type->data.structure.zero_bits_loop_flag); assert(struct_type->data.structure.fields); assert(decl_node->type == NodeTypeContainerDecl); @@ -2131,6 +2129,7 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) { if (enum_type->data.enumeration.zero_bits_loop_flag) { enum_type->data.enumeration.zero_bits_known = true; + enum_type->data.enumeration.zero_bits_loop_flag = false; return; } @@ -2285,6 +2284,7 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) { // the alignment is pointer width, then assert that the first field is within that // alignment struct_type->data.structure.zero_bits_known = true; + struct_type->data.structure.zero_bits_loop_flag = false; if (struct_type->data.structure.abi_alignment == 0) { if (struct_type->data.structure.layout == ContainerLayoutPacked) { struct_type->data.structure.abi_alignment = 1; From 026aebf2ea567c15eebf9ddb9180f7d0e2ec7a9d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 
04:01:22 -0500 Subject: [PATCH 45/56] another workaround for llvm coroutines this one doesn't work either --- src/all_types.hpp | 9 +++ src/analyze.cpp | 10 ++- src/analyze.hpp | 1 + src/codegen.cpp | 155 +++++++++++++++++++++++++++++++++++++++++----- src/ir.cpp | 87 ++++++++++++++++---------- src/ir_print.cpp | 11 ++++ 6 files changed, 222 insertions(+), 51 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 5073ffaceb..9b4fa6d36c 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1634,6 +1634,7 @@ struct CodeGen { LLVMValueRef coro_free_fn_val; LLVMValueRef coro_resume_fn_val; LLVMValueRef coro_save_fn_val; + LLVMValueRef coro_alloc_helper_fn_val; bool error_during_imports; const char **clang_argv; @@ -2004,6 +2005,7 @@ enum IrInstructionId { IrInstructionIdCoroFree, IrInstructionIdCoroResume, IrInstructionIdCoroSave, + IrInstructionIdCoroAllocHelper, }; struct IrInstruction { @@ -2913,6 +2915,13 @@ struct IrInstructionCoroSave { IrInstruction *coro_handle; }; +struct IrInstructionCoroAllocHelper { + IrInstruction base; + + IrInstruction *alloc_fn; + IrInstruction *coro_size; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/analyze.cpp b/src/analyze.cpp index ce9e99f8fa..be01f6b5f8 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1001,9 +1001,7 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { bool first_arg_return = calling_convention_does_first_arg_return(fn_type_id->cc) && handle_is_ptr(fn_type_id->return_type); bool is_async = fn_type_id->cc == CallingConventionAsync; - bool prefix_arg_error_return_trace = g->have_err_ret_tracing && - (fn_type_id->return_type->id == TypeTableEntryIdErrorUnion || - fn_type_id->return_type->id == TypeTableEntryIdErrorSet); + bool prefix_arg_error_return_trace = g->have_err_ret_tracing && fn_type_can_fail(fn_type_id); // +1 for maybe making the first argument the return value // +1 for maybe first argument the error return 
trace // +2 for maybe arguments async allocator and error code pointer @@ -5795,3 +5793,9 @@ bool type_is_global_error_set(TypeTableEntry *err_set_type) { uint32_t get_coro_frame_align_bytes(CodeGen *g) { return g->pointer_size_bytes * 2; } + +bool fn_type_can_fail(FnTypeId *fn_type_id) { + TypeTableEntry *return_type = fn_type_id->return_type; + return return_type->id == TypeTableEntryIdErrorUnion || return_type->id == TypeTableEntryIdErrorSet || + fn_type_id->cc == CallingConventionAsync; +} diff --git a/src/analyze.hpp b/src/analyze.hpp index 926793c58a..068f321bfb 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -192,5 +192,6 @@ void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry); TypeTableEntry *get_auto_err_set_type(CodeGen *g, FnTableEntry *fn_entry); uint32_t get_coro_frame_align_bytes(CodeGen *g); +bool fn_type_can_fail(FnTypeId *fn_type_id); #endif diff --git a/src/codegen.cpp b/src/codegen.cpp index d650daa6cb..ec047ad9ec 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -412,10 +412,10 @@ static uint32_t get_err_ret_trace_arg_index(CodeGen *g, FnTableEntry *fn_table_e return UINT32_MAX; } TypeTableEntry *fn_type = fn_table_entry->type_entry; - TypeTableEntry *return_type = fn_type->data.fn.fn_type_id.return_type; - if (return_type->id != TypeTableEntryIdErrorUnion && return_type->id != TypeTableEntryIdErrorSet) { + if (!fn_type_can_fail(&fn_type->data.fn.fn_type_id)) { return UINT32_MAX; } + TypeTableEntry *return_type = fn_type->data.fn.fn_type_id.return_type; bool first_arg_ret = type_has_bits(return_type) && handle_is_ptr(return_type); return first_arg_ret ? 
1 : 0; } @@ -2662,21 +2662,23 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI } } -static bool get_prefix_arg_err_ret_stack(CodeGen *g, TypeTableEntry *src_return_type) { +static bool get_prefix_arg_err_ret_stack(CodeGen *g, FnTypeId *fn_type_id) { return g->have_err_ret_tracing && - (src_return_type->id == TypeTableEntryIdErrorUnion || src_return_type->id == TypeTableEntryIdErrorSet); + (fn_type_id->return_type->id == TypeTableEntryIdErrorUnion || + fn_type_id->return_type->id == TypeTableEntryIdErrorSet || + fn_type_id->cc == CallingConventionAsync); } -static size_t get_async_allocator_arg_index(CodeGen *g, TypeTableEntry *src_return_type) { +static size_t get_async_allocator_arg_index(CodeGen *g, FnTypeId *fn_type_id) { // 0 1 2 3 // err_ret_stack allocator_ptr err_code other_args... - return get_prefix_arg_err_ret_stack(g, src_return_type) ? 1 : 0; + return get_prefix_arg_err_ret_stack(g, fn_type_id) ? 1 : 0; } -static size_t get_async_err_code_arg_index(CodeGen *g, TypeTableEntry *src_return_type) { +static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) { // 0 1 2 3 // err_ret_stack allocator_ptr err_code other_args... - return 1 + get_async_allocator_arg_index(g, src_return_type); + return 1 + get_async_allocator_arg_index(g, fn_type_id); } static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) { @@ -2698,7 +2700,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc); - bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, src_return_type); + bool prefix_arg_err_ret_stack = get_prefix_arg_err_ret_stack(g, fn_type_id); // +2 for the async args size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0) + (prefix_arg_err_ret_stack ? 
1 : 0) + 2; bool is_var_args = fn_type_id->is_var_args; @@ -2717,7 +2719,6 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr gen_param_index += 1; LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, instruction->tmp_ptr, err_union_err_index, ""); - LLVMBuildStore(g->builder, LLVMConstNull(g->builtin_types.entry_global_error_set->type_ref), err_val_ptr); gen_param_values[gen_param_index] = err_val_ptr; gen_param_index += 1; } @@ -3293,8 +3294,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable, IrInstructionGetImplicitAllocator *instruction) { - TypeTableEntry *src_return_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - size_t allocator_arg_index = get_async_allocator_arg_index(g, src_return_type); + size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); return LLVMGetParam(g->cur_fn_val, allocator_arg_index); } @@ -3926,8 +3926,7 @@ static LLVMValueRef ir_render_coro_begin(CodeGen *g, IrExecutable *executable, I static LLVMValueRef ir_render_coro_alloc_fail(CodeGen *g, IrExecutable *executable, IrInstructionCoroAllocFail *instruction) { - TypeTableEntry *src_return_type = g->cur_fn->type_entry->data.fn.fn_type_id.return_type; - size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, src_return_type); + size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); LLVMValueRef err_code_ptr_val = LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index); LLVMValueRef err_code = ir_llvm_value(g, instruction->err_val); LLVMBuildStore(g->builder, err_code, err_code_ptr_val); @@ -3985,6 +3984,132 @@ static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, Ir return LLVMBuildCall(g->builder, get_coro_save_fn_val(g), &coro_handle, 1, ""); } +static LLVMValueRef 
get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, TypeTableEntry *fn_type) { + if (g->coro_alloc_helper_fn_val != nullptr) + return g->coro_alloc_fn_val; + + assert(fn_type->id == TypeTableEntryIdFn); + + TypeTableEntry *ptr_to_err_code_type = get_pointer_to_type(g, g->builtin_types.entry_global_error_set, false); + + LLVMTypeRef alloc_raw_fn_type_ref = LLVMGetElementType(alloc_fn_type_ref); + LLVMTypeRef *alloc_fn_arg_types = allocate(LLVMCountParamTypes(alloc_raw_fn_type_ref)); + LLVMGetParamTypes(alloc_raw_fn_type_ref, alloc_fn_arg_types); + + ZigList arg_types = {}; + arg_types.append(alloc_fn_type_ref); + if (g->have_err_ret_tracing) { + arg_types.append(alloc_fn_arg_types[1]); + } + arg_types.append(alloc_fn_arg_types[g->have_err_ret_tracing ? 2 : 1]); + arg_types.append(ptr_to_err_code_type->type_ref); + arg_types.append(g->builtin_types.entry_usize->type_ref); + + LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), + arg_types.items, arg_types.length, false); + + Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_coro_alloc_helper"), false); + LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); + LLVMSetLinkage(fn_val, LLVMInternalLinkage); + LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); + addLLVMFnAttr(fn_val, "nounwind"); + addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); + addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); + + LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); + LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); + FnTableEntry *prev_cur_fn = g->cur_fn; + LLVMValueRef prev_cur_fn_val = g->cur_fn_val; + + LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); + LLVMPositionBuilderAtEnd(g->builder, entry_block); + ZigLLVMClearCurrentDebugLocation(g->builder); + g->cur_fn = nullptr; + g->cur_fn_val = fn_val; + + LLVMValueRef sret_ptr = LLVMBuildAlloca(g->builder, 
LLVMGetElementType(alloc_fn_arg_types[0]), ""); + + size_t next_arg = 0; + LLVMValueRef alloc_fn_val = LLVMGetParam(fn_val, next_arg); + next_arg += 1; + + LLVMValueRef stack_trace_val; + if (g->have_err_ret_tracing) { + stack_trace_val = LLVMGetParam(fn_val, next_arg); + next_arg += 1; + } + + LLVMValueRef allocator_val = LLVMGetParam(fn_val, next_arg); + next_arg += 1; + LLVMValueRef err_code_ptr = LLVMGetParam(fn_val, next_arg); + next_arg += 1; + LLVMValueRef coro_size = LLVMGetParam(fn_val, next_arg); + next_arg += 1; + LLVMValueRef alignment_val = LLVMConstInt(g->builtin_types.entry_u29->type_ref, + 2 * g->pointer_size_bytes, false); + + ZigList args = {}; + args.append(sret_ptr); + if (g->have_err_ret_tracing) { + args.append(stack_trace_val); + } + args.append(allocator_val); + args.append(coro_size); + args.append(alignment_val); + ZigLLVMBuildCall(g->builder, alloc_fn_val, args.items, args.length, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_err_index, ""); + LLVMValueRef err_val = LLVMBuildLoad(g->builder, err_val_ptr, ""); + LLVMBuildStore(g->builder, err_val, err_code_ptr); + LLVMValueRef ok_bit = LLVMBuildICmp(g->builder, LLVMIntEQ, err_val, LLVMConstNull(LLVMTypeOf(err_val)), ""); + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(fn_val, "AllocOk"); + LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(fn_val, "AllocFail"); + LLVMBuildCondBr(g->builder, ok_bit, ok_block, fail_block); + + LLVMPositionBuilderAtEnd(g->builder, ok_block); + LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, ""); + TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); + TypeTableEntry *slice_type = get_slice_type(g, u8_ptr_type); + size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, 
payload_ptr, ptr_field_index, ""); + LLVMValueRef ptr_val = LLVMBuildLoad(g->builder, ptr_field_ptr, ""); + LLVMBuildRet(g->builder, ptr_val); + + LLVMPositionBuilderAtEnd(g->builder, fail_block); + LLVMBuildRet(g->builder, LLVMConstNull(LLVMPointerType(LLVMInt8Type(), 0))); + + g->cur_fn = prev_cur_fn; + g->cur_fn_val = prev_cur_fn_val; + LLVMPositionBuilderAtEnd(g->builder, prev_block); + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + + g->coro_alloc_helper_fn_val = fn_val; + return fn_val; +} + +static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *executable, + IrInstructionCoroAllocHelper *instruction) +{ + LLVMValueRef alloc_fn = ir_llvm_value(g, instruction->alloc_fn); + LLVMValueRef coro_size = ir_llvm_value(g, instruction->coro_size); + LLVMValueRef fn_val = get_coro_alloc_helper_fn_val(g, LLVMTypeOf(alloc_fn), instruction->alloc_fn->value.type); + size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); + size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); + + ZigList params = {}; + params.append(alloc_fn); + uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, g->cur_fn); + if (err_ret_trace_arg_index != UINT32_MAX) { + params.append(LLVMGetParam(g->cur_fn_val, err_ret_trace_arg_index)); + } + params.append(LLVMGetParam(g->cur_fn_val, allocator_arg_index)); + params.append(LLVMGetParam(g->cur_fn_val, err_code_ptr_arg_index)); + params.append(coro_size); + + return ZigLLVMBuildCall(g->builder, fn_val, params.items, params.length, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -4190,6 +4315,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_resume(g, 
executable, (IrInstructionCoroResume *)instruction); case IrInstructionIdCoroSave: return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction); + case IrInstructionIdCoroAllocHelper: + return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction); } zig_unreachable(); } diff --git a/src/ir.cpp b/src/ir.cpp index 9d213d1ddb..d2c28582d7 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -695,6 +695,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSave *) { return IrInstructionIdCoroSave; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper *) { + return IrInstructionIdCoroAllocHelper; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -829,14 +833,6 @@ static IrInstruction *ir_build_const_usize(IrBuilder *irb, Scope *scope, AstNode return &const_instruction->base; } -static IrInstruction *ir_build_const_u29(IrBuilder *irb, Scope *scope, AstNode *source_node, uint32_t value) { - IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); - const_instruction->base.value.type = irb->codegen->builtin_types.entry_u29; - const_instruction->base.value.special = ConstValSpecialStatic; - bigint_init_unsigned(&const_instruction->base.value.data.x_bigint, value); - return &const_instruction->base; -} - static IrInstruction *ir_build_const_u8(IrBuilder *irb, Scope *scope, AstNode *source_node, uint8_t value) { IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); const_instruction->base.value.type = irb->codegen->builtin_types.entry_u8; @@ -2600,6 +2596,19 @@ static IrInstruction *ir_build_coro_save(IrBuilder *irb, Scope *scope, AstNode * return &instruction->base; } +static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *alloc_fn, IrInstruction *coro_size) +{ + 
IrInstructionCoroAllocHelper *instruction = ir_build_instruction(irb, scope, source_node); + instruction->alloc_fn = alloc_fn; + instruction->coro_size = coro_size; + + ir_ref_instruction(alloc_fn, irb->current_basic_block); + ir_ref_instruction(coro_size, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -6074,10 +6083,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; IrInstruction *u8_ptr_type; IrInstruction *const_bool_false; - IrInstruction *coro_unwrapped_mem_ptr; + IrInstruction *coro_size; IrInstruction *coro_id; IrInstruction *coro_promise_ptr; IrInstruction *coro_result_field_ptr; + IrInstruction *coro_mem_ptr; TypeTableEntry *return_type; Buf *result_ptr_field_name; if (is_async) { @@ -6095,39 +6105,25 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_promise_ptr); coro_id = ir_build_coro_id(irb, scope, node, promise_as_u8_ptr); - IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); + coro_size = ir_build_coro_size(irb, scope, node); irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node); Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, alloc_field_name); IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); - IrInstruction *alignment = ir_build_const_u29(irb, scope, node, - get_coro_frame_align_bytes(irb->codegen)); - size_t arg_count = 3; - 
IrInstruction **args = allocate(arg_count); - args[0] = irb->exec->implicit_allocator_ptr; // self - args[1] = coro_size; // byte_count - args[2] = alignment; // alignment - IrInstruction *alloc_result = ir_build_call(irb, scope, node, nullptr, alloc_fn, arg_count, args, false, - FnInlineAuto, false, nullptr); - IrInstruction *alloc_result_ptr = ir_build_ref(irb, scope, node, alloc_result, true, false); - IrInstruction *alloc_result_is_err = ir_build_test_err(irb, scope, node, alloc_result); + IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, scope, node, alloc_fn, coro_size); + IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, scope, node, maybe_coro_mem_ptr); IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, scope, "AllocError"); IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, scope, "AllocOk"); - ir_build_cond_br(irb, scope, node, alloc_result_is_err, alloc_err_block, alloc_ok_block, const_bool_false); + ir_build_cond_br(irb, scope, node, alloc_result_is_ok, alloc_ok_block, alloc_err_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, alloc_err_block); - IrInstruction *err_val = ir_build_unwrap_err_code(irb, scope, node, alloc_result_ptr); - ir_build_coro_alloc_fail(irb, scope, node, err_val); + IrInstruction *undef = ir_build_const_undefined(irb, scope, node); + ir_build_return(irb, scope, node, undef); ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); - coro_unwrapped_mem_ptr = ir_build_unwrap_err_payload(irb, scope, node, alloc_result_ptr, false); - Buf *ptr_field_name = buf_create_from_str("ptr"); - IrInstruction *coro_mem_ptr_field = ir_build_field_ptr(irb, scope, node, coro_unwrapped_mem_ptr, - ptr_field_name); - IrInstruction *coro_mem = ir_build_load_ptr(irb, scope, node, coro_mem_ptr_field); - - irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem); + coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, maybe_coro_mem_ptr); + 
irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem_ptr); Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, @@ -6207,10 +6203,13 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *free_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, free_field_name); IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false); + IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false); size_t arg_count = 2; IrInstruction **args = allocate(arg_count); args[0] = irb->exec->implicit_allocator_ptr; // self - args[1] = ir_build_load_ptr(irb, scope, node, coro_unwrapped_mem_ptr); // old_mem + args[1] = mem_slice; // old_mem ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); @@ -11844,7 +11843,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal } TypeTableEntry *return_type = impl_fn->type_entry->data.fn.fn_type_id.return_type; - if (return_type->id == TypeTableEntryIdErrorSet || return_type->id == TypeTableEntryIdErrorUnion) { + if (fn_type_can_fail(&impl_fn->type_entry->data.fn.fn_type_id)) { parent_fn_entry->calls_errorable_function = true; } @@ -11870,7 +11869,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal FnTableEntry *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); assert(fn_type_id->return_type != nullptr); assert(parent_fn_entry != nullptr); - if (fn_type_id->return_type->id == TypeTableEntryIdErrorSet || fn_type_id->return_type->id == 
TypeTableEntryIdErrorUnion) { + if (fn_type_can_fail(fn_type_id)) { parent_fn_entry->calls_errorable_function = true; } @@ -17274,6 +17273,23 @@ static TypeTableEntry *ir_analyze_instruction_coro_save(IrAnalyze *ira, IrInstru return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, IrInstructionCoroAllocHelper *instruction) { + IrInstruction *alloc_fn = instruction->alloc_fn->other; + if (type_is_invalid(alloc_fn->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *coro_size = instruction->coro_size->other; + if (type_is_invalid(coro_size->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_coro_alloc_helper(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, alloc_fn, coro_size); + ir_link_new_instruction(result, &instruction->base); + TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); + result->value.type = get_maybe_type(ira->codegen, u8_ptr_type); + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { @@ -17501,6 +17517,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); case IrInstructionIdCoroSave: return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction); + case IrInstructionIdCoroAllocHelper: + return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction); } zig_unreachable(); } @@ -17624,6 +17642,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroEnd: case IrInstructionIdCoroResume: case IrInstructionIdCoroSave: + case IrInstructionIdCoroAllocHelper: return true; case IrInstructionIdPhi: diff --git a/src/ir_print.cpp 
b/src/ir_print.cpp index 2e367672a5..b9e02988c2 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1096,6 +1096,14 @@ static void ir_print_coro_save(IrPrint *irp, IrInstructionCoroSave *instruction) fprintf(irp->f, ")"); } +static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelper *instruction) { + fprintf(irp->f, "@coroAllocHelper("); + ir_print_other_instruction(irp, instruction->alloc_fn); + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->coro_size); + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1452,6 +1460,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCoroSave: ir_print_coro_save(irp, (IrInstructionCoroSave *)instruction); break; + case IrInstructionIdCoroAllocHelper: + ir_print_coro_alloc_helper(irp, (IrInstructionCoroAllocHelper *)instruction); + break; } fprintf(irp->f, "\n"); } From ad2a29ccf25af189fc180cba6843c20b9dd029d1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 16:47:13 -0500 Subject: [PATCH 46/56] break the data dependencies that llvm coro transforms cant handle my simple coro test program builds now see #727 --- src/all_types.hpp | 9 +++++- src/codegen.cpp | 1 + src/ir.cpp | 73 +++++++++++++++++++++++++++++++++-------------- src/ir_print.cpp | 11 ++++++- 4 files changed, 71 insertions(+), 23 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 9b4fa6d36c..1551f07c70 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -60,11 +60,11 @@ struct IrExecutable { IrInstruction *coro_handle; IrInstruction *coro_awaiter_field_ptr; IrInstruction *coro_result_ptr_field_ptr; - IrInstruction *implicit_allocator_ptr; IrBasicBlock *coro_early_final; IrBasicBlock *coro_normal_final; IrBasicBlock *coro_suspend_block; IrBasicBlock *coro_final_cleanup_block; + VariableTableEntry 
*coro_allocator_var; }; enum OutType { @@ -2852,8 +2852,15 @@ struct IrInstructionCancel { IrInstruction *target; }; +enum ImplicitAllocatorId { + ImplicitAllocatorIdArg, + ImplicitAllocatorIdLocalVar, +}; + struct IrInstructionGetImplicitAllocator { IrInstruction base; + + ImplicitAllocatorId id; }; struct IrInstructionCoroId { diff --git a/src/codegen.cpp b/src/codegen.cpp index ec047ad9ec..a5ae9996b5 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3294,6 +3294,7 @@ static LLVMValueRef ir_render_cancel(CodeGen *g, IrExecutable *executable, IrIns static LLVMValueRef ir_render_get_implicit_allocator(CodeGen *g, IrExecutable *executable, IrInstructionGetImplicitAllocator *instruction) { + assert(instruction->id == ImplicitAllocatorIdArg); size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id); return LLVMGetParam(g->cur_fn_val, allocator_arg_index); } diff --git a/src/ir.cpp b/src/ir.cpp index d2c28582d7..e9f29d1caa 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -114,6 +114,8 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc static ErrorMsg *exec_add_error_node(CodeGen *codegen, IrExecutable *exec, AstNode *source_node, Buf *msg); static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_name, IrInstruction *source_instr, IrInstruction *container_ptr, TypeTableEntry *container_type); +static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, + VariableTableEntry *var, bool is_const_ptr, bool is_volatile_ptr); ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) { assert(const_val->type->id == TypeTableEntryIdPointer); @@ -2491,8 +2493,11 @@ static IrInstruction *ir_build_cancel(IrBuilder *irb, Scope *scope, AstNode *sou return &instruction->base; } -static IrInstruction *ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node) { +static IrInstruction 
*ir_build_get_implicit_allocator(IrBuilder *irb, Scope *scope, AstNode *source_node, + ImplicitAllocatorId id) +{ IrInstructionGetImplicitAllocator *instruction = ir_build_instruction(irb, scope, source_node); + instruction->id = id; return &instruction->base; } @@ -6081,15 +6086,13 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec FnTableEntry *fn_entry = exec_fn_entry(irb->exec); bool is_async = fn_entry != nullptr && fn_entry->type_entry->data.fn.fn_type_id.cc == CallingConventionAsync; + IrInstruction *coro_id; IrInstruction *u8_ptr_type; IrInstruction *const_bool_false; - IrInstruction *coro_size; - IrInstruction *coro_id; - IrInstruction *coro_promise_ptr; IrInstruction *coro_result_field_ptr; - IrInstruction *coro_mem_ptr; TypeTableEntry *return_type; Buf *result_ptr_field_name; + VariableTableEntry *coro_size_var; if (is_async) { // create the coro promise const_bool_false = ir_build_const_bool(irb, scope, node, false); @@ -6099,17 +6102,21 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type; IrInstruction *promise_init = ir_build_const_promise_init(irb, scope, node, return_type); ir_build_var_decl(irb, scope, node, promise_var, nullptr, nullptr, promise_init); - coro_promise_ptr = ir_build_var_ptr(irb, scope, node, promise_var, false, false); + IrInstruction *coro_promise_ptr = ir_build_var_ptr(irb, scope, node, promise_var, false, false); u8_ptr_type = ir_build_const_type(irb, scope, node, get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_promise_ptr); coro_id = ir_build_coro_id(irb, scope, node, promise_as_u8_ptr); - coro_size = ir_build_coro_size(irb, scope, node); - irb->exec->implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node); + coro_size_var = ir_create_var(irb, node, scope, 
nullptr, false, false, true, const_bool_false); + IrInstruction *coro_size = ir_build_coro_size(irb, scope, node); + ir_build_var_decl(irb, scope, node, coro_size_var, nullptr, nullptr, coro_size); + IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, + ImplicitAllocatorIdArg); + irb->exec->coro_allocator_var = ir_create_var(irb, node, scope, nullptr, true, true, true, const_bool_false); + ir_build_var_decl(irb, scope, node, irb->exec->coro_allocator_var, nullptr, nullptr, implicit_allocator_ptr); Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME); - IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, - alloc_field_name); + IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, alloc_field_name); IrInstruction *alloc_fn = ir_build_load_ptr(irb, scope, node, alloc_fn_ptr); IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, scope, node, alloc_fn, coro_size); IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, scope, node, maybe_coro_mem_ptr); @@ -6122,7 +6129,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_build_return(irb, scope, node, undef); ir_set_cursor_at_end_and_append_block(irb, alloc_ok_block); - coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, maybe_coro_mem_ptr); + IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, maybe_coro_mem_ptr); irb->exec->coro_handle = ir_build_coro_begin(irb, scope, node, coro_id, coro_mem_ptr); Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); @@ -6200,15 +6207,20 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); - IrInstruction *free_fn_ptr = 
ir_build_field_ptr(irb, scope, node, irb->exec->implicit_allocator_ptr, - free_field_name); + IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, + ImplicitAllocatorIdLocalVar); + IrInstruction *free_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, free_field_name); IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle); + IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_mem_ptr_maybe); IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false); + IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var, true, false); + IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr); IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false); size_t arg_count = 2; IrInstruction **args = allocate(arg_count); - args[0] = irb->exec->implicit_allocator_ptr; // self + args[0] = implicit_allocator_ptr; // self args[1] = mem_slice; // old_mem ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); @@ -11229,7 +11241,7 @@ static TypeTableEntry *ir_analyze_instruction_error_union(IrAnalyze *ira, return ira->codegen->builtin_types.entry_type; } -IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr) { +IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_instr, ImplicitAllocatorId id) { FnTableEntry *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); if (parent_fn_entry == nullptr) { ir_add_error(ira, source_instr, buf_sprintf("no implicit allocator available")); @@ -11243,9 +11255,26 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i } 
assert(parent_fn_type->async_allocator_type != nullptr); - IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, source_instr->source_node); - result->value.type = parent_fn_type->async_allocator_type; - return result; + + switch (id) { + case ImplicitAllocatorIdArg: + { + IrInstruction *result = ir_build_get_implicit_allocator(&ira->new_irb, source_instr->scope, + source_instr->source_node, ImplicitAllocatorIdArg); + result->value.type = parent_fn_type->async_allocator_type; + return result; + } + case ImplicitAllocatorIdLocalVar: + { + VariableTableEntry *coro_allocator_var = ira->old_irb.exec->coro_allocator_var; + assert(coro_allocator_var != nullptr); + IrInstruction *var_ptr_inst = ir_get_var_ptr(ira, source_instr, coro_allocator_var, true, false); + IrInstruction *result = ir_get_deref(ira, source_instr, var_ptr_inst); + assert(result->value.type != nullptr); + return result; + } + } + zig_unreachable(); } static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *call_instruction, FnTableEntry *fn_entry, TypeTableEntry *fn_type, @@ -11805,7 +11834,8 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal } IrInstruction *uncasted_async_allocator_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); + uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, + ImplicitAllocatorIdLocalVar); if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; } else { @@ -11927,7 +11957,8 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (call_instruction->is_async) { IrInstruction *uncasted_async_allocator_inst; if (call_instruction->async_allocator == nullptr) { - uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base); + 
uncasted_async_allocator_inst = ir_get_implicit_allocator(ira, &call_instruction->base, + ImplicitAllocatorIdLocalVar); if (type_is_invalid(uncasted_async_allocator_inst->value.type)) return ira->codegen->builtin_types.entry_invalid; } else { @@ -17189,7 +17220,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_begin(IrAnalyze *ira, IrInstr } static TypeTableEntry *ir_analyze_instruction_get_implicit_allocator(IrAnalyze *ira, IrInstructionGetImplicitAllocator *instruction) { - IrInstruction *result = ir_get_implicit_allocator(ira, &instruction->base); + IrInstruction *result = ir_get_implicit_allocator(ira, &instruction->base, instruction->id); ir_link_new_instruction(result, &instruction->base); return result->value.type; } diff --git a/src/ir_print.cpp b/src/ir_print.cpp index b9e02988c2..a68b5a46df 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1027,7 +1027,16 @@ static void ir_print_cancel(IrPrint *irp, IrInstructionCancel *instruction) { } static void ir_print_get_implicit_allocator(IrPrint *irp, IrInstructionGetImplicitAllocator *instruction) { - fprintf(irp->f, "@getImplicitAllocator()"); + fprintf(irp->f, "@getImplicitAllocator("); + switch (instruction->id) { + case ImplicitAllocatorIdArg: + fprintf(irp->f, "Arg"); + break; + case ImplicitAllocatorIdLocalVar: + fprintf(irp->f, "LocalVar"); + break; + } + fprintf(irp->f, ")"); } static void ir_print_coro_id(IrPrint *irp, IrInstructionCoroId *instruction) { From 58dc2b719c8e5a13c91ebbbbf476998c7f3e925b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 18:22:43 -0500 Subject: [PATCH 47/56] better coroutine codegen, now passing first coro test we have to use the Suspend block with llvm.coro.end to return from the coro --- src/ir.cpp | 9 ++------- test/behavior.zig | 3 ++- test/cases/coroutines.zig | 16 ++++++++++++++++ 3 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 test/cases/coroutines.zig diff --git a/src/ir.cpp b/src/ir.cpp index e9f29d1caa..06a23af07c 
100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -6225,19 +6225,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); - IrBasicBlock *return_block = ir_create_basic_block(irb, scope, "Return"); - ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, return_block, const_bool_false); + ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, resume_block); IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, irb->exec->coro_awaiter_field_ptr, false); IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); ir_build_coro_resume(irb, scope, node, awaiter_handle); - ir_build_br(irb, scope, node, return_block, const_bool_false); - - ir_set_cursor_at_end_and_append_block(irb, return_block); - IrInstruction *undef = ir_build_const_undefined(irb, scope, node); - ir_build_return(irb, scope, node, undef); + ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false); } return true; diff --git a/test/behavior.zig b/test/behavior.zig index e718ba6c86..81f2c5dd00 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -11,6 +11,7 @@ comptime { _ = @import("cases/bugs/656.zig"); _ = @import("cases/cast.zig"); _ = @import("cases/const_slice_child.zig"); + _ = @import("cases/coroutines.zig"); _ = @import("cases/defer.zig"); _ = @import("cases/enum.zig"); _ = @import("cases/enum_with_members.zig"); @@ -34,8 +35,8 @@ comptime { _ = @import("cases/sizeof_and_typeof.zig"); _ = @import("cases/slice.zig"); _ = @import("cases/struct.zig"); - _ = @import("cases/struct_contains_slice_of_itself.zig"); _ = @import("cases/struct_contains_null_ptr_itself.zig"); + _ = 
@import("cases/struct_contains_slice_of_itself.zig"); _ = @import("cases/switch.zig"); _ = @import("cases/switch_prong_err_enum.zig"); _ = @import("cases/switch_prong_implicit_cast.zig"); diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig new file mode 100644 index 0000000000..a20a314c8b --- /dev/null +++ b/test/cases/coroutines.zig @@ -0,0 +1,16 @@ +const std = @import("std"); +const assert = std.debug.assert; + +var x: i32 = 1; + +test "create a coroutine and cancel it" { + const p = try (async(std.debug.global_allocator) emptyAsyncFn()); + cancel p; + assert(x == 2); +} + +async fn emptyAsyncFn() void { + x += 1; + suspend; + x += 1; +} From 36eadb569a31a87b610b9b70e225a981dc181df4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 18:56:26 -0500 Subject: [PATCH 48/56] run coroutine tests only in Debug mode LLVM 5.0.1, 6.0.0, and trunk crash when attempting to optimize coroutine code. So, Zig does not support ReleaseFast or ReleaseSafe for coroutines yet. Luckily, Clang users are running into the same crashes, so folks from the LLVM community are working on fixes. If we're really lucky they'll be fixed in 6.0.1. Otherwise we can hope for 7.0.0. 
--- test/behavior.zig | 14 +++++++++++++- test/cases/coroutines.zig | 4 ++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/test/behavior.zig b/test/behavior.zig index 81f2c5dd00..b9cfeb8e0b 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -1,3 +1,5 @@ +const builtin = @import("builtin"); + comptime { _ = @import("cases/align.zig"); _ = @import("cases/alignof.zig"); @@ -11,7 +13,6 @@ comptime { _ = @import("cases/bugs/656.zig"); _ = @import("cases/cast.zig"); _ = @import("cases/const_slice_child.zig"); - _ = @import("cases/coroutines.zig"); _ = @import("cases/defer.zig"); _ = @import("cases/enum.zig"); _ = @import("cases/enum_with_members.zig"); @@ -48,4 +49,15 @@ comptime { _ = @import("cases/var_args.zig"); _ = @import("cases/void.zig"); _ = @import("cases/while.zig"); + + + // LLVM 5.0.1, 6.0.0, and trunk crash when attempting to optimize coroutine code. + // So, Zig does not support ReleaseFast or ReleaseSafe for coroutines yet. + // Luckily, Clang users are running into the same crashes, so folks from the LLVM + // community are working on fixes. If we're really lucky they'll be fixed in 6.0.1. + // Otherwise we can hope for 7.0.0. 
+ if (builtin.mode == builtin.Mode.Debug) { + _ = @import("cases/coroutines.zig"); + } + } diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index a20a314c8b..f5e70774fa 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -4,12 +4,12 @@ const assert = std.debug.assert; var x: i32 = 1; test "create a coroutine and cancel it" { - const p = try (async(std.debug.global_allocator) emptyAsyncFn()); + const p = try (async(std.debug.global_allocator) simpleAsyncFn()); cancel p; assert(x == 2); } -async fn emptyAsyncFn() void { +async fn simpleAsyncFn() void { x += 1; suspend; x += 1; From 807a5e94e976f03058426e04dceef449a5bf7ed8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 21:19:51 -0500 Subject: [PATCH 49/56] add atomicrmw builtin function --- doc/langref.html.in | 21 +++++- src/all_types.hpp | 27 ++++++++ src/codegen.cpp | 50 ++++++++++++++ src/ir.cpp | 149 +++++++++++++++++++++++++++++++++++++++++ src/ir_print.cpp | 29 ++++++++ std/debug/index.zig | 18 +++-- test/cases/atomics.zig | 15 ++++- 7 files changed, 297 insertions(+), 12 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 2d4bead65e..25a90e3361 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -3775,6 +3775,25 @@ pub fn main() void { {#header_open|@ArgType#}

TODO

{#header_close#} + {#header_open|@atomicRmw#} +
@atomicRmw(comptime T: type, ptr: &T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) -> T
+

+ This builtin function atomically modifies memory and then returns the previous value. +

+

+ T must be a pointer type, a bool, + or an integer whose bit count meets these requirements: +

+
    +
  • At least 8
  • +
  • At most the same as usize
  • +
  • Power of 2
  • +
+

+ TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe + we can remove this restriction +

+ {#header_close#} {#header_open|@bitCast#}
@bitCast(comptime DestType: type, value: var) -> DestType

@@ -5859,7 +5878,7 @@ hljs.registerLanguage("zig", function(t) { a = t.IR + "\\s*\\(", c = { keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong", - built_in: "breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchg fence divExact truncate", + built_in: "breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage setGlobalSection divTrunc divFloor enumTagName intToPtr ptrToInt panic canImplicitCast ptrCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchg fence divExact truncate atomicRmw", literal: "true false null undefined" }, n = [e, t.CLCM, t.CBCM, s, r]; diff --git a/src/all_types.hpp b/src/all_types.hpp index 1551f07c70..d727f4a862 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1338,6 +1338,7 @@ enum BuiltinFnId { BuiltinFnIdArgType, 
BuiltinFnIdExport, BuiltinFnIdErrorReturnTrace, + BuiltinFnIdAtomicRmw, }; struct BuiltinFnEntry { @@ -1857,6 +1858,19 @@ enum AtomicOrder { AtomicOrderSeqCst, }; +// synchronized with the code in define_builtin_compile_vars +enum AtomicRmwOp { + AtomicRmwOp_xchg, + AtomicRmwOp_add, + AtomicRmwOp_sub, + AtomicRmwOp_and, + AtomicRmwOp_nand, + AtomicRmwOp_or, + AtomicRmwOp_xor, + AtomicRmwOp_max, + AtomicRmwOp_min, +}; + // A basic block contains no branching. Branches send control flow // to another basic block. // Phi instructions must be first in a basic block. @@ -2006,6 +2020,7 @@ enum IrInstructionId { IrInstructionIdCoroResume, IrInstructionIdCoroSave, IrInstructionIdCoroAllocHelper, + IrInstructionIdAtomicRmw, }; struct IrInstruction { @@ -2929,6 +2944,18 @@ struct IrInstructionCoroAllocHelper { IrInstruction *coro_size; }; +struct IrInstructionAtomicRmw { + IrInstruction base; + + IrInstruction *operand_type; + IrInstruction *ptr; + IrInstruction *op; + AtomicRmwOp resolved_op; + IrInstruction *operand; + IrInstruction *ordering; + AtomicOrder resolved_ordering; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/codegen.cpp b/src/codegen.cpp index a5ae9996b5..89dc23f428 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3311,6 +3311,23 @@ static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { zig_unreachable(); } +static LLVMAtomicRMWBinOp to_LLVMAtomicRMWBinOp(AtomicRmwOp op, bool is_signed) { + switch (op) { + case AtomicRmwOp_xchg: return LLVMAtomicRMWBinOpXchg; + case AtomicRmwOp_add: return LLVMAtomicRMWBinOpAdd; + case AtomicRmwOp_sub: return LLVMAtomicRMWBinOpSub; + case AtomicRmwOp_and: return LLVMAtomicRMWBinOpAnd; + case AtomicRmwOp_nand: return LLVMAtomicRMWBinOpNand; + case AtomicRmwOp_or: return LLVMAtomicRMWBinOpOr; + case AtomicRmwOp_xor: return LLVMAtomicRMWBinOpXor; + case AtomicRmwOp_max: + return is_signed ? 
LLVMAtomicRMWBinOpMax : LLVMAtomicRMWBinOpUMax; + case AtomicRmwOp_min: + return is_signed ? LLVMAtomicRMWBinOpMin : LLVMAtomicRMWBinOpUMin; + } + zig_unreachable(); +} + static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrInstructionCmpxchg *instruction) { LLVMValueRef ptr_val = ir_llvm_value(g, instruction->ptr); LLVMValueRef cmp_val = ir_llvm_value(g, instruction->cmp_value); @@ -4111,6 +4128,22 @@ static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *execut get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); } +static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable, + IrInstructionAtomicRmw *instruction) +{ + bool is_signed; + if (instruction->operand->value.type->id == TypeTableEntryIdInt) { + is_signed = instruction->operand->value.type->data.integral.is_signed; + } else { + is_signed = false; + } + LLVMAtomicRMWBinOp op = to_LLVMAtomicRMWBinOp(instruction->resolved_op, is_signed); + LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering); + LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr); + LLVMValueRef operand = ir_llvm_value(g, instruction->operand); + return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false); +} + static void set_debug_location(CodeGen *g, IrInstruction *instruction) { AstNode *source_node = instruction->source_node; Scope *scope = instruction->scope; @@ -4318,6 +4351,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction); case IrInstructionIdCoroAllocHelper: return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction); + case IrInstructionIdAtomicRmw: + return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction); } zig_unreachable(); } @@ -5810,6 +5845,7 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, 
BuiltinFnIdArgType, "ArgType", 2); create_builtin_fn(g, BuiltinFnIdExport, "export", 3); create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0); + create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5); } static const char *bool_to_str(bool b) { @@ -5939,6 +5975,20 @@ static void define_builtin_compile_vars(CodeGen *g) { " SeqCst,\n" "};\n\n"); } + { + buf_appendf(contents, + "pub const AtomicRmwOp = enum {\n" + " Xchg,\n" + " Add,\n" + " Sub,\n" + " And,\n" + " Nand,\n" + " Or,\n" + " Xor,\n" + " Max,\n" + " Min,\n" + "};\n\n"); + } { buf_appendf(contents, "pub const Mode = enum {\n" diff --git a/src/ir.cpp b/src/ir.cpp index 06a23af07c..dcca05ad02 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -701,6 +701,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper return IrInstructionIdCoroAllocHelper; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) { + return IrInstructionIdAtomicRmw; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -2614,6 +2618,28 @@ static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, A return &instruction->base; } +static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *op, IrInstruction *operand, + IrInstruction *ordering, AtomicRmwOp resolved_op, AtomicOrder resolved_ordering) +{ + IrInstructionAtomicRmw *instruction = ir_build_instruction(irb, scope, source_node); + instruction->operand_type = operand_type; + instruction->ptr = ptr; + instruction->op = op; + instruction->operand = operand; + instruction->ordering = ordering; + instruction->resolved_op = resolved_op; + instruction->resolved_ordering = resolved_ordering; + + if (operand_type != nullptr) ir_ref_instruction(operand_type, irb->current_basic_block); + ir_ref_instruction(ptr, 
irb->current_basic_block); + if (op != nullptr) ir_ref_instruction(op, irb->current_basic_block); + ir_ref_instruction(operand, irb->current_basic_block); + if (ordering != nullptr) ir_ref_instruction(ordering, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -4094,6 +4120,38 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo { return ir_build_error_return_trace(irb, scope, node); } + case BuiltinFnIdAtomicRmw: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + AstNode *arg1_node = node->data.fn_call_expr.params.at(1); + IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope); + if (arg1_value == irb->codegen->invalid_instruction) + return arg1_value; + + AstNode *arg2_node = node->data.fn_call_expr.params.at(2); + IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope); + if (arg2_value == irb->codegen->invalid_instruction) + return arg2_value; + + AstNode *arg3_node = node->data.fn_call_expr.params.at(3); + IrInstruction *arg3_value = ir_gen_node(irb, arg3_node, scope); + if (arg3_value == irb->codegen->invalid_instruction) + return arg3_value; + + AstNode *arg4_node = node->data.fn_call_expr.params.at(4); + IrInstruction *arg4_value = ir_gen_node(irb, arg4_node, scope); + if (arg4_value == irb->codegen->invalid_instruction) + return arg4_value; + + return ir_build_atomic_rmw(irb, scope, node, arg0_value, arg1_value, arg2_value, arg3_value, + arg4_value, + // these 2 values don't mean anything since we passed non-null values for other args + AtomicRmwOp_xchg, AtomicOrderMonotonic); + } } zig_unreachable(); } @@ -9730,6 +9788,26 @@ static bool ir_resolve_atomic_order(IrAnalyze 
*ira, IrInstruction *value, Atomic return true; } +static bool ir_resolve_atomic_rmw_op(IrAnalyze *ira, IrInstruction *value, AtomicRmwOp *out) { + if (type_is_invalid(value->value.type)) + return false; + + ConstExprValue *atomic_rmw_op_val = get_builtin_value(ira->codegen, "AtomicRmwOp"); + assert(atomic_rmw_op_val->type->id == TypeTableEntryIdMetaType); + TypeTableEntry *atomic_rmw_op_type = atomic_rmw_op_val->data.x_type; + + IrInstruction *casted_value = ir_implicit_cast(ira, value, atomic_rmw_op_type); + if (type_is_invalid(casted_value->value.type)) + return false; + + ConstExprValue *const_val = ir_resolve_const(ira, casted_value, UndefBad); + if (!const_val) + return false; + + *out = (AtomicRmwOp)bigint_as_unsigned(&const_val->data.x_enum_tag); + return true; +} + static bool ir_resolve_global_linkage(IrAnalyze *ira, IrInstruction *value, GlobalLinkageId *out) { if (type_is_invalid(value->value.type)) return false; @@ -17316,6 +17394,74 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstructionAtomicRmw *instruction) { + TypeTableEntry *operand_type = ir_resolve_type(ira, instruction->operand_type->other); + if (type_is_invalid(operand_type)) { + return ira->codegen->builtin_types.entry_invalid; + } + if (operand_type->id == TypeTableEntryIdInt) { + if (operand_type->data.integral.bit_count < 8) { + ir_add_error(ira, &instruction->base, + buf_sprintf("expected integer type 8 bits or larger, found %" PRIu32 "-bit integer type", + operand_type->data.integral.bit_count)); + return ira->codegen->builtin_types.entry_invalid; + } + if (operand_type->data.integral.bit_count > ira->codegen->pointer_size_bytes * 8) { + ir_add_error(ira, &instruction->base, + buf_sprintf("expected integer type pointer size or smaller, found %" PRIu32 "-bit integer type", + operand_type->data.integral.bit_count)); + return 
ira->codegen->builtin_types.entry_invalid; + } + if (!is_power_of_2(operand_type->data.integral.bit_count)) { + ir_add_error(ira, &instruction->base, + buf_sprintf("%" PRIu32 "-bit integer type is not a power of 2", operand_type->data.integral.bit_count)); + return ira->codegen->builtin_types.entry_invalid; + } + } else if (get_codegen_ptr_type(operand_type) == nullptr) { + ir_add_error(ira, &instruction->base, + buf_sprintf("expected integer or pointer type, found '%s'", buf_ptr(&operand_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *ptr_inst = instruction->ptr->other; + if (type_is_invalid(ptr_inst->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false); + IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type); + if (type_is_invalid(casted_ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + AtomicRmwOp op; + if (!ir_resolve_atomic_rmw_op(ira, instruction->op->other, &op)) { + return ira->codegen->builtin_types.entry_invalid; + } + + IrInstruction *operand = instruction->operand->other; + if (type_is_invalid(operand->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *casted_operand = ir_implicit_cast(ira, operand, operand_type); + if (type_is_invalid(casted_ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + AtomicOrder ordering; + if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering)) + return ira->codegen->builtin_types.entry_invalid; + + if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar) + { + zig_panic("TODO compile-time execution of atomicRmw"); + } + + IrInstruction *result = ir_build_atomic_rmw(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, nullptr, casted_ptr, nullptr, casted_operand, nullptr, + op, 
ordering); + ir_link_new_instruction(result, &instruction->base); + result->value.type = operand_type; + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { @@ -17545,6 +17691,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction); case IrInstructionIdCoroAllocHelper: return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction); + case IrInstructionIdAtomicRmw: + return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction); } zig_unreachable(); } @@ -17748,6 +17896,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroSize: case IrInstructionIdCoroSuspend: case IrInstructionIdCoroFree: + case IrInstructionIdAtomicRmw: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index a68b5a46df..e23183bb38 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -1113,6 +1113,32 @@ static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelpe fprintf(irp->f, ")"); } +static void ir_print_atomic_rmw(IrPrint *irp, IrInstructionAtomicRmw *instruction) { + fprintf(irp->f, "@atomicRmw("); + if (instruction->operand_type != nullptr) { + ir_print_other_instruction(irp, instruction->operand_type); + } else { + fprintf(irp->f, "[TODO print]"); + } + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->ptr); + fprintf(irp->f, ","); + if (instruction->op != nullptr) { + ir_print_other_instruction(irp, instruction->op); + } else { + fprintf(irp->f, "[TODO print]"); + } + fprintf(irp->f, ","); + ir_print_other_instruction(irp, instruction->operand); + fprintf(irp->f, ","); + if (instruction->ordering != nullptr) { + ir_print_other_instruction(irp, instruction->ordering); + } else { + fprintf(irp->f, "[TODO 
print]"); + } + fprintf(irp->f, ")"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); switch (instruction->id) { @@ -1472,6 +1498,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCoroAllocHelper: ir_print_coro_alloc_helper(irp, (IrInstructionCoroAllocHelper *)instruction); break; + case IrInstructionIdAtomicRmw: + ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/std/debug/index.zig b/std/debug/index.zig index cc4832b1ea..8731834fa5 100644 --- a/std/debug/index.zig +++ b/std/debug/index.zig @@ -97,21 +97,18 @@ pub fn assertOrPanic(ok: bool) void { } } -var panicking = false; +var panicking: u8 = 0; // TODO make this a bool /// This is the default panic implementation. pub fn panic(comptime format: []const u8, args: ...) noreturn { - // TODO an intrinsic that labels this as unlikely to be reached + @setCold(true); - // TODO - // if (@atomicRmw(AtomicOp.XChg, &panicking, true, AtomicOrder.SeqCst)) { } - if (panicking) { + if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) { // Panicked during a panic. + // TODO detect if a different thread caused the panic, because in that case // we would want to return here instead of calling abort, so that the thread // which first called panic can finish printing a stack trace. os.abort(); - } else { - panicking = true; } const stderr = getStderrStream() catch os.abort(); @@ -122,10 +119,11 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn { } pub fn panicWithTrace(trace: &const builtin.StackTrace, comptime format: []const u8, args: ...) 
noreturn { - if (panicking) { + @setCold(true); + + if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) { + // See TODO in above function os.abort(); - } else { - panicking = true; } const stderr = getStderrStream() catch os.abort(); stderr.print(format ++ "\n", args) catch os.abort(); diff --git a/test/cases/atomics.zig b/test/cases/atomics.zig index a796488d3b..e8e81b76e6 100644 --- a/test/cases/atomics.zig +++ b/test/cases/atomics.zig @@ -1,5 +1,7 @@ const assert = @import("std").debug.assert; -const AtomicOrder = @import("builtin").AtomicOrder; +const builtin = @import("builtin"); +const AtomicRmwOp = builtin.AtomicRmwOp; +const AtomicOrder = builtin.AtomicOrder; test "cmpxchg" { var x: i32 = 1234; @@ -12,3 +14,14 @@ test "fence" { @fence(AtomicOrder.SeqCst); x = 5678; } + +test "atomicrmw" { + var data: u8 = 200; + testAtomicRmw(&data); + assert(data == 42); +} + +fn testAtomicRmw(ptr: &u8) void { + const prev_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst); + assert(prev_value == 200); +} From c6227661568a9e8cad9d28bd7a11cb76c4f9c1c1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 21:48:20 -0500 Subject: [PATCH 50/56] async function fulfills promise atomically --- src/codegen.cpp | 17 ++++++++++++++--- src/ir.cpp | 24 +++++++++++++++++++----- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 89dc23f428..315699b826 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4132,8 +4132,9 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable, IrInstructionAtomicRmw *instruction) { bool is_signed; - if (instruction->operand->value.type->id == TypeTableEntryIdInt) { - is_signed = instruction->operand->value.type->data.integral.is_signed; + TypeTableEntry *operand_type = instruction->operand->value.type; + if (operand_type->id == TypeTableEntryIdInt) { + is_signed = operand_type->data.integral.is_signed; } 
else { is_signed = false; } @@ -4141,7 +4142,17 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutable *executable, LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering); LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr); LLVMValueRef operand = ir_llvm_value(g, instruction->operand); - return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false); + + if (get_codegen_ptr_type(operand_type) == nullptr) { + return LLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, false); + } + + // it's a pointer but we need to treat it as an int + LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr, + LLVMPointerType(g->builtin_types.entry_usize->type_ref, 0), ""); + LLVMValueRef casted_operand = LLVMBuildPtrToInt(g->builder, operand, g->builtin_types.entry_usize->type_ref, ""); + LLVMValueRef uncasted_result = LLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering, false); + return LLVMBuildIntToPtr(g->builder, uncasted_result, operand_type->type_ref, ""); } static void set_debug_location(CodeGen *g, IrInstruction *instruction) { diff --git a/src/ir.cpp b/src/ir.cpp index dcca05ad02..dc845bdaf7 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -2727,7 +2727,13 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr); ir_build_store_ptr(irb, scope, node, result_ptr, return_value); } - IrInstruction *maybe_await_handle = ir_build_load_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr); + IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, + get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); + // TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig + IrInstruction *replacement_value = irb->exec->coro_handle; + IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node, + 
promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr, + AtomicRmwOp_xchg, AtomicOrderSeqCst); IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle); IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final, @@ -17433,8 +17439,12 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr return ira->codegen->builtin_types.entry_invalid; AtomicRmwOp op; - if (!ir_resolve_atomic_rmw_op(ira, instruction->op->other, &op)) { - return ira->codegen->builtin_types.entry_invalid; + if (instruction->op == nullptr) { + op = instruction->resolved_op; + } else { + if (!ir_resolve_atomic_rmw_op(ira, instruction->op->other, &op)) { + return ira->codegen->builtin_types.entry_invalid; + } } IrInstruction *operand = instruction->operand->other; @@ -17446,8 +17456,12 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr return ira->codegen->builtin_types.entry_invalid; AtomicOrder ordering; - if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering)) - return ira->codegen->builtin_types.entry_invalid; + if (instruction->ordering == nullptr) { + ordering = instruction->resolved_ordering; + } else { + if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering)) + return ira->codegen->builtin_types.entry_invalid; + } if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar) { From 8429d4ceac4eb99fbe8aeca2ebe864dfd5b40470 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 22:18:48 -0500 Subject: [PATCH 51/56] implement coroutine resume --- doc/langref.html.in | 4 +++- src/all_types.hpp | 6 ++++++ src/analyze.cpp | 1 + src/ast_render.cpp | 8 ++++++++ src/codegen.cpp | 2 +- src/ir.cpp | 18 +++++++++++++++++- 
src/parser.cpp | 27 ++++++++++++++++++++++++++- test/cases/coroutines.zig | 27 +++++++++++++++++++++++++++ 8 files changed, 89 insertions(+), 4 deletions(-) diff --git a/doc/langref.html.in b/doc/langref.html.in index 25a90e3361..83d5e65bba 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5682,7 +5682,7 @@ ErrorSetExpr = (PrefixOpExpression "!" PrefixOpExpression) | PrefixOpExpression BlockOrExpression = Block | Expression -Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression | CancelExpression +Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression | CancelExpression | ResumeExpression AsmExpression = "asm" option("volatile") "(" String option(AsmOutput) ")" @@ -5730,6 +5730,8 @@ BreakExpression = "break" option(":" Symbol) option(Expression) CancelExpression = "cancel" Expression; +ResumeExpression = "resume" Expression; + Defer(body) = ("defer" | "deferror") body IfExpression(body) = "if" "(" Expression ")" body option("else" BlockExpression(body)) diff --git a/src/all_types.hpp b/src/all_types.hpp index d727f4a862..d2c7875943 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -403,6 +403,7 @@ enum NodeType { NodeTypeTestExpr, NodeTypeErrorSetDecl, NodeTypeCancel, + NodeTypeResume, NodeTypeAwaitExpr, NodeTypeSuspend, }; @@ -849,6 +850,10 @@ struct AstNodeCancelExpr { AstNode *expr; }; +struct AstNodeResumeExpr { + AstNode *expr; +}; + struct AstNodeContinueExpr { Buf *name; }; @@ -930,6 +935,7 @@ struct AstNode { AstNodeVarLiteral var_literal; AstNodeErrorSetDecl err_set_decl; AstNodeCancelExpr cancel_expr; + AstNodeResumeExpr resume_expr; AstNodeAwaitExpr await_expr; AstNodeSuspend suspend; } data; diff --git a/src/analyze.cpp b/src/analyze.cpp index be01f6b5f8..8842b4967e 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3212,6 +3212,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeTestExpr: case NodeTypeErrorSetDecl: case 
NodeTypeCancel: + case NodeTypeResume: case NodeTypeAwaitExpr: case NodeTypeSuspend: zig_unreachable(); diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 5f3e1998fd..6318ba3cff 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -246,6 +246,8 @@ static const char *node_type_str(NodeType node_type) { return "ErrorSetDecl"; case NodeTypeCancel: return "Cancel"; + case NodeTypeResume: + return "Resume"; case NodeTypeAwaitExpr: return "AwaitExpr"; case NodeTypeSuspend: @@ -1049,6 +1051,12 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { render_node_grouped(ar, node->data.cancel_expr.expr); break; } + case NodeTypeResume: + { + fprintf(ar->f, "resume "); + render_node_grouped(ar, node->data.resume_expr.expr); + break; + } case NodeTypeAwaitExpr: { fprintf(ar->f, "await "); diff --git a/src/codegen.cpp b/src/codegen.cpp index 315699b826..59956c9279 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4004,7 +4004,7 @@ static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, Ir static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, TypeTableEntry *fn_type) { if (g->coro_alloc_helper_fn_val != nullptr) - return g->coro_alloc_fn_val; + return g->coro_alloc_helper_fn_val; assert(fn_type->id == TypeTableEntryIdFn); diff --git a/src/ir.cpp b/src/ir.cpp index dc845bdaf7..4222196f37 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -5927,6 +5927,16 @@ static IrInstruction *ir_gen_cancel(IrBuilder *irb, Scope *parent_scope, AstNode return ir_build_cancel(irb, parent_scope, node, target_inst); } +static IrInstruction *ir_gen_resume(IrBuilder *irb, Scope *parent_scope, AstNode *node) { + assert(node->type == NodeTypeResume); + + IrInstruction *target_inst = ir_gen_node(irb, node->data.resume_expr.expr, parent_scope); + if (target_inst == irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + return ir_build_coro_resume(irb, parent_scope, node, 
target_inst); +} + static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, AstNode *node) { assert(node->type == NodeTypeAwaitExpr); @@ -6101,6 +6111,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_lval_wrap(irb, scope, ir_gen_err_set_decl(irb, scope, node), lval); case NodeTypeCancel: return ir_lval_wrap(irb, scope, ir_gen_cancel(irb, scope, node), lval); + case NodeTypeResume: + return ir_lval_wrap(irb, scope, ir_gen_resume(irb, scope, node), lval); case NodeTypeAwaitExpr: return ir_lval_wrap(irb, scope, ir_gen_await_expr(irb, scope, node), lval); case NodeTypeSuspend: @@ -17364,8 +17376,12 @@ static TypeTableEntry *ir_analyze_instruction_coro_resume(IrAnalyze *ira, IrInst if (type_is_invalid(awaiter_handle->value.type)) return ira->codegen->builtin_types.entry_invalid; + IrInstruction *casted_target = ir_implicit_cast(ira, awaiter_handle, ira->codegen->builtin_types.entry_promise); + if (type_is_invalid(casted_target->value.type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *result = ir_build_coro_resume(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, awaiter_handle); + instruction->base.source_node, casted_target); ir_link_new_instruction(result, &instruction->base); result->value.type = ira->codegen->builtin_types.entry_void; return result->value.type; diff --git a/src/parser.cpp b/src/parser.cpp index 763273fd0a..38994c79fc 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -1638,6 +1638,24 @@ static AstNode *ast_parse_cancel_expr(ParseContext *pc, size_t *token_index) { return node; } +/* +ResumeExpression = "resume" Expression; +*/ +static AstNode *ast_parse_resume_expr(ParseContext *pc, size_t *token_index) { + Token *token = &pc->tokens->at(*token_index); + + if (token->id != TokenIdKeywordResume) { + return nullptr; + } + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypeResume, token); + + node->data.resume_expr.expr = 
ast_parse_expression(pc, token_index, false); + + return node; +} + /* Defer(body) = ("defer" | "errdefer") body */ @@ -2266,7 +2284,7 @@ static AstNode *ast_parse_block_or_expression(ParseContext *pc, size_t *token_in } /* -Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression | CancelExpression +Expression = TryExpression | ReturnExpression | BreakExpression | AssignmentExpression | CancelExpression | ResumeExpression */ static AstNode *ast_parse_expression(ParseContext *pc, size_t *token_index, bool mandatory) { Token *token = &pc->tokens->at(*token_index); @@ -2287,6 +2305,10 @@ static AstNode *ast_parse_expression(ParseContext *pc, size_t *token_index, bool if (cancel_expr) return cancel_expr; + AstNode *resume_expr = ast_parse_resume_expr(pc, token_index); + if (resume_expr) + return resume_expr; + AstNode *ass_expr = ast_parse_ass_expr(pc, token_index, false); if (ass_expr) return ass_expr; @@ -3060,6 +3082,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeCancel: visit_field(&node->data.cancel_expr.expr, visit, context); break; + case NodeTypeResume: + visit_field(&node->data.resume_expr.expr, visit, context); + break; case NodeTypeAwaitExpr: visit_field(&node->data.await_expr.expr, visit, context); break; diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index f5e70774fa..2a5505360c 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -14,3 +14,30 @@ async fn simpleAsyncFn() void { suspend; x += 1; } + +test "coroutine suspend, resume, cancel" { + seq('a'); + const p = (async(std.debug.global_allocator) testAsyncSeq()) catch unreachable; + seq('c'); + resume p; + seq('f'); + cancel p; + seq('g'); + + assert(std.mem.eql(u8, points, "abcdefg")); +} + +async fn testAsyncSeq() void { + defer seq('e'); + + seq('b'); + suspend; + seq('d'); +} +var points = []u8{0} ** "abcdefg".len; +var index: usize = 0; + +fn seq(c: u8) void { + points[index] 
= c; + index += 1; +} From 834e992a7c4ca0f0e1935e01e23410bc1d95cc52 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 28 Feb 2018 22:26:26 -0500 Subject: [PATCH 52/56] add test for coroutine suspend with block --- test/cases/coroutines.zig | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index 2a5505360c..b2bed7a8a0 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -41,3 +41,21 @@ fn seq(c: u8) void { points[index] = c; index += 1; } + +test "coroutine suspend with block" { + const p = (async(std.debug.global_allocator) testSuspendBlock()) catch unreachable; + std.debug.assert(!result); + resume a_promise; + std.debug.assert(result); + cancel p; +} + +var a_promise: promise = undefined; +var result = false; + +async fn testSuspendBlock() void { + suspend |p| { + a_promise = p; + } + result = true; +} From 253d988e7c00f7ad0cc1b5f913562cb5c1712c91 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Mar 2018 03:28:13 -0500 Subject: [PATCH 53/56] implementation of await but it has bugs --- src/all_types.hpp | 23 +++++ src/analyze.cpp | 18 ++++ src/analyze.hpp | 1 + src/codegen.cpp | 32 ++++++- src/ir.cpp | 185 ++++++++++++++++++++++++++++++++++---- src/ir_print.cpp | 24 ++++- test/cases/coroutines.zig | 6 +- 7 files changed, 266 insertions(+), 23 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index d2c7875943..503d45fd9b 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1192,6 +1192,7 @@ struct TypeTableEntry { TypeTableEntry *pointer_parent[2]; // [0 - mut, 1 - const] TypeTableEntry *maybe_parent; TypeTableEntry *promise_parent; + TypeTableEntry *promise_frame_parent; // If we generate a constant name value for this type, we memoize it here. 
// The type of this is array ConstExprValue *cached_const_name_val; @@ -1641,6 +1642,7 @@ struct CodeGen { LLVMValueRef coro_free_fn_val; LLVMValueRef coro_resume_fn_val; LLVMValueRef coro_save_fn_val; + LLVMValueRef coro_promise_fn_val; LLVMValueRef coro_alloc_helper_fn_val; bool error_during_imports; @@ -2025,8 +2027,10 @@ enum IrInstructionId { IrInstructionIdCoroFree, IrInstructionIdCoroResume, IrInstructionIdCoroSave, + IrInstructionIdCoroPromise, IrInstructionIdCoroAllocHelper, IrInstructionIdAtomicRmw, + IrInstructionIdPromiseResultType, }; struct IrInstruction { @@ -2943,6 +2947,12 @@ struct IrInstructionCoroSave { IrInstruction *coro_handle; }; +struct IrInstructionCoroPromise { + IrInstruction base; + + IrInstruction *coro_handle; +}; + struct IrInstructionCoroAllocHelper { IrInstruction base; @@ -2962,6 +2972,12 @@ struct IrInstructionAtomicRmw { AtomicOrder resolved_ordering; }; +struct IrInstructionPromiseResultType { + IrInstruction base; + + IrInstruction *promise_type; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; @@ -2971,6 +2987,13 @@ static const size_t maybe_null_index = 1; static const size_t err_union_err_index = 0; static const size_t err_union_payload_index = 1; +#define ASYNC_ALLOC_FIELD_NAME "allocFn" +#define ASYNC_FREE_FIELD_NAME "freeFn" +#define AWAITER_HANDLE_FIELD_NAME "awaiter_handle" +#define RESULT_FIELD_NAME "result" +#define RESULT_PTR_FIELD_NAME "result_ptr" + + enum FloatMode { FloatModeOptimized, FloatModeStrict, diff --git a/src/analyze.cpp b/src/analyze.cpp index 8842b4967e..d66130ef6d 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -457,6 +457,23 @@ TypeTableEntry *get_pointer_to_type(CodeGen *g, TypeTableEntry *child_type, bool return get_pointer_to_type_extra(g, child_type, is_const, false, get_abi_alignment(g, child_type), 0, 0); } +TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) { + if (return_type->promise_frame_parent != nullptr) { + 
return return_type->promise_frame_parent; + } + + TypeTableEntry *awaiter_handle_type = get_maybe_type(g, g->builtin_types.entry_promise); + TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false); + const char *field_names[] = {AWAITER_HANDLE_FIELD_NAME, RESULT_FIELD_NAME, RESULT_PTR_FIELD_NAME}; + TypeTableEntry *field_types[] = {awaiter_handle_type, return_type, result_ptr_type}; + size_t field_count = type_has_bits(result_ptr_type) ? 3 : 1; + Buf *name = buf_sprintf("AsyncFramePromise(%s)", buf_ptr(&return_type->name)); + TypeTableEntry *entry = get_struct_type(g, buf_ptr(name), field_names, field_types, field_count); + + return_type->promise_frame_parent = entry; + return entry; +} + TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { if (child_type->maybe_parent) { TypeTableEntry *entry = child_type->maybe_parent; @@ -5800,3 +5817,4 @@ bool fn_type_can_fail(FnTypeId *fn_type_id) { return return_type->id == TypeTableEntryIdErrorUnion || return_type->id == TypeTableEntryIdErrorSet || fn_type_id->cc == CallingConventionAsync; } + diff --git a/src/analyze.hpp b/src/analyze.hpp index 068f321bfb..e9f89aa638 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -36,6 +36,7 @@ TypeTableEntry *get_opaque_type(CodeGen *g, Scope *scope, AstNode *source_node, TypeTableEntry *get_struct_type(CodeGen *g, const char *type_name, const char *field_names[], TypeTableEntry *field_types[], size_t field_count); TypeTableEntry *get_promise_type(CodeGen *g, TypeTableEntry *result_type); +TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type); TypeTableEntry *get_test_fn_type(CodeGen *g); bool handle_is_ptr(TypeTableEntry *type_entry); void find_libc_include_path(CodeGen *g); diff --git a/src/codegen.cpp b/src/codegen.cpp index 59956c9279..6534515edc 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1081,6 +1081,23 @@ static LLVMValueRef get_coro_save_fn_val(CodeGen *g) { return g->coro_save_fn_val; } +static 
LLVMValueRef get_coro_promise_fn_val(CodeGen *g) { + if (g->coro_promise_fn_val) + return g->coro_promise_fn_val; + + LLVMTypeRef param_types[] = { + LLVMPointerType(LLVMInt8Type(), 0), + LLVMInt32Type(), + LLVMInt1Type(), + }; + LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), param_types, 3, false); + Buf *name = buf_sprintf("llvm.coro.promise"); + g->coro_promise_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->coro_promise_fn_val)); + + return g->coro_promise_fn_val; +} + static LLVMValueRef get_return_address_fn_val(CodeGen *g) { if (g->return_address_fn_val) return g->return_address_fn_val; @@ -4002,6 +4019,16 @@ static LLVMValueRef ir_render_coro_save(CodeGen *g, IrExecutable *executable, Ir return LLVMBuildCall(g->builder, get_coro_save_fn_val(g), &coro_handle, 1, ""); } +static LLVMValueRef ir_render_coro_promise(CodeGen *g, IrExecutable *executable, IrInstructionCoroPromise *instruction) { + LLVMValueRef coro_handle = ir_llvm_value(g, instruction->coro_handle); + LLVMValueRef params[] = { + coro_handle, + LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false), + LLVMConstNull(LLVMInt1Type()), + }; + return LLVMBuildCall(g->builder, get_coro_promise_fn_val(g), params, 3, ""); +} + static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, TypeTableEntry *fn_type) { if (g->coro_alloc_helper_fn_val != nullptr) return g->coro_alloc_helper_fn_val; @@ -4064,7 +4091,7 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f LLVMValueRef coro_size = LLVMGetParam(fn_val, next_arg); next_arg += 1; LLVMValueRef alignment_val = LLVMConstInt(g->builtin_types.entry_u29->type_ref, - 2 * g->pointer_size_bytes, false); + get_coro_frame_align_bytes(g), false); ZigList args = {}; args.append(sret_ptr); @@ -4218,6 +4245,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdTagType: 
case IrInstructionIdExport: case IrInstructionIdErrorUnion: + case IrInstructionIdPromiseResultType: zig_unreachable(); case IrInstructionIdReturn: @@ -4360,6 +4388,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_coro_resume(g, executable, (IrInstructionCoroResume *)instruction); case IrInstructionIdCoroSave: return ir_render_coro_save(g, executable, (IrInstructionCoroSave *)instruction); + case IrInstructionIdCoroPromise: + return ir_render_coro_promise(g, executable, (IrInstructionCoroPromise *)instruction); case IrInstructionIdCoroAllocHelper: return ir_render_coro_alloc_helper(g, executable, (IrInstructionCoroAllocHelper *)instruction); case IrInstructionIdAtomicRmw: diff --git a/src/ir.cpp b/src/ir.cpp index 4222196f37..51c75ca23b 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -45,12 +45,6 @@ static LVal make_lval_addr(bool is_const, bool is_volatile) { return { true, is_const, is_volatile }; } -static const char * ASYNC_ALLOC_FIELD_NAME = "allocFn"; -static const char * ASYNC_FREE_FIELD_NAME = "freeFn"; -static const char * AWAITER_HANDLE_FIELD_NAME = "awaiter_handle"; -static const char * RESULT_FIELD_NAME = "result"; -static const char * RESULT_PTR_FIELD_NAME = "result_ptr"; - enum ConstCastResultId { ConstCastResultIdOk, ConstCastResultIdErrSet, @@ -697,6 +691,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroSave *) { return IrInstructionIdCoroSave; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroPromise *) { + return IrInstructionIdCoroPromise; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionCoroAllocHelper *) { return IrInstructionIdCoroAllocHelper; } @@ -705,6 +703,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicRmw *) { return IrInstructionIdAtomicRmw; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionPromiseResultType *) { + return IrInstructionIdPromiseResultType; +} + template static T 
*ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -937,25 +939,19 @@ static IrInstruction *ir_build_const_c_str_lit(IrBuilder *irb, Scope *scope, Ast static IrInstruction *ir_build_const_promise_init(IrBuilder *irb, Scope *scope, AstNode *source_node, TypeTableEntry *return_type) { - TypeTableEntry *awaiter_handle_type = get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise); - TypeTableEntry *result_ptr_type = get_pointer_to_type(irb->codegen, return_type, false); - const char *field_names[] = {AWAITER_HANDLE_FIELD_NAME, RESULT_FIELD_NAME, RESULT_PTR_FIELD_NAME}; - TypeTableEntry *field_types[] = {awaiter_handle_type, return_type, result_ptr_type}; - size_t field_count = type_has_bits(result_ptr_type) ? 3 : 1; - TypeTableEntry *struct_type = get_struct_type(irb->codegen, "AsyncFramePromise", field_names, field_types, - field_count); + TypeTableEntry *struct_type = get_promise_frame_type(irb->codegen, return_type); IrInstructionConst *const_instruction = ir_build_instruction(irb, scope, source_node); const_instruction->base.value.type = struct_type; const_instruction->base.value.special = ConstValSpecialStatic; - const_instruction->base.value.data.x_struct.fields = allocate(field_count); - const_instruction->base.value.data.x_struct.fields[0].type = awaiter_handle_type; + const_instruction->base.value.data.x_struct.fields = allocate(struct_type->data.structure.src_field_count); + const_instruction->base.value.data.x_struct.fields[0].type = struct_type->data.structure.fields[0].type_entry; const_instruction->base.value.data.x_struct.fields[0].special = ConstValSpecialStatic; const_instruction->base.value.data.x_struct.fields[0].data.x_maybe = nullptr; - if (field_count == 3) { + if (struct_type->data.structure.src_field_count > 1) { const_instruction->base.value.data.x_struct.fields[1].type = return_type; const_instruction->base.value.data.x_struct.fields[1].special = 
ConstValSpecialUndef; - const_instruction->base.value.data.x_struct.fields[2].type = result_ptr_type; + const_instruction->base.value.data.x_struct.fields[2].type = struct_type->data.structure.fields[2].type_entry; const_instruction->base.value.data.x_struct.fields[2].special = ConstValSpecialUndef; } return &const_instruction->base; @@ -2605,6 +2601,17 @@ static IrInstruction *ir_build_coro_save(IrBuilder *irb, Scope *scope, AstNode * return &instruction->base; } +static IrInstruction *ir_build_coro_promise(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *coro_handle) +{ + IrInstructionCoroPromise *instruction = ir_build_instruction(irb, scope, source_node); + instruction->coro_handle = coro_handle; + + ir_ref_instruction(coro_handle, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *alloc_fn, IrInstruction *coro_size) { @@ -2640,6 +2647,17 @@ static IrInstruction *ir_build_atomic_rmw(IrBuilder *irb, Scope *scope, AstNode return &instruction->base; } +static IrInstruction *ir_build_promise_result_type(IrBuilder *irb, Scope *scope, AstNode *source_node, + IrInstruction *promise_type) +{ + IrInstructionPromiseResultType *instruction = ir_build_instruction(irb, scope, source_node); + instruction->promise_type = promise_type; + + ir_ref_instruction(promise_type, irb->current_basic_block); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -5944,7 +5962,93 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast if (target_inst == irb->codegen->invalid_instruction) return irb->codegen->invalid_instruction; - zig_panic("TODO: generate await expr"); + FnTableEntry *fn_entry = exec_fn_entry(irb->exec); + if (!fn_entry) { + 
add_node_error(irb->codegen, node, buf_sprintf("await outside function definition")); + return irb->codegen->invalid_instruction; + } + if (fn_entry->type_entry->data.fn.fn_type_id.cc != CallingConventionAsync) { + add_node_error(irb->codegen, node, buf_sprintf("await in non-async function")); + return irb->codegen->invalid_instruction; + } + + ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); + if (scope_defer_expr) { + if (!scope_defer_expr->reported_err) { + add_node_error(irb->codegen, node, buf_sprintf("cannot await inside defer expression")); + scope_defer_expr->reported_err = true; + } + return irb->codegen->invalid_instruction; + } + + Scope *outer_scope = irb->exec->begin_scope; + + IrInstruction *coro_promise_ptr = ir_build_coro_promise(irb, parent_scope, node, target_inst); + Buf *result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); + IrInstruction *result_ptr_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_ptr_field_name); + + Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); + IrInstruction *awaiter_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, + awaiter_handle_field_name); + + IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false); + VariableTableEntry *result_var = ir_create_var(irb, node, parent_scope, nullptr, + false, false, true, const_bool_false); + IrInstruction *undefined_value = ir_build_const_undefined(irb, parent_scope, node); + IrInstruction *target_promise_type = ir_build_typeof(irb, parent_scope, node, target_inst); + IrInstruction *promise_result_type = ir_build_promise_result_type(irb, parent_scope, node, target_promise_type); + ir_build_var_decl(irb, parent_scope, node, result_var, promise_result_type, nullptr, undefined_value); + IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var, false, false); + ir_build_store_ptr(irb, parent_scope, node, 
result_ptr_field_ptr, my_result_var_ptr); + IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle); + IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, + get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); + IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, parent_scope, node, + promise_type_val, awaiter_field_ptr, nullptr, irb->exec->coro_handle, nullptr, + AtomicRmwOp_xchg, AtomicOrderSeqCst); + IrInstruction *is_non_null = ir_build_test_nonnull(irb, parent_scope, node, maybe_await_handle); + IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, parent_scope, "YesSuspend"); + IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, parent_scope, "NoSuspend"); + IrBasicBlock *merge_block = ir_create_basic_block(irb, parent_scope, "Merge"); + ir_build_cond_br(irb, parent_scope, node, is_non_null, no_suspend_block, yes_suspend_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, no_suspend_block); + Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); + IrInstruction *promise_result_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, result_field_name); + IrInstruction *no_suspend_result = ir_build_load_ptr(irb, parent_scope, node, promise_result_ptr); + ir_build_cancel(irb, parent_scope, node, target_inst); + ir_build_br(irb, parent_scope, node, merge_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block); + ir_build_coro_resume(irb, parent_scope, node, target_inst); + IrInstruction *suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false); + IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); + IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); + + IrInstructionSwitchBrCase *cases = allocate(2); + cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0); + 
cases[0].block = resume_block; + cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1); + cases[1].block = cleanup_block; + ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block, + 2, cases, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, cleanup_block); + ir_gen_defers_for_block(irb, parent_scope, outer_scope, true); + ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, resume_block); + IrInstruction *yes_suspend_result = ir_build_load_ptr(irb, parent_scope, node, my_result_var_ptr); + ir_build_br(irb, parent_scope, node, merge_block, const_bool_false); + + ir_set_cursor_at_end_and_append_block(irb, merge_block); + IrBasicBlock **incoming_blocks = allocate(2); + IrInstruction **incoming_values = allocate(2); + incoming_blocks[0] = resume_block; + incoming_values[0] = yes_suspend_result; + incoming_blocks[1] = no_suspend_block; + incoming_values[1] = no_suspend_result; + return ir_build_phi(irb, parent_scope, node, 2, incoming_blocks, incoming_values); } static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNode *node) { @@ -17399,6 +17503,29 @@ static TypeTableEntry *ir_analyze_instruction_coro_save(IrAnalyze *ira, IrInstru return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_coro_promise(IrAnalyze *ira, IrInstructionCoroPromise *instruction) { + IrInstruction *coro_handle = instruction->coro_handle->other; + if (type_is_invalid(coro_handle->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (coro_handle->value.type->id != TypeTableEntryIdPromise || + coro_handle->value.type->data.promise.result_type == nullptr) + { + ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'", + buf_ptr(&coro_handle->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + TypeTableEntry *coro_frame_type = 
get_promise_frame_type(ira->codegen, + coro_handle->value.type->data.promise.result_type); + + IrInstruction *result = ir_build_coro_promise(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, coro_handle); + ir_link_new_instruction(result, &instruction->base); + result->value.type = get_pointer_to_type(ira->codegen, coro_frame_type, false); + return result->value.type; +} + static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, IrInstructionCoroAllocHelper *instruction) { IrInstruction *alloc_fn = instruction->alloc_fn->other; if (type_is_invalid(alloc_fn->value.type)) @@ -17492,6 +17619,22 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr return result->value.type; } +static TypeTableEntry *ir_analyze_instruction_promise_result_type(IrAnalyze *ira, IrInstructionPromiseResultType *instruction) { + TypeTableEntry *promise_type = ir_resolve_type(ira, instruction->promise_type->other); + if (type_is_invalid(promise_type)) + return ira->codegen->builtin_types.entry_invalid; + + if (promise_type->id != TypeTableEntryIdPromise || promise_type->data.promise.result_type == nullptr) { + ir_add_error(ira, &instruction->base, buf_sprintf("expected promise->T, found '%s'", + buf_ptr(&promise_type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + out_val->data.x_type = promise_type->data.promise.result_type; + return ira->codegen->builtin_types.entry_type; +} + static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstruction *instruction) { switch (instruction->id) { @@ -17719,10 +17862,14 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_coro_resume(ira, (IrInstructionCoroResume *)instruction); case IrInstructionIdCoroSave: return ir_analyze_instruction_coro_save(ira, (IrInstructionCoroSave *)instruction); + case 
IrInstructionIdCoroPromise: + return ir_analyze_instruction_coro_promise(ira, (IrInstructionCoroPromise *)instruction); case IrInstructionIdCoroAllocHelper: return ir_analyze_instruction_coro_alloc_helper(ira, (IrInstructionCoroAllocHelper *)instruction); case IrInstructionIdAtomicRmw: return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction); + case IrInstructionIdPromiseResultType: + return ir_analyze_instruction_promise_result_type(ira, (IrInstructionPromiseResultType *)instruction); } zig_unreachable(); } @@ -17927,6 +18074,8 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCoroSuspend: case IrInstructionIdCoroFree: case IrInstructionIdAtomicRmw: + case IrInstructionIdCoroPromise: + case IrInstructionIdPromiseResultType: return false; case IrInstructionIdAsm: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index e23183bb38..194225935a 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -839,7 +839,11 @@ static void ir_print_ptr_to_int(IrPrint *irp, IrInstructionPtrToInt *instruction static void ir_print_int_to_ptr(IrPrint *irp, IrInstructionIntToPtr *instruction) { fprintf(irp->f, "@intToPtr("); - ir_print_other_instruction(irp, instruction->dest_type); + if (instruction->dest_type == nullptr) { + fprintf(irp->f, "(null)"); + } else { + ir_print_other_instruction(irp, instruction->dest_type); + } fprintf(irp->f, ","); ir_print_other_instruction(irp, instruction->target); fprintf(irp->f, ")"); @@ -1105,6 +1109,18 @@ static void ir_print_coro_save(IrPrint *irp, IrInstructionCoroSave *instruction) fprintf(irp->f, ")"); } +static void ir_print_coro_promise(IrPrint *irp, IrInstructionCoroPromise *instruction) { + fprintf(irp->f, "@coroPromise("); + ir_print_other_instruction(irp, instruction->coro_handle); + fprintf(irp->f, ")"); +} + +static void ir_print_promise_result_type(IrPrint *irp, IrInstructionPromiseResultType *instruction) { + fprintf(irp->f, "@PromiseResultType("); + 
ir_print_other_instruction(irp, instruction->promise_type); + fprintf(irp->f, ")"); +} + static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelper *instruction) { fprintf(irp->f, "@coroAllocHelper("); ir_print_other_instruction(irp, instruction->alloc_fn); @@ -1501,6 +1517,12 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdAtomicRmw: ir_print_atomic_rmw(irp, (IrInstructionAtomicRmw *)instruction); break; + case IrInstructionIdCoroPromise: + ir_print_coro_promise(irp, (IrInstructionCoroPromise *)instruction); + break; + case IrInstructionIdPromiseResultType: + ir_print_promise_result_type(irp, (IrInstructionPromiseResultType *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index b2bed7a8a0..8f1909a64f 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -4,7 +4,7 @@ const assert = std.debug.assert; var x: i32 = 1; test "create a coroutine and cancel it" { - const p = try (async(std.debug.global_allocator) simpleAsyncFn()); + const p = try async(std.debug.global_allocator) simpleAsyncFn(); cancel p; assert(x == 2); } @@ -17,7 +17,7 @@ async fn simpleAsyncFn() void { test "coroutine suspend, resume, cancel" { seq('a'); - const p = (async(std.debug.global_allocator) testAsyncSeq()) catch unreachable; + const p = try async(std.debug.global_allocator) testAsyncSeq(); seq('c'); resume p; seq('f'); @@ -43,7 +43,7 @@ fn seq(c: u8) void { } test "coroutine suspend with block" { - const p = (async(std.debug.global_allocator) testSuspendBlock()) catch unreachable; + const p = try async(std.debug.global_allocator) testSuspendBlock(); std.debug.assert(!result); resume a_promise; std.debug.assert(result); From a7c87ae1e4b621291a844df678cbe0fbfb531029 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Mar 2018 10:23:47 -0500 Subject: [PATCH 54/56] fix not casting result of llvm.coro.promise --- src/codegen.cpp 
| 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 6534515edc..0cfd27322f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4026,7 +4026,8 @@ static LLVMValueRef ir_render_coro_promise(CodeGen *g, IrExecutable *executable, LLVMConstInt(LLVMInt32Type(), get_coro_frame_align_bytes(g), false), LLVMConstNull(LLVMInt1Type()), }; - return LLVMBuildCall(g->builder, get_coro_promise_fn_val(g), params, 3, ""); + LLVMValueRef uncasted_result = LLVMBuildCall(g->builder, get_coro_promise_fn_val(g), params, 3, ""); + return LLVMBuildBitCast(g->builder, uncasted_result, instruction->base.value.type->type_ref, ""); } static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_fn_type_ref, TypeTableEntry *fn_type) { From 8a0e1d4c02480809fe7ab9ee40ce279ffcb4fd16 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Mar 2018 15:46:35 -0500 Subject: [PATCH 55/56] await keyword works --- src/all_types.hpp | 3 ++- src/ir.cpp | 13 ++++++++++--- test/cases/coroutines.zig | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/src/all_types.hpp b/src/all_types.hpp index 503d45fd9b..72ec860556 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -58,8 +58,9 @@ struct IrExecutable { ZigList tld_list; IrInstruction *coro_handle; - IrInstruction *coro_awaiter_field_ptr; + IrInstruction *coro_awaiter_field_ptr; // this one is shared and in the promise IrInstruction *coro_result_ptr_field_ptr; + IrInstruction *await_handle_var_ptr; // this one is where we put the one we extracted from the promise IrBasicBlock *coro_early_final; IrBasicBlock *coro_normal_final; IrBasicBlock *coro_suspend_block; diff --git a/src/ir.cpp b/src/ir.cpp index 51c75ca23b..9a01b152d8 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -2752,6 +2752,7 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode IrInstruction *maybe_await_handle = 
ir_build_atomic_rmw(irb, scope, node, promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr, AtomicRmwOp_xchg, AtomicOrderSeqCst); + ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle); IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle); IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final, @@ -6020,7 +6021,6 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast ir_build_br(irb, parent_scope, node, merge_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, yes_suspend_block); - ir_build_coro_resume(irb, parent_scope, node, target_inst); IrInstruction *suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false); IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); @@ -6277,13 +6277,20 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec // create the coro promise const_bool_false = ir_build_const_bool(irb, scope, node, false); VariableTableEntry *promise_var = ir_create_var(irb, node, scope, nullptr, false, false, true, const_bool_false); - //scope = promise_var->child_scope; return_type = fn_entry->type_entry->data.fn.fn_type_id.return_type; IrInstruction *promise_init = ir_build_const_promise_init(irb, scope, node, return_type); ir_build_var_decl(irb, scope, node, promise_var, nullptr, nullptr, promise_init); IrInstruction *coro_promise_ptr = ir_build_var_ptr(irb, scope, node, promise_var, false, false); + VariableTableEntry *await_handle_var = ir_create_var(irb, node, scope, nullptr, false, false, true, const_bool_false); + IrInstruction *null_value = ir_build_const_null(irb, scope, node); + 
IrInstruction *await_handle_type_val = ir_build_const_type(irb, scope, node, + get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); + ir_build_var_decl(irb, scope, node, await_handle_var, await_handle_type_val, nullptr, null_value); + irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, scope, node, + await_handle_var, false, false); + u8_ptr_type = ir_build_const_type(irb, scope, node, get_pointer_to_type(irb->codegen, irb->codegen->builtin_types.entry_u8, false)); IrInstruction *promise_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_promise_ptr); @@ -6409,7 +6416,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_set_cursor_at_end_and_append_block(irb, resume_block); IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, - irb->exec->coro_awaiter_field_ptr, false); + irb->exec->await_handle_var_ptr, false); IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); ir_build_coro_resume(irb, scope, node, awaiter_handle); ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false); diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index 8f1909a64f..fa32cd8ce9 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -59,3 +59,42 @@ async fn testSuspendBlock() void { } result = true; } + +var await_a_promise: promise = undefined; +var await_final_result: i32 = 0; + +test "coroutine await" { + await_seq('a'); + const p = async(std.debug.global_allocator) await_amain() catch unreachable; + await_seq('f'); + resume await_a_promise; + await_seq('i'); + assert(await_final_result == 1234); + assert(std.mem.eql(u8, await_points, "abcdefghi")); +} + +async fn await_amain() void { + await_seq('b'); + const p = async await_another() catch unreachable; + await_seq('e'); + await_final_result = await p; + await_seq('h'); +} + +async fn await_another() i32 { + await_seq('c'); + 
suspend |p| { + await_seq('d'); + await_a_promise = p; + } + await_seq('g'); + return 1234; +} + +var await_points = []u8{0} ** "abcdefghi".len; +var await_seq_index: usize = 0; + +fn await_seq(c: u8) void { + await_points[await_seq_index] = c; + await_seq_index += 1; +} From 6bade0b825c37699346a414568e79fe4c1918409 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 1 Mar 2018 16:17:38 -0500 Subject: [PATCH 56/56] coroutines: add await early test case --- test/cases/coroutines.zig | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index fa32cd8ce9..d5469a5d03 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -98,3 +98,35 @@ fn await_seq(c: u8) void { await_points[await_seq_index] = c; await_seq_index += 1; } + + +var early_final_result: i32 = 0; + +test "coroutine await early return" { + early_seq('a'); + const p = async(std.debug.global_allocator) early_amain() catch unreachable; + early_seq('f'); + assert(early_final_result == 1234); + assert(std.mem.eql(u8, early_points, "abcdef")); +} + +async fn early_amain() void { + early_seq('b'); + const p = async early_another() catch unreachable; + early_seq('d'); + early_final_result = await p; + early_seq('e'); +} + +async fn early_another() i32 { + early_seq('c'); + return 1234; +} + +var early_points = []u8{0} ** "abcdef".len; +var early_seq_index: usize = 0; + +fn early_seq(c: u8) void { + early_points[early_seq_index] = c; + early_seq_index += 1; +}