Merge branch 'lazy-sizeof'

This commit is contained in:
Andrew Kelley 2019-08-29 22:44:21 -04:00
commit 10541c8fc8
No known key found for this signature in database
GPG Key ID: 7C5F548F728501A9
11 changed files with 463 additions and 141 deletions

View File

@ -36,6 +36,7 @@ struct IrInstruction;
struct IrInstructionCast;
struct IrInstructionAllocaGen;
struct IrInstructionCallGen;
struct IrInstructionAwaitGen;
struct IrBasicBlock;
struct ScopeDecls;
struct ZigWindowsSDK;
@ -308,6 +309,7 @@ struct ConstGlobalRefs {
enum LazyValueId {
LazyValueIdInvalid,
LazyValueIdAlignOf,
LazyValueIdSizeOf,
LazyValueIdPtrType,
LazyValueIdOptType,
LazyValueIdSliceType,
@ -326,6 +328,13 @@ struct LazyValueAlignOf {
IrInstruction *target_type;
};
// Lazily-evaluated @sizeOf(T). Resolution of the target type is deferred
// until the size is actually needed, which avoids dependency loops during
// semantic analysis (resolved in ir_resolve_lazy_raw; compared against
// zero without full resolution in lazy_cmp_zero).
struct LazyValueSizeOf {
    LazyValue base;

    // Analysis context captured at the @sizeOf call site.
    IrAnalyze *ira;
    // Instruction whose value is the type whose size is being queried.
    IrInstruction *target_type;
};
struct LazyValueSliceType {
LazyValue base;
@ -1478,6 +1487,7 @@ struct ZigFn {
AstNode **param_source_nodes;
Buf **param_names;
IrInstruction *err_code_spill;
AstNode *assumed_non_async;
AstNode *fn_no_inline_set_node;
AstNode *fn_static_eval_set_node;
@ -1495,6 +1505,7 @@ struct ZigFn {
ZigList<GlobalExport> export_list;
ZigList<IrInstructionCallGen *> call_list;
ZigList<IrInstructionAwaitGen *> await_list;
LLVMValueRef valgrind_client_request_array;
@ -3709,6 +3720,7 @@ struct IrInstructionAwaitGen {
IrInstruction *frame;
IrInstruction *result_loc;
ZigFn *target_fn;
};
struct IrInstructionResume {

View File

@ -31,6 +31,7 @@ static void analyze_fn_body(CodeGen *g, ZigFn *fn_table_entry);
static void resolve_llvm_types(CodeGen *g, ZigType *type, ResolveStatus wanted_resolve_status);
static void preview_use_decl(CodeGen *g, TldUsingNamespace *using_namespace, ScopeDecls *dest_decls_scope);
static void resolve_use_decl(CodeGen *g, TldUsingNamespace *tld_using_namespace, ScopeDecls *dest_decls_scope);
static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame);
// nullptr means not analyzed yet; this one means currently being analyzed
static const AstNode *inferred_async_checking = reinterpret_cast<AstNode *>(0x1);
@ -973,7 +974,7 @@ ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, Zig
nullptr, nullptr, node, type_name, nullptr, nullptr, undef);
}
static Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type,
Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type,
ConstExprValue *parent_type_val, bool *is_zero_bits)
{
Error err;
@ -997,6 +998,7 @@ static Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, Zi
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdPtrType: {
LazyValuePtrType *lazy_ptr_type = reinterpret_cast<LazyValuePtrType *>(type_val->data.x_lazy);
@ -1036,6 +1038,7 @@ Error type_val_resolve_is_opaque_type(CodeGen *g, ConstExprValue *type_val, bool
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType:
case LazyValueIdPtrType:
@ -1055,6 +1058,7 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ConstExprValue
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType: {
LazyValueSliceType *lazy_slice_type = reinterpret_cast<LazyValueSliceType *>(type_val->data.x_lazy);
@ -1105,7 +1109,7 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ConstExprValue
zig_unreachable();
}
static Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val,
Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val,
size_t *abi_size, size_t *size_in_bits)
{
Error err;
@ -1123,12 +1127,42 @@ start_over:
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType:
*abi_size = g->builtin_types.entry_usize->abi_size * 2;
*size_in_bits = g->builtin_types.entry_usize->size_in_bits * 2;
case LazyValueIdSliceType: {
LazyValueSliceType *lazy_slice_type = reinterpret_cast<LazyValueSliceType *>(type_val->data.x_lazy);
bool is_zero_bits;
if ((err = type_val_resolve_zero_bits(g, &lazy_slice_type->elem_type->value, nullptr,
nullptr, &is_zero_bits)))
{
return err;
}
if (is_zero_bits) {
*abi_size = g->builtin_types.entry_usize->abi_size;
*size_in_bits = g->builtin_types.entry_usize->size_in_bits;
} else {
*abi_size = g->builtin_types.entry_usize->abi_size * 2;
*size_in_bits = g->builtin_types.entry_usize->size_in_bits * 2;
}
return ErrorNone;
case LazyValueIdPtrType:
}
case LazyValueIdPtrType: {
LazyValuePtrType *lazy_ptr_type = reinterpret_cast<LazyValuePtrType *>(type_val->data.x_lazy);
bool is_zero_bits;
if ((err = type_val_resolve_zero_bits(g, &lazy_ptr_type->elem_type->value, nullptr,
nullptr, &is_zero_bits)))
{
return err;
}
if (is_zero_bits) {
*abi_size = 0;
*size_in_bits = 0;
} else {
*abi_size = g->builtin_types.entry_usize->abi_size;
*size_in_bits = g->builtin_types.entry_usize->size_in_bits;
}
return ErrorNone;
}
case LazyValueIdFnType:
*abi_size = g->builtin_types.entry_usize->abi_size;
*size_in_bits = g->builtin_types.entry_usize->size_in_bits;
@ -1159,6 +1193,7 @@ Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType:
case LazyValueIdPtrType:
@ -1193,6 +1228,7 @@ static OnePossibleValue type_val_resolve_has_one_possible_value(CodeGen *g, Cons
switch (type_val->data.x_lazy->id) {
case LazyValueIdInvalid:
case LazyValueIdAlignOf:
case LazyValueIdSizeOf:
zig_unreachable();
case LazyValueIdSliceType: // it has the len field
case LazyValueIdOptType: // it has the optional bit
@ -4138,8 +4174,14 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
assert(fn->inferred_async_node != inferred_async_checking);
assert(fn->inferred_async_node != inferred_async_none);
if (fn->inferred_async_fn != nullptr) {
ErrorMsg *new_msg = add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("async function call here"));
ErrorMsg *new_msg;
if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
new_msg = add_error_note(g, msg, fn->inferred_async_node,
buf_create_from_str("await here is a suspend point"));
} else {
new_msg = add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("async function call here"));
}
return add_async_error_notes(g, new_msg, fn->inferred_async_fn);
} else if (fn->inferred_async_node->type == NodeTypeFnProto) {
add_error_note(g, msg, fn->inferred_async_node,
@ -4149,7 +4191,7 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
buf_sprintf("suspends here"));
} else if (fn->inferred_async_node->type == NodeTypeAwaitExpr) {
add_error_note(g, msg, fn->inferred_async_node,
buf_sprintf("await is a suspend point"));
buf_sprintf("await here is a suspend point"));
} else if (fn->inferred_async_node->type == NodeTypeFnCallExpr &&
fn->inferred_async_node->data.fn_call_expr.is_builtin)
{
@ -4161,6 +4203,64 @@ static void add_async_error_notes(CodeGen *g, ErrorMsg *msg, ZigFn *fn) {
}
}
// Determines whether calling `callee` from `fn` forces `fn` to be async,
// updating the inferred-async bookkeeping on both functions as needed.
// ErrorNone - not async
// ErrorIsAsync - yes async
// ErrorSemanticAnalyzeFail - compile error emitted; result is invalid
static Error analyze_callee_async(CodeGen *g, ZigFn *fn, ZigFn *callee, AstNode *call_node,
        bool must_not_be_async)
{
    // A function with an explicit calling convention can never be async.
    if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
        return ErrorNone;
    if (callee->anal_state == FnAnalStateReady) {
        analyze_fn_body(g, callee);
        if (callee->anal_state == FnAnalStateInvalid) {
            return ErrorSemanticAnalyzeFail;
        }
    }
    bool callee_is_async;
    if (callee->anal_state == FnAnalStateComplete) {
        // Callee is fully analyzed; resolve its async-ness directly.
        analyze_fn_async(g, callee, true);
        if (callee->anal_state == FnAnalStateInvalid) {
            return ErrorSemanticAnalyzeFail;
        }
        callee_is_async = fn_is_async(callee);
    } else {
        // If it's already been determined, use that value. Otherwise
        // assume non-async, emit an error later if it turned out to be async.
        if (callee->inferred_async_node == nullptr ||
            callee->inferred_async_node == inferred_async_checking)
        {
            // Record the call site where the assumption was made so a useful
            // note can be attached if the assumption later proves wrong.
            callee->assumed_non_async = call_node;
            callee_is_async = false;
        } else {
            callee_is_async = callee->inferred_async_node != inferred_async_none;
        }
    }
    if (callee_is_async) {
        // Calling an async function makes the caller async; remember the
        // call site and callee for error-note chains.
        fn->inferred_async_node = call_node;
        fn->inferred_async_fn = callee;
        if (must_not_be_async) {
            ErrorMsg *msg = add_node_error(g, fn->proto_node,
                buf_sprintf("function with calling convention '%s' cannot be async",
                    calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc)));
            add_async_error_notes(g, msg, fn);
            return ErrorSemanticAnalyzeFail;
        }
        if (fn->assumed_non_async != nullptr) {
            // `fn` itself was earlier assumed non-async (see above), but it
            // turns out to be async: the inference is contradictory.
            ErrorMsg *msg = add_node_error(g, fn->proto_node,
                buf_sprintf("unable to infer whether '%s' should be async",
                    buf_ptr(&fn->symbol_name)));
            add_error_note(g, msg, fn->assumed_non_async,
                buf_sprintf("assumed to be non-async here"));
            add_async_error_notes(g, msg, fn);
            fn->anal_state = FnAnalStateInvalid;
            return ErrorSemanticAnalyzeFail;
        }
        return ErrorIsAsync;
    }
    return ErrorNone;
}
// This function resolves functions being inferred async.
static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
if (fn->inferred_async_node == inferred_async_checking) {
@ -4187,42 +4287,40 @@ static void analyze_fn_async(CodeGen *g, ZigFn *fn, bool resolve_frame) {
for (size_t i = 0; i < fn->call_list.length; i += 1) {
IrInstructionCallGen *call = fn->call_list.at(i);
ZigFn *callee = call->fn_entry;
if (callee == nullptr) {
if (call->fn_entry == nullptr) {
// TODO function pointer call here, could be anything
continue;
}
if (callee->type_entry->data.fn.fn_type_id.cc != CallingConventionUnspecified)
continue;
if (callee->anal_state == FnAnalStateReady) {
analyze_fn_body(g, callee);
if (callee->anal_state == FnAnalStateInvalid) {
switch (analyze_callee_async(g, fn, call->fn_entry, call->base.source_node, must_not_be_async)) {
case ErrorSemanticAnalyzeFail:
fn->anal_state = FnAnalStateInvalid;
return;
}
case ErrorNone:
continue;
case ErrorIsAsync:
if (resolve_frame) {
resolve_async_fn_frame(g, fn);
}
return;
default:
zig_unreachable();
}
assert(callee->anal_state == FnAnalStateComplete);
analyze_fn_async(g, callee, true);
if (callee->anal_state == FnAnalStateInvalid) {
fn->anal_state = FnAnalStateInvalid;
return;
}
if (fn_is_async(callee)) {
fn->inferred_async_node = call->base.source_node;
fn->inferred_async_fn = callee;
if (must_not_be_async) {
ErrorMsg *msg = add_node_error(g, fn->proto_node,
buf_sprintf("function with calling convention '%s' cannot be async",
calling_convention_name(fn->type_entry->data.fn.fn_type_id.cc)));
add_async_error_notes(g, msg, fn);
}
for (size_t i = 0; i < fn->await_list.length; i += 1) {
IrInstructionAwaitGen *await = fn->await_list.at(i);
switch (analyze_callee_async(g, fn, await->target_fn, await->base.source_node, must_not_be_async)) {
case ErrorSemanticAnalyzeFail:
fn->anal_state = FnAnalStateInvalid;
return;
}
if (resolve_frame) {
resolve_async_fn_frame(g, fn);
}
return;
case ErrorNone:
continue;
case ErrorIsAsync:
if (resolve_frame) {
resolve_async_fn_frame(g, fn);
}
return;
default:
zig_unreachable();
}
}
fn->inferred_async_node = inferred_async_none;
@ -4480,6 +4578,8 @@ void semantic_analyze(CodeGen *g) {
ZigFn *fn = g->fn_defs.at(g->fn_defs_index);
g->trace_err = nullptr;
analyze_fn_async(g, fn, true);
if (fn->anal_state == FnAnalStateInvalid)
continue;
if (fn_is_async(fn) && fn->non_async_node != nullptr) {
ErrorMsg *msg = add_node_error(g, fn->proto_node,
buf_sprintf("'%s' cannot be async", buf_ptr(&fn->symbol_name)));
@ -5632,6 +5732,11 @@ static Error resolve_async_frame(CodeGen *g, ZigType *frame_type) {
return ErrorSemanticAnalyzeFail;
}
analyze_fn_async(g, callee, true);
if (callee->inferred_async_node == inferred_async_checking) {
assert(g->errors.length != 0);
frame_type->data.frame.locals_struct = g->builtin_types.entry_invalid;
return ErrorSemanticAnalyzeFail;
}
if (!fn_is_async(callee))
continue;

View File

@ -247,6 +247,10 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn);
bool fn_is_async(ZigFn *fn);
Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t *abi_align);
Error type_val_resolve_abi_size(CodeGen *g, AstNode *source_node, ConstExprValue *type_val,
size_t *abi_size, size_t *size_in_bits);
Error type_val_resolve_zero_bits(CodeGen *g, ConstExprValue *type_val, ZigType *parent_type,
ConstExprValue *parent_type_val, bool *is_zero_bits);
ZigType *resolve_union_field_type(CodeGen *g, TypeUnionField *union_field);
ZigType *resolve_struct_field_type(CodeGen *g, TypeStructField *struct_field);

View File

@ -3924,7 +3924,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMBuildStore(g->builder, awaiter_init_val, awaiter_ptr);
if (ret_has_bits) {
LLVMValueRef ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
ret_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start + 2, "");
LLVMValueRef ret_ptr_ptr = LLVMBuildStructGEP(g->builder, frame_result_loc, frame_ret_start, "");
LLVMBuildStore(g->builder, ret_ptr, ret_ptr_ptr);
@ -4067,6 +4067,9 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
LLVMValueRef store_instr = LLVMBuildStore(g->builder, result, result_loc);
LLVMSetAlignment(store_instr, get_ptr_align(g, instruction->result_loc->value.type));
return result_loc;
} else if (!callee_is_async && instruction->is_async) {
LLVMBuildStore(g->builder, result, ret_ptr);
return result_loc;
} else {
return result;
}
@ -5498,6 +5501,44 @@ static LLVMValueRef ir_render_suspend_finish(CodeGen *g, IrExecutable *executabl
return nullptr;
}
// Emits the code that copies an awaited function's result (and, when
// applicable, its error return trace) out of the target frame when the
// awaited function has already completed — i.e. the await does not suspend.
//
// `non_async` is true when the await is statically known not to suspend
// (the target function is not async); in that case the result value is
// loaded and returned. On the dynamic early-return branch of a real await
// (`non_async` false) this returns nullptr.
static LLVMValueRef gen_await_early_return(CodeGen *g, IrInstruction *source_instr,
    LLVMValueRef target_frame_ptr, ZigType *result_type, ZigType *ptr_result_type,
    LLVMValueRef result_loc, bool non_async)
{
    LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
    LLVMValueRef their_result_ptr = nullptr;
    if (type_has_bits(result_type) && (non_async || result_loc != nullptr)) {
        // Load the pointer to the callee's result slot out of its frame.
        LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
        their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
        if (result_loc != nullptr) {
            // memcpy the result bytes into the awaiter's result location.
            LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
            LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
            LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
            bool is_volatile = false;
            uint32_t abi_align = get_abi_alignment(g, result_type);
            LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
            ZigLLVMBuildMemCpy(g->builder,
                dest_ptr_casted, abi_align,
                src_ptr_casted, abi_align, byte_count_val, is_volatile);
        }
    }
    if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
        // Merge the callee's error return trace into the awaiter's.
        LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
            frame_index_trace_arg(g, result_type), "");
        LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
        LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, source_instr->scope);
        LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
        ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
            get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
    }
    if (non_async && type_has_bits(result_type)) {
        // Prefer the awaiter's result location when one exists; otherwise
        // read straight from the callee's result slot.
        LLVMValueRef result_ptr = (result_loc == nullptr) ? their_result_ptr : result_loc;
        return get_handle_value(g, result_ptr, result_type, ptr_result_type);
    } else {
        return nullptr;
    }
}
static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInstructionAwaitGen *instruction) {
LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->llvm_type;
LLVMValueRef zero = LLVMConstNull(usize_type_ref);
@ -5505,6 +5546,14 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
ZigType *result_type = instruction->base.value.type;
ZigType *ptr_result_type = get_pointer_to_type(g, result_type, true);
LLVMValueRef result_loc = (instruction->result_loc == nullptr) ?
nullptr : ir_llvm_value(g, instruction->result_loc);
if (instruction->target_fn != nullptr && !fn_is_async(instruction->target_fn)) {
return gen_await_early_return(g, &instruction->base, target_frame_ptr, result_type,
ptr_result_type, result_loc, true);
}
// Prepare to be suspended
LLVMBasicBlockRef resume_bb = gen_suspend_begin(g, "AwaitResume");
LLVMBasicBlockRef end_bb = LLVMAppendBasicBlock(g->cur_fn_val, "AwaitEnd");
@ -5512,9 +5561,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// At this point resuming the function will continue from resume_bb.
// This code is as if it is running inside the suspend block.
// supply the awaiter return pointer
LLVMValueRef result_loc = (instruction->result_loc == nullptr) ?
nullptr : ir_llvm_value(g, instruction->result_loc);
if (type_has_bits(result_type)) {
LLVMValueRef awaiter_ret_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start + 1, "");
if (result_loc == nullptr) {
@ -5562,28 +5610,8 @@ static LLVMValueRef ir_render_await(CodeGen *g, IrExecutable *executable, IrInst
// Early return: The async function has already completed. We must copy the result and
// the error return trace if applicable.
LLVMPositionBuilderAtEnd(g->builder, early_return_block);
if (type_has_bits(result_type) && result_loc != nullptr) {
LLVMValueRef their_result_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr, frame_ret_start, "");
LLVMValueRef their_result_ptr = LLVMBuildLoad(g->builder, their_result_ptr_ptr, "");
LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0);
LLVMValueRef dest_ptr_casted = LLVMBuildBitCast(g->builder, result_loc, ptr_u8, "");
LLVMValueRef src_ptr_casted = LLVMBuildBitCast(g->builder, their_result_ptr, ptr_u8, "");
bool is_volatile = false;
uint32_t abi_align = get_abi_alignment(g, result_type);
LLVMValueRef byte_count_val = LLVMConstInt(usize_type_ref, type_size(g, result_type), false);
ZigLLVMBuildMemCpy(g->builder,
dest_ptr_casted, abi_align,
src_ptr_casted, abi_align, byte_count_val, is_volatile);
}
if (codegen_fn_has_err_ret_tracing_arg(g, result_type)) {
LLVMValueRef their_trace_ptr_ptr = LLVMBuildStructGEP(g->builder, target_frame_ptr,
frame_index_trace_arg(g, result_type), "");
LLVMValueRef src_trace_ptr = LLVMBuildLoad(g->builder, their_trace_ptr_ptr, "");
LLVMValueRef dest_trace_ptr = get_cur_err_ret_trace_val(g, instruction->base.scope);
LLVMValueRef args[] = { dest_trace_ptr, src_trace_ptr };
ZigLLVMBuildCall(g->builder, get_merge_err_ret_traces_fn_val(g), args, 2,
get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
}
gen_await_early_return(g, &instruction->base, target_frame_ptr, result_type, ptr_result_type,
result_loc, false);
LLVMBuildBr(g->builder, end_bb);
LLVMPositionBuilderAtEnd(g->builder, resume_bb);
@ -6845,6 +6873,7 @@ static void set_global_tls(CodeGen *g, ZigVar *var, LLVMValueRef global_value) {
}
static void do_code_gen(CodeGen *g) {
Error err;
assert(!g->errors.length);
generate_error_name_table(g);
@ -6858,6 +6887,8 @@ static void do_code_gen(CodeGen *g) {
// Generate debug info for it but that's it.
ConstExprValue *const_val = var->const_value;
assert(const_val->special != ConstValSpecialRuntime);
if ((err = ir_resolve_lazy(g, var->decl_node, const_val)))
zig_unreachable();
if (const_val->type != var->var_type) {
zig_panic("TODO debug info for var with ptr casted value");
}
@ -6875,6 +6906,8 @@ static void do_code_gen(CodeGen *g) {
// Generate debug info for it but that's it.
ConstExprValue *const_val = var->const_value;
assert(const_val->special != ConstValSpecialRuntime);
if ((err = ir_resolve_lazy(g, var->decl_node, const_val)))
zig_unreachable();
if (const_val->type != var->var_type) {
zig_panic("TODO debug info for var with ptr casted value");
}

View File

@ -55,6 +55,8 @@ const char *err_str(Error err) {
case ErrorBrokenPipe: return "broken pipe";
case ErrorNoSpaceLeft: return "no space left";
case ErrorNoCCompilerInstalled: return "no C compiler installed";
case ErrorNotLazy: return "not lazy";
case ErrorIsAsync: return "is async";
}
return "(invalid error)";
}

View File

@ -3268,7 +3268,7 @@ static IrInstruction *ir_build_await_src(IrBuilder *irb, Scope *scope, AstNode *
return &instruction->base;
}
static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction,
static IrInstructionAwaitGen *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_instruction,
IrInstruction *frame, ZigType *result_type, IrInstruction *result_loc)
{
IrInstructionAwaitGen *instruction = ir_build_instruction<IrInstructionAwaitGen>(&ira->new_irb,
@ -3280,7 +3280,7 @@ static IrInstruction *ir_build_await_gen(IrAnalyze *ira, IrInstruction *source_i
ir_ref_instruction(frame, ira->new_irb.current_basic_block);
if (result_loc != nullptr) ir_ref_instruction(result_loc, ira->new_irb.current_basic_block);
return &instruction->base;
return instruction;
}
static IrInstruction *ir_build_resume(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *frame) {
@ -10640,7 +10640,9 @@ static void ir_finish_bb(IrAnalyze *ira) {
static IrInstruction *ir_unreach_error(IrAnalyze *ira) {
ira->old_bb_index = SIZE_MAX;
assert(ira->new_irb.exec->first_err_trace_msg != nullptr);
if (ira->new_irb.exec->first_err_trace_msg == nullptr) {
ira->new_irb.exec->first_err_trace_msg = ira->codegen->trace_err;
}
return ira->codegen->unreach_instruction;
}
@ -12932,7 +12934,52 @@ static bool optional_value_is_null(ConstExprValue *val) {
}
}
// Returns ErrorNotLazy when the value cannot be determined
// Compares `val` against zero when that can be decided without forcing full
// resolution of a lazy value (preventing potential dependency loops).
// On success, writes the comparison into *result.
// Returns ErrorNotLazy when the value cannot be determined.
static Error lazy_cmp_zero(AstNode *source_node, ConstExprValue *val, Cmp *result) {
    Error err;

    if (val->special == ConstValSpecialRuntime || val->special == ConstValSpecialUndef)
        return ErrorNotLazy;

    if (val->special == ConstValSpecialStatic) {
        // Only integer values carry a numeric payload we can compare here.
        if (val->type->id == ZigTypeIdComptimeInt || val->type->id == ZigTypeIdInt) {
            *result = bigint_cmp_zero(&val->data.x_bigint);
            return ErrorNone;
        }
        return ErrorNotLazy;
    }

    if (val->special == ConstValSpecialLazy) {
        switch (val->data.x_lazy->id) {
            case LazyValueIdInvalid:
                zig_unreachable();
            case LazyValueIdAlignOf:
                // An alignment is always at least 1.
                *result = CmpGT;
                return ErrorNone;
            case LazyValueIdSizeOf: {
                LazyValueSizeOf *size_of = reinterpret_cast<LazyValueSizeOf *>(val->data.x_lazy);
                // Only zero-bit resolution is needed to decide size vs zero;
                // this is cheaper than resolving the full size.
                bool target_is_zero_bits;
                if ((err = type_val_resolve_zero_bits(size_of->ira->codegen,
                    &size_of->target_type->value, nullptr, nullptr, &target_is_zero_bits)))
                {
                    return err;
                }
                *result = target_is_zero_bits ? CmpEQ : CmpGT;
                return ErrorNone;
            }
            default:
                return ErrorNotLazy;
        }
    }

    zig_unreachable();
}
static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) {
Error err;
IrInstruction *op1 = bin_op_instruction->op1->child;
if (type_is_invalid(op1->value.type))
return ira->codegen->invalid_instruction;
@ -13182,6 +13229,50 @@ static IrInstruction *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *
}
if (one_possible_value || (instr_is_comptime(casted_op1) && instr_is_comptime(casted_op2))) {
{
// Before resolving the values, we special case comparisons against zero. These can often be done
// without resolving lazy values, preventing potential dependency loops.
Cmp op1_cmp_zero;
if ((err = lazy_cmp_zero(bin_op_instruction->base.source_node, &casted_op1->value, &op1_cmp_zero))) {
if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
return ira->codegen->invalid_instruction;
}
Cmp op2_cmp_zero;
if ((err = lazy_cmp_zero(bin_op_instruction->base.source_node, &casted_op2->value, &op2_cmp_zero))) {
if (err == ErrorNotLazy) goto never_mind_just_calculate_it_normally;
return ira->codegen->invalid_instruction;
}
bool can_cmp_zero = false;
Cmp cmp_result;
if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpEQ) {
can_cmp_zero = true;
cmp_result = CmpEQ;
} else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpEQ) {
can_cmp_zero = true;
cmp_result = CmpGT;
} else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpGT) {
can_cmp_zero = true;
cmp_result = CmpLT;
} else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpEQ) {
can_cmp_zero = true;
cmp_result = CmpLT;
} else if (op1_cmp_zero == CmpEQ && op2_cmp_zero == CmpLT) {
can_cmp_zero = true;
cmp_result = CmpGT;
} else if (op1_cmp_zero == CmpLT && op2_cmp_zero == CmpGT) {
can_cmp_zero = true;
cmp_result = CmpLT;
} else if (op1_cmp_zero == CmpGT && op2_cmp_zero == CmpLT) {
can_cmp_zero = true;
cmp_result = CmpGT;
}
if (can_cmp_zero) {
bool answer = resolve_cmp_op_id(op_id, cmp_result);
return ir_const_bool(ira, &bin_op_instruction->base, answer);
}
}
never_mind_just_calculate_it_normally:
ConstExprValue *op1_val = one_possible_value ? &casted_op1->value : ir_resolve_const(ira, casted_op1, UndefBad);
if (op1_val == nullptr)
return ira->codegen->invalid_instruction;
@ -16810,12 +16901,6 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
return ira->codegen->invalid_instruction;
bool safety_check_on = elem_ptr_instruction->safety_check_on;
if ((err = type_resolve(ira->codegen, return_type->data.pointer.child_type, ResolveStatusSizeKnown)))
return ira->codegen->invalid_instruction;
uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type);
uint64_t ptr_align = get_ptr_align(ira->codegen, return_type);
if (instr_is_comptime(casted_elem_index)) {
uint64_t index = bigint_as_u64(&casted_elem_index->value.data.x_bigint);
if (array_type->id == ZigTypeIdArray) {
@ -16829,8 +16914,16 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
safety_check_on = false;
}
{
if (return_type->data.pointer.explicit_alignment != 0) {
// figure out the largest alignment possible
if ((err = type_resolve(ira->codegen, return_type->data.pointer.child_type, ResolveStatusSizeKnown)))
return ira->codegen->invalid_instruction;
uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type);
uint64_t ptr_align = get_ptr_align(ira->codegen, return_type);
uint64_t chosen_align = abi_align;
if (ptr_align >= abi_align) {
while (ptr_align > abi_align) {
@ -17059,15 +17152,24 @@ static IrInstruction *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruct
case ReqCompTimeNo:
break;
}
if (ptr_align < abi_align) {
if (elem_size >= ptr_align && elem_size % ptr_align == 0) {
return_type = adjust_ptr_align(ira->codegen, return_type, ptr_align);
if (return_type->data.pointer.explicit_alignment != 0) {
if ((err = type_resolve(ira->codegen, return_type->data.pointer.child_type, ResolveStatusSizeKnown)))
return ira->codegen->invalid_instruction;
uint64_t elem_size = type_size(ira->codegen, return_type->data.pointer.child_type);
uint64_t abi_align = get_abi_alignment(ira->codegen, return_type->data.pointer.child_type);
uint64_t ptr_align = get_ptr_align(ira->codegen, return_type);
if (ptr_align < abi_align) {
if (elem_size >= ptr_align && elem_size % ptr_align == 0) {
return_type = adjust_ptr_align(ira->codegen, return_type, ptr_align);
} else {
// can't get here because guaranteed elem_size >= abi_align
zig_unreachable();
}
} else {
// can't get here because guaranteed elem_size >= abi_align
zig_unreachable();
return_type = adjust_ptr_align(ira->codegen, return_type, abi_align);
}
} else {
return_type = adjust_ptr_align(ira->codegen, return_type, abi_align);
}
}
@ -18066,54 +18168,20 @@ static IrInstruction *ir_analyze_instruction_array_type(IrAnalyze *ira,
zig_unreachable();
}
static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira,
IrInstructionSizeOf *size_of_instruction)
{
Error err;
IrInstruction *type_value = size_of_instruction->type_value->child;
ZigType *type_entry = ir_resolve_type(ira, type_value);
static IrInstruction *ir_analyze_instruction_size_of(IrAnalyze *ira, IrInstructionSizeOf *instruction) {
IrInstruction *result = ir_const(ira, &instruction->base, ira->codegen->builtin_types.entry_num_lit_int);
result->value.special = ConstValSpecialLazy;
if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
LazyValueSizeOf *lazy_size_of = allocate<LazyValueSizeOf>(1);
lazy_size_of->ira = ira;
result->value.data.x_lazy = &lazy_size_of->base;
lazy_size_of->base.id = LazyValueIdSizeOf;
lazy_size_of->target_type = instruction->type_value->child;
if (ir_resolve_type_lazy(ira, lazy_size_of->target_type) == nullptr)
return ira->codegen->invalid_instruction;
switch (type_entry->id) {
case ZigTypeIdInvalid: // handled above
zig_unreachable();
case ZigTypeIdUnreachable:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
ir_add_error_node(ira, type_value->source_node,
buf_sprintf("no size available for type '%s'", buf_ptr(&type_entry->name)));
return ira->codegen->invalid_instruction;
case ZigTypeIdMetaType:
case ZigTypeIdEnumLiteral:
case ZigTypeIdComptimeFloat:
case ZigTypeIdComptimeInt:
case ZigTypeIdVoid:
case ZigTypeIdBool:
case ZigTypeIdInt:
case ZigTypeIdFloat:
case ZigTypeIdPointer:
case ZigTypeIdArray:
case ZigTypeIdStruct:
case ZigTypeIdOptional:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdFnFrame:
case ZigTypeIdAnyFrame:
{
uint64_t size_in_bytes = type_size(ira->codegen, type_entry);
return ir_const_unsigned(ira, &size_of_instruction->base, size_in_bytes);
}
}
zig_unreachable();
return result;
}
static IrInstruction *ir_analyze_test_non_null(IrAnalyze *ira, IrInstruction *source_inst, IrInstruction *value) {
@ -24697,18 +24765,22 @@ static IrInstruction *ir_analyze_instruction_suspend_finish(IrAnalyze *ira,
}
static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruction *source_instr,
IrInstruction *frame_ptr)
IrInstruction *frame_ptr, ZigFn **target_fn)
{
if (type_is_invalid(frame_ptr->value.type))
return ira->codegen->invalid_instruction;
*target_fn = nullptr;
ZigType *result_type;
IrInstruction *frame;
if (frame_ptr->value.type->id == ZigTypeIdPointer &&
frame_ptr->value.type->data.pointer.ptr_len == PtrLenSingle &&
frame_ptr->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
{
result_type = frame_ptr->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
ZigFn *func = frame_ptr->value.type->data.pointer.child_type->data.frame.fn;
result_type = func->type_entry->data.fn.fn_type_id.return_type;
*target_fn = func;
frame = frame_ptr;
} else {
frame = ir_get_deref(ira, source_instr, frame_ptr, nullptr);
@ -24716,7 +24788,9 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct
frame->value.type->data.pointer.ptr_len == PtrLenSingle &&
frame->value.type->data.pointer.child_type->id == ZigTypeIdFnFrame)
{
result_type = frame->value.type->data.pointer.child_type->data.frame.fn->type_entry->data.fn.fn_type_id.return_type;
ZigFn *func = frame->value.type->data.pointer.child_type->data.frame.fn;
result_type = func->type_entry->data.fn.fn_type_id.return_type;
*target_fn = func;
} else if (frame->value.type->id != ZigTypeIdAnyFrame ||
frame->value.type->data.any_frame.result_type == nullptr)
{
@ -24737,7 +24811,11 @@ static IrInstruction *analyze_frame_ptr_to_anyframe_T(IrAnalyze *ira, IrInstruct
}
static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstructionAwaitSrc *instruction) {
IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, instruction->frame->child);
IrInstruction *operand = instruction->frame->child;
if (type_is_invalid(operand->value.type))
return ira->codegen->invalid_instruction;
ZigFn *target_fn;
IrInstruction *frame = analyze_frame_ptr_to_anyframe_T(ira, &instruction->base, operand, &target_fn);
if (type_is_invalid(frame->value.type))
return ira->codegen->invalid_instruction;
@ -24746,8 +24824,11 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction
ZigFn *fn_entry = exec_fn_entry(ira->new_irb.exec);
ir_assert(fn_entry != nullptr, &instruction->base);
if (fn_entry->inferred_async_node == nullptr) {
fn_entry->inferred_async_node = instruction->base.source_node;
// If it's not @Frame(func) then it's definitely a suspend point
if (target_fn == nullptr) {
if (fn_entry->inferred_async_node == nullptr) {
fn_entry->inferred_async_node = instruction->base.source_node;
}
}
if (type_can_fail(result_type)) {
@ -24764,8 +24845,10 @@ static IrInstruction *ir_analyze_instruction_await(IrAnalyze *ira, IrInstruction
result_loc = nullptr;
}
IrInstruction *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc);
return ir_finish_anal(ira, result);
IrInstructionAwaitGen *result = ir_build_await_gen(ira, &instruction->base, frame, result_type, result_loc);
result->target_fn = target_fn;
fn_entry->await_list.append(result);
return ir_finish_anal(ira, &result->base);
}
static IrInstruction *ir_analyze_instruction_resume(IrAnalyze *ira, IrInstructionResume *instruction) {
@ -25548,6 +25631,61 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ConstExprValue *val) {
bigint_init_unsigned(&val->data.x_bigint, align_in_bytes);
return ErrorNone;
}
case LazyValueIdSizeOf: {
LazyValueSizeOf *lazy_size_of = reinterpret_cast<LazyValueSizeOf *>(val->data.x_lazy);
IrAnalyze *ira = lazy_size_of->ira;
if (lazy_size_of->target_type->value.special == ConstValSpecialStatic) {
switch (lazy_size_of->target_type->value.data.x_type->id) {
case ZigTypeIdInvalid: // handled above
zig_unreachable();
case ZigTypeIdUnreachable:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdBoundFn:
case ZigTypeIdArgTuple:
case ZigTypeIdOpaque:
ir_add_error(ira, lazy_size_of->target_type,
buf_sprintf("no size available for type '%s'",
buf_ptr(&lazy_size_of->target_type->value.data.x_type->name)));
return ErrorSemanticAnalyzeFail;
case ZigTypeIdMetaType:
case ZigTypeIdEnumLiteral:
case ZigTypeIdComptimeFloat:
case ZigTypeIdComptimeInt:
case ZigTypeIdVoid:
case ZigTypeIdBool:
case ZigTypeIdInt:
case ZigTypeIdFloat:
case ZigTypeIdPointer:
case ZigTypeIdArray:
case ZigTypeIdStruct:
case ZigTypeIdOptional:
case ZigTypeIdErrorUnion:
case ZigTypeIdErrorSet:
case ZigTypeIdEnum:
case ZigTypeIdUnion:
case ZigTypeIdFn:
case ZigTypeIdVector:
case ZigTypeIdFnFrame:
case ZigTypeIdAnyFrame:
break;
}
}
uint64_t abi_size;
uint64_t size_in_bits;
if ((err = type_val_resolve_abi_size(ira->codegen, source_node, &lazy_size_of->target_type->value,
&abi_size, &size_in_bits)))
{
return err;
}
val->special = ConstValSpecialStatic;
assert(val->type->id == ZigTypeIdComptimeInt);
bigint_init_unsigned(&val->data.x_bigint, abi_size);
return ErrorNone;
}
case LazyValueIdSliceType: {
LazyValueSliceType *lazy_slice_type = reinterpret_cast<LazyValueSliceType *>(val->data.x_lazy);
IrAnalyze *ira = lazy_slice_type->ira;

View File

@ -75,6 +75,8 @@ enum Error {
ErrorOperationAborted,
ErrorBrokenPipe,
ErrorNoSpaceLeft,
ErrorNotLazy,
ErrorIsAsync,
};
// ABI warning

View File

@ -75,15 +75,16 @@ pub const Allocator = struct {
new_alignment: u29,
) []u8,
/// Call `destroy` with the result.
/// Returns undefined memory.
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: *Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return &(T{});
const slice = try self.alloc(T, 1);
return &slice[0];
}
/// `ptr` should be the return value of `create`
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
pub fn destroy(self: *Allocator, ptr: var) void {
const T = @typeOf(ptr).Child;
if (@sizeOf(T) == 0) return;
@ -92,7 +93,7 @@ pub const Allocator = struct {
assert(shrink_result.len == 0);
}
pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
pub fn alloc(self: *Allocator, comptime T: type, n: usize) Error![]T {
return self.alignedAlloc(T, @alignOf(T), n);
}
@ -101,7 +102,7 @@ pub const Allocator = struct {
comptime T: type,
comptime alignment: u29,
n: usize,
) ![]align(alignment) T {
) Error![]align(alignment) T {
if (n == 0) {
return ([*]align(alignment) T)(undefined)[0..0];
}

View File

@ -273,7 +273,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\}
,
"tmp.zig:1:1: error: function with calling convention 'ccc' cannot be async",
"tmp.zig:3:18: note: await is a suspend point",
"tmp.zig:3:18: note: await here is a suspend point",
);
cases.add(
@ -507,11 +507,11 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
cases.add(
"@sizeOf bad type",
\\export fn entry() void {
\\ _ = @sizeOf(@typeOf(null));
\\export fn entry() usize {
\\ return @sizeOf(@typeOf(null));
\\}
,
"tmp.zig:2:17: error: no size available for type '(null)'",
"tmp.zig:2:20: error: no size available for type '(null)'",
);
cases.add(

View File

@ -844,3 +844,13 @@ test "cast fn to async fn when it is inferred to be async" {
resume S.frame;
expect(S.ok);
}
// Awaiting the frame of a function that is known to be blocking (no suspend
// points) must not infer the surrounding function as async: the frame is
// already complete by the time it is awaited, so `await` just reads the result.
test "await does not force async if callee is blocking" {
    const Namespace = struct {
        // Plain blocking function — calling it via `async` produces a frame
        // that finishes immediately.
        fn simple() i32 {
            return 1234;
        }
    };
    var frame = async Namespace.simple();
    const result = await frame;
    expect(result == 1234);
}

View File

@ -74,3 +74,18 @@ test "@sizeOf on compile-time types" {
expect(@sizeOf(@typeOf(.hi)) == 0);
expect(@sizeOf(@typeOf(type)) == 0);
}
// Regression test for lazy @sizeOf: a field's type expression may mention the
// size of the very struct being defined. With @sizeOf producing a lazy value,
// this must not trigger eager (recursive) size resolution of the partially
// defined struct. In both cases `@sizeOf(...) == 0` is false, so the `u32`
// branch is selected for the field type.
// NOTE(review): the asserted sizes (4 for one u32 field, 8 for i32+u32)
// assume no padding beyond natural alignment — confirm against the target ABI.
test "@sizeOf(T) == 0 doesn't force resolving struct size" {
    const S = struct {
        const Foo = struct {
            // Refers to Foo's own size while Foo is still being defined.
            y: if (@sizeOf(Foo) == 0) u64 else u32,
        };
        const Bar = struct {
            x: i32,
            // Same self-reference, with the comparison operands flipped.
            y: if (0 == @sizeOf(Bar)) u64 else u32,
        };
    };
    expect(@sizeOf(S.Foo) == 4);
    expect(@sizeOf(S.Bar) == 8);
}