From 298a65ff4b4efc46c877309be22550f023f49758 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Jul 2021 00:38:10 -0700 Subject: [PATCH 1/6] std.HashMap: add ensureUnusedCapacity and ensureTotalCapacity and deprecated ensureCapacity. This matches the pattern set by ArrayList and ArrayHashMap already. --- lib/std/hash_map.zig | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index d64e122aaf..d6762c9d3b 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -483,10 +483,20 @@ pub fn HashMap( return self.unmanaged.getOrPutValueContext(self.allocator, key, value, self.ctx); } + /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. + pub const ensureCapacity = ensureTotalCapacity; + /// Increases capacity, guaranteeing that insertions up until the /// `expected_count` will not cause an allocation, and therefore cannot fail. - pub fn ensureCapacity(self: *Self, expected_count: Size) !void { - return self.unmanaged.ensureCapacityContext(self.allocator, expected_count, self.ctx); + pub fn ensureTotalCapacity(self: *Self, expected_count: Size) !void { + return self.unmanaged.ensureTotalCapacityContext(self.allocator, expected_count, self.ctx); + } + + /// Increases capacity, guaranteeing that insertions up until + /// `additional_count` **more** items will not cause an allocation, and + /// therefore cannot fail. + pub fn ensureUnusedCapacity(self: *Self, additional_count: Size) !void { + return self.unmanaged.ensureUnusedCapacityContext(self.allocator, additional_count, self.ctx); } /// Returns the number of total elements which may be present before it is @@ -821,16 +831,26 @@ pub fn HashMapUnmanaged( return new_cap; } - pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void { + /// Deprecated: call `ensureUnusedCapacity` or `ensureTotalCapacity`. 
+ pub const ensureCapacity = ensureTotalCapacity; + + pub fn ensureTotalCapacity(self: *Self, allocator: *Allocator, new_size: Size) !void { if (@sizeOf(Context) != 0) - @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureCapacityContext instead."); - return ensureCapacityContext(self, allocator, new_size, undefined); + @compileError("Cannot infer context " ++ @typeName(Context) ++ ", call ensureTotalCapacityContext instead."); + return ensureTotalCapacityContext(self, allocator, new_size, undefined); } - pub fn ensureCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void { + pub fn ensureTotalCapacityContext(self: *Self, allocator: *Allocator, new_size: Size, ctx: Context) !void { if (new_size > self.size) try self.growIfNeeded(allocator, new_size - self.size, ctx); } + pub fn ensureUnusedCapacity(self: *Self, allocator: *Allocator, additional_size: Size) !void { + return ensureUnusedCapacityContext(self, allocator, additional_size, undefined); + } + pub fn ensureUnusedCapacityContext(self: *Self, allocator: *Allocator, additional_size: Size, ctx: Context) !void { + return ensureTotalCapacityContext(self, allocator, self.capacity() + additional_size, ctx); + } + pub fn clearRetainingCapacity(self: *Self) void { if (self.metadata) |_| { self.initMetadatas(); From d481acc7dbebb5501b5fef608ee1f6b13c442c6a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Jul 2021 00:38:46 -0700 Subject: [PATCH 2/6] std.builtin.panic: simpler default panic for stage2 until it catches up to stage1 in terms of supported language features --- lib/std/builtin.zig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 7bf60f5283..9d432a3a00 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -677,6 +677,13 @@ pub const panic: PanicFn = if (@hasDecl(root, "panic")) root.panic else default_ /// therefore must be kept in sync with the compiler implementation. 
pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn { @setCold(true); + // Until self-hosted catches up with stage1 language features, we have a simpler + // default panic function: + if (builtin.zig_is_stage2) { + while (true) { + @breakpoint(); + } + } if (@hasDecl(root, "os") and @hasDecl(root.os, "panic")) { root.os.panic(msg, error_return_trace); unreachable; From 13f04e3012b6b2eee141923f9780fce55f7a999d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Jul 2021 00:39:23 -0700 Subject: [PATCH 3/6] stage2: implement `@panic` and beginnigs of inferred error sets * ZIR: add two instructions: - ret_err_value_code - ret_err_value * AstGen: add countDefers and utilize it to emit more efficient ZIR for return expressions in the presence of defers. * AstGen: implement |err| payloads for `errdefer` syntax. - There is not an "unused capture" error for it yet. * AstGen: `return error.Foo` syntax gets a hot path in return expressions, using the new ZIR instructions. This also is part of implementing inferred error sets, since we need to tell Sema to add an error value to the inferred error set before it gets coerced. * Sema: implement `@setCold`. - Implement `@setCold` support for C backend. * `@panic` and regular safety panics such as `unreachable` now properly invoke `std.builtin.panic`. * C backend: improve pointer and function value rendering. * C linker: fix redundant typedefs. * Add Type.error_set_inferred. * Fix Value.format for enum_literal, enum_field_index, bytes. * Remove the C backend test that checks for identical text I measured a 14% reduction in Total ZIR Bytes from master branch for std/os.zig. 
--- src/AstGen.zig | 141 +++++++++++++++++++--- src/Module.zig | 4 + src/Sema.zig | 211 ++++++++++++++++++++++---------- src/Zir.zig | 27 ++++- src/air.zig | 22 ++-- src/codegen/c.zig | 287 +++++++++++++++++++++++++++++++++++--------- src/link/C.zig | 10 +- src/link/C/zig.h | 6 + src/type.zig | 82 ++++++++++++- src/value.zig | 27 ++++- test/stage2/cbe.zig | 13 -- 11 files changed, 649 insertions(+), 181 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index d511cc13aa..f7fea3a99c 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -1860,7 +1860,7 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const ast.Nod } } - try genDefers(gz, parent_scope, scope, .none); + try genDefers(gz, parent_scope, scope, .normal_only); try checkUsed(gz, parent_scope, scope); } @@ -2102,6 +2102,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) Inner .@"resume", .@"await", .await_nosuspend, + .ret_err_value_code, .extended, => break :b false, @@ -2113,6 +2114,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) Inner .compile_error, .ret_node, .ret_coerce, + .ret_err_value, .@"unreachable", .repeat, .repeat_inline, @@ -2162,13 +2164,63 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) Inner return noreturn_src_node; } +fn countDefers(astgen: *AstGen, outer_scope: *Scope, inner_scope: *Scope) struct { + have_any: bool, + have_normal: bool, + have_err: bool, + need_err_code: bool, +} { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + var have_normal = false; + var have_err = false; + var need_err_code = false; + var scope = inner_scope; + while (scope != outer_scope) { + switch (scope.tag) { + .gen_zir => scope = scope.cast(GenZir).?.parent, + .local_val => scope = scope.cast(Scope.LocalVal).?.parent, + .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, + .defer_normal => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = 
defer_scope.parent; + + have_normal = true; + }, + .defer_error => { + const defer_scope = scope.cast(Scope.Defer).?; + scope = defer_scope.parent; + + have_err = true; + + const have_err_payload = node_datas[defer_scope.defer_node].lhs != 0; + need_err_code = need_err_code or have_err_payload; + }, + .namespace => unreachable, + .top => unreachable, + } + } + return .{ + .have_any = have_normal or have_err, + .have_normal = have_normal, + .have_err = have_err, + .need_err_code = need_err_code, + }; +} + +const DefersToEmit = union(enum) { + both: Zir.Inst.Ref, // err code + both_sans_err, + normal_only, +}; + fn genDefers( gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope, - err_code: Zir.Inst.Ref, + which_ones: DefersToEmit, ) InnerError!void { - _ = err_code; const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); @@ -2191,12 +2243,37 @@ fn genDefers( .defer_error => { const defer_scope = scope.cast(Scope.Defer).?; scope = defer_scope.parent; - if (err_code == .none) continue; - const expr_node = node_datas[defer_scope.defer_node].rhs; - const prev_in_defer = gz.in_defer; - gz.in_defer = true; - defer gz.in_defer = prev_in_defer; - _ = try unusedResultExpr(gz, defer_scope.parent, expr_node); + switch (which_ones) { + .both_sans_err => { + const expr_node = node_datas[defer_scope.defer_node].rhs; + const prev_in_defer = gz.in_defer; + gz.in_defer = true; + defer gz.in_defer = prev_in_defer; + _ = try unusedResultExpr(gz, defer_scope.parent, expr_node); + }, + .both => |err_code| { + const expr_node = node_datas[defer_scope.defer_node].rhs; + const payload_token = node_datas[defer_scope.defer_node].lhs; + const prev_in_defer = gz.in_defer; + gz.in_defer = true; + defer gz.in_defer = prev_in_defer; + var local_val_scope: Scope.LocalVal = undefined; + const sub_scope = if (payload_token == 0) defer_scope.parent else blk: { + const ident_name = try astgen.identAsString(payload_token); + local_val_scope = .{ + .parent = 
defer_scope.parent, + .gen_zir = gz, + .name = ident_name, + .inst = err_code, + .token_src = payload_token, + .id_cat = .@"capture", + }; + break :blk &local_val_scope.base; + }; + _ = try unusedResultExpr(gz, sub_scope, expr_node); + }, + .normal_only => continue, + } }, .namespace => unreachable, .top => unreachable, @@ -4564,7 +4641,7 @@ fn tryExpr( defer then_scope.instructions.deinit(astgen.gpa); const err_code = try then_scope.addUnNode(err_ops[1], operand, node); - try genDefers(&then_scope, &fn_block.base, scope, err_code); + try genDefers(&then_scope, &fn_block.base, scope, .{ .both = err_code }); const then_result = try then_scope.addUnNode(.ret_node, err_code, node); var else_scope = parent_gz.makeSubBlock(scope); @@ -6090,17 +6167,37 @@ fn ret(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); if (gz.in_defer) return astgen.failNode(node, "cannot return from defer expression", .{}); + const defer_outer = &astgen.fn_block.?.base; + const operand_node = node_datas[node].lhs; if (operand_node == 0) { // Returning a void value; skip error defers. - try genDefers(gz, &astgen.fn_block.?.base, scope, .none); + try genDefers(gz, defer_outer, scope, .normal_only); _ = try gz.addUnNode(.ret_node, .void_value, node); return Zir.Inst.Ref.unreachable_value; } + if (node_tags[operand_node] == .error_value) { + // Hot path for `return error.Foo`. This bypasses result location logic as well as logic + // for detecting whether to add something to the function's inferred error set. 
+ const ident_token = node_datas[operand_node].rhs; + const err_name_str_index = try astgen.identAsString(ident_token); + const defer_counts = countDefers(astgen, defer_outer, scope); + if (!defer_counts.need_err_code) { + try genDefers(gz, defer_outer, scope, .both_sans_err); + _ = try gz.addStrTok(.ret_err_value, err_name_str_index, ident_token); + return Zir.Inst.Ref.unreachable_value; + } + const err_code = try gz.addStrTok(.ret_err_value_code, err_name_str_index, ident_token); + try genDefers(gz, defer_outer, scope, .{ .both = err_code }); + _ = try gz.addUnNode(.ret_node, err_code, node); + return Zir.Inst.Ref.unreachable_value; + } + const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node)) .{ .ptr = try gz.addNodeExtended(.ret_ptr, node), } else .{ @@ -6111,31 +6208,41 @@ fn ret(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref switch (nodeMayEvalToError(tree, operand_node)) { .never => { // Returning a value that cannot be an error; skip error defers. - try genDefers(gz, &astgen.fn_block.?.base, scope, .none); + try genDefers(gz, defer_outer, scope, .normal_only); _ = try gz.addUnNode(.ret_node, operand, node); return Zir.Inst.Ref.unreachable_value; }, .always => { // Value is always an error. Emit both error defers and regular defers. const err_code = try gz.addUnNode(.err_union_code, operand, node); - try genDefers(gz, &astgen.fn_block.?.base, scope, err_code); + try genDefers(gz, defer_outer, scope, .{ .both = err_code }); _ = try gz.addUnNode(.ret_node, operand, node); return Zir.Inst.Ref.unreachable_value; }, .maybe => { + const defer_counts = countDefers(astgen, defer_outer, scope); + if (!defer_counts.have_err) { + // Only regular defers; no branch needed. + try genDefers(gz, defer_outer, scope, .normal_only); + _ = try gz.addUnNode(.ret_node, operand, node); + return Zir.Inst.Ref.unreachable_value; + } + // Emit conditional branch for generating errdefers. 
const is_err = try gz.addUnNode(.is_err, operand, node); const condbr = try gz.addCondBr(.condbr, node); var then_scope = gz.makeSubBlock(scope); defer then_scope.instructions.deinit(astgen.gpa); - const err_code = try then_scope.addUnNode(.err_union_code, operand, node); - try genDefers(&then_scope, &astgen.fn_block.?.base, scope, err_code); + const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{ + .both = try then_scope.addUnNode(.err_union_code, operand, node), + }; + try genDefers(&then_scope, defer_outer, scope, which_ones); _ = try then_scope.addUnNode(.ret_node, operand, node); var else_scope = gz.makeSubBlock(scope); defer else_scope.instructions.deinit(astgen.gpa); - try genDefers(&else_scope, &astgen.fn_block.?.base, scope, .none); + try genDefers(&else_scope, defer_outer, scope, .normal_only); _ = try else_scope.addUnNode(.ret_node, operand, node); try setCondBrPayload(condbr, is_err, &then_scope, &else_scope); @@ -6885,7 +6992,7 @@ fn builtinCall( .field => { const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]); if (rl == .ref) { - return try gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ + return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{ .lhs = try expr(gz, scope, .ref, params[0]), .field_name = field_name, }); diff --git a/src/Module.zig b/src/Module.zig index 1d715448da..c48440ccc2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -755,6 +755,7 @@ pub const Fn = struct { rbrace_column: u16, state: Analysis, + is_cold: bool = false, pub const Analysis = enum { queued, @@ -3453,6 +3454,9 @@ pub fn clearDecl( for (decl.dependencies.keys()) |dep| { dep.removeDependant(decl); if (dep.dependants.count() == 0 and !dep.deletion_flag) { + log.debug("insert {*} ({s}) dependant {*} ({s}) into deletion set", .{ + decl, decl.name, dep, dep.name, + }); // We don't recursively perform a deletion here, because during the update, // another reference to it may turn 
up. dep.deletion_flag = true; diff --git a/src/Sema.zig b/src/Sema.zig index 0d3f7eaf83..1538e54208 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -244,6 +244,7 @@ pub fn analyzeBody( .ptr_type => try sema.zirPtrType(block, inst), .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), .ref => try sema.zirRef(block, inst), + .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), .shl => try sema.zirShl(block, inst), .shr => try sema.zirShr(block, inst), .slice_end => try sema.zirSliceEnd(block, inst), @@ -380,8 +381,9 @@ pub fn analyzeBody( .condbr => return sema.zirCondbr(block, inst), .@"break" => return sema.zirBreak(block, inst), .compile_error => return sema.zirCompileError(block, inst), - .ret_coerce => return sema.zirRetTok(block, inst, true), + .ret_coerce => return sema.zirRetCoerce(block, inst, true), .ret_node => return sema.zirRetNode(block, inst), + .ret_err_value => return sema.zirRetErrValue(block, inst), .@"unreachable" => return sema.zirUnreachable(block, inst), .repeat => return sema.zirRepeat(block, inst), .panic => return sema.zirPanic(block, inst), @@ -587,6 +589,19 @@ pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!*ir.In return sema.inst_map.get(@intCast(u32, i)).?; } +fn resolveConstBool( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, +) !bool { + const air_inst = try sema.resolveInst(zir_ref); + const wanted_type = Type.initTag(.bool); + const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); + const val = try sema.resolveConstValue(block, src, coerced_inst); + return val.toBool(); +} + fn resolveConstString( sema: *Sema, block: *Scope.Block, @@ -1754,8 +1769,9 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirPanic", .{}); - //return always_noreturn; + const msg_inst = try sema.resolveInst(inst_data.operand); + + return sema.panicWithMsg(block, src, msg_inst); } fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { @@ -2028,8 +2044,10 @@ fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src: LazySrcLoc = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetCold", .{}); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand); + const func = sema.func orelse return; // does nothing outside a function + func.is_cold = is_cold; } fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { @@ -2041,11 +2059,7 @@ fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - - const op = try sema.resolveInst(inst_data.operand); - const op_coerced = try sema.coerce(block, Type.initTag(.bool), op, operand_src); - const b = (try sema.resolveConstValue(block, operand_src, op_coerced)).toBool(); - block.want_safety = b; + block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand); } fn zirBreakpoint(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { @@ -2190,21 +2204,27 @@ fn zirCall( const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.args_len); - return sema.analyzeCall(block, extra.data.callee, func_src, call_src, modifier, ensure_result_used, args); + const func = try sema.resolveInst(extra.data.callee); + // TODO handle function calls of generic functions + const resolved_args = try sema.arena.alloc(*Inst, args.len); + for (args) |zir_arg, i| { + // the args are already casted to the result of a param type instruction. + resolved_args[i] = try sema.resolveInst(zir_arg); + } + + return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); } fn analyzeCall( sema: *Sema, block: *Scope.Block, - zir_func: Zir.Inst.Ref, + func: *ir.Inst, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - zir_args: []const Zir.Inst.Ref, + args: []const *ir.Inst, ) InnerError!*ir.Inst { - const func = try sema.resolveInst(zir_func); - if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2221,22 +2241,22 @@ fn analyzeCall( const fn_params_len = func.ty.fnParamLen(); if (func.ty.fnIsVarArgs()) { assert(cc == .C); - if (zir_args.len < fn_params_len) { + if (args.len < fn_params_len) { // TODO add error note: declared here return sema.mod.fail( &block.base, func_src, "expected at least {d} argument(s), found {d}", - .{ fn_params_len, zir_args.len }, + .{ fn_params_len, args.len }, ); } - } else if (fn_params_len != zir_args.len) { + } else if (fn_params_len != args.len) { // TODO add error note: declared here return sema.mod.fail( &block.base, func_src, "expected {d} argument(s), found {d}", - .{ fn_params_len, zir_args.len }, + .{ fn_params_len, args.len }, ); } @@ -2256,13 +2276,6 @@ fn analyzeCall( }), } - // TODO 
handle function calls of generic functions - const casted_args = try sema.arena.alloc(*Inst, zir_args.len); - for (zir_args) |zir_arg, i| { - // the args are already casted to the result of a param type instruction. - casted_args[i] = try sema.resolveInst(zir_arg); - } - const ret_type = func.ty.fnReturnType(); const is_comptime_call = block.is_comptime or modifier == .compile_time; @@ -2323,7 +2336,7 @@ fn analyzeCall( defer sema.func = parent_func; const parent_param_inst_list = sema.param_inst_list; - sema.param_inst_list = casted_args; + sema.param_inst_list = args; defer sema.param_inst_list = parent_param_inst_list; const parent_next_arg_index = sema.next_arg_index; @@ -2357,7 +2370,7 @@ fn analyzeCall( break :res result; } else res: { try sema.requireRuntimeBlock(block, call_src); - break :res try block.addCall(call_src, ret_type, func, casted_args); + break :res try block.addCall(call_src, ret_type, func, args); }; if (ensure_result_used) { @@ -3081,28 +3094,31 @@ fn funcCommon( ) InnerError!*Inst { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; - const return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); + const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); const mod = sema.mod; + const new_func = if (body_inst == 0) undefined else try sema.gpa.create(Module.Fn); + errdefer if (body_inst != 0) sema.gpa.destroy(new_func); + const fn_ty: Type = fn_ty: { // Hot path for some common function types. 
if (zir_param_types.len == 0 and !var_args and align_val.tag() == .null_value and !inferred_error_set) { - if (return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) { + if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) { break :fn_ty Type.initTag(.fn_noreturn_no_args); } - if (return_type.zigTypeTag() == .Void and cc == .Unspecified) { + if (bare_return_type.zigTypeTag() == .Void and cc == .Unspecified) { break :fn_ty Type.initTag(.fn_void_no_args); } - if (return_type.zigTypeTag() == .NoReturn and cc == .Naked) { + if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Naked) { break :fn_ty Type.initTag(.fn_naked_noreturn_no_args); } - if (return_type.zigTypeTag() == .Void and cc == .C) { + if (bare_return_type.zigTypeTag() == .Void and cc == .C) { break :fn_ty Type.initTag(.fn_ccc_void_no_args); } } @@ -3120,9 +3136,13 @@ fn funcCommon( return mod.fail(&block.base, src, "TODO implement support for function prototypes to have alignment specified", .{}); } - if (inferred_error_set) { - return mod.fail(&block.base, src, "TODO implement functions with inferred error sets", .{}); - } + const return_type = if (!inferred_error_set) bare_return_type else blk: { + const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, new_func); + break :blk try Type.Tag.error_union.create(sema.arena, .{ + .error_set = error_set_ty, + .payload = bare_return_type, + }); + }; break :fn_ty try Type.Tag.function.create(sema.arena, .{ .param_types = param_types, @@ -3188,7 +3208,6 @@ fn funcCommon( const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .queued; const fn_payload = try sema.arena.create(Value.Payload.Function); - const new_func = try sema.gpa.create(Module.Fn); new_func.* = .{ .state = anal_state, .zir_body_inst = body_inst, @@ -4542,6 +4561,12 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return mod.constType(sema.arena, src, file_root_decl.ty); } +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { + _ = block; + _ = inst; + return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); +} + fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -5388,7 +5413,24 @@ fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE } } -fn zirRetTok( +fn zirRetErrValue( + sema: *Sema, + block: *Scope.Block, + inst: Zir.Inst.Index, +) InnerError!Zir.Inst.Index { + const inst_data = sema.code.instructions.items(.data)[inst].str_tok; + const err_name = inst_data.get(sema.code); + const src = inst_data.src(); + + // Add the error tag to the inferred error set of the in-scope function. + // Return the error code from the function. + + _ = inst_data; + _ = err_name; + return sema.mod.fail(&block.base, src, "TODO: Sema.zirRetErrValueCode", .{}); +} + +fn zirRetCoerce( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, @@ -6195,6 +6237,10 @@ fn zirFuncExtended( src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data; } + const is_var_args = small.is_var_args; + const is_inferred_error = small.is_inferred_error; + const is_extern = small.is_extern; + return sema.funcCommon( block, extra.data.src_node, @@ -6203,9 +6249,9 @@ fn zirFuncExtended( extra.data.return_type, cc, align_val, - small.is_var_args, - small.is_inferred_error, - small.is_extern, + is_var_args, + is_inferred_error, + is_extern, src_locs, lib_name, ); @@ -6357,15 +6403,51 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: try parent_block.instructions.append(sema.gpa, &block_inst.base); } -fn safetyPanic(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, panic_id: PanicId) !Zir.Inst.Index { - _ = sema; - _ = panic_id; - // TODO Once we have a panic function to call, call it here 
instead of breakpoint. - _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); - _ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach); +fn panicWithMsg( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + msg_inst: *ir.Inst, +) !Zir.Inst.Index { + const mod = sema.mod; + const arena = sema.arena; + const panic_fn = try sema.getBuiltin(block, src, "panic"); + const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); + const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); + const ptr_stack_trace_ty = try mod.simplePtrType(arena, stack_trace_ty, true, .One); + const null_stack_trace = try mod.constInst(arena, src, .{ + .ty = try mod.optionalType(arena, ptr_stack_trace_ty), + .val = Value.initTag(.null_value), + }); + const args = try arena.create([2]*ir.Inst); + args.* = .{ msg_inst, null_stack_trace }; + _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); return always_noreturn; } +fn safetyPanic( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + panic_id: PanicId, +) !Zir.Inst.Index { + const mod = sema.mod; + const arena = sema.arena; + const msg = switch (panic_id) { + .unreach => "reached unreachable code", + .unwrap_null => "attempt to use null value", + .unwrap_errunion => "unreachable error occurred", + .cast_to_null => "cast causes pointer to be null", + .incorrect_alignment => "incorrect alignment", + .invalid_error_code => "invalid error code", + }; + const msg_inst = try mod.constInst(arena, src, .{ + .ty = Type.initTag(.const_slice_u8), + .val = try Value.Tag.ref_val.create(arena, try Value.Tag.bytes.create(arena, msg)), + }); + return sema.panicWithMsg(block, src, msg_inst); +} + fn emitBackwardBranch(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) !void { sema.branch_count += 1; if (sema.branch_count > sema.branch_quota) { @@ -7377,15 +7459,13 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) } fn 
wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { - // TODO deal with inferred error sets const err_union = dest_type.castTag(.error_union).?; if (inst.value()) |val| { - const to_wrap = if (inst.ty.zigTypeTag() != .ErrorSet) blk: { + if (inst.ty.zigTypeTag() != .ErrorSet) { _ = try sema.coerce(block, err_union.data.payload, inst, inst.src); - break :blk val; } else switch (err_union.data.error_set.tag()) { - .anyerror => val, - .error_set_single => blk: { + .anyerror => {}, + .error_set_single => { const expected_name = val.castTag(.@"error").?.data.name; const n = err_union.data.error_set.castTag(.error_set_single).?.data; if (!mem.eql(u8, expected_name, n)) { @@ -7396,9 +7476,8 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst .{ err_union.data.error_set, inst.ty }, ); } - break :blk val; }, - .error_set => blk: { + .error_set => { const expected_name = val.castTag(.@"error").?.data.name; const error_set = err_union.data.error_set.castTag(.error_set).?.data; const names = error_set.names_ptr[0..error_set.names_len]; @@ -7415,18 +7494,14 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst .{ err_union.data.error_set, inst.ty }, ); } - break :blk val; }, else => unreachable, - }; + } return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, // creating a SubValue for the error_union payload - .val = try Value.Tag.error_union.create( - sema.arena, - to_wrap, - ), + .val = try Value.Tag.error_union.create(sema.arena, val), }); } @@ -7573,12 +7648,12 @@ fn resolveBuiltinTypeFields( return sema.resolveTypeFields(block, src, resolved_ty); } -fn getBuiltinType( +fn getBuiltin( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Type { +) InnerError!*ir.Inst { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7596,7 +7671,16 @@ fn 
getBuiltinType( builtin_ty.getNamespace().?, name, ); - const ty_inst = try sema.analyzeLoad(block, src, opt_ty_inst.?, src); + return sema.analyzeLoad(block, src, opt_ty_inst.?, src); +} + +fn getBuiltinType( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + name: []const u8, +) InnerError!Type { + const ty_inst = try sema.getBuiltin(block, src, name); return sema.resolveAirAsType(block, src, ty_inst); } @@ -7662,6 +7746,7 @@ fn typeHasOnePossibleValue( .error_union, .error_set, .error_set_single, + .error_set_inferred, .@"opaque", .var_args_param, .manyptr_u8, diff --git a/src/Zir.zig b/src/Zir.zig index 12ceb85c56..a80660b5bf 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -1,7 +1,7 @@ //! Zig Intermediate Representation. Astgen.zig converts AST nodes to these -//! untyped IR instructions. Next, Sema.zig processes these into TZIR. +//! untyped IR instructions. Next, Sema.zig processes these into AIR. //! The minimum amount of information needed to represent a list of ZIR instructions. -//! Once this structure is completed, it can be used to generate TZIR, followed by +//! Once this structure is completed, it can be used to generate AIR, followed by //! machine code, without any memory access into the AST tree token list, node list, //! or source bytes. Exceptions include: //! * Compile errors, which may need to reach into these data structures to @@ -416,8 +416,8 @@ pub const Inst = struct { /// A labeled block of code that loops forever. At the end of the body will have either /// a `repeat` instruction or a `repeat_inline` instruction. /// Uses the `pl_node` field. The AST node is either a for loop or while loop. - /// This ZIR instruction is needed because TZIR does not (yet?) match ZIR, and Sema - /// needs to emit more than 1 TZIR block for this instruction. + /// This ZIR instruction is needed because AIR does not (yet?) match ZIR, and Sema + /// needs to emit more than 1 AIR block for this instruction. /// The payload is `Block`. 
loop, /// Sends runtime control flow back to the beginning of the current block. @@ -466,6 +466,19 @@ pub const Inst = struct { /// Uses the `un_tok` union field. /// The operand needs to get coerced to the function's return type. ret_coerce, + /// Sends control flow back to the function's callee. + /// The return operand is `error.foo` where `foo` is given by the string. + /// If the current function has an inferred error set, the error given by the + /// name is added to it. + /// Uses the `str_tok` union field. + ret_err_value, + /// A string name is provided which is an anonymous error set value. + /// If the current function has an inferred error set, the error given by the + /// name is added to it. + /// Results in the error code. Note that control flow is not diverted with + /// this instruction; a following 'ret' instruction will do the diversion. + /// Uses the `str_tok` union field. + ret_err_value_code, /// Create a pointer type that does not have a sentinel, alignment, or bit range specified. /// Uses the `ptr_type_simple` union field. 
ptr_type_simple, @@ -1193,6 +1206,7 @@ pub const Inst = struct { .@"resume", .@"await", .await_nosuspend, + .ret_err_value_code, .extended, => false, @@ -1203,6 +1217,7 @@ pub const Inst = struct { .compile_error, .ret_node, .ret_coerce, + .ret_err_value, .@"unreachable", .repeat, .repeat_inline, @@ -1307,6 +1322,8 @@ pub const Inst = struct { .ref = .un_tok, .ret_node = .un_node, .ret_coerce = .un_tok, + .ret_err_value = .str_tok, + .ret_err_value_code = .str_tok, .ptr_type_simple = .ptr_type_simple, .ptr_type = .ptr_type, .slice_start = .pl_node, @@ -3077,6 +3094,8 @@ const Writer = struct { .decl_val, .import, .arg, + .ret_err_value, + .ret_err_value_code, => try self.writeStrTok(stream, inst), .func => try self.writeFunc(stream, inst, false), diff --git a/src/air.zig b/src/air.zig index 37126f8153..1b7faa641b 100644 --- a/src/air.zig +++ b/src/air.zig @@ -672,15 +672,15 @@ pub const Body = struct { /// For debugging purposes, prints a function representation to stderr. pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { const allocator = old_module.gpa; - var ctx: DumpTzir = .{ + var ctx: DumpAir = .{ .allocator = allocator, .arena = std.heap.ArenaAllocator.init(allocator), .old_module = &old_module, .module_fn = module_fn, .indent = 2, - .inst_table = DumpTzir.InstTable.init(allocator), - .partial_inst_table = DumpTzir.InstTable.init(allocator), - .const_table = DumpTzir.InstTable.init(allocator), + .inst_table = DumpAir.InstTable.init(allocator), + .partial_inst_table = DumpAir.InstTable.init(allocator), + .const_table = DumpAir.InstTable.init(allocator), }; defer ctx.inst_table.deinit(); defer ctx.partial_inst_table.deinit(); @@ -695,12 +695,12 @@ pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { .dependency_failure => std.debug.print("(dependency_failure)", .{}), .success => { const writer = std.io.getStdErr().writer(); - ctx.dump(module_fn.body, writer) catch @panic("failed to dump TZIR"); + ctx.dump(module_fn.body, writer) catch 
@panic("failed to dump AIR"); }, } } -const DumpTzir = struct { +const DumpAir = struct { allocator: *std.mem.Allocator, arena: std.heap.ArenaAllocator, old_module: *const Module, @@ -718,7 +718,7 @@ const DumpTzir = struct { /// TODO: Improve this code to include a stack of Body and store the instructions /// in there. Now we are putting all the instructions in a function local table, /// however instructions that are in a Body can be thown away when the Body ends. - fn dump(dtz: *DumpTzir, body: Body, writer: std.fs.File.Writer) !void { + fn dump(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) !void { // First pass to pre-populate the table so that we can show even invalid references. // Must iterate the same order we iterate the second time. // We also look for constants and put them in the const_table. @@ -737,7 +737,7 @@ const DumpTzir = struct { return dtz.dumpBody(body, writer); } - fn fetchInstsAndResolveConsts(dtz: *DumpTzir, body: Body) error{OutOfMemory}!void { + fn fetchInstsAndResolveConsts(dtz: *DumpAir, body: Body) error{OutOfMemory}!void { for (body.instructions) |inst| { try dtz.inst_table.put(inst, dtz.next_index); dtz.next_index += 1; @@ -865,7 +865,7 @@ const DumpTzir = struct { } } - fn dumpBody(dtz: *DumpTzir, body: Body, writer: std.fs.File.Writer) (std.fs.File.WriteError || error{OutOfMemory})!void { + fn dumpBody(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) (std.fs.File.WriteError || error{OutOfMemory})!void { for (body.instructions) |inst| { const my_index = dtz.next_partial_index; try dtz.partial_inst_table.put(inst, my_index); @@ -1150,7 +1150,7 @@ const DumpTzir = struct { } } - fn writeInst(dtz: *DumpTzir, writer: std.fs.File.Writer, inst: *Inst) !?usize { + fn writeInst(dtz: *DumpAir, writer: std.fs.File.Writer, inst: *Inst) !?usize { if (dtz.partial_inst_table.get(inst)) |operand_index| { try writer.print("%{d}", .{operand_index}); return null; @@ -1166,7 +1166,7 @@ const DumpTzir = struct { } } - fn findConst(dtz: 
*DumpTzir, operand: *Inst) !void { + fn findConst(dtz: *DumpAir, operand: *Inst) !void { if (operand.tag == .constant) { try dtz.const_table.put(operand, dtz.next_const_index); dtz.next_const_index += 1; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index ae439693b8..a9521d21a8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -39,7 +39,12 @@ const BlockData = struct { }; pub const CValueMap = std.AutoHashMap(*Inst, CValue); -pub const TypedefMap = std.HashMap(Type, struct { name: []const u8, rendered: []u8 }, Type.HashContext, std.hash_map.default_max_load_percentage); +pub const TypedefMap = std.HashMap( + Type, + struct { name: []const u8, rendered: []u8 }, + Type.HashContext, + std.hash_map.default_max_load_percentage, +); fn formatTypeAsCIdentifier( data: Type, @@ -151,14 +156,49 @@ pub const Object = struct { render_ty = render_ty.elemType(); } - try o.dg.renderType(w, render_ty); + if (render_ty.zigTypeTag() == .Fn) { + const ret_ty = render_ty.fnReturnType(); + if (ret_ty.zigTypeTag() == .NoReturn) { + // noreturn attribute is not allowed here. 
+ try w.writeAll("void"); + } else { + try o.dg.renderType(w, ret_ty); + } + try w.writeAll(" (*"); + switch (mutability) { + .Const => try w.writeAll("const "), + .Mut => {}, + } + try o.writeCValue(w, name); + try w.writeAll(")("); + const param_len = render_ty.fnParamLen(); + const is_var_args = render_ty.fnIsVarArgs(); + if (param_len == 0 and !is_var_args) + try w.writeAll("void") + else { + var index: usize = 0; + while (index < param_len) : (index += 1) { + if (index > 0) { + try w.writeAll(", "); + } + try o.dg.renderType(w, render_ty.fnParamType(index)); + } + } + if (is_var_args) { + if (param_len != 0) try w.writeAll(", "); + try w.writeAll("..."); + } + try w.writeByte(')'); + } else { + try o.dg.renderType(w, render_ty); - const const_prefix = switch (mutability) { - .Const => "const ", - .Mut => "", - }; - try w.print(" {s}", .{const_prefix}); - try o.writeCValue(w, name); + const const_prefix = switch (mutability) { + .Const => "const ", + .Mut => "", + }; + try w.print(" {s}", .{const_prefix}); + try o.writeCValue(w, name); + } try w.writeAll(suffix.items); } }; @@ -196,35 +236,72 @@ pub const DeclGen = struct { return writer.print("{d}", .{val.toSignedInt()}); return writer.print("{d}", .{val.toUnsignedInt()}); }, - .Pointer => switch (val.tag()) { - .null_value, .zero => try writer.writeAll("NULL"), - .one => try writer.writeAll("1"), - .decl_ref => { - const decl = val.castTag(.decl_ref).?.data; + .Pointer => switch (t.ptrSize()) { + .Slice => { + try writer.writeByte('('); + try dg.renderType(writer, t); + try writer.writeAll("){"); + var buf: Type.Payload.ElemType = undefined; + try dg.renderValue(writer, t.slicePtrFieldType(&buf), val); + try writer.writeAll(", "); + try writer.print("{d}", .{val.sliceLen()}); + try writer.writeAll("}"); + }, + else => switch (val.tag()) { + .null_value, .zero => try writer.writeAll("NULL"), + .one => try writer.writeAll("1"), + .decl_ref => { + const decl = val.castTag(.decl_ref).?.data; - // Determine if we 
must pointer cast. - assert(decl.has_tv); - if (t.eql(decl.ty)) { - try writer.print("&{s}", .{decl.name}); - } else { - try writer.writeAll("("); - try dg.renderType(writer, t); - try writer.print(")&{s}", .{decl.name}); - } + // Determine if we must pointer cast. + assert(decl.has_tv); + if (t.eql(decl.ty)) { + try writer.print("&{s}", .{decl.name}); + } else { + try writer.writeAll("("); + try dg.renderType(writer, t); + try writer.print(")&{s}", .{decl.name}); + } + }, + .function => { + const func = val.castTag(.function).?.data; + try writer.print("{s}", .{func.owner_decl.name}); + }, + .extern_fn => { + const decl = val.castTag(.extern_fn).?.data; + try writer.print("{s}", .{decl.name}); + }, + else => switch (t.ptrSize()) { + .Slice => unreachable, + .Many => { + if (val.castTag(.ref_val)) |ref_val_payload| { + const sub_val = ref_val_payload.data; + if (sub_val.castTag(.bytes)) |bytes_payload| { + const bytes = bytes_payload.data; + try writer.writeByte('('); + try dg.renderType(writer, t); + // TODO: make our own C string escape instead of using std.zig.fmtEscapes + try writer.print(")\"{}\"", .{std.zig.fmtEscapes(bytes)}); + } else { + unreachable; + } + } else { + unreachable; + } + }, + .One => { + var arena = std.heap.ArenaAllocator.init(dg.module.gpa); + defer arena.deinit(); + + const elem_ty = t.elemType(); + const elem_val = try val.pointerDeref(&arena.allocator); + + try writer.writeAll("&"); + try dg.renderValue(writer, elem_ty, elem_val); + }, + .C => unreachable, + }, }, - .function => { - const func = val.castTag(.function).?.data; - try writer.print("{s}", .{func.owner_decl.name}); - }, - .extern_fn => { - const decl = val.castTag(.extern_fn).?.data; - try writer.print("{s}", .{decl.name}); - }, - else => |e| return dg.fail( - .{ .node_offset = 0 }, - "TODO: C backend: implement Pointer value {s}", - .{@tagName(e)}, - ), }, .Array => { // First try specific tag representations for more efficiency. 
@@ -329,6 +406,32 @@ pub const DeclGen = struct { }, } }, + .Fn => switch (val.tag()) { + .null_value, .zero => try writer.writeAll("NULL"), + .one => try writer.writeAll("1"), + .decl_ref => { + const decl = val.castTag(.decl_ref).?.data; + + // Determine if we must pointer cast. + assert(decl.has_tv); + if (t.eql(decl.ty)) { + try writer.print("&{s}", .{decl.name}); + } else { + try writer.writeAll("("); + try dg.renderType(writer, t); + try writer.print(")&{s}", .{decl.name}); + } + }, + .function => { + const func = val.castTag(.function).?.data; + try writer.print("{s}", .{func.owner_decl.name}); + }, + .extern_fn => { + const decl = val.castTag(.extern_fn).?.data; + try writer.print("{s}", .{decl.name}); + }, + else => unreachable, + }, else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{ @tagName(e), }), @@ -339,6 +442,12 @@ pub const DeclGen = struct { if (!is_global) { try w.writeAll("static "); } + if (dg.decl.val.castTag(.function)) |func_payload| { + const func: *Module.Fn = func_payload.data; + if (func.is_cold) { + try w.writeAll("ZIG_COLD "); + } + } try dg.renderType(w, dg.decl.ty.fnReturnType()); const decl_name = mem.span(dg.decl.name); try w.print(" {s}(", .{decl_name}); @@ -413,7 +522,35 @@ pub const DeclGen = struct { .Pointer => { if (t.isSlice()) { - return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement slices", .{}); + if (dg.typedefs.get(t)) |some| { + return w.writeAll(some.name); + } + + var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); + defer buffer.deinit(); + const bw = buffer.writer(); + + try bw.writeAll("typedef struct { "); + const elem_type = t.elemType(); + try dg.renderType(bw, elem_type); + try bw.writeAll(" *"); + if (t.isConstPtr()) { + try bw.writeAll("const "); + } + if (t.isVolatilePtr()) { + try bw.writeAll("volatile "); + } + try bw.writeAll("ptr; size_t len; } "); + const name_index = buffer.items.len; + try bw.print("zig_L_{s};\n", 
.{typeToCIdentifier(elem_type)}); + + const rendered = buffer.toOwnedSlice(); + errdefer dg.typedefs.allocator.free(rendered); + const name = rendered[name_index .. rendered.len - 2]; + + try dg.typedefs.ensureUnusedCapacity(1); + try w.writeAll(name); + dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); } else { try dg.renderType(w, t.elemType()); try w.writeAll(" *"); @@ -446,13 +583,13 @@ pub const DeclGen = struct { try dg.renderType(bw, child_type); try bw.writeAll(" payload; bool is_null; } "); const name_index = buffer.items.len; - try bw.print("zig_opt_{s}_t;\n", .{typeToCIdentifier(child_type)}); + try bw.print("zig_Q_{s};\n", .{typeToCIdentifier(child_type)}); const rendered = buffer.toOwnedSlice(); errdefer dg.typedefs.allocator.free(rendered); const name = rendered[name_index .. rendered.len - 2]; - try dg.typedefs.ensureCapacity(dg.typedefs.capacity() + 1); + try dg.typedefs.ensureUnusedCapacity(1); try w.writeAll(name); dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); }, @@ -465,7 +602,7 @@ pub const DeclGen = struct { return w.writeAll(some.name); } const child_type = t.errorUnionChild(); - const set_type = t.errorUnionSet(); + const err_set_type = t.errorUnionSet(); var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); defer buffer.deinit(); @@ -475,13 +612,20 @@ pub const DeclGen = struct { try dg.renderType(bw, child_type); try bw.writeAll(" payload; uint16_t error; } "); const name_index = buffer.items.len; - try bw.print("zig_err_union_{s}_{s}_t;\n", .{ typeToCIdentifier(set_type), typeToCIdentifier(child_type) }); + if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| { + const func = inf_err_set_payload.data; + try bw.print("zig_E_{s};\n", .{func.owner_decl.name}); + } else { + try bw.print("zig_E_{s}_{s};\n", .{ + typeToCIdentifier(err_set_type), typeToCIdentifier(child_type), + }); + } const rendered = buffer.toOwnedSlice(); errdefer 
dg.typedefs.allocator.free(rendered); const name = rendered[name_index .. rendered.len - 2]; - try dg.typedefs.ensureCapacity(dg.typedefs.capacity() + 1); + try dg.typedefs.ensureUnusedCapacity(1); try w.writeAll(name); dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); }, @@ -514,7 +658,7 @@ pub const DeclGen = struct { errdefer dg.typedefs.allocator.free(rendered); const name = rendered[name_start .. rendered.len - 2]; - try dg.typedefs.ensureCapacity(dg.typedefs.capacity() + 1); + try dg.typedefs.ensureUnusedCapacity(1); try w.writeAll(name); dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); }, @@ -526,7 +670,28 @@ pub const DeclGen = struct { try dg.renderType(w, int_tag_ty); }, .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}), - .Fn => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Fn", .{}), + .Fn => { + try dg.renderType(w, t.fnReturnType()); + try w.writeAll(" (*)("); + const param_len = t.fnParamLen(); + const is_var_args = t.fnIsVarArgs(); + if (param_len == 0 and !is_var_args) + try w.writeAll("void") + else { + var index: usize = 0; + while (index < param_len) : (index += 1) { + if (index > 0) { + try w.writeAll(", "); + } + try dg.renderType(w, t.fnParamType(index)); + } + } + if (is_var_args) { + if (param_len != 0) try w.writeAll(", "); + try w.writeAll("..."); + } + try w.writeByte(')'); + }, .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}), .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}), .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}), @@ -569,23 +734,27 @@ pub fn genDecl(o: *Object) !void { .val = o.dg.decl.val, }; if (tv.val.castTag(.function)) |func_payload| { - const is_global = o.dg.declIsGlobal(tv); - const fwd_decl_writer = o.dg.fwd_decl.writer(); - if (is_global) 
{ - try fwd_decl_writer.writeAll("ZIG_EXTERN_C "); - } - try o.dg.renderFunctionSignature(fwd_decl_writer, is_global); - try fwd_decl_writer.writeAll(";\n"); - const func: *Module.Fn = func_payload.data; - try o.indent_writer.insertNewline(); - try o.dg.renderFunctionSignature(o.writer(), is_global); + if (func.owner_decl == o.dg.decl) { + const is_global = o.dg.declIsGlobal(tv); + const fwd_decl_writer = o.dg.fwd_decl.writer(); + if (is_global) { + try fwd_decl_writer.writeAll("ZIG_EXTERN_C "); + } + try o.dg.renderFunctionSignature(fwd_decl_writer, is_global); + try fwd_decl_writer.writeAll(";\n"); - try o.writer().writeByte(' '); - try genBody(o, func.body); + try o.indent_writer.insertNewline(); + try o.dg.renderFunctionSignature(o.writer(), is_global); - try o.indent_writer.insertNewline(); - } else if (tv.val.tag() == .extern_fn) { + try o.writer().writeByte(' '); + try genBody(o, func.body); + + try o.indent_writer.insertNewline(); + return; + } + } + if (tv.val.tag() == .extern_fn) { const writer = o.writer(); try writer.writeAll("ZIG_EXTERN_C "); try o.dg.renderFunctionSignature(writer, true); @@ -644,9 +813,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { const is_global = dg.declIsGlobal(tv); if (is_global) { try writer.writeAll("ZIG_EXTERN_C "); + try dg.renderFunctionSignature(writer, is_global); + try dg.fwd_decl.appendSlice(";\n"); } - try dg.renderFunctionSignature(writer, is_global); - try dg.fwd_decl.appendSlice(";\n"); }, else => {}, } diff --git a/src/link/C.zig b/src/link/C.zig index 1793b95210..875fd2e964 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -207,7 +207,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void { } var fn_count: usize = 0; - var typedefs = std.HashMap(Type, []const u8, Type.HashContext, std.hash_map.default_max_load_percentage).init(comp.gpa); + var typedefs = std.HashMap(Type, void, Type.HashContext, std.hash_map.default_max_load_percentage).init(comp.gpa); defer typedefs.deinit(); 
// Typedefs, forward decls and non-functions first. @@ -217,14 +217,12 @@ pub fn flushModule(self: *C, comp: *Compilation) !void { if (!decl.has_tv) continue; const buf = buf: { if (decl.val.castTag(.function)) |_| { + try typedefs.ensureUnusedCapacity(decl.fn_link.c.typedefs.count()); var it = decl.fn_link.c.typedefs.iterator(); while (it.next()) |new| { - if (typedefs.get(new.key_ptr.*)) |previous| { - try err_typedef_writer.print("typedef {s} {s};\n", .{ previous, new.value_ptr.name }); - } else { - try typedefs.ensureCapacity(typedefs.capacity() + 1); + const gop = typedefs.getOrPutAssumeCapacity(new.key_ptr.*); + if (!gop.found_existing) { try err_typedef_writer.writeAll(new.value_ptr.rendered); - typedefs.putAssumeCapacityNoClobber(new.key_ptr.*, new.value_ptr.name); } } fn_count += 1; diff --git a/src/link/C/zig.h b/src/link/C/zig.h index ad2b5d4498..a3e571d245 100644 --- a/src/link/C/zig.h +++ b/src/link/C/zig.h @@ -12,6 +12,12 @@ #define zig_threadlocal zig_threadlocal_unavailable #endif +#if __GNUC__ +#define ZIG_COLD __attribute__ ((cold)) +#else +#define ZIG_COLD +#endif + #if __STDC_VERSION__ >= 199901L #define ZIG_RESTRICT restrict #elif defined(__GNUC__) diff --git a/src/type.zig b/src/type.zig index 853a4b7914..f9385e90bc 100644 --- a/src/type.zig +++ b/src/type.zig @@ -58,7 +58,7 @@ pub const Type = extern union { .bool => return .Bool, .void => return .Void, .type => return .Type, - .error_set, .error_set_single, .anyerror => return .ErrorSet, + .error_set, .error_set_single, .anyerror, .error_set_inferred => return .ErrorSet, .comptime_int => return .ComptimeInt, .comptime_float => return .ComptimeFloat, .noreturn => return .NoReturn, @@ -689,7 +689,15 @@ pub const Type = extern union { .optional_single_mut_pointer, .optional_single_const_pointer, .anyframe_T, - => return self.copyPayloadShallow(allocator, Payload.ElemType), + => { + const payload = self.cast(Payload.ElemType).?; + const new_payload = try allocator.create(Payload.ElemType); + 
new_payload.* = .{ + .base = .{ .tag = payload.base.tag }, + .data = try payload.data.copy(allocator), + }; + return Type{ .ptr_otherwise = &new_payload.base }; + }, .int_signed, .int_unsigned, @@ -756,6 +764,7 @@ pub const Type = extern union { }); }, .error_set => return self.copyPayloadShallow(allocator, Payload.ErrorSet), + .error_set_inferred => return self.copyPayloadShallow(allocator, Payload.ErrorSetInferred), .error_set_single => return self.copyPayloadShallow(allocator, Payload.Name), .empty_struct => return self.copyPayloadShallow(allocator, Payload.ContainerScope), .@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct), @@ -1031,6 +1040,10 @@ pub const Type = extern union { const error_set = ty.castTag(.error_set).?.data; return writer.writeAll(std.mem.spanZ(error_set.owner_decl.name)); }, + .error_set_inferred => { + const func = ty.castTag(.error_set_inferred).?.data; + return writer.print("(inferred error set of {s})", .{func.owner_decl.name}); + }, .error_set_single => { const name = ty.castTag(.error_set_single).?.data; return writer.print("error{{{s}}}", .{name}); @@ -1144,6 +1157,7 @@ pub const Type = extern union { .anyerror_void_error_union, .error_set, .error_set_single, + .error_set_inferred, .manyptr_u8, .manyptr_const_u8, .atomic_ordering, @@ -1161,6 +1175,9 @@ pub const Type = extern union { .@"struct" => { // TODO introduce lazy value mechanism const struct_obj = self.castTag(.@"struct").?.data; + assert(struct_obj.status == .have_field_types or + struct_obj.status == .layout_wip or + struct_obj.status == .have_layout); for (struct_obj.fields.values()) |value| { if (value.ty.hasCodeGenBits()) return true; @@ -1348,6 +1365,7 @@ pub const Type = extern union { .error_set_single, .anyerror_void_error_union, .anyerror, + .error_set_inferred, => return 2, // TODO revisit this when we have the concept of the error tag type .array, .array_sentinel => return self.elemType().abiAlignment(target), @@ -1580,6 +1598,7 @@ pub const 
Type = extern union { .error_set_single, .anyerror_void_error_union, .anyerror, + .error_set_inferred, => return 2, // TODO revisit this when we have the concept of the error tag type .int_signed, .int_unsigned => { @@ -1744,6 +1763,7 @@ pub const Type = extern union { .error_set_single, .anyerror_void_error_union, .anyerror, + .error_set_inferred, => return 16, // TODO revisit this when we have the concept of the error tag type .int_signed, .int_unsigned => self.cast(Payload.Bits).?.data, @@ -1863,6 +1883,48 @@ pub const Type = extern union { }; } + pub fn slicePtrFieldType(self: Type, buffer: *Payload.ElemType) Type { + switch (self.tag()) { + .const_slice_u8 => return Type.initTag(.manyptr_const_u8), + + .const_slice => { + const elem_type = self.castTag(.const_slice).?.data; + buffer.* = .{ + .base = .{ .tag = .many_const_pointer }, + .data = elem_type, + }; + return Type.initPayload(&buffer.base); + }, + .mut_slice => { + const elem_type = self.castTag(.mut_slice).?.data; + buffer.* = .{ + .base = .{ .tag = .many_mut_pointer }, + .data = elem_type, + }; + return Type.initPayload(&buffer.base); + }, + + .pointer => { + const payload = self.castTag(.pointer).?.data; + assert(payload.size == .Slice); + if (payload.mutable) { + buffer.* = .{ + .base = .{ .tag = .many_mut_pointer }, + .data = payload.pointee_type, + }; + } else { + buffer.* = .{ + .base = .{ .tag = .many_const_pointer }, + .data = payload.pointee_type, + }; + } + return Type.initPayload(&buffer.base); + }, + + else => unreachable, + } + } + pub fn isConstPtr(self: Type) bool { return switch (self.tag()) { .single_const_pointer, @@ -1915,7 +1977,10 @@ pub const Type = extern union { /// Asserts that the type is an optional pub fn isPtrLikeOptional(self: Type) bool { switch (self.tag()) { - .optional_single_const_pointer, .optional_single_mut_pointer => return true, + .optional_single_const_pointer, + .optional_single_mut_pointer, + => return true, + .optional => { var buf: Payload.ElemType = 
undefined; const child_type = self.optionalChild(&buf); @@ -2400,6 +2465,7 @@ pub const Type = extern union { .error_union, .error_set, .error_set_single, + .error_set_inferred, .@"opaque", .var_args_param, .manyptr_u8, @@ -2892,6 +2958,8 @@ pub const Type = extern union { anyframe_T, error_set, error_set_single, + /// The type is the inferred error set of a specific function. + error_set_inferred, empty_struct, @"opaque", @"struct", @@ -2989,6 +3057,7 @@ pub const Type = extern union { => Payload.Bits, .error_set => Payload.ErrorSet, + .error_set_inferred => Payload.ErrorSetInferred, .array, .vector => Payload.Array, .array_sentinel => Payload.ArraySentinel, @@ -3081,6 +3150,13 @@ pub const Type = extern union { data: *Module.ErrorSet, }; + pub const ErrorSetInferred = struct { + pub const base_tag = Tag.error_set_inferred; + + base: Payload = Payload{ .tag = base_tag }, + data: *Module.Fn, + }; + pub const Pointer = struct { pub const base_tag = Tag.pointer; diff --git a/src/value.zig b/src/value.zig index 008cc3c2fe..b4cd63b8d3 100644 --- a/src/value.zig +++ b/src/value.zig @@ -483,13 +483,13 @@ pub const Value = extern union { /// TODO this should become a debug dump() function. In order to print values in a meaningful way /// we also need access to the type. 
pub fn format( - self: Value, + start_val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype, ) !void { comptime assert(fmt.len == 0); - var val = self; + var val = start_val; while (true) switch (val.tag()) { .u8_type => return out_stream.writeAll("u8"), .i8_type => return out_stream.writeAll("i8"), @@ -598,9 +598,9 @@ pub const Value = extern union { val = field_ptr.container_ptr; }, .empty_array => return out_stream.writeAll(".{}"), - .enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(self.castTag(.enum_literal).?.data)}), - .enum_field_index => return out_stream.print("(enum field {d})", .{self.castTag(.enum_field_index).?.data}), - .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(self.castTag(.bytes).?.data)}), + .enum_literal => return out_stream.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}), + .enum_field_index => return out_stream.print("(enum field {d})", .{val.castTag(.enum_field_index).?.data}), + .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), .repeated => { try out_stream.writeAll("(repeated) "); val = val.castTag(.repeated).?.data; @@ -1336,6 +1336,23 @@ pub const Value = extern union { }; } + pub fn sliceLen(val: Value) u64 { + return switch (val.tag()) { + .empty_array => 0, + .bytes => val.castTag(.bytes).?.data.len, + .ref_val => sliceLen(val.castTag(.ref_val).?.data), + .decl_ref => { + const decl = val.castTag(.decl_ref).?.data; + if (decl.ty.zigTypeTag() == .Array) { + return decl.ty.arrayLen(); + } else { + return 1; + } + }, + else => unreachable, + }; + } + /// Asserts the value is a single-item pointer to an array, or an array, /// or an unknown-length pointer, and returns the element value at the index. 
pub fn elemValue(self: Value, allocator: *Allocator, index: usize) error{OutOfMemory}!Value { diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig index 8f69421fd4..a064995c13 100644 --- a/test/stage2/cbe.zig +++ b/test/stage2/cbe.zig @@ -804,19 +804,6 @@ pub fn addCases(ctx: *TestContext) !void { }); } - ctx.c("empty start function", linux_x64, - \\export fn _start() noreturn { - \\ unreachable; - \\} - , - \\ZIG_EXTERN_C zig_noreturn void _start(void); - \\ - \\zig_noreturn void _start(void) { - \\ zig_breakpoint(); - \\ zig_unreachable(); - \\} - \\ - ); ctx.h("simple header", linux_x64, \\export fn start() void{} , From 5816997ae79c6025d5f85aab0c9ab456fecadec9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 7 Jul 2021 14:17:04 -0700 Subject: [PATCH 4/6] stage2: get tests passing * implement enough of ret_err_value to pass wasm tests * only do the proper `@panic` implementation for the backends which support it, which is currently only the C backend. The other backends will see `@breakpoint(); unreachable;` same as before. - I plan to do AIR memory layout reworking as a prerequisite to fixing other backends, because that will help me put all the constants up front, which will allow the codegen to lower to memory without jumps. * `@panic` is implemented using anon decls for the message. Makes it easier on the backends. Might want to look into re-using decls for this in the future. * implement DWARF .debug_info for pointer-like optionals. --- src/Sema.zig | 57 +++++++++--- src/codegen.zig | 214 ++++++++++++++++++++++++++----------------- src/link/Elf.zig | 23 +++-- test/stage2/wasm.zig | 2 - 4 files changed, 192 insertions(+), 104 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 1538e54208..24c51bdc46 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5423,11 +5423,22 @@ fn zirRetErrValue( const src = inst_data.src(); // Add the error tag to the inferred error set of the in-scope function. 
+ if (sema.func) |func| { + const fn_ty = func.owner_decl.ty; + const fn_ret_ty = fn_ty.fnReturnType(); + if (fn_ret_ty.zigTypeTag() == .ErrorUnion and + fn_ret_ty.errorUnionSet().tag() == .error_set_inferred) + { + return sema.mod.fail(&block.base, src, "TODO: Sema.zirRetErrValue", .{}); + } + } // Return the error code from the function. - - _ = inst_data; - _ = err_name; - return sema.mod.fail(&block.base, src, "TODO: Sema.zirRetErrValueCode", .{}); + const kv = try sema.mod.getErrorValue(err_name); + const result_inst = try sema.mod.constInst(sema.arena, src, .{ + .ty = try Type.Tag.error_set_single.create(sema.arena, kv.key), + .val = try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), + }); + return sema.analyzeRet(block, result_inst, src, true); } fn zirRetCoerce( @@ -6411,6 +6422,15 @@ fn panicWithMsg( ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; + + const this_feature_is_implemented_in_the_backend = + mod.comp.bin_file.options.object_format == .c; + if (!this_feature_is_implemented_in_the_backend) { + // TODO implement this feature in all the backends and then delete this branch + _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); + _ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach); + return always_noreturn; + } const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); @@ -6431,8 +6451,6 @@ fn safetyPanic( src: LazySrcLoc, panic_id: PanicId, ) !Zir.Inst.Index { - const mod = sema.mod; - const arena = sema.arena; const msg = switch (panic_id) { .unreach => "reached unreachable code", .unwrap_null => "attempt to use null value", @@ -6441,11 +6459,28 @@ fn safetyPanic( .incorrect_alignment => "incorrect alignment", .invalid_error_code => "invalid error code", }; - const msg_inst = try mod.constInst(arena, src, .{ - .ty = 
Type.initTag(.const_slice_u8), - .val = try Value.Tag.ref_val.create(arena, try Value.Tag.bytes.create(arena, msg)), - }); - return sema.panicWithMsg(block, src, msg_inst); + + const msg_inst = msg_inst: { + // TODO instead of making a new decl for every panic in the entire compilation, + // introduce the concept of a reference-counted decl for these + var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + errdefer new_decl_arena.deinit(); + + const decl_ty = try Type.Tag.array_u8.create(&new_decl_arena.allocator, msg.len); + const decl_val = try Value.Tag.bytes.create(&new_decl_arena.allocator, msg); + + const new_decl = try sema.mod.createAnonymousDecl(&block.base, .{ + .ty = decl_ty, + .val = decl_val, + }); + errdefer sema.mod.deleteAnonDecl(&block.base, new_decl); + try new_decl.finalizeNewArena(&new_decl_arena); + break :msg_inst try sema.analyzeDeclRef(block, .unneeded, new_decl); + }; + + const casted_msg_inst = try sema.coerce(block, Type.initTag(.const_slice_u8), msg_inst, src); + + return sema.panicWithMsg(block, src, casted_msg_inst); } fn emitBackwardBranch(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) !void { diff --git a/src/codegen.zig b/src/codegen.zig index 0b63222242..f9f5a861fb 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -142,40 +142,52 @@ pub fn generateSymbol( ), }; }, - .Pointer => { - // TODO populate .debug_info for the pointer - if (typed_value.val.castTag(.decl_ref)) |payload| { - const decl = payload.data; - if (decl.analysis != .complete) return error.AnalysisFail; - // TODO handle the dependency of this symbol on the decl's vaddr. - // If the decl changes vaddr, then this symbol needs to get regenerated. 
- const vaddr = bin_file.getDeclVAddr(decl); - const endian = bin_file.options.target.cpu.arch.endian(); - switch (bin_file.options.target.cpu.arch.ptrBitWidth()) { - 16 => { - try code.resize(2); - mem.writeInt(u16, code.items[0..2], @intCast(u16, vaddr), endian); - }, - 32 => { - try code.resize(4); - mem.writeInt(u32, code.items[0..4], @intCast(u32, vaddr), endian); - }, - 64 => { - try code.resize(8); - mem.writeInt(u64, code.items[0..8], vaddr, endian); - }, - else => unreachable, + .Pointer => switch (typed_value.ty.ptrSize()) { + .Slice => { + return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement generateSymbol for slice {}", + .{typed_value.val}, + ), + }; + }, + else => { + // TODO populate .debug_info for the pointer + if (typed_value.val.castTag(.decl_ref)) |payload| { + const decl = payload.data; + if (decl.analysis != .complete) return error.AnalysisFail; + // TODO handle the dependency of this symbol on the decl's vaddr. + // If the decl changes vaddr, then this symbol needs to get regenerated. 
+ const vaddr = bin_file.getDeclVAddr(decl); + const endian = bin_file.options.target.cpu.arch.endian(); + switch (bin_file.options.target.cpu.arch.ptrBitWidth()) { + 16 => { + try code.resize(2); + mem.writeInt(u16, code.items[0..2], @intCast(u16, vaddr), endian); + }, + 32 => { + try code.resize(4); + mem.writeInt(u32, code.items[0..4], @intCast(u32, vaddr), endian); + }, + 64 => { + try code.resize(8); + mem.writeInt(u64, code.items[0..8], vaddr, endian); + }, + else => unreachable, + } + return Result{ .appended = {} }; } - return Result{ .appended = {} }; - } - return Result{ - .fail = try ErrorMsg.create( - bin_file.allocator, - src_loc, - "TODO implement generateSymbol for pointer {}", - .{typed_value.val}, - ), - }; + return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement generateSymbol for pointer {}", + .{typed_value.val}, + ), + }; + }, }, .Int => { // TODO populate .debug_info for the integer @@ -2244,10 +2256,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.register_manager.getReg(reg, null); try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); }, - .stack_offset => { + .stack_offset => |off| { // Here we need to emit instructions like this: // mov qword ptr [rsp + stack_offset], x - return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{}); + try self.genSetStack(arg.src, arg.ty, off, arg_mcv); }, .ptr_stack_offset => { return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); @@ -3444,9 +3456,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, } }, - .embedded_in_code => |code_offset| { - _ = code_offset; - return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); + .embedded_in_code => { + // TODO this and `.stack_offset` below need to get improved to support types greater than + // register size, and do general memcpy + const reg = try self.copyToTmpRegister(src, ty, mcv); + 
return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); }, .register => |reg| { try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89); @@ -3456,6 +3470,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); }, .stack_offset => |off| { + // TODO this and `.embedded_in_code` above need to get improved to support types greater than + // register size, and do general memcpy + if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. @@ -4161,33 +4178,48 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); switch (typed_value.ty.zigTypeTag()) { - .Pointer => { - if (typed_value.val.castTag(.decl_ref)) |payload| { - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const decl = payload.data; - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const decl = payload.data; - const got_addr = blk: { - const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment; - const got = seg.sections.items[macho_file.got_section_index.?]; - break :blk got.addr + decl.link.macho.offset_table_index * ptr_bytes; - }; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const decl = payload.data; - const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else { - return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{}); + .Pointer => switch (typed_value.ty.ptrSize()) { + .Slice => { + var buf: Type.Payload.ElemType = undefined; + const ptr_type = 
typed_value.ty.slicePtrFieldType(&buf); + const ptr_mcv = try self.genTypedValue(src, .{ .ty = ptr_type, .val = typed_value.val }); + const slice_len = typed_value.val.sliceLen(); + // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean + // the Sema code needs to use anonymous Decls or alloca instructions to store data. + const ptr_imm = ptr_mcv.memory; + _ = slice_len; + _ = ptr_imm; + // We need more general support for const data being stored in memory to make this work. + return self.fail(src, "TODO codegen for const slices", .{}); + }, + else => { + if (typed_value.val.castTag(.decl_ref)) |payload| { + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const decl = payload.data; + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const decl = payload.data; + const got_addr = blk: { + const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment; + const got = seg.sections.items[macho_file.got_section_index.?]; + break :blk got.addr + decl.link.macho.offset_table_index * ptr_bytes; + }; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const decl = payload.data; + const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else { + return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{}); + } } - } - if (typed_value.val.tag() == .int_u64) { - return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; - } - return self.fail(src, "TODO codegen more kinds of const pointers", .{}); + if (typed_value.val.tag() == .int_u64) { + return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; + } + return self.fail(src, 
"TODO codegen more kinds of const pointers", .{}); + }, }, .Int => { const info = typed_value.ty.intInfo(self.target.*); @@ -4264,27 +4296,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var next_stack_offset: u32 = 0; for (param_types) |ty, i| { - switch (ty.zigTypeTag()) { - .Bool, .Int => { - if (!ty.hasCodeGenBits()) { - assert(cc != .C); - result.args[i] = .{ .none = {} }; - } else { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - if (next_int_reg >= c_abi_int_param_regs.len) { - result.args[i] = .{ .stack_offset = next_stack_offset }; - next_stack_offset += param_size; - } else { - const aliased_reg = registerAlias( - c_abi_int_param_regs[next_int_reg], - param_size, - ); - result.args[i] = .{ .register = aliased_reg }; - next_int_reg += 1; - } - } - }, - else => return self.fail(src, "TODO implement function parameters of type {s}", .{@tagName(ty.zigTypeTag())}), + if (!ty.hasCodeGenBits()) { + assert(cc != .C); + result.args[i] = .{ .none = {} }; + continue; + } + const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const pass_in_reg = switch (ty.zigTypeTag()) { + .Bool => true, + .Int => param_size <= 8, + .Pointer => ty.ptrSize() != .Slice, + .Optional => ty.isPtrLikeOptional(), + else => false, + }; + if (pass_in_reg) { + if (next_int_reg >= c_abi_int_param_regs.len) { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_stack_offset += param_size; + } else { + const aliased_reg = registerAlias( + c_abi_int_param_regs[next_int_reg], + param_size, + ); + result.args[i] = .{ .register = aliased_reg }; + next_int_reg += 1; + } + } else { + // For simplicity of codegen, slices and other types are always pushed onto the stack. + // TODO: look into optimizing this by passing things as registers sometimes, + // such as ptr and len of slices as separate registers. + // TODO: also we need to honor the C ABI for relevant types rather than passing on + // the stack here. 
+ result.args[i] = .{ .stack_offset = next_stack_offset }; + next_stack_offset += param_size; } } result.stack_byte_count = next_stack_offset; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 722077b8b8..d754b478b9 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2505,11 +2505,7 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo abbrev_base_type, DW.ATE_boolean, // DW.AT_encoding , DW.FORM_data1 1, // DW.AT_byte_size, DW.FORM_data1 - 'b', - 'o', - 'o', - 'l', - 0, // DW.AT_name, DW.FORM_string + 'b', 'o', 'o', 'l', 0, // DW.AT_name, DW.FORM_string }); }, .Int => { @@ -2526,8 +2522,23 @@ fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !vo // DW.AT_name, DW.FORM_string try dbg_info_buffer.writer().print("{}\x00", .{ty}); }, + .Optional => { + if (ty.isPtrLikeOptional()) { + try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 12); + dbg_info_buffer.appendAssumeCapacity(abbrev_base_type); + // DW.AT_encoding, DW.FORM_data1 + dbg_info_buffer.appendAssumeCapacity(DW.ATE_address); + // DW.AT_byte_size, DW.FORM_data1 + dbg_info_buffer.appendAssumeCapacity(@intCast(u8, ty.abiSize(self.base.options.target))); + // DW.AT_name, DW.FORM_string + try dbg_info_buffer.writer().print("{}\x00", .{ty}); + } else { + log.err("TODO implement .debug_info for type '{}'", .{ty}); + try dbg_info_buffer.append(abbrev_pad1); + } + }, else => { - std.log.scoped(.compiler).err("TODO implement .debug_info for type '{}'", .{ty}); + log.err("TODO implement .debug_info for type '{}'", .{ty}); try dbg_info_buffer.append(abbrev_pad1); }, } diff --git a/test/stage2/wasm.zig b/test/stage2/wasm.zig index 07386e8212..60c746f4e0 100644 --- a/test/stage2/wasm.zig +++ b/test/stage2/wasm.zig @@ -587,8 +587,6 @@ pub fn addCases(ctx: *TestContext) !void { } { - // TODO implement Type equality comparison of error unions in SEMA - // before we can incrementally compile functions with an error union as return type var case = 
ctx.exe("wasm error union part 2", wasi);
        case.addCompareOutput(

From 5c8bd443d92c6306f60857720103ae46ca7b8b3e Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 7 Jul 2021 19:50:56 -0700
Subject: [PATCH 5/6] stage2: fix if expressions on error unions

AstGen had the then-else logic backwards for if expressions on error
unions. This commit fixes it.

Turns out AstGen only really needs `is_non_null` and `is_non_err`, and
does not need the `is_null` or `is_err` variants. So I removed the
`is_null{,_ptr}` and `is_err{,_ptr}` ZIR instructions (-4) and added
`is_non_err`, `is_non_err_ptr` ZIR instructions (+2) for a total of (-2)
ZIR instructions, giving us a tiny bit more headroom within the 256 tag
limit.

This required swapping the order of then/else blocks in a handful of
cases, but ultimately means the ZIR will be in the same order as source
order, which is convenient when debugging.

AIR code on the other hand, gains the `is_non_err` and `is_non_err_ptr`
instructions.

Sema: fix logic in zirErrUnionCode and zirErrUnionCodePtr returning the
wrong result type.
--- src/AstGen.zig | 92 ++++++++++++++++++++++---------------------- src/Sema.zig | 55 ++++++++++++++------------ src/Zir.zig | 32 +++++---------- src/air.zig | 12 +++++- src/codegen.zig | 12 ++++++ src/codegen/c.zig | 11 +++--- src/codegen/wasm.zig | 9 ++--- 7 files changed, 119 insertions(+), 104 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index f7fea3a99c..a816628f66 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -786,7 +786,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr rl, node, node_datas[node].lhs, - .is_err_ptr, + .is_non_err_ptr, .err_union_payload_unsafe_ptr, .err_union_code_ptr, node_datas[node].rhs, @@ -798,7 +798,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr rl, node, node_datas[node].lhs, - .is_err, + .is_non_err, .err_union_payload_unsafe, .err_union_code, node_datas[node].rhs, @@ -813,7 +813,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr rl, node, node_datas[node].lhs, - .is_null_ptr, + .is_non_null_ptr, .optional_payload_unsafe_ptr, undefined, node_datas[node].rhs, @@ -825,7 +825,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: ast.Node.Index) InnerEr rl, node, node_datas[node].lhs, - .is_null, + .is_non_null, .optional_payload_unsafe, undefined, node_datas[node].rhs, @@ -1948,11 +1948,9 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) Inner .float128, .int_type, .is_non_null, - .is_null, .is_non_null_ptr, - .is_null_ptr, - .is_err, - .is_err_ptr, + .is_non_err, + .is_non_err_ptr, .mod_rem, .mul, .mulwrap, @@ -4621,8 +4619,8 @@ fn tryExpr( }; const err_ops = switch (rl) { // zig fmt: off - .ref => [3]Zir.Inst.Tag{ .is_err_ptr, .err_union_code_ptr, .err_union_payload_unsafe_ptr }, - else => [3]Zir.Inst.Tag{ .is_err, .err_union_code, .err_union_payload_unsafe }, + .ref => [3]Zir.Inst.Tag{ .is_non_err_ptr, .err_union_code_ptr, .err_union_payload_unsafe_ptr }, + else => 
[3]Zir.Inst.Tag{ .is_non_err, .err_union_code, .err_union_payload_unsafe }, // zig fmt: on }; // This could be a pointer or value depending on the `operand_rl` parameter. @@ -4640,20 +4638,20 @@ fn tryExpr( var then_scope = parent_gz.makeSubBlock(scope); defer then_scope.instructions.deinit(astgen.gpa); - const err_code = try then_scope.addUnNode(err_ops[1], operand, node); - try genDefers(&then_scope, &fn_block.base, scope, .{ .both = err_code }); - const then_result = try then_scope.addUnNode(.ret_node, err_code, node); + block_scope.break_count += 1; + // This could be a pointer or value depending on `err_ops[2]`. + const unwrapped_payload = try then_scope.addUnNode(err_ops[2], operand, node); + const then_result = switch (rl) { + .ref => unwrapped_payload, + else => try rvalue(&then_scope, block_scope.break_result_loc, unwrapped_payload, node), + }; var else_scope = parent_gz.makeSubBlock(scope); defer else_scope.instructions.deinit(astgen.gpa); - block_scope.break_count += 1; - // This could be a pointer or value depending on `err_ops[2]`. - const unwrapped_payload = try else_scope.addUnNode(err_ops[2], operand, node); - const else_result = switch (rl) { - .ref => unwrapped_payload, - else => try rvalue(&else_scope, block_scope.break_result_loc, unwrapped_payload, node), - }; + const err_code = try else_scope.addUnNode(err_ops[1], operand, node); + try genDefers(&else_scope, &fn_block.base, scope, .{ .both = err_code }); + const else_result = try else_scope.addUnNode(.ret_node, err_code, node); return finishThenElseBlock( parent_gz, @@ -4711,18 +4709,28 @@ fn orelseCatchExpr( var then_scope = parent_gz.makeSubBlock(scope); defer then_scope.instructions.deinit(astgen.gpa); + // This could be a pointer or value depending on `unwrap_op`. 
+ const unwrapped_payload = try then_scope.addUnNode(unwrap_op, operand, node); + const then_result = switch (rl) { + .ref => unwrapped_payload, + else => try rvalue(&then_scope, block_scope.break_result_loc, unwrapped_payload, node), + }; + + var else_scope = parent_gz.makeSubBlock(scope); + defer else_scope.instructions.deinit(astgen.gpa); + var err_val_scope: Scope.LocalVal = undefined; - const then_sub_scope = blk: { - const payload = payload_token orelse break :blk &then_scope.base; + const else_sub_scope = blk: { + const payload = payload_token orelse break :blk &else_scope.base; if (mem.eql(u8, tree.tokenSlice(payload), "_")) { return astgen.failTok(payload, "discard of error capture; omit it instead", .{}); } const err_name = try astgen.identAsString(payload); err_val_scope = .{ - .parent = &then_scope.base, - .gen_zir = &then_scope, + .parent = &else_scope.base, + .gen_zir = &else_scope, .name = err_name, - .inst = try then_scope.addUnNode(unwrap_code_op, operand, node), + .inst = try else_scope.addUnNode(unwrap_code_op, operand, node), .token_src = payload, .id_cat = .@"capture", }; @@ -4730,23 +4738,13 @@ fn orelseCatchExpr( }; block_scope.break_count += 1; - const then_result = try expr(&then_scope, then_sub_scope, block_scope.break_result_loc, rhs); - try checkUsed(parent_gz, &then_scope.base, then_sub_scope); + const else_result = try expr(&else_scope, else_sub_scope, block_scope.break_result_loc, rhs); + try checkUsed(parent_gz, &else_scope.base, else_sub_scope); // We hold off on the break instructions as well as copying the then/else // instructions into place until we know whether to keep store_to_block_ptr // instructions or not. - var else_scope = parent_gz.makeSubBlock(scope); - defer else_scope.instructions.deinit(astgen.gpa); - - // This could be a pointer or value depending on `unwrap_op`. 
- const unwrapped_payload = try else_scope.addUnNode(unwrap_op, operand, node); - const else_result = switch (rl) { - .ref => unwrapped_payload, - else => try rvalue(&else_scope, block_scope.break_result_loc, unwrapped_payload, node), - }; - return finishThenElseBlock( parent_gz, rl, @@ -4964,7 +4962,7 @@ fn ifExpr( if (if_full.error_token) |_| { const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; const err_union = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr); - const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err; + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; break :c .{ .inst = err_union, .bool_bit = try block_scope.addUnNode(tag, err_union, node), @@ -5221,7 +5219,7 @@ fn whileExpr( if (while_full.error_token) |_| { const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none; const err_union = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr); - const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err; + const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_err_ptr else .is_non_err; break :c .{ .inst = err_union, .bool_bit = try continue_scope.addUnNode(tag, err_union, node), @@ -6229,23 +6227,25 @@ fn ret(gz: *GenZir, scope: *Scope, node: ast.Node.Index) InnerError!Zir.Inst.Ref } // Emit conditional branch for generating errdefers. 
- const is_err = try gz.addUnNode(.is_err, operand, node); + const is_non_err = try gz.addUnNode(.is_non_err, operand, node); const condbr = try gz.addCondBr(.condbr, node); var then_scope = gz.makeSubBlock(scope); defer then_scope.instructions.deinit(astgen.gpa); - const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{ - .both = try then_scope.addUnNode(.err_union_code, operand, node), - }; - try genDefers(&then_scope, defer_outer, scope, which_ones); + + try genDefers(&then_scope, defer_outer, scope, .normal_only); _ = try then_scope.addUnNode(.ret_node, operand, node); var else_scope = gz.makeSubBlock(scope); defer else_scope.instructions.deinit(astgen.gpa); - try genDefers(&else_scope, defer_outer, scope, .normal_only); + + const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{ + .both = try else_scope.addUnNode(.err_union_code, operand, node), + }; + try genDefers(&else_scope, defer_outer, scope, which_ones); _ = try else_scope.addUnNode(.ret_node, operand, node); - try setCondBrPayload(condbr, is_err, &then_scope, &else_scope); + try setCondBrPayload(condbr, is_non_err, &then_scope, &else_scope); return Zir.Inst.Ref.unreachable_value; }, diff --git a/src/Sema.zig b/src/Sema.zig index 24c51bdc46..86e5f59af6 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -225,12 +225,10 @@ pub fn analyzeBody( .float => try sema.zirFloat(block, inst), .float128 => try sema.zirFloat128(block, inst), .int_type => try sema.zirIntType(block, inst), - .is_err => try sema.zirIsErr(block, inst), - .is_err_ptr => try sema.zirIsErrPtr(block, inst), - .is_non_null => try sema.zirIsNull(block, inst, true), - .is_non_null_ptr => try sema.zirIsNullPtr(block, inst, true), - .is_null => try sema.zirIsNull(block, inst, false), - .is_null_ptr => try sema.zirIsNullPtr(block, inst, false), + .is_non_err => try sema.zirIsNonErr(block, inst), + .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + .is_non_null => try 
sema.zirIsNonNull(block, inst), + .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), .loop => try sema.zirLoop(block, inst), .merge_error_sets => try sema.zirMergeErrorSets(block, inst), .negate => try sema.zirNegate(block, inst, .sub), @@ -2981,17 +2979,19 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); + const result_ty = operand.ty.castTag(.error_union).?.data.error_set; + if (operand.value()) |val| { assert(val.getError() != null); const data = val.castTag(.error_union).?.data; return sema.mod.constInst(sema.arena, src, .{ - .ty = operand.ty.castTag(.error_union).?.data.error_set, + .ty = result_ty, .val = data, }); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_err, operand); + return block.addUnOp(src, result_ty, .unwrap_errunion_err, operand); } /// Pointer in, value out @@ -3007,18 +3007,20 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); + const result_ty = operand.ty.elemType().castTag(.error_union).?.data.error_set; + if (operand.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); assert(val.getError() != null); const data = val.castTag(.error_union).?.data; return sema.mod.constInst(sema.arena, src, .{ - .ty = operand.ty.elemType().castTag(.error_union).?.data.error_set, + .ty = result_ty, .val = data, }); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_err_ptr, operand); + return block.addUnOp(src, result_ty, .unwrap_errunion_err_ptr, operand); } fn 
zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { @@ -5298,11 +5300,10 @@ fn zirBoolBr( return &block_inst.base; } -fn zirIsNull( +fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - invert_logic: bool, ) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -5310,14 +5311,13 @@ fn zirIsNull( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = try sema.resolveInst(inst_data.operand); - return sema.analyzeIsNull(block, src, operand, invert_logic); + return sema.analyzeIsNull(block, src, operand, true); } -fn zirIsNullPtr( +fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - invert_logic: bool, ) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -5326,19 +5326,19 @@ fn zirIsNullPtr( const src = inst_data.src(); const ptr = try sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); - return sema.analyzeIsNull(block, src, loaded, invert_logic); + return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = try sema.resolveInst(inst_data.operand); - return sema.analyzeIsErr(block, inst_data.src(), operand); + return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { const tracy = trace(@src()); defer tracy.end(); @@ -5346,7 +5346,7 @@ fn zirIsErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const src = inst_data.src(); const 
ptr = try sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); - return sema.analyzeIsErr(block, src, loaded); + return sema.analyzeIsNonErr(block, src, loaded); } fn zirCondbr( @@ -7219,20 +7219,25 @@ fn analyzeIsNull( return block.addUnOp(src, result_ty, inst_tag, operand); } -fn analyzeIsErr(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, operand: *Inst) InnerError!*Inst { +fn analyzeIsNonErr( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + operand: *Inst, +) InnerError!*Inst { const ot = operand.ty.zigTypeTag(); - if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, false); - if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, true); + if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); + if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); assert(ot == .ErrorUnion); const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |err_union| { if (err_union.isUndef()) { return sema.mod.constUndef(sema.arena, src, result_ty); } - return sema.mod.constBool(sema.arena, src, err_union.getError() != null); + return sema.mod.constBool(sema.arena, src, err_union.getError() == null); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .is_err, operand); + return block.addUnOp(src, result_ty, .is_non_err, operand); } fn analyzeSlice( diff --git a/src/Zir.zig b/src/Zir.zig index a80660b5bf..77b71f6caf 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -398,21 +398,15 @@ pub const Inst = struct { /// Return a boolean false if an optional is null. `x != null` /// Uses the `un_node` field. is_non_null, - /// Return a boolean true if an optional is null. `x == null` - /// Uses the `un_node` field. - is_null, /// Return a boolean false if an optional is null. `x.* != null` /// Uses the `un_node` field. 
is_non_null_ptr, - /// Return a boolean true if an optional is null. `x.* == null` + /// Return a boolean false if value is an error /// Uses the `un_node` field. - is_null_ptr, - /// Return a boolean true if value is an error + is_non_err, + /// Return a boolean false if dereferenced pointer is an error /// Uses the `un_node` field. - is_err, - /// Return a boolean true if dereferenced pointer is an error - /// Uses the `un_node` field. - is_err_ptr, + is_non_err_ptr, /// A labeled block of code that loops forever. At the end of the body will have either /// a `repeat` instruction or a `repeat_inline` instruction. /// Uses the `pl_node` field. The AST node is either a for loop or while loop. @@ -1046,11 +1040,9 @@ pub const Inst = struct { .float128, .int_type, .is_non_null, - .is_null, .is_non_null_ptr, - .is_null_ptr, - .is_err, - .is_err_ptr, + .is_non_err, + .is_non_err_ptr, .mod_rem, .mul, .mulwrap, @@ -1306,11 +1298,9 @@ pub const Inst = struct { .float128 = .pl_node, .int_type = .int_type, .is_non_null = .un_node, - .is_null = .un_node, .is_non_null_ptr = .un_node, - .is_null_ptr = .un_node, - .is_err = .un_node, - .is_err_ptr = .un_node, + .is_non_err = .un_node, + .is_non_err_ptr = .un_node, .loop = .pl_node, .repeat = .node, .repeat_inline = .node, @@ -2857,11 +2847,9 @@ const Writer = struct { .err_union_code, .err_union_code_ptr, .is_non_null, - .is_null, .is_non_null_ptr, - .is_null_ptr, - .is_err, - .is_err_ptr, + .is_non_err, + .is_non_err_ptr, .typeof, .typeof_elem, .struct_init_empty, diff --git a/src/air.zig b/src/air.zig index 1b7faa641b..e73367945b 100644 --- a/src/air.zig +++ b/src/air.zig @@ -90,8 +90,12 @@ pub const Inst = struct { is_non_null_ptr, /// E!T => bool is_err, + /// E!T => bool (inverted logic) + is_non_err, /// *E!T => bool is_err_ptr, + /// *E!T => bool (inverted logic) + is_non_err_ptr, bool_and, bool_or, /// Read a value from a pointer. 
@@ -154,7 +158,9 @@ pub const Inst = struct { .is_null, .is_null_ptr, .is_err, + .is_non_err, .is_err_ptr, + .is_non_err_ptr, .ptrtoint, .floatcast, .intcast, @@ -759,7 +765,9 @@ const DumpAir = struct { .is_null, .is_null_ptr, .is_err, + .is_non_err, .is_err_ptr, + .is_non_err_ptr, .ptrtoint, .floatcast, .intcast, @@ -888,11 +896,13 @@ const DumpAir = struct { .bitcast, .not, .is_non_null, - .is_null, .is_non_null_ptr, + .is_null, .is_null_ptr, .is_err, .is_err_ptr, + .is_non_err, + .is_non_err_ptr, .ptrtoint, .floatcast, .intcast, diff --git a/src/codegen.zig b/src/codegen.zig index f9f5a861fb..6b9bd633d0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -859,6 +859,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), .is_null => return self.genIsNull(inst.castTag(.is_null).?), .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), .is_err => return self.genIsErr(inst.castTag(.is_err).?), .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), .load => return self.genLoad(inst.castTag(.load).?), @@ -2972,6 +2974,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail(inst.base.src, "TODO load the operand and call genIsErr", .{}); } + fn genIsNonErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + switch (arch) { + else => return self.fail(inst.base.src, "TODO implement is_non_err for {}", .{self.target.cpu.arch}), + } + } + + fn genIsNonErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + return self.fail(inst.base.src, "TODO load the operand and call genIsNonErr", .{}); + } + fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue { // A loop is a setup to be able to jump back to the beginning. 
const start_index = self.code.items.len; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a9521d21a8..db0e910643 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -895,8 +895,10 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi .ref => try genRef(o, inst.castTag(.ref).?), .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), - .is_err => try genIsErr(o, inst.castTag(.is_err).?), - .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?), + .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", "!="), + .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", "=="), + .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "[0]", "!="), + .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "[0]", "=="), .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), @@ -1446,15 +1448,14 @@ fn genWrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue { return local; } -fn genIsErr(o: *Object, inst: *Inst.UnOp) !CValue { +fn genIsErr(o: *Object, inst: *Inst.UnOp, deref_suffix: []const u8, op_str: []const u8) !CValue { const writer = o.writer(); - const maybe_deref = if (inst.base.tag == .is_err_ptr) "[0]" else ""; const operand = try o.resolveInst(inst.operand); const local = try o.allocLocal(Type.initTag(.bool), .Const); try writer.writeAll(" = ("); try o.writeCValue(writer, operand); - try writer.print("){s}.error != 0;\n", .{maybe_deref}); + try writer.print("){s}.error {s} 0;\n", .{ deref_suffix, op_str }); return local; } diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index ec4ec66b1e..3476ab2ce6 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -814,7 +814,8 @@ pub const Context = struct { .constant => unreachable, .dbg_stmt => WValue.none, .div => self.genBinOp(inst.castTag(.div).?, .div), - .is_err => 
self.genIsErr(inst.castTag(.is_err).?),
+            .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne),
+            .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq),
             .load => self.genLoad(inst.castTag(.load).?),
             .loop => self.genLoop(inst.castTag(.loop).?),
             .mul => self.genBinOp(inst.castTag(.mul).?, .mul),
@@ -1278,7 +1279,7 @@ pub const Context = struct {
         return .none;
     }
 
-    fn genIsErr(self: *Context, inst: *Inst.UnOp) InnerError!WValue {
+    fn genIsErr(self: *Context, inst: *Inst.UnOp, opcode: wasm.Opcode) InnerError!WValue {
         const operand = self.resolveInst(inst.operand);
         const offset = self.code.items.len;
         const writer = self.code.writer();
@@ -1289,9 +1290,7 @@ pub const Context = struct {
         try writer.writeByte(wasm.opcode(.i32_const));
         try leb.writeILEB128(writer, @as(i32, 0));
 
-        // we want to break out of the condition if they're *not* equal,
-        // because that means there's an error.
-        try writer.writeByte(wasm.opcode(.i32_ne));
+        try writer.writeByte(@enumToInt(opcode));
 
         return WValue{ .code_offset = offset };
     }
 

From c2e66d9bab396a69514ec7c3c41fb0404e542f21 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 7 Jul 2021 20:47:21 -0700
Subject: [PATCH 6/6] stage2: basic inferred error set support

* Inferred error sets are stored in the return Type of the function,
  owned by the Module.Fn. So it cleans up that memory in deinit().
* Sema: update the inferred error set in zirRetErrValue
  - Update relevant code in wrapErrorUnion
* C backend: improve some instructions to take advantage of liveness
  analysis to avoid being emitted when unused.
* C backend: when an error union has a payload type with no runtime
  bits, emit the error union as the same type as the error set.
--- src/Module.zig | 15 ++++++++-- src/Sema.zig | 25 ++++++++++++----- src/codegen/c.zig | 67 ++++++++++++++++++++++++++++++++++++++------- src/type.zig | 7 +++-- test/stage2/cbe.zig | 20 ++++++++++++++ 5 files changed, 113 insertions(+), 21 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index c48440ccc2..8ae184a377 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -777,8 +777,19 @@ pub const Fn = struct { } pub fn deinit(func: *Fn, gpa: *Allocator) void { - _ = func; - _ = gpa; + if (func.getInferredErrorSet()) |map| { + map.deinit(gpa); + } + } + + pub fn getInferredErrorSet(func: *Fn) ?*std.StringHashMapUnmanaged(void) { + const ret_ty = func.owner_decl.ty.fnReturnType(); + if (ret_ty.zigTypeTag() == .ErrorUnion) { + if (ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| { + return &payload.data.map; + } + } + return null; } }; diff --git a/src/Sema.zig b/src/Sema.zig index 86e5f59af6..d7ce9fdf4f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -3139,7 +3139,10 @@ fn funcCommon( } const return_type = if (!inferred_error_set) bare_return_type else blk: { - const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, new_func); + const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, .{ + .func = new_func, + .map = .{}, + }); break :blk try Type.Tag.error_union.create(sema.arena, .{ .error_set = error_set_ty, .payload = bare_return_type, @@ -5424,12 +5427,8 @@ fn zirRetErrValue( // Add the error tag to the inferred error set of the in-scope function. if (sema.func) |func| { - const fn_ty = func.owner_decl.ty; - const fn_ret_ty = fn_ty.fnReturnType(); - if (fn_ret_ty.zigTypeTag() == .ErrorUnion and - fn_ret_ty.errorUnionSet().tag() == .error_set_inferred) - { - return sema.mod.fail(&block.base, src, "TODO: Sema.zirRetErrValue", .{}); + if (func.getInferredErrorSet()) |map| { + _ = try map.getOrPut(sema.gpa, err_name); } } // Return the error code from the function. 
@@ -7535,6 +7534,18 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst ); } }, + .error_set_inferred => { + const expected_name = val.castTag(.@"error").?.data.name; + const map = &err_union.data.error_set.castTag(.error_set_inferred).?.data.map; + if (!map.contains(expected_name)) { + return sema.mod.fail( + &block.base, + inst.src, + "expected type '{}', found type '{}'", + .{ err_union.data.error_set, inst.ty }, + ); + } + }, else => unreachable, } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index db0e910643..3aaf559802 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -360,6 +360,12 @@ pub const DeclGen = struct { const error_type = t.errorUnionSet(); const payload_type = t.errorUnionChild(); const data = val.castTag(.error_union).?.data; + + if (!payload_type.hasCodeGenBits()) { + // We use the error type directly as the type. + return dg.renderValue(writer, error_type, data); + } + try writer.writeByte('('); try dg.renderType(writer, t); try writer.writeAll("){"); @@ -604,6 +610,10 @@ pub const DeclGen = struct { const child_type = t.errorUnionChild(); const err_set_type = t.errorUnionSet(); + if (!child_type.hasCodeGenBits()) { + return dg.renderType(w, err_set_type); + } + var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); defer buffer.deinit(); const bw = buffer.writer(); @@ -613,7 +623,7 @@ pub const DeclGen = struct { try bw.writeAll(" payload; uint16_t error; } "); const name_index = buffer.items.len; if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| { - const func = inf_err_set_payload.data; + const func = inf_err_set_payload.data.func; try bw.print("zig_E_{s};\n", .{func.owner_decl.name}); } else { try bw.print("zig_E_{s}_{s};\n", .{ @@ -895,10 +905,10 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi .ref => try genRef(o, inst.castTag(.ref).?), .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), - .is_err => try 
genIsErr(o, inst.castTag(.is_err).?, "", "!="), - .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", "=="), - .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "[0]", "!="), - .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "[0]", "=="), + .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), + .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), + .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), + .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), @@ -1384,9 +1394,25 @@ fn genStructFieldPtr(o: *Object, inst: *Inst.StructFieldPtr) !CValue { // *(E!T) -> E NOT *E fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue { + if (inst.base.isUnused()) + return CValue.none; + const writer = o.writer(); const operand = try o.resolveInst(inst.operand); + const payload_ty = inst.operand.ty.errorUnionChild(); + if (!payload_ty.hasCodeGenBits()) { + if (inst.operand.ty.zigTypeTag() == .Pointer) { + const local = try o.allocLocal(inst.base.ty, .Const); + try writer.writeAll(" = *"); + try o.writeCValue(writer, operand); + try writer.writeAll(";\n"); + return local; + } else { + return operand; + } + } + const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else "."; const local = try o.allocLocal(inst.base.ty, .Const); @@ -1396,10 +1422,19 @@ fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue { try writer.print("){s}error;\n", .{maybe_deref}); return local; } + fn genUnwrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue { + if (inst.base.isUnused()) + return CValue.none; + const writer = o.writer(); const operand = try o.resolveInst(inst.operand); + const payload_ty = inst.operand.ty.errorUnionChild(); + if 
(!payload_ty.hasCodeGenBits()) { + return CValue.none; + } + const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else "."; const maybe_addrof = if (inst.base.ty.zigTypeTag() == .Pointer) "&" else ""; @@ -1448,14 +1483,26 @@ fn genWrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue { return local; } -fn genIsErr(o: *Object, inst: *Inst.UnOp, deref_suffix: []const u8, op_str: []const u8) !CValue { +fn genIsErr( + o: *Object, + inst: *Inst.UnOp, + deref_prefix: [*:0]const u8, + deref_suffix: [*:0]const u8, + op_str: [*:0]const u8, +) !CValue { const writer = o.writer(); const operand = try o.resolveInst(inst.operand); - const local = try o.allocLocal(Type.initTag(.bool), .Const); - try writer.writeAll(" = ("); - try o.writeCValue(writer, operand); - try writer.print("){s}.error {s} 0;\n", .{ deref_suffix, op_str }); + const payload_ty = inst.operand.ty.errorUnionChild(); + if (!payload_ty.hasCodeGenBits()) { + try writer.print(" = {s}", .{deref_prefix}); + try o.writeCValue(writer, operand); + try writer.print(" {s} 0;\n", .{op_str}); + } else { + try writer.writeAll(" = "); + try o.writeCValue(writer, operand); + try writer.print("{s}error {s} 0;\n", .{ deref_suffix, op_str }); + } return local; } diff --git a/src/type.zig b/src/type.zig index f9385e90bc..e8f0998332 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1041,7 +1041,7 @@ pub const Type = extern union { return writer.writeAll(std.mem.spanZ(error_set.owner_decl.name)); }, .error_set_inferred => { - const func = ty.castTag(.error_set_inferred).?.data; + const func = ty.castTag(.error_set_inferred).?.data.func; return writer.print("(inferred error set of {s})", .{func.owner_decl.name}); }, .error_set_single => { @@ -3154,7 +3154,10 @@ pub const Type = extern union { pub const base_tag = Tag.error_set_inferred; base: Payload = Payload{ .tag = base_tag }, - data: *Module.Fn, + data: struct { + func: *Module.Fn, + map: std.StringHashMapUnmanaged(void), + }, }; pub const Pointer = struct { 
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig index a064995c13..cbe24d3ec3 100644 --- a/test/stage2/cbe.zig +++ b/test/stage2/cbe.zig @@ -804,6 +804,26 @@ pub fn addCases(ctx: *TestContext) !void { }); } + { + var case = ctx.exeFromCompiledC("inferred error sets", .{}); + + case.addCompareOutput( + \\pub export fn main() c_int { + \\ if (foo()) |_| { + \\ @panic("test fail"); + \\ } else |err| { + \\ if (err != error.ItBroke) { + \\ @panic("test fail"); + \\ } + \\ } + \\ return 0; + \\} + \\fn foo() !void { + \\ return error.ItBroke; + \\} + , ""); + } + ctx.h("simple header", linux_x64, \\export fn start() void{} ,