diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 83667c758b..86ed1c5a65 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1798,7 +1798,7 @@ fn resetSegfaultHandler() void { .mask = os.empty_sigset, .flags = 0, }; - // do nothing if an error happens to avoid a double-panic + // To avoid a double-panic, do nothing if an error happens here. updateSegfaultHandler(&act) catch {}; } diff --git a/src/Sema.zig b/src/Sema.zig index d3fca6d2b2..cf9b5aa57f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5899,12 +5899,22 @@ fn zirErrorToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (val.isUndef()) { return sema.addConstUndef(result_ty); } - const payload = try sema.arena.create(Value.Payload.U64); - payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - }; - return sema.addConstant(result_ty, Value.initPayload(&payload.base)); + switch (val.tag()) { + .@"error" => { + const payload = try sema.arena.create(Value.Payload.U64); + payload.* = .{ + .base = .{ .tag = .int_u64 }, + .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, + }; + return sema.addConstant(result_ty, Value.initPayload(&payload.base)); + }, + + // This is not a valid combination with the type `anyerror`. + .the_only_possible_value => unreachable, + + // Assume it's already encoded as an integer. + else => return sema.addConstant(result_ty, val), + } } try sema.requireRuntimeBlock(block, src); @@ -6261,19 +6271,24 @@ fn zirErrUnionPayload( }); } + const result_ty = operand_ty.errorUnionPayload(); if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } const data = val.castTag(.eu_payload).?.data; - const result_ty = operand_ty.errorUnionPayload(); return sema.addConstant(result_ty, data); } + try sema.requireRuntimeBlock(block, src); - if (safety_check and block.wantSafety()) { + + // If the error set has no fields then no safety check is needed. + if (safety_check and block.wantSafety() and + operand_ty.errorUnionSet().errorSetCardinality() != .zero) + { try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err, .is_non_err); } - const result_ty = operand_ty.errorUnionPayload(); + return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); } @@ -6311,7 +6326,8 @@ fn analyzeErrUnionPayloadPtr( }); } - const payload_ty = operand_ty.elemType().errorUnionPayload(); + const err_union_ty = operand_ty.elemType(); + const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), @@ -6351,9 +6367,14 @@ fn analyzeErrUnionPayloadPtr( } try sema.requireRuntimeBlock(block, src); - if (safety_check and block.wantSafety()) { + + // If the error set has no fields then no safety check is needed. 
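+    // ("Cardinality" is the number of distinct errors in the set: .zero,
+    // .one, or .many. A set with cardinality .zero can never hold an error
+    // value at runtime, so the unwrap below cannot trip the panic handler.)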
+ if (safety_check and block.wantSafety() and + err_union_ty.errorUnionSet().errorSetCardinality() != .zero) + { try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } + const air_tag: Air.Inst.Tag = if (initializing) .errunion_payload_ptr_set else @@ -20929,6 +20950,11 @@ fn analyzeLoad( .Pointer => ptr_ty.childType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; + + if (try sema.typeHasOnePossibleValue(block, src, elem_ty)) |opv| { + return sema.addConstant(elem_ty, opv); + } + if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); @@ -23295,16 +23321,11 @@ pub fn typeHasOnePossibleValue( .const_slice, .mut_slice, .anyopaque, - .optional, .optional_single_mut_pointer, .optional_single_const_pointer, .enum_literal, .anyerror_void_error_union, - .error_union, - .error_set, - .error_set_single, .error_set_inferred, - .error_set_merged, .@"opaque", .var_args_param, .manyptr_u8, @@ -23333,6 +23354,56 @@ pub fn typeHasOnePossibleValue( .bound_fn, => return null, + .optional => { + var buf: Type.Payload.ElemType = undefined; + const child_ty = ty.optionalChild(&buf); + if (child_ty.isNoReturn()) { + return Value.@"null"; + } else { + return null; + } + }, + + .error_union => { + const error_ty = ty.errorUnionSet(); + switch (error_ty.errorSetCardinality()) { + .zero => { + const payload_ty = ty.errorUnionPayload(); + if (try typeHasOnePossibleValue(sema, block, src, payload_ty)) |payload_val| { + return try Value.Tag.eu_payload.create(sema.arena, payload_val); + } else { + return null; + } + }, + .one => { + if (ty.errorUnionPayload().isNoReturn()) { + const error_val = (try typeHasOnePossibleValue(sema, block, src, error_ty)).?; + return error_val; + } else { + return null; + } + }, + .many => return null, + } + }, + + .error_set_single => { + const name = ty.castTag(.error_set_single).?.data; + return try Value.Tag.@"error".create(sema.arena, .{ .name = name }); + }, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + if (names.len > 1) return null; + return try Value.Tag.@"error".create(sema.arena, .{ .name = names[0] }); + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + if (names.len > 1) return null; + return try Value.Tag.@"error".create(sema.arena, .{ .name = names[0] }); + }, + .@"struct" => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const s = resolved_ty.castTag(.@"struct").?.data; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index f4f2b1e5e5..2a71f3138a 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3,6 +3,7 @@ const builtin = @import("builtin"); const mem = std.mem; const math = std.math; const assert = std.debug.assert; +const codegen = @import("../../codegen.zig"); const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); @@ -22,12 +23,14 @@ const leb128 = std.leb; const log = std.log.scoped(.codegen); const build_options = @import("build_options"); -const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; -const FnResult = @import("../../codegen.zig").FnResult; -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const GenerateSymbolError = 
codegen.GenerateSymbolError; +const FnResult = codegen.FnResult; +const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const Register = bits.Register; @@ -3272,7 +3275,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. fn ret(self: *Self, mcv: MCValue) !void { const ret_ty = self.fn_type.fnReturnType(); - try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); + switch (self.ret_mcv) { + .immediate => { + assert(ret_ty.isError()); + }, + else => { + try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); + }, + } // Just add space for an instruction, patch this later const index = try self.addInst(.{ .tag = .nop, @@ -3601,30 +3611,39 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!error_type.hasRuntimeBits()) { + if (error_type.errorSetCardinality() == .zero) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits()) { - if (error_type.abiSize(self.target.*) <= 8) { - const reg_mcv: MCValue = switch (operand) { - .register => operand, - else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, - }; + } + const err_off = errUnionErrorOffset(payload_type, self.target.*); + switch (operand) { + .stack_offset => |off| { + const offset = off - @intCast(u32, err_off); + const tmp_reg = try self.copyToTmpRegister(Type.anyerror, .{ .stack_offset = offset }); _ = try self.addInst(.{ .tag = .cmp_immediate, .data = .{ .r_imm12_sh = .{ - .rn = reg_mcv.register, + .rn = tmp_reg, .imm12 = 0, } }, }); - - return MCValue{ .compare_flags_unsigned = .gt }; - } else { - return self.fail("TODO isErr for errors with size > 8", .{}); - } - } else { - return self.fail("TODO isErr for non-empty payloads", .{}); + }, + .register => |reg| { + if (err_off > 0 or payload_type.hasRuntimeBitsIgnoreComptime()) { + return self.fail("TODO implement isErr for register operand with payload bits", .{}); + } + _ = try self.addInst(.{ + .tag = .cmp_immediate, + .data = .{ .r_imm12_sh = .{ + .rn = reg, + .imm12 = 0, + } }, + }); + }, + else => return self.fail("TODO implement isErr for {}", .{operand}), } + + return MCValue{ .compare_flags_unsigned = .gt }; } fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue { @@ -4483,7 +4502,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { + if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -4491,7 +4510,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. 
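+    // Zero-bit error types are deliberately kept: they still lower to a
+    // value (the 0 "no error" code) so that `ret` and `isErr` have
+    // something to inspect.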
const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -4674,32 +4693,38 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } }, .ErrorSet => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; + switch (typed_value.val.tag()) { + .@"error" => { + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return MCValue{ .immediate = 0 }; + }, + } }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(); const payload_type = typed_value.ty.errorUnionPayload(); - if (typed_value.val.castTag(.eu_payload)) |pl| { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return MCValue{ .immediate = 0 }; - } - - _ = pl; - return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty.fmtDebug()}); - } else { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); - } - - return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty.fmtDebug()}); + if (error_type.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return self.genTypedValue(.{ .ty = payload_type, .val = payload_val }); } + + const is_pl = typed_value.val.errorUnionIsPayload(); + + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
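+                // A payload (non-error) value lowers to 0, the reserved
+                // "no error" code; an error value keeps its own encoding.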
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + return self.genTypedValue(.{ .ty = error_type, .val = err_val }); + } + + return self.lowerUnnamedConst(typed_value); }, .Struct => { return self.lowerUnnamedConst(typed_value); @@ -4796,13 +4821,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - if (ret_ty_size <= 8) { + if (ret_ty_size == 0) { + assert(ret_ty.isError()); + result.return_value = .{ .immediate = 0 }; + } else if (ret_ty_size <= 8) { result.return_value = .{ .register = registerAlias(c_abi_int_return_regs[0], ret_ty_size) }; } else { return self.fail("TODO support more return types for ARM backend", .{}); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 75fe8f6403..b7682a5b9a 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -3,6 +3,7 @@ const builtin = @import("builtin"); const mem = std.mem; const math = std.math; const assert = std.debug.assert; +const codegen = @import("../../codegen.zig"); const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); @@ -22,12 +23,14 @@ const leb128 = std.leb; const log = std.log.scoped(.codegen); const build_options = @import("build_options"); -const FnResult = @import("../../codegen.zig").FnResult; -const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const FnResult = codegen.FnResult; +const GenerateSymbolError = codegen.GenerateSymbolError; +const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const Register = bits.Register; @@ -1763,19 +1766,26 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// Given an error union, returns the error fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { + const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) return error_union_mcv; + if (err_ty.errorSetCardinality() == .zero) { + return MCValue{ .immediate = 0 }; + } + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return error_union_mcv; + } + const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionErr for registers", .{}), .stack_argument_offset => |off| { - return MCValue{ .stack_argument_offset = off }; + return MCValue{ .stack_argument_offset = off - err_offset }; }, .stack_offset => |off| { - return MCValue{ .stack_offset = off }; + return MCValue{ .stack_offset = off - err_offset }; }, .memory => |addr| { - return MCValue{ .memory = addr }; + return MCValue{ .memory = addr + err_offset }; }, else => unreachable, // invalid MCValue for an error union } @@ -1793,24 +1803,26 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { 
/// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { + const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) return MCValue.none; - - const error_ty = error_union_ty.errorUnionSet(); - const error_size = @intCast(u32, error_ty.abiSize(self.target.*)); - const eu_align = @intCast(u32, error_union_ty.abiAlignment(self.target.*)); - const offset = std.mem.alignForwardGeneric(u32, error_size, eu_align); + if (err_ty.errorSetCardinality() == .zero) { + return error_union_mcv; + } + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return MCValue.none; + } + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_argument_offset => |off| { - return MCValue{ .stack_argument_offset = off - offset }; + return MCValue{ .stack_argument_offset = off - payload_offset }; }, .stack_offset => |off| { - return MCValue{ .stack_offset = off - offset }; + return MCValue{ .stack_offset = off - payload_offset }; }, .memory => |addr| { - return MCValue{ .memory = addr - offset }; + return MCValue{ .memory = addr + payload_offset }; }, else => unreachable, // invalid MCValue for an error union } @@ -3478,6 +3490,9 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { switch (self.ret_mcv) { .none => {}, + .immediate => { + assert(ret_ty.isError()); + }, .register => |reg| { // Return result by value try self.genSetReg(ret_ty, reg, operand); @@ -3867,7 +3882,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const error_type = ty.errorUnionSet(); const error_int_type = Type.initTag(.u16); - if (!error_type.hasRuntimeBits()) { + if (error_type.errorSetCardinality() == .zero) { return MCValue{ .immediate = 0 }; // always false } @@ -4975,7 +4990,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { + if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -4983,7 +4998,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. 
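+    // As in the aarch64 backend, zero-bit error types are the exception:
+    // they still resolve to an MCValue so the error code can be returned.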
const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -5147,26 +5162,35 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } }, .ErrorSet => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; + switch (typed_value.val.tag()) { + .@"error" => { + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return MCValue{ .immediate = 0 }; + }, + } }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(); const payload_type = typed_value.ty.errorUnionPayload(); - if (typed_value.val.castTag(.eu_payload)) |_| { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return MCValue{ .immediate = 0 }; - } - } else { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); - } + if (error_type.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return self.genTypedValue(.{ .ty = payload_type, .val = payload_val }); + } + + const is_pl = typed_value.val.errorUnionIsPayload(); + + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
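+                // (`is_pl` distinguishes the union's two states: a payload
+                // lowers to the 0 error code, an error to its index in the
+                // global error set.)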
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + return self.genTypedValue(.{ .ty = error_type, .val = err_val }); } }, @@ -5231,7 +5255,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { result.return_value = .{ .none = {} }; } else { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); @@ -5278,11 +5302,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { .Unspecified => { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { result.return_value = .{ .none = {} }; } else { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - if (ret_ty_size <= 4) { + if (ret_ty_size == 0) { + assert(ret_ty.isError()); + result.return_value = .{ .immediate = 0 }; + } else if (ret_ty_size <= 4) { result.return_value = .{ .register = .r0 }; } else { // The result is returned by reference, not by diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a35589f043..1eddb7441b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -22,6 +22,8 @@ const Liveness = @import("../../Liveness.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -636,7 +638,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue { // means we must generate it from a constant. const val = self.air.value(ref).?; const ty = self.air.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt()) { + if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -804,6 +806,8 @@ fn genFunctype(gpa: Allocator, fn_info: Type.Payload.Function.Data, target: std. 
} else { try returns.append(typeToValtype(fn_info.return_type, target)); } + } else if (fn_info.return_type.isError()) { + try returns.append(.i32); } // param types @@ -1373,13 +1377,18 @@ fn isByRef(ty: Type, target: std.Target) bool { .Int => return ty.intInfo(target).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { - const has_tag = ty.errorUnionSet().hasRuntimeBitsIgnoreComptime(); - const has_pl = ty.errorUnionPayload().hasRuntimeBitsIgnoreComptime(); - if (!has_tag or !has_pl) return false; - return ty.hasRuntimeBitsIgnoreComptime(); + const err_ty = ty.errorUnionSet(); + const pl_ty = ty.errorUnionPayload(); + if (err_ty.errorSetCardinality() == .zero) { + return isByRef(pl_ty, target); + } + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + return false; + } + return true; }, .Optional => { - if (ty.isPtrLikeOptional()) return false; + if (ty.optionalReprIsPayload()) return false; var buf: Type.Payload.ElemType = undefined; return ty.optionalChild(&buf).hasRuntimeBitsIgnoreComptime(); }, @@ -1624,13 +1633,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.decl.ty.fnReturnType(); + const fn_info = self.decl.ty.fnInfo(); + const ret_ty = fn_info.return_type; // result must be stored in the stack and we return a pointer // to the stack instead if (self.return_value != .none) { - try self.store(self.return_value, operand, self.decl.ty.fnReturnType(), 0); - } else if (self.decl.ty.fnInfo().cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { + try self.store(self.return_value, operand, ret_ty, 0); + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { switch (ret_ty.zigTypeTag()) { // Aggregate types can be lowered as a singular value .Struct, .Union => { @@ -1650,7 +1660,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { else => try self.emitWValue(operand), } } else { - try self.emitWValue(operand); + if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) { + try self.addImm32(0); + } else { + try self.emitWValue(operand); + } } try self.restoreStackPointer(); try self.addTag(.@"return"); @@ -1675,7 +1689,13 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.air.typeOf(un_op).childType(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) return WValue.none; + if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (ret_ty.isError()) { + try self.addImm32(0); + } else { + return WValue.none; + } + } if (!firstParamSRet(self.decl.ty.fnInfo(), self.target)) { const result = try self.load(operand, ret_ty, 0); @@ -1723,8 +1743,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. const sret = if (first_param_sret) blk: { const sret_local = try self.allocStack(ret_ty); - const ptr_offset = try self.buildPointerOffset(sret_local, 0, .new); - try self.emitWValue(ptr_offset); + try self.lowerToStack(sret_local); break :blk sret_local; } else WValue{ .none = {} }; @@ -1754,7 +1773,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
try self.addLabel(.call_indirect, fn_type_index); } - if (self.liveness.isUnused(inst) or !ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (self.liveness.isUnused(inst) or (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError())) { return WValue.none; } else if (ret_ty.isNoReturn()) { try self.addTag(.@"unreachable"); @@ -1796,8 +1815,11 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro .ErrorUnion => { const err_ty = ty.errorUnionSet(); const pl_ty = ty.errorUnionPayload(); + if (err_ty.errorSetCardinality() == .zero) { + return self.store(lhs, rhs, pl_ty, 0); + } if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { - return self.store(lhs, rhs, err_ty, 0); + return self.store(lhs, rhs, Type.anyerror, 0); } const len = @intCast(u32, ty.abiSize(self.target)); @@ -1812,6 +1834,9 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { return self.store(lhs, rhs, Type.u8, 0); } + if (pl_ty.zigTypeTag() == .ErrorSet) { + return self.store(lhs, rhs, Type.anyerror, 0); + } const len = @intCast(u32, ty.abiSize(self.target)); return self.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2178,7 +2203,7 @@ fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WV const parent_ptr = try self.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty); var buf: Type.Payload.ElemType = undefined; const payload_ty = payload_ptr.container_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.optionalReprIsPayload()) { return parent_ptr; } @@ -2256,6 +2281,7 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue { const target = self.target; switch (ty.zigTypeTag()) { + .Void => return WValue{ .none = {} }, .Int => { const int_info = ty.intInfo(self.target); switch (int_info.signedness) { @@ -2324,11 +2350,15 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue { }, .ErrorUnion => { const error_type = ty.errorUnionSet(); + if (error_type.errorSetCardinality() == .zero) { + const pl_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); + return self.lowerConstant(pl_val, ty.errorUnionPayload()); + } const is_pl = val.errorUnionIsPayload(); const err_val = if (!is_pl) val else Value.initTag(.zero); return self.lowerConstant(err_val, error_type); }, - .Optional => if (ty.isPtrLikeOptional()) { + .Optional => if (ty.optionalReprIsPayload()) { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); if (val.castTag(.opt_payload)) |payload| { @@ -2367,7 +2397,7 @@ fn emitUndefined(self: *Self, ty: Type) InnerError!WValue { .Optional => { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (ty.isPtrLikeOptional()) { + if (ty.optionalReprIsPayload()) { return self.emitUndefined(pl_ty); } return WValue{ .imm32 = 0xaaaaaaaa }; @@ -2517,7 +2547,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner } fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { - if (ty.zigTypeTag() == .Optional and !ty.isPtrLikeOptional()) { + if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); if (payload_ty.hasRuntimeBitsIgnoreComptime()) { @@ -2889,15 +2919,22 @@ fn airSwitchBr(self: *Self, inst: 
Air.Inst.Index) InnerError!WValue { fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const err_ty = self.air.typeOf(un_op); - const pl_ty = err_ty.errorUnionPayload(); + const err_union_ty = self.air.typeOf(un_op); + const pl_ty = err_union_ty.errorUnionPayload(); + + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + switch (opcode) { + .i32_ne => return WValue{ .imm32 = 0 }, + .i32_eq => return WValue{ .imm32 = 1 }, + else => unreachable, + } + } - // load the error tag value try self.emitWValue(operand); if (pl_ty.hasRuntimeBitsIgnoreComptime()) { try self.addMemArg(.i32_load16_u, .{ - .offset = operand.offset(), - .alignment = err_ty.errorUnionSet().abiAlignment(self.target), + .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, self.target)), + .alignment = Type.anyerror.abiAlignment(self.target), }); } @@ -2905,7 +2942,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W try self.addImm32(0); try self.addTag(Mir.Inst.Tag.fromOpcode(opcode)); - const is_err_tmp = try self.allocLocal(Type.initTag(.i32)); // result is always an i32 + const is_err_tmp = try self.allocLocal(Type.i32); try self.addLabel(.local_set, is_err_tmp.local); return is_err_tmp; } @@ -2917,14 +2954,18 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) const op_ty = self.air.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; - const err_align = err_ty.abiAlignment(self.target); - const set_size = err_ty.errorUnionSet().abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); - if (op_is_ptr or isByRef(payload_ty, self.target)) { - return self.buildPointerOffset(operand, offset, .new); + + if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; } - return self.load(operand, payload_ty, @intCast(u32, offset)); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; + + const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target)); + if (op_is_ptr or isByRef(payload_ty, self.target)) { + return self.buildPointerOffset(operand, pl_offset, .new); + } + return self.load(operand, payload_ty, pl_offset); } fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!WValue { @@ -2935,11 +2976,16 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In const op_ty = self.air.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); + + if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + return WValue{ .imm32 = 0 }; + } + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } - return self.load(operand, err_ty.errorUnionSet(), 0); + return self.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, self.target))); } fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -2947,22 +2993,26 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); + const err_ty = 
self.air.typeOfIndex(inst); - const op_ty = self.air.typeOf(ty_op.operand); - if (!op_ty.hasRuntimeBitsIgnoreComptime()) return operand; - const err_union_ty = self.air.getRefType(ty_op.ty); - const err_align = err_union_ty.abiAlignment(self.target); - const set_size = err_union_ty.errorUnionSet().abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); + if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } - const err_union = try self.allocStack(err_union_ty); - const payload_ptr = try self.buildPointerOffset(err_union, offset, .new); - try self.store(payload_ptr, operand, op_ty, 0); + const pl_ty = self.air.typeOf(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + return operand; + } + + const err_union = try self.allocStack(err_ty); + const payload_ptr = try self.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, self.target)), .new); + try self.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. try self.emitWValue(err_union); try self.addImm32(0); - try self.addMemArg(.i32_store16, .{ .offset = err_union.offset(), .alignment = 2 }); + const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, self.target)); + try self.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); return err_union; } @@ -2973,17 +3023,18 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_ty = self.air.getRefType(ty_op.ty); + const pl_ty = err_ty.errorUnionPayload(); - if (!err_ty.errorUnionPayload().hasRuntimeBitsIgnoreComptime()) return operand; + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + return operand; + } const err_union = try self.allocStack(err_ty); - try self.store(err_union, operand, err_ty.errorUnionSet(), 0); + // store error value + try self.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, self.target))); // write 'undefined' to the payload - const err_align = err_ty.abiAlignment(self.target); - const set_size = err_ty.errorUnionSet().abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); - const payload_ptr = try self.buildPointerOffset(err_union, offset, .new); + const payload_ptr = try self.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, self.target)), .new); const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(self.target)); try self.memset(payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaaaaaaaa }); @@ -3074,7 +3125,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: en fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { try self.emitWValue(operand); - if (!optional_ty.isPtrLikeOptional()) { + if (!optional_ty.optionalReprIsPayload()) { var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); // When payload is zero-bits, we can treat operand as a value, rather than @@ -3100,7 +3151,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const opt_ty = self.air.typeOf(ty_op.operand); const payload_ty = self.air.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; - if (opt_ty.isPtrLikeOptional()) return operand; + if 
(opt_ty.optionalReprIsPayload()) return operand; const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target); @@ -3120,7 +3171,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) { return operand; } @@ -3138,7 +3189,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } - if (opt_ty.isPtrLikeOptional()) { + if (opt_ty.optionalReprIsPayload()) { return operand; } @@ -3169,7 +3220,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const op_ty = self.air.typeOfIndex(inst); - if (op_ty.isPtrLikeOptional()) { + if (op_ty.optionalReprIsPayload()) { return operand; } const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch { @@ -3927,12 +3978,16 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue { fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_set_ty = self.air.typeOf(ty_op.operand).childType(); - const err_ty = err_set_ty.errorUnionSet(); const payload_ty = err_set_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); // set error-tag to '0' to annotate error union is non-error - try self.store(operand, .{ .imm32 = 0 }, err_ty, 0); + try self.store( + operand, + .{ .imm32 = 0 }, + Type.anyerror, + @intCast(u32, errUnionErrorOffset(payload_ty, self.target)), + ); if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; @@ -3940,11 +3995,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue return operand; } - const err_align = err_set_ty.abiAlignment(self.target); - const set_size = err_ty.abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); - - return self.buildPointerOffset(operand, @intCast(u32, offset), .new); + return self.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, self.target)), .new); } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index eeb4cab04f..5c69f78724 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2,6 +2,7 @@ const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); const assert = std.debug.assert; +const codegen = @import("../../codegen.zig"); const leb128 = std.leb; const link = @import("../../link.zig"); const log = std.log.scoped(.codegen); @@ -12,11 +13,11 @@ const trace = @import("../../tracy.zig").trace; const Air = @import("../../Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("../../Compilation.zig"); -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const DebugInfoOutput = codegen.DebugInfoOutput; const DW = std.dwarf; const ErrorMsg = Module.ErrorMsg; -const FnResult = @import("../../codegen.zig").FnResult; -const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; +const FnResult = 
codegen.FnResult; +const GenerateSymbolError = codegen.GenerateSymbolError; const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Mir = @import("Mir.zig"); @@ -28,6 +29,8 @@ const Value = @import("../../value.zig").Value; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; const callee_preserved_regs = abi.callee_preserved_regs; const caller_preserved_regs = abi.caller_preserved_regs; @@ -854,7 +857,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const ptr_ty = self.air.typeOfIndex(inst); const elem_ty = ptr_ty.elemType(); - if (!elem_ty.hasRuntimeBits()) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime()) { return self.allocMem(inst, @sizeOf(usize), @alignOf(usize)); } @@ -1786,21 +1789,34 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const err_ty = err_union_ty.errorUnionSet(); const payload_ty = err_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - const operand_lock: ?RegisterLock = switch (operand) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBits()) break :result operand; + if (err_ty.errorSetCardinality() == .zero) { + break :result MCValue{ .immediate = 0 }; + } + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result operand; + } + + const err_off = errUnionErrorOffset(payload_ty, self.target.*); switch (operand) { .stack_offset => |off| { - break :result MCValue{ .stack_offset = off }; + const offset = off - @intCast(i32, err_off); + break :result MCValue{ .stack_offset = offset }; }, - .register => { + .register => |reg| { // TODO reuse operand - break :result try self.copyToRegisterWithInstTracking(inst, err_ty, operand); + const lock = self.register_manager.lockRegAssumeUnused(reg); + defer self.register_manager.unlockReg(lock); + const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); + if (err_off > 0) { + const shift = @intCast(u6, err_off * 8); + try self.genShiftBinOpMir(.shr, err_union_ty, result.register, .{ .immediate = shift }); + } else { + try self.truncateRegister(Type.anyerror, result.register); + } + break :result result; }, else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), } @@ -1815,32 +1831,37 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { } const err_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = err_union_ty.errorUnionPayload(); + const err_ty = err_union_ty.errorUnionSet(); + const operand = try self.resolveInst(ty_op.operand); + const result: MCValue = result: { - if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; + if (err_ty.errorSetCardinality() == .zero) { + // TODO check if we can reuse + break :result operand; + } - const operand = try self.resolveInst(ty_op.operand); - const operand_lock: ?RegisterLock = switch (operand) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result MCValue.none; + } - const abi_align = err_union_ty.abiAlignment(self.target.*); - const err_ty = err_union_ty.errorUnionSet(); - const err_abi_size = mem.alignForwardGeneric(u32, 
@intCast(u32, err_ty.abiSize(self.target.*)), abi_align); + const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); switch (operand) { .stack_offset => |off| { - const offset = off - @intCast(i32, err_abi_size); + const offset = off - @intCast(i32, payload_off); break :result MCValue{ .stack_offset = offset }; }, - .register => { + .register => |reg| { // TODO reuse operand - const shift = @intCast(u6, err_abi_size * @sizeOf(usize)); + const lock = self.register_manager.lockRegAssumeUnused(reg); + defer self.register_manager.unlockReg(lock); const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); - try self.genShiftBinOpMir(.shr, Type.usize, result.register, .{ .immediate = shift }); - break :result MCValue{ - .register = registerAlias(result.register, @intCast(u32, payload_ty.abiSize(self.target.*))), - }; + if (payload_off > 0) { + const shift = @intCast(u6, payload_off * 8); + try self.genShiftBinOpMir(.shr, err_union_ty, result.register, .{ .immediate = shift }); + } else { + try self.truncateRegister(payload_ty, result.register); + } + break :result result; }, else => return self.fail("TODO implement unwrap_err_payload for {}", .{operand}), } @@ -1935,24 +1956,37 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } + const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - assert(payload_ty.hasRuntimeBits()); - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); - const err_abi_size = @intCast(u32, error_ty.abiSize(self.target.*)); - const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); - const offset = mem.alignForwardGeneric(u32, err_abi_size, abi_align); - try self.genSetStack(error_ty, stack_offset, .{ .immediate = 0 }, .{}); - try self.genSetStack(payload_ty, stack_offset - @intCast(i32, offset), operand, .{}); + const result: MCValue = result: { + if (error_ty.errorSetCardinality() == .zero) { + break :result operand; + } - return self.finishAir(inst, .{ .stack_offset = stack_offset }, .{ ty_op.operand, .none, .none }); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result operand; + } + + const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); + const abi_align = error_union_ty.abiAlignment(self.target.*); + const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); + const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, self.target.*); + try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), operand, .{}); + try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), .{ .immediate = 0 }, .{}); + + break :result MCValue{ .stack_offset = stack_offset }; + }; + + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T @@ -1962,19 +1996,22 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const error_union_ty = self.air.getRefType(ty_op.ty); - 
const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - const err = try self.resolveInst(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + const result: MCValue = result: { - if (!payload_ty.hasRuntimeBits()) break :result err; + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result operand; + } const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); - const err_abi_size = @intCast(u32, error_ty.abiSize(self.target.*)); const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); - const offset = mem.alignForwardGeneric(u32, err_abi_size, abi_align); - try self.genSetStack(error_ty, stack_offset, err, .{}); - try self.genSetStack(payload_ty, stack_offset - @intCast(i32, offset), .undef, .{}); + const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, self.target.*); + try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), operand, .{}); + try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), .undef, .{}); + break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2535,7 +2572,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -4102,6 +4139,9 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(); switch (self.ret_mcv) { + .immediate => { + assert(ret_ty.isError()); + }, .stack_offset => { const reg = try self.copyToTmpRegister(Type.usize, self.ret_mcv); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); @@ -4134,6 +4174,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.air.typeOf(un_op); const elem_ty = ptr_ty.elemType(); switch (self.ret_mcv) { + .immediate => { + assert(elem_ty.isError()); + }, .stack_offset => { const reg = try self.copyToTmpRegister(Type.usize, self.ret_mcv); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); @@ -4377,7 +4420,6 @@ fn genVarDbgInfo( fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { switch (self.debug_output) { .dwarf => |dw| { - assert(ty.hasRuntimeBits()); const dbg_info = &dw.dbg_info; const index = dbg_info.items.len; try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 @@ -4604,7 +4646,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValu const cmp_ty: Type = if (!ty.isPtrLikeOptional()) blk: { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - break :blk if (payload_ty.hasRuntimeBits()) Type.bool else ty; + break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime()) Type.bool else ty; } else ty; try self.genBinOpMir(.cmp, cmp_ty, operand, MCValue{ .immediate = 0 }); @@ -4620,25 +4662,36 @@ fn isNonNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCV fn isErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { const err_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); - if (!err_type.hasRuntimeBits()) { + + if (err_type.errorSetCardinality() == .zero) { return MCValue{ 
.immediate = 0 }; // always false } try self.spillCompareFlagsIfOccupied(); self.compare_flags_inst = inst; - if (!payload_type.hasRuntimeBits()) { - if (err_type.abiSize(self.target.*) <= 8) { - try self.genBinOpMir(.cmp, err_type, operand, MCValue{ .immediate = 0 }); - return MCValue{ .compare_flags_unsigned = .gt }; - } else { - return self.fail("TODO isErr for errors with size larger than register size", .{}); - } - } else { - try self.genBinOpMir(.cmp, err_type, operand, MCValue{ .immediate = 0 }); - return MCValue{ .compare_flags_unsigned = .gt }; + const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*); + switch (operand) { + .stack_offset => |off| { + const offset = off - @intCast(i32, err_off); + try self.genBinOpMir(.cmp, Type.anyerror, .{ .stack_offset = offset }, .{ .immediate = 0 }); + }, + .register => |reg| { + const maybe_lock = self.register_manager.lockReg(reg); + defer if (maybe_lock) |lock| self.register_manager.unlockReg(lock); + const tmp_reg = try self.copyToTmpRegister(ty, operand); + if (err_off > 0) { + const shift = @intCast(u6, err_off * 8); + try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{ .immediate = shift }); + } else { + try self.truncateRegister(Type.anyerror, tmp_reg); + } + try self.genBinOpMir(.cmp, Type.anyerror, .{ .register = tmp_reg }, .{ .immediate = 0 }); + }, + else => return self.fail("TODO implement isErr for {}", .{operand}), } + + return MCValue{ .compare_flags_unsigned = .gt }; } fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { @@ -5461,6 +5514,21 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl .immediate => |x_big| { const base_reg = opts.dest_stack_base orelse .rbp; switch (abi_size) { + 0 => { + assert(ty.isError()); + const payload = try self.addExtra(Mir.ImmPair{ + .dest_off = @bitCast(u32, -stack_offset), + .operand = @truncate(u32, x_big), + }); + _ = try self.addInst(.{ + .tag = .mov_mem_imm, + .ops = Mir.Inst.Ops.encode(.{ + .reg1 = base_reg, + .flags = 0b00, + }), + .data = .{ .payload = payload }, + }); + }, 1, 2, 4 => { const payload = try self.addExtra(Mir.ImmPair{ .dest_off = @bitCast(u32, -stack_offset), @@ -6643,7 +6711,7 @@ pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { + if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -6651,7 +6719,7 @@ pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. 
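+    // Zero-bit error types still resolve here; see
+    // resolveCallingConventionValues below, which returns `.immediate = 0`
+    // for zero-sized error returns.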
const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -6780,6 +6848,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { const target = self.target.*; switch (typed_value.ty.zigTypeTag()) { + .Void => return MCValue{ .none = {} }, .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => {}, else => { @@ -6841,26 +6910,35 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } }, .ErrorSet => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; + switch (typed_value.val.tag()) { + .@"error" => { + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return MCValue{ .immediate = 0 }; + }, + } }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(); const payload_type = typed_value.ty.errorUnionPayload(); - if (typed_value.val.castTag(.eu_payload)) |_| { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return MCValue{ .immediate = 0 }; - } - } else { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); - } + if (error_type.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return self.genTypedValue(.{ .ty = payload_type, .val = payload_val }); + } + + const is_pl = typed_value.val.errorUnionIsPayload(); + + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
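+                // `Value.initTag(.zero)` stands in for the "no error" code
+                // when the union currently holds its zero-bit payload.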
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + return self.genTypedValue(.{ .ty = error_type, .val = err_val }); } }, @@ -6868,7 +6946,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { .ComptimeFloat => unreachable, .Type => unreachable, .EnumLiteral => unreachable, - .Void => unreachable, .NoReturn => unreachable, .Undefined => unreachable, .Null => unreachable, @@ -6922,11 +6999,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // Return values if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { result.return_value = .{ .none = {} }; } else { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - if (ret_ty_size <= 8) { + if (ret_ty_size == 0) { + assert(ret_ty.isError()); + result.return_value = .{ .immediate = 0 }; + } else if (ret_ty_size <= 8) { const aliased_reg = registerAlias(c_abi_int_return_regs[0], ret_ty_size); result.return_value = .{ .register = aliased_reg }; } else { diff --git a/src/codegen.zig b/src/codegen.zig index bd556baa5e..fbe462959e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -442,7 +442,10 @@ pub fn generateSymbol( .Int => { const info = typed_value.ty.intInfo(target); if (info.bits <= 8) { - const x = @intCast(u8, typed_value.val.toUnsignedInt(target)); + const x: u8 = switch (info.signedness) { + .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)), + .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt())), + }; try code.append(x); return Result{ .appended = {} }; } @@ -654,7 +657,7 @@ pub fn generateSymbol( return Result{ .appended = {} }; } - if (typed_value.ty.isPtrLikeOptional()) { + if (typed_value.ty.optionalReprIsPayload()) { if (typed_value.val.castTag(.opt_payload)) |payload| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, @@ -702,16 +705,50 @@ pub fn generateSymbol( .ErrorUnion => { const error_ty = typed_value.ty.errorUnionSet(); const payload_ty = typed_value.ty.errorUnionPayload(); + + if (error_ty.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return generateSymbol(bin_file, src_loc, .{ + .ty = payload_ty, + .val = payload_val, + }, code, debug_output, reloc_info); + } + const is_payload = typed_value.val.errorUnionIsPayload(); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; + return generateSymbol(bin_file, src_loc, .{ + .ty = error_ty, + .val = err_val, + }, code, debug_output, reloc_info); + } + + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); const abi_align = typed_value.ty.abiAlignment(target); - { - const error_val = if (!is_payload) typed_value.val else Value.initTag(.zero); - const begin = code.items.len; + // error value first when its type is larger than the error union's payload + if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = error_val, + .val = if (is_payload) Value.initTag(.zero) else typed_value.val, + }, code, debug_output, reloc_info)) { + .appended => {}, + .externally_managed => |external_slice| { + code.appendSliceAssumeCapacity(external_slice); + }, + .fail => |em| return Result{ .fail = em }, + } + } + + // emit payload part of the error union + { + const begin 
= code.items.len;
+ const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef);
+ switch (try generateSymbol(bin_file, src_loc, .{
+ .ty = payload_ty,
+ .val = payload_val,
 }, code, debug_output, reloc_info)) {
 .appended => {},
 .externally_managed => |external_slice| {
@@ -728,12 +765,12 @@ pub fn generateSymbol(
 }
 }

- if (payload_ty.hasRuntimeBits()) {
+ // The payload's alignment is at least the error value's, so emit the error value last.
+ if (error_align <= payload_align) {
 const begin = code.items.len;
- const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef);
 switch (try generateSymbol(bin_file, src_loc, .{
- .ty = payload_ty,
- .val = payload_val,
+ .ty = error_ty,
+ .val = if (is_payload) Value.initTag(.zero) else typed_value.val,
 }, code, debug_output, reloc_info)) {
 .appended => {},
 .externally_managed => |external_slice| {
@@ -760,7 +797,7 @@ pub fn generateSymbol(
 try code.writer().writeInt(u32, kv.value, endian);
 },
 else => {
- try code.writer().writeByteNTimes(0, @intCast(usize, typed_value.ty.abiSize(target)));
+ try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target)));
 },
 }
 return Result{ .appended = {} };
},
@@ -853,3 +890,23 @@ fn lowerDeclRef(
 return Result{ .appended = {} };
 }
+
+pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 {
+ const payload_align = payload_ty.abiAlignment(target);
+ const error_align = Type.anyerror.abiAlignment(target);
+ if (payload_align >= error_align) {
+ return 0;
+ } else {
+ return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align);
+ }
+}
+
+pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 {
+ const payload_align = payload_ty.abiAlignment(target);
+ const error_align = Type.anyerror.abiAlignment(target);
+ if (payload_align >= error_align) {
+ return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align);
+ } else {
+ return 0;
+ }
+}
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 5f61f8586e..1e45090648 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -711,21 +711,24 @@ pub const DeclGen = struct {
 .Bool => return writer.print("{}", .{val.toBool()}),
 .Optional => {
 var opt_buf: Type.Payload.ElemType = undefined;
- const payload_type = ty.optionalChild(&opt_buf);
- if (ty.isPtrLikeOptional()) {
- return dg.renderValue(writer, payload_type, val, location);
- }
- if (payload_type.abiSize(target) == 0) {
+ const payload_ty = ty.optionalChild(&opt_buf);
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
 const is_null = val.castTag(.opt_payload) == null;
 return writer.print("{}", .{is_null});
 }
+
+ if (ty.optionalReprIsPayload()) {
+ return dg.renderValue(writer, payload_ty, val, location);
+ }
+
 try writer.writeByte('(');
 try dg.renderTypecast(writer, ty);
 try writer.writeAll("){");
 if (val.castTag(.opt_payload)) |pl| {
 const payload_val = pl.data;
 try writer.writeAll(" .is_null = false, .payload = ");
- try dg.renderValue(writer, payload_type, payload_val, location);
+ try dg.renderValue(writer, payload_ty, payload_val, location);
 try writer.writeAll(" }");
 } else {
 try writer.writeAll(" .is_null = true }");
@@ -749,6 +752,12 @@ pub const DeclGen = struct {
 const error_type = ty.errorUnionSet();
 const payload_type = ty.errorUnionPayload();

+ if (error_type.errorSetCardinality() == .zero) {
+ // We use the payload directly as the type.
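+ // For example, a constant of type `error{}!u32` is rendered exactly
+ // like a plain `u32` constant, since the error branch can never be taken.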
+ const payload_val = val.castTag(.eu_payload).?.data;
+ return dg.renderValue(writer, payload_type, payload_val, location);
+ }
+
 if (!payload_type.hasRuntimeBits()) {
 // We use the error type directly as the type.
 const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val;
@@ -894,10 +903,12 @@ pub const DeclGen = struct {
 try w.writeAll("ZIG_COLD ");
 }
 }
- const return_ty = dg.decl.ty.fnReturnType();
- if (return_ty.hasRuntimeBits()) {
- try dg.renderType(w, return_ty);
- } else if (return_ty.zigTypeTag() == .NoReturn) {
+ const fn_info = dg.decl.ty.fnInfo();
+ if (fn_info.return_type.hasRuntimeBits()) {
+ try dg.renderType(w, fn_info.return_type);
+ } else if (fn_info.return_type.isError()) {
+ try dg.renderType(w, Type.anyerror);
+ } else if (fn_info.return_type.zigTypeTag() == .NoReturn) {
 try w.writeAll("zig_noreturn void");
 } else {
 try w.writeAll("void");
@@ -905,22 +916,19 @@ pub const DeclGen = struct {
 try w.writeAll(" ");
 try dg.renderDeclName(w, dg.decl_index);
 try w.writeAll("(");
- const param_len = dg.decl.ty.fnParamLen();
- var index: usize = 0;
 var params_written: usize = 0;
- while (index < param_len) : (index += 1) {
- const param_type = dg.decl.ty.fnParamType(index);
+ for (fn_info.param_types) |param_type, index| {
 if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
 if (params_written > 0) {
 try w.writeAll(", ");
 }
 const name = CValue{ .arg = index };
- try dg.renderTypeAndName(w, dg.decl.ty.fnParamType(index), name, .Mut, 0);
+ try dg.renderTypeAndName(w, param_type, name, .Mut, 0);
 params_written += 1;
 }
- if (dg.decl.ty.fnIsVarArgs()) {
+ if (fn_info.is_var_args) {
 if (params_written != 0) try w.writeAll(", ");
 try w.writeAll("...");
 } else if (params_written == 0) {
@@ -1156,26 +1164,36 @@ pub const DeclGen = struct {
 }

 fn renderErrorUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 {
- const child_type = t.errorUnionPayload();
- const err_set_type = t.errorUnionSet();
+ const payload_ty = t.errorUnionPayload();
+ const error_ty = t.errorUnionSet();

 var buffer = std.ArrayList(u8).init(dg.typedefs.allocator);
 defer buffer.deinit();
 const bw = buffer.writer();

- try bw.writeAll("typedef struct { ");
 const payload_name = CValue{ .bytes = "payload" };
- try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0);
- try bw.writeAll("; uint16_t error; } ");
+ const target = dg.module.getTarget();
+ const payload_align = payload_ty.abiAlignment(target);
+ const error_align = Type.anyerror.abiAlignment(target);
+ if (error_align > payload_align) {
+ try bw.writeAll("typedef struct { uint16_t error; ");
+ try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0);
+ try bw.writeAll("; } ");
+ } else {
+ try bw.writeAll("typedef struct { ");
+ try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0);
+ try bw.writeAll("; uint16_t error; } ");
+ }
+
 const name_index = buffer.items.len;
- if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| {
+ if (error_ty.castTag(.error_set_inferred)) |inf_err_set_payload| {
 const func = inf_err_set_payload.data.func;
 try bw.writeAll("zig_E_");
 try dg.renderDeclName(bw, func.owner_decl);
 try bw.writeAll(";\n");
 } else {
 try bw.print("zig_E_{s}_{s};\n", .{
- typeToCIdentifier(err_set_type, dg.module), typeToCIdentifier(child_type, dg.module),
+ typeToCIdentifier(error_ty, dg.module), typeToCIdentifier(payload_ty, dg.module),
 });
 }
@@ -1345,12 +1363,12 @@ pub const DeclGen = struct {
 var opt_buf: Type.Payload.ElemType = undefined;
 const child_type = 
t.optionalChild(&opt_buf);
- if (t.isPtrLikeOptional()) {
- return dg.renderType(w, child_type);
+ if (!child_type.hasRuntimeBitsIgnoreComptime()) {
+ return w.writeAll("bool");
 }
- if (child_type.abiSize(target) == 0) {
- return w.writeAll("bool");
+ if (t.optionalReprIsPayload()) {
+ return dg.renderType(w, child_type);
 }

 const name = dg.getTypedefName(t) orelse
@@ -1359,12 +1377,19 @@ pub const DeclGen = struct {
 return w.writeAll(name);
 },
 .ErrorSet => {
- comptime assert(Type.initTag(.anyerror).abiSize(builtin.target) == 2);
+ comptime assert(Type.anyerror.abiSize(builtin.target) == 2);
 return w.writeAll("uint16_t");
 },
 .ErrorUnion => {
- if (t.errorUnionPayload().abiSize(target) == 0) {
- return dg.renderType(w, t.errorUnionSet());
+ const error_ty = t.errorUnionSet();
+ const payload_ty = t.errorUnionPayload();
+
+ if (error_ty.errorSetCardinality() == .zero) {
+ return dg.renderType(w, payload_ty);
+ }
+
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+ return dg.renderType(w, Type.anyerror);
 }

 const name = dg.getTypedefName(t) orelse
@@ -1794,8 +1819,9 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
 .not => try airNot (f, inst),

 .optional_payload => try airOptionalPayload(f, inst),
- .optional_payload_ptr => try airOptionalPayload(f, inst),
+ .optional_payload_ptr => try airOptionalPayloadPtr(f, inst),
 .optional_payload_ptr_set => try airOptionalPayloadPtrSet(f, inst),
+ .wrap_optional => try airWrapOptional(f, inst),

 .is_err => try airIsErr(f, inst, false, "!="),
 .is_non_err => try airIsErr(f, inst, false, "=="),
@@ -1824,7 +1850,6 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
 .cond_br => try airCondBr(f, inst),
 .br => try airBr(f, inst),
 .switch_br => try airSwitchBr(f, inst),
- .wrap_optional => try airWrapOptional(f, inst),
 .struct_field_ptr => try airStructFieldPtr(f, inst),
 .array_to_slice => try airArrayToSlice(f, inst),
 .cmpxchg_weak => try airCmpxchg(f, inst, "weak"),
@@ -1901,8 +1926,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
 .array_elem_val => try airArrayElemVal(f, inst),

 .unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst, ""),
- .unwrap_errunion_err => try airUnwrapErrUnionErr(f, inst),
 .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(f, inst, "&"),
+ .unwrap_errunion_err => try airUnwrapErrUnionErr(f, inst),
 .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(f, inst),
 .wrap_errunion_payload => try airWrapErrUnionPay(f, inst),
 .wrap_errunion_err => try airWrapErrUnionErr(f, inst),
@@ -2120,11 +2145,14 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {

 fn airRet(f: *Function, inst: Air.Inst.Index) !CValue {
 const un_op = f.air.instructions.items(.data)[inst].un_op;
 const writer = f.object.writer();
- if (f.air.typeOf(un_op).isFnOrHasRuntimeBitsIgnoreComptime()) {
+ const ret_ty = f.air.typeOf(un_op);
+ if (ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
 const operand = try f.resolveInst(un_op);
 try writer.writeAll("return ");
 try f.writeCValue(writer, operand);
 try writer.writeAll(";\n");
+ } else if (ret_ty.isError()) {
+ try writer.writeAll("return 0;\n");
 } else {
 try writer.writeAll("return;\n");
 }
@@ -2136,13 +2164,16 @@ fn airRetLoad(f: *Function, inst: Air.Inst.Index) !CValue {
 const un_op = f.air.instructions.items(.data)[inst].un_op;
 const writer = f.object.writer();
 const ptr_ty = f.air.typeOf(un_op);
 const ret_ty = ptr_ty.childType();
- if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ if (ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
+ const ptr = try 
f.resolveInst(un_op); + try writer.writeAll("return *"); + try f.writeCValue(writer, ptr); + try writer.writeAll(";\n"); + } else if (ret_ty.isError()) { + try writer.writeAll("return 0;\n"); + } else { try writer.writeAll("return;\n"); } - const ptr = try f.resolveInst(un_op); - try writer.writeAll("return *"); - try f.writeCValue(writer, ptr); - try writer.writeAll(";\n"); return CValue.none; } @@ -2713,19 +2744,20 @@ fn airCall( .Pointer => callee_ty.childType(), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - const unused_result = f.liveness.isUnused(inst); const writer = f.object.writer(); - var result_local: CValue = .none; - if (unused_result) { - if (ret_ty.hasRuntimeBits()) { - try writer.print("(void)", .{}); + const result_local: CValue = r: { + if (f.liveness.isUnused(inst)) { + if (loweredFnRetTyHasBits(fn_ty)) { + try writer.print("(void)", .{}); + } + break :r .none; + } else { + const local = try f.allocLocal(fn_ty.fnReturnType(), .Const); + try writer.writeAll(" = "); + break :r local; } - } else { - result_local = try f.allocLocal(ret_ty, .Const); - try writer.writeAll(" = "); - } + }; callee: { known: { @@ -3116,7 +3148,6 @@ fn airIsNull( const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); const operand = try f.resolveInst(un_op); - const target = f.object.dg.module.getTarget(); const local = try f.allocLocal(Type.initTag(.bool), .Const); try writer.writeAll(" = ("); @@ -3124,16 +3155,18 @@ fn airIsNull( const ty = f.air.typeOf(un_op); var opt_buf: Type.Payload.ElemType = undefined; - const payload_type = if (ty.zigTypeTag() == .Pointer) + const payload_ty = if (ty.zigTypeTag() == .Pointer) ty.childType().optionalChild(&opt_buf) else ty.optionalChild(&opt_buf); - if (ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + try writer.print("){s} {s} true;\n", .{ deref_suffix, operator }); + } else if (ty.isPtrLikeOptional()) { // operand is a regular pointer, test `operand !=/== NULL` try writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator }); - } else if (payload_type.abiSize(target) == 0) { - try writer.print("){s} {s} true;\n", .{ deref_suffix, operator }); + } else if (payload_ty.zigTypeTag() == .ErrorSet) { + try writer.print("){s} {s} 0;\n", .{ deref_suffix, operator }); } else { try writer.print("){s}.is_null {s} true;\n", .{ deref_suffix, operator }); } @@ -3141,34 +3174,58 @@ fn airIsNull( } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) - return CValue.none; + if (f.liveness.isUnused(inst)) return CValue.none; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const opt_ty = f.air.typeOf(ty_op.operand); - const opt_ty = if (operand_ty.zigTypeTag() == .Pointer) - operand_ty.elemType() - else - operand_ty; + var buf: Type.Payload.ElemType = undefined; + const payload_ty = opt_ty.optionalChild(&buf); - if (opt_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return CValue.none; + } + + if (opt_ty.optionalReprIsPayload()) { + return operand; + } + + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); + try writer.writeAll(" = ("); + try f.writeCValue(writer, operand); + try writer.writeAll(").payload;\n"); + return local; +} + +fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { + if 
(f.liveness.isUnused(inst)) return CValue.none; + + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + const ptr_ty = f.air.typeOf(ty_op.operand); + const opt_ty = ptr_ty.childType(); + var buf: Type.Payload.ElemType = undefined; + const payload_ty = opt_ty.optionalChild(&buf); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return operand; + } + + if (opt_ty.optionalReprIsPayload()) { // the operand is just a regular pointer, no need to do anything special. // *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C return operand; } const inst_ty = f.air.typeOfIndex(inst); - const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; - const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else ""; - const local = try f.allocLocal(inst_ty, .Const); - try writer.print(" = {s}(", .{maybe_addrof}); + try writer.writeAll(" = &("); try f.writeCValue(writer, operand); - - try writer.print("){s}payload;\n", .{maybe_deref}); + try writer.writeAll(")->payload;\n"); return local; } @@ -3180,7 +3237,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const opt_ty = operand_ty.elemType(); - if (opt_ty.isPtrLikeOptional()) { + if (opt_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. // Setting to non-null will be done when the payload is set. return operand; @@ -3307,7 +3364,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { return local; } -// *(E!T) -> E NOT *E +/// *(E!T) -> E +/// Note that the result is never a pointer. fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; @@ -3319,7 +3377,11 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.air.typeOf(ty_op.operand); if (operand_ty.zigTypeTag() == .Pointer) { - if (!operand_ty.childType().errorUnionPayload().hasRuntimeBits()) { + const err_union_ty = operand_ty.childType(); + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + return CValue{ .bytes = "0" }; + } + if (!err_union_ty.errorUnionPayload().hasRuntimeBits()) { return operand; } const local = try f.allocLocal(inst_ty, .Const); @@ -3328,6 +3390,9 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); return local; } + if (operand_ty.errorUnionSet().errorSetCardinality() == .zero) { + return CValue{ .bytes = "0" }; + } if (!operand_ty.errorUnionPayload().hasRuntimeBits()) { return operand; } @@ -3343,7 +3408,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { return local; } -fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: []const u8) !CValue { +fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: [*:0]const u8) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; @@ -3351,17 +3416,19 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: []cons const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); const operand_ty = f.air.typeOf(ty_op.operand); + const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer; + const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + + if (error_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } - const error_union_ty = if (operand_ty.zigTypeTag() == .Pointer) - operand_ty.childType() - else - 
operand_ty; if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) { return CValue.none; } const inst_ty = f.air.typeOfIndex(inst); - const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; + const maybe_deref = if (operand_is_ptr) "->" else "."; const local = try f.allocLocal(inst_ty, .Const); try writer.print(" = {s}(", .{maybe_addrof}); @@ -3380,8 +3447,7 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); const inst_ty = f.air.typeOfIndex(inst); - if (inst_ty.isPtrLikeOptional()) { - // the operand is just a regular pointer, no need to do anything special. + if (inst_ty.optionalReprIsPayload()) { return operand; } @@ -3421,6 +3487,11 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); + if (error_ty.errorSetCardinality() == .zero) { + // TODO: write undefined bytes through the pointer here + return operand; + } + // First, set the non-error value. if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { try f.writeCValueDeref(writer, operand); @@ -3464,6 +3535,9 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); const inst_ty = f.air.typeOfIndex(inst); + if (inst_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .error = 0, .payload = "); try f.writeCValue(writer, operand); @@ -3486,16 +3560,23 @@ fn airIsErr( const operand_ty = f.air.typeOf(un_op); const local = try f.allocLocal(Type.initTag(.bool), .Const); const payload_ty = operand_ty.errorUnionPayload(); + const error_ty = operand_ty.errorUnionSet(); + try writer.writeAll(" = "); - if (is_ptr) { - try f.writeCValueDeref(writer, operand); + + if (error_ty.errorSetCardinality() == .zero) { + try writer.print("0 {s} 0;\n", .{op_str}); } else { - try f.writeCValue(writer, operand); + if (is_ptr) { + try f.writeCValueDeref(writer, operand); + } else { + try f.writeCValue(writer, operand); + } + if (payload_ty.hasRuntimeBits()) { + try writer.writeAll(".error"); + } + try writer.print(" {s} 0;\n", .{op_str}); } - if (payload_ty.hasRuntimeBits()) { - try writer.writeAll(".error"); - } - try writer.print(" {s} 0;\n", .{op_str}); return local; } @@ -4129,3 +4210,14 @@ fn intMin(ty: Type, target: std.Target, buf: []u8) []const u8 { }, } } + +fn loweredFnRetTyHasBits(fn_ty: Type) bool { + const ret_ty = fn_ty.fnReturnType(); + if (ret_ty.hasRuntimeBitsIgnoreComptime()) { + return true; + } + if (ret_ty.isError()) { + return true; + } + return false; +} diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ef33f39f55..ec71297c10 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -745,7 +745,7 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const param_llvm_ty = try dg.llvmType(param_ty); + const param_llvm_ty = try dg.lowerType(param_ty); const abi_size = @intCast(c_uint, param_ty.abiSize(target)); const int_llvm_ty = dg.context.intType(abi_size * 8); const int_ptr_llvm_ty = int_llvm_ty.pointerType(0); @@ -775,7 +775,7 @@ pub const Object = struct { .Struct => { const fields = param_ty.structFields().values(); if (is_by_ref) { - const param_llvm_ty = try dg.llvmType(param_ty); + const param_llvm_ty = try dg.lowerType(param_ty); const arg_ptr = buildAllocaInner(builder, llvm_func, false, 
param_llvm_ty); arg_ptr.setAlignment(param_ty.abiAlignment(target)); @@ -1390,7 +1390,7 @@ pub const Object = struct { gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; } - if (ty.isPtrLikeOptional()) { + if (ty.optionalReprIsPayload()) { const ptr_di_ty = try o.lowerDebugType(child_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module }); @@ -1470,10 +1470,25 @@ pub const Object = struct { return full_di_ty; }, .ErrorUnion => { - const err_set_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); + switch (ty.errorUnionSet().errorSetCardinality()) { + .zero => { + const payload_di_ty = try o.lowerDebugType(payload_ty, .full); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(payload_di_ty), .{ .mod = o.module }); + return payload_di_ty; + }, + .one => { + if (payload_ty.isNoReturn()) { + const di_type = dib.createBasicType("void", 0, DW.ATE.signed); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); + return di_type; + } + }, + .many => {}, + } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const err_set_di_ty = try o.lowerDebugType(err_set_ty, .full); + const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module }); return err_set_di_ty; @@ -1496,56 +1511,51 @@ pub const Object = struct { break :blk fwd_decl; }; - const err_set_size = err_set_ty.abiSize(target); - const err_set_align = err_set_ty.abiAlignment(target); + const error_size = Type.anyerror.abiSize(target); + const error_align = Type.anyerror.abiAlignment(target); const payload_size = payload_ty.abiSize(target); const payload_align = payload_ty.abiAlignment(target); - var offset: u64 = 0; - offset += err_set_size; - offset = std.mem.alignForwardGeneric(u64, offset, payload_align); - const payload_offset = offset; - - var len: u8 = 2; - var fields: [3]*llvm.DIType = .{ - dib.createMemberType( - fwd_decl.toScope(), - "tag", - di_file, - line, - err_set_size * 8, // size in bits - err_set_align * 8, // align in bits - 0, // offset in bits - 0, // flags - try o.lowerDebugType(err_set_ty, .full), - ), - dib.createMemberType( - fwd_decl.toScope(), - "value", - di_file, - line, - payload_size * 8, // size in bits - payload_align * 8, // align in bits - payload_offset * 8, // offset in bits - 0, // flags - try o.lowerDebugType(payload_ty, .full), - ), - undefined, - }; - - const error_size = Type.anyerror.abiSize(target); - if (payload_align > error_size) { - fields[2] = fields[1]; - const pad_len = @intCast(u32, payload_align - error_size); - fields[1] = dib.createArrayType( - pad_len * 8, - 8, - try o.lowerDebugType(Type.u8, .full), - @intCast(c_int, pad_len), - ); - len += 1; + var error_index: u32 = undefined; + var payload_index: u32 = undefined; + var error_offset: u64 = undefined; + var payload_offset: u64 = undefined; + if (error_align > payload_align) { + error_index = 0; + payload_index = 1; + error_offset = 0; + payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align); + } else { + payload_index = 0; + error_index = 1; + payload_offset = 0; + error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align); } + var fields: [2]*llvm.DIType = 
undefined;
+ fields[error_index] = dib.createMemberType(
+ fwd_decl.toScope(),
+ "tag",
+ di_file,
+ line,
+ error_size * 8, // size in bits
+ error_align * 8, // align in bits
+ error_offset * 8, // offset in bits
+ 0, // flags
+ try o.lowerDebugType(Type.anyerror, .full),
+ );
+ fields[payload_index] = dib.createMemberType(
+ fwd_decl.toScope(),
+ "value",
+ di_file,
+ line,
+ payload_size * 8, // size in bits
+ payload_align * 8, // align in bits
+ payload_offset * 8, // offset in bits
+ 0, // flags
+ try o.lowerDebugType(payload_ty, .full),
+ );
+
 const full_di_ty = dib.createStructType(
 compile_unit_scope,
 name.ptr,
@@ -1556,7 +1566,7 @@ pub const Object = struct {
 0, // flags
 null, // derived from
 &fields,
- len,
+ fields.len,
 0, // run time lang
 null, // vtable holder
 "", // unique id
@@ -2094,7 +2104,7 @@ pub const DeclGen = struct {
 break :init_val decl.val;
 };
 if (init_val.tag() != .unreachable_value) {
- const llvm_init = try dg.genTypedValue(.{ .ty = decl.ty, .val = init_val });
+ const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val });
 if (global.globalGetValueType() == llvm_init.typeOf()) {
 global.setInitializer(llvm_init);
 } else {
@@ -2165,7 +2175,7 @@ pub const DeclGen = struct {
 const target = dg.module.getTarget();
 const sret = firstParamSRet(fn_info, target);

- const fn_type = try dg.llvmType(zig_fn_type);
+ const fn_type = try dg.lowerType(zig_fn_type);

 const fqn = try decl.getFullyQualifiedName(dg.module);
 defer dg.gpa.free(fqn);
@@ -2192,7 +2202,7 @@ pub const DeclGen = struct {
 dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0
 dg.addArgAttr(llvm_fn, 0, "noalias");

- const raw_llvm_ret_ty = try dg.llvmType(fn_info.return_type);
+ const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type);
 llvm_fn.addSretAttr(0, raw_llvm_ret_ty);
 }
@@ -2285,7 +2295,7 @@ pub const DeclGen = struct {
 const fqn = try decl.getFullyQualifiedName(dg.module);
 defer dg.gpa.free(fqn);

- const llvm_type = try dg.llvmType(decl.ty);
+ const llvm_type = try dg.lowerType(decl.ty);
 const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
 const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(llvm_type, fqn, llvm_addrspace);
 gop.value_ptr.* = llvm_global;
@@ -2339,15 +2349,15 @@ pub const DeclGen = struct {
 }

 fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool {
- // Once `llvmType` succeeds, successive calls to it with the same Zig type
- // are guaranteed to succeed. So if a call to `llvmType` fails here it means
+ // Once `lowerType` succeeds, successive calls to it with the same Zig type
+ // are guaranteed to succeed. So if a call to `lowerType` fails here it means
 // it is the first time lowering the type, which means the value can't possibly
 // have that type.
- const llvm_ty = dg.llvmType(ty) catch return true; + const llvm_ty = dg.lowerType(ty) catch return true; return val.typeOf() != llvm_ty; } - fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type { + fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type { const gpa = dg.gpa; const target = dg.module.getTarget(); switch (t.zigTypeTag()) { @@ -2379,8 +2389,8 @@ pub const DeclGen = struct { const ptr_type = t.slicePtrFieldType(&buf); const fields: [2]*const llvm.Type = .{ - try dg.llvmType(ptr_type), - try dg.llvmType(Type.usize), + try dg.lowerType(ptr_type), + try dg.lowerType(Type.usize), }; return dg.context.structType(&fields, fields.len, .False); } @@ -2396,7 +2406,7 @@ pub const DeclGen = struct { else => elem_ty.hasRuntimeBitsIgnoreComptime(), }; const llvm_elem_ty = if (lower_elem_ty) - try dg.llvmType(elem_ty) + try dg.lowerType(elem_ty) else dg.context.intType(8); return llvm_elem_ty.pointerType(llvm_addrspace); @@ -2424,12 +2434,12 @@ pub const DeclGen = struct { .Array => { const elem_ty = t.childType(); assert(elem_ty.onePossibleValue() == null); - const elem_llvm_ty = try dg.llvmType(elem_ty); + const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); }, .Vector => { - const elem_type = try dg.llvmType(t.childType()); + const elem_type = try dg.lowerType(t.childType()); return elem_type.vectorType(t.vectorLen()); }, .Optional => { @@ -2438,8 +2448,8 @@ pub const DeclGen = struct { if (!child_ty.hasRuntimeBitsIgnoreComptime()) { return dg.context.intType(1); } - const payload_llvm_ty = try dg.llvmType(child_ty); - if (t.isPtrLikeOptional()) { + const payload_llvm_ty = try dg.lowerType(child_ty); + if (t.optionalReprIsPayload()) { return payload_llvm_ty; } @@ -2449,28 +2459,33 @@ pub const DeclGen = struct { return dg.context.structType(&fields, fields.len, .False); }, .ErrorUnion => { - const error_type = t.errorUnionSet(); - const payload_type = t.errorUnionPayload(); - const llvm_error_type = try dg.llvmType(error_type); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { - return llvm_error_type; + const payload_ty = t.errorUnionPayload(); + switch (t.errorUnionSet().errorSetCardinality()) { + .zero => return dg.lowerType(payload_ty), + .one => { + if (payload_ty.isNoReturn()) { + return dg.context.voidType(); + } + }, + .many => {}, } - const llvm_payload_type = try dg.llvmType(payload_type); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return try dg.lowerType(Type.anyerror); + } + const llvm_error_type = try dg.lowerType(Type.anyerror); + const llvm_payload_type = try dg.lowerType(payload_ty); - const payload_align = payload_type.abiAlignment(target); - const error_size = error_type.abiSize(target); - if (payload_align > error_size) { - const pad_type = dg.context.intType(8).arrayType(@intCast(u32, payload_align - error_size)); - const fields: [3]*const llvm.Type = .{ llvm_error_type, pad_type, llvm_payload_type }; - return dg.context.structType(&fields, fields.len, .False); - } else { + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); + if (error_align > payload_align) { const fields: [2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type }; return dg.context.structType(&fields, fields.len, .False); + } else { + const fields: [2]*const llvm.Type = .{ llvm_payload_type, llvm_error_type }; + return dg.context.structType(&fields, fields.len, .False); } 
}, - .ErrorSet => { - return dg.context.intType(16); - }, + .ErrorSet => return dg.context.intType(16), .Struct => { const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); if (gop.found_existing) return gop.value_ptr.*; @@ -2507,7 +2522,7 @@ pub const DeclGen = struct { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); try llvm_field_types.append(gpa, llvm_array_ty); } - const field_llvm_ty = try dg.llvmType(field_ty); + const field_llvm_ty = try dg.lowerType(field_ty); try llvm_field_types.append(gpa, field_llvm_ty); offset += field_ty.abiSize(target); @@ -2536,7 +2551,7 @@ pub const DeclGen = struct { if (struct_obj.layout == .Packed) { var buf: Type.Payload.Bits = undefined; const int_ty = struct_obj.packedIntegerType(target, &buf); - const int_llvm_ty = try dg.llvmType(int_ty); + const int_llvm_ty = try dg.lowerType(int_ty); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; } @@ -2571,7 +2586,7 @@ pub const DeclGen = struct { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); try llvm_field_types.append(gpa, llvm_array_ty); } - const field_llvm_ty = try dg.llvmType(field.ty); + const field_llvm_ty = try dg.lowerType(field.ty); try llvm_field_types.append(gpa, field_llvm_ty); offset += field.ty.abiSize(target); @@ -2606,7 +2621,7 @@ pub const DeclGen = struct { const union_obj = t.cast(Type.Payload.Union).?.data; if (layout.payload_size == 0) { - const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + const enum_tag_llvm_ty = try dg.lowerType(union_obj.tag_ty); gop.value_ptr.* = enum_tag_llvm_ty; return enum_tag_llvm_ty; } @@ -2618,7 +2633,7 @@ pub const DeclGen = struct { gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; - const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty); + const llvm_aligned_field_ty = try dg.lowerType(aligned_field.ty); const llvm_payload_ty = t: { if (layout.most_aligned_field_size == layout.payload_size) { @@ -2637,7 +2652,7 @@ pub const DeclGen = struct { llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); return llvm_union_ty; } - const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + const enum_tag_llvm_ty = try dg.lowerType(union_obj.tag_ty); // Put the tag before or after the payload depending on which one's // alignment is greater. 
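 // For example, `union(enum) { a: u64, b: u8 }` has a payload alignment of
 // 8 and a tag alignment of 1, so the payload field comes first and the
 // tag follows it.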
@@ -2659,7 +2674,7 @@ pub const DeclGen = struct { llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); return llvm_union_ty; }, - .Fn => return llvmTypeFn(dg, t), + .Fn => return lowerTypeFn(dg, t), .ComptimeInt => unreachable, .ComptimeFloat => unreachable, .Type => unreachable, @@ -2674,7 +2689,7 @@ pub const DeclGen = struct { } } - fn llvmTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*const llvm.Type { + fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*const llvm.Type { const target = dg.module.getTarget(); const fn_info = fn_ty.fnInfo(); const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); @@ -2683,7 +2698,7 @@ pub const DeclGen = struct { defer llvm_params.deinit(); if (firstParamSRet(fn_info, target)) { - const llvm_sret_ty = try dg.llvmType(fn_info.return_type); + const llvm_sret_ty = try dg.lowerType(fn_info.return_type); try llvm_params.append(llvm_sret_ty.pointerType(0)); } @@ -2695,7 +2710,7 @@ pub const DeclGen = struct { .data = dg.object.getStackTraceType(), }; const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - try llvm_params.append(try dg.llvmType(ptr_ty)); + try llvm_params.append(try dg.lowerType(ptr_ty)); } var it = iterateParamTypes(dg, fn_info); @@ -2703,11 +2718,11 @@ pub const DeclGen = struct { .no_bits => continue, .byval => { const param_ty = fn_info.param_types[it.zig_index - 1]; - try llvm_params.append(try dg.llvmType(param_ty)); + try llvm_params.append(try dg.lowerType(param_ty)); }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const raw_llvm_ty = try dg.llvmType(param_ty); + const raw_llvm_ty = try dg.lowerType(param_ty); try llvm_params.append(raw_llvm_ty.pointerType(0)); }, .abi_sized_int => { @@ -2749,7 +2764,7 @@ pub const DeclGen = struct { // one field; in this case keep the type information // to avoid the potentially costly ptrtoint/bitcast. 
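 // For example, a packed struct whose lone field is a pointer can be
 // passed as that pointer type directly instead of as an ABI-sized integer.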
if (bits_used == 0 and field_abi_bits == int_bits) { - const llvm_field_ty = try dg.llvmType(field.ty); + const llvm_field_ty = try dg.lowerType(field.ty); llvm_params.appendAssumeCapacity(llvm_field_ty); field_i += 1; if (field_i >= fields.len) { @@ -2787,16 +2802,16 @@ pub const DeclGen = struct { ); } - fn genTypedValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value { + fn lowerValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value { if (tv.val.isUndef()) { - const llvm_type = try dg.llvmType(tv.ty); + const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } const target = dg.module.getTarget(); switch (tv.ty.zigTypeTag()) { .Bool => { - const llvm_type = try dg.llvmType(tv.ty); + const llvm_type = try dg.lowerType(tv.ty); return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); }, // TODO this duplicates code with Pointer but they should share the handling @@ -2857,7 +2872,7 @@ pub const DeclGen = struct { return unsigned_val; }, .Float => { - const llvm_ty = try dg.llvmType(tv.ty); + const llvm_ty = try dg.lowerType(tv.ty); switch (tv.ty.floatBits(target)) { 16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)), 80 => { @@ -2894,7 +2909,7 @@ pub const DeclGen = struct { const decl = dg.module.declPtr(decl_index); dg.module.markDeclAlive(decl); const val = try dg.resolveGlobalDecl(decl_index); - const llvm_var_type = try dg.llvmType(tv.ty); + const llvm_var_type = try dg.lowerType(tv.ty); const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); const llvm_type = llvm_var_type.pointerType(llvm_addrspace); return val.constBitCast(llvm_type); @@ -2903,11 +2918,11 @@ pub const DeclGen = struct { const slice = tv.val.castTag(.slice).?.data; var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*const llvm.Value = .{ - try dg.genTypedValue(.{ + try dg.lowerValue(.{ .ty = tv.ty.slicePtrFieldType(&buf), .val = slice.ptr, }), - try dg.genTypedValue(.{ + try dg.lowerValue(.{ .ty = Type.usize, .val = slice.len, }), @@ -2915,15 +2930,15 @@ pub const DeclGen = struct { return dg.context.constStruct(&fields, fields.len, .False); }, .int_u64, .one, .int_big_positive => { - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False); - return llvm_int.constIntToPtr(try dg.llvmType(tv.ty)); + return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { return dg.lowerParentPtr(tv.val, tv.ty.childType()); }, .null_value, .zero => { - const llvm_type = try dg.llvmType(tv.ty); + const llvm_type = try dg.lowerType(tv.ty); return llvm_type.constNull(); }, else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ @@ -2978,7 +2993,7 @@ pub const DeclGen = struct { defer gpa.free(llvm_elems); var need_unnamed = false; for (elem_vals[0..len]) |elem_val, i| { - llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val }); + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); } if (need_unnamed) { @@ -2988,7 +3003,7 @@ pub const DeclGen = struct { .True, ); } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); + const llvm_elem_ty = try dg.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, @intCast(c_uint, llvm_elems.len), @@ -3008,13 +3023,13 @@ pub const DeclGen = struct { var need_unnamed = false; if (len != 0) { for 
(llvm_elems[0..len]) |*elem| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val }); + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); } need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); } if (sentinel) |sent| { - llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent }); + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); } @@ -3025,7 +3040,7 @@ pub const DeclGen = struct { .True, ); } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); + const llvm_elem_ty = try dg.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, @intCast(c_uint, llvm_elems.len), @@ -3035,13 +3050,13 @@ pub const DeclGen = struct { .empty_array_sentinel => { const elem_ty = tv.ty.elemType(); const sent_val = tv.ty.sentinel().?; - const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val }); + const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); const llvm_elems: [1]*const llvm.Value = .{sentinel}; const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); if (need_unnamed) { return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); + const llvm_elem_ty = try dg.lowerType(elem_ty); return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); } }, @@ -3056,19 +3071,19 @@ pub const DeclGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return non_null_bit; } - if (tv.ty.isPtrLikeOptional()) { + if (tv.ty.optionalReprIsPayload()) { if (tv.val.castTag(.opt_payload)) |payload| { - return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data }); + return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }); } else if (is_pl) { - return dg.genTypedValue(.{ .ty = payload_ty, .val = tv.val }); + return dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }); } else { - const llvm_ty = try dg.llvmType(tv.ty); + const llvm_ty = try dg.lowerType(tv.ty); return llvm_ty.constNull(); } } assert(payload_ty.zigTypeTag() != .Fn); const fields: [2]*const llvm.Value = .{ - try dg.genTypedValue(.{ + try dg.lowerValue(.{ .ty = payload_ty, .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef), }), @@ -3087,7 +3102,7 @@ pub const DeclGen = struct { return dg.resolveLlvmFunction(fn_decl_index); }, .ErrorSet => { - const llvm_ty = try dg.llvmType(tv.ty); + const llvm_ty = try dg.lowerType(Type.anyerror); switch (tv.val.tag()) { .@"error" => { const err_name = tv.val.castTag(.@"error").?.data.name; @@ -3101,40 +3116,39 @@ pub const DeclGen = struct { } }, .ErrorUnion => { - const error_type = tv.ty.errorUnionSet(); const payload_type = tv.ty.errorUnionPayload(); + if (tv.ty.errorUnionSet().errorSetCardinality() == .zero) { + const payload_val = tv.val.castTag(.eu_payload).?.data; + return dg.lowerValue(.{ .ty = payload_type, .val = payload_val }); + } const is_pl = tv.val.errorUnionIsPayload(); if (!payload_type.hasRuntimeBitsIgnoreComptime()) { // We use the error type directly as the type. 
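 // (For example, a constant of type `anyerror!void` lowers to a bare
 // 16-bit error code.)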
const err_val = if (!is_pl) tv.val else Value.initTag(.zero); - return dg.genTypedValue(.{ .ty = error_type, .val = err_val }); + return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } - var len: u8 = 2; - var fields: [3]*const llvm.Value = .{ - try dg.genTypedValue(.{ - .ty = error_type, - .val = if (is_pl) Value.initTag(.zero) else tv.val, - }), - try dg.genTypedValue(.{ - .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), - }), - undefined, - }; const payload_align = payload_type.abiAlignment(target); - const error_size = error_type.abiSize(target); - if (payload_align > error_size) { - fields[2] = fields[1]; - const pad_type = dg.context.intType(8).arrayType(@intCast(u32, payload_align - error_size)); - fields[1] = pad_type.getUndef(); - len += 1; + const error_align = Type.anyerror.abiAlignment(target); + const llvm_error_value = try dg.lowerValue(.{ + .ty = Type.anyerror, + .val = if (is_pl) Value.initTag(.zero) else tv.val, + }); + const llvm_payload_value = try dg.lowerValue(.{ + .ty = payload_type, + .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), + }); + if (error_align > payload_align) { + const fields: [2]*const llvm.Value = .{ llvm_error_value, llvm_payload_value }; + return dg.context.constStruct(&fields, fields.len, .False); + } else { + const fields: [2]*const llvm.Value = .{ llvm_payload_value, llvm_error_value }; + return dg.context.constStruct(&fields, fields.len, .False); } - return dg.context.constStruct(&fields, len, .False); }, .Struct => { - const llvm_struct_ty = try dg.llvmType(tv.ty); + const llvm_struct_ty = try dg.lowerType(tv.ty); const field_vals = tv.val.castTag(.aggregate).?.data; const gpa = dg.gpa; @@ -3167,7 +3181,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } - const field_llvm_val = try dg.genTypedValue(.{ + const field_llvm_val = try dg.lowerValue(.{ .ty = field_ty, .val = field_vals[i], }); @@ -3215,7 +3229,7 @@ pub const DeclGen = struct { const field = fields[i]; if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; - const non_int_val = try dg.genTypedValue(.{ + const non_int_val = try dg.lowerValue(.{ .ty = field.ty, .val = field_val, }); @@ -3259,7 +3273,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } - const field_llvm_val = try dg.genTypedValue(.{ + const field_llvm_val = try dg.lowerValue(.{ .ty = field.ty, .val = field_vals[i], }); @@ -3294,13 +3308,13 @@ pub const DeclGen = struct { } }, .Union => { - const llvm_union_ty = try dg.llvmType(tv.ty); + const llvm_union_ty = try dg.lowerType(tv.ty); const tag_and_val = tv.val.castTag(.@"union").?.data; const layout = tv.ty.unionGetLayout(target); if (layout.payload_size == 0) { - return genTypedValue(dg, .{ + return lowerValue(dg, .{ .ty = tv.ty.unionTagType().?, .val = tag_and_val.tag, }); @@ -3314,7 +3328,7 @@ pub const DeclGen = struct { const padding_len = @intCast(c_uint, layout.payload_size); break :p dg.context.intType(8).arrayType(padding_len).getUndef(); } - const field = try genTypedValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); + const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); const field_size = field_ty.abiSize(target); if (field_size == layout.payload_size) { break :p field; @@ -3340,7 +3354,7 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields.len); } } - const llvm_tag_value = try genTypedValue(dg, .{ + const 
llvm_tag_value = try lowerValue(dg, .{ .ty = tv.ty.unionTagType().?, .val = tag_and_val.tag, }); @@ -3377,7 +3391,7 @@ pub const DeclGen = struct { .data = bytes[i], }; - elem.* = try dg.genTypedValue(.{ + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = Value.initPayload(&byte_payload.base), }); @@ -3397,7 +3411,7 @@ pub const DeclGen = struct { const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems) |*elem, i| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] }); + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] }); } return llvm.constVector( llvm_elems.ptr, @@ -3412,7 +3426,7 @@ pub const DeclGen = struct { const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len); defer dg.gpa.free(llvm_elems); for (llvm_elems) |*elem| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val }); + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); } return llvm.constVector( llvm_elems.ptr, @@ -3462,7 +3476,7 @@ pub const DeclGen = struct { if (ptr_child_ty.eql(decl.ty, dg.module)) { return llvm_ptr; } else { - return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0)); } } @@ -3484,15 +3498,15 @@ pub const DeclGen = struct { }, .int_i64 => { const int = ptr_val.castTag(.int_i64).?.data; - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False); - return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0)); }, .int_u64 => { const int = ptr_val.castTag(.int_u64).?.data; - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(int, .False); - return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0)); }, .field_ptr => blk: { const field_ptr = ptr_val.castTag(.field_ptr).?.data; @@ -3541,7 +3555,7 @@ pub const DeclGen = struct { const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty); bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module); - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const indices: [1]*const llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; @@ -3555,7 +3569,9 @@ pub const DeclGen = struct { const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or + payload_ty.optionalReprIsPayload()) + { // In this case, we represent pointer to optional the same as pointer // to the payload. 
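 // For example, a pointer to the payload of a `?void` is lowered as the
 // pointer to the optional itself.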
break :blk parent_llvm_ptr; @@ -3592,7 +3608,7 @@ pub const DeclGen = struct { else => unreachable, }; if (bitcast_needed) { - return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0)); } else { return llvm_ptr; } @@ -3611,11 +3627,11 @@ pub const DeclGen = struct { .data = tv.val.sliceLen(self.module), }; const fields: [2]*const llvm.Value = .{ - try self.genTypedValue(.{ + try self.lowerValue(.{ .ty = ptr_ty, .val = tv.val, }), - try self.genTypedValue(.{ + try self.lowerValue(.{ .ty = Type.usize, .val = Value.initPayload(&slice_len.base), }), @@ -3647,7 +3663,7 @@ pub const DeclGen = struct { else try self.resolveGlobalDecl(decl_index); - const llvm_type = try self.llvmType(tv.ty); + const llvm_type = try self.lowerType(tv.ty); if (tv.ty.zigTypeTag() == .Int) { return llvm_val.constPtrToInt(llvm_type); } else { @@ -3662,8 +3678,8 @@ pub const DeclGen = struct { // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. - const llvm_usize = try dg.llvmType(Type.usize); - const llvm_ptr_ty = try dg.llvmType(ptr_ty); + const llvm_usize = try dg.lowerType(Type.usize); + const llvm_ptr_ty = try dg.lowerType(ptr_ty); if (alignment != 0) { return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty); } @@ -3842,7 +3858,7 @@ pub const FuncGen = struct { const val = self.air.value(inst).?; const ty = self.air.typeOf(inst); - const llvm_val = try self.dg.genTypedValue(.{ .ty = ty, .val = val }); + const llvm_val = try self.dg.lowerValue(.{ .ty = ty, .val = val }); if (!isByRef(ty)) { gop.value_ptr.* = llvm_val; return llvm_val; @@ -3860,7 +3876,7 @@ pub const FuncGen = struct { // Because of LLVM limitations for lowering certain types such as unions, // the type of global constants might not match the type it is supposed to // be, and so we must bitcast the pointer at the usage sites. - const wanted_llvm_ty = try self.dg.llvmType(ty); + const wanted_llvm_ty = try self.dg.lowerType(ty); const wanted_llvm_ptr_ty = wanted_llvm_ty.pointerType(0); const casted_ptr = global.constBitCast(wanted_llvm_ptr_ty); gop.value_ptr.* = casted_ptr; @@ -4084,7 +4100,7 @@ pub const FuncGen = struct { defer llvm_args.deinit(); const ret_ptr = if (!sret) null else blk: { - const llvm_ret_ty = try self.dg.llvmType(return_type); + const llvm_ret_ty = try self.dg.lowerType(return_type); const ret_ptr = self.buildAlloca(llvm_ret_ty); ret_ptr.setAlignment(return_type.abiAlignment(target)); try llvm_args.append(ret_ptr); @@ -4116,7 +4132,7 @@ pub const FuncGen = struct { // which is always lowered to an LLVM type of `*i8`. // 2. The argument is a global which does act as a pointer, however // a bitcast is needed in order for the LLVM types to match. 
- const llvm_param_ty = try self.dg.llvmType(param_ty); + const llvm_param_ty = try self.dg.lowerType(param_ty); const casted_ptr = self.builder.buildBitCast(llvm_arg, llvm_param_ty, ""); try llvm_args.append(casted_ptr); } else { @@ -4163,7 +4179,7 @@ pub const FuncGen = struct { ); const int_ptr = self.buildAlloca(int_llvm_ty); int_ptr.setAlignment(alignment); - const param_llvm_ty = try self.dg.llvmType(param_ty); + const param_llvm_ty = try self.dg.lowerType(param_ty); const casted_ptr = self.builder.buildBitCast(int_ptr, param_llvm_ty.pointerType(0), ""); const store_inst = self.builder.buildStore(llvm_arg, casted_ptr); store_inst.setAlignment(alignment); @@ -4274,7 +4290,7 @@ pub const FuncGen = struct { return null; } - const llvm_ret_ty = try self.dg.llvmType(return_type); + const llvm_ret_ty = try self.dg.lowerType(return_type); if (ret_ptr) |rp| { call.setCallSret(llvm_ret_ty); @@ -4338,11 +4354,19 @@ pub const FuncGen = struct { _ = self.builder.buildRetVoid(); return null; } + const fn_info = self.dg.decl.ty.fnInfo(); if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - _ = self.builder.buildRetVoid(); + if (fn_info.return_type.isError()) { + // Functions with an empty error set are emitted with an error code + // return type and return zero so they can be function pointers coerced + // to functions that return anyerror. + const err_int = try self.dg.lowerType(Type.anyerror); + _ = self.builder.buildRet(err_int.constInt(0, .False)); + } else { + _ = self.builder.buildRetVoid(); + } return null; } - const fn_info = self.dg.decl.ty.fnInfo(); const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); const operand = try self.resolveInst(un_op); const llvm_ret_ty = operand.typeOf(); @@ -4369,15 +4393,27 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime() or self.ret_ptr != null) { + const fn_info = self.dg.decl.ty.fnInfo(); + if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (fn_info.return_type.isError()) { + // Functions with an empty error set are emitted with an error code + // return type and return zero so they can be function pointers coerced + // to functions that return anyerror. 
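+ // For example, the body of a `fn () error{}!void` still ends in
+ // `ret i16 0` rather than `ret void`.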
+ const err_int = try self.dg.lowerType(Type.anyerror); + _ = self.builder.buildRet(err_int.constInt(0, .False)); + } else { + _ = self.builder.buildRetVoid(); + } + return null; + } + if (self.ret_ptr != null) { _ = self.builder.buildRetVoid(); return null; } const ptr = try self.resolveInst(un_op); const target = self.dg.module.getTarget(); - const fn_info = self.dg.decl.ty.fnInfo(); const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); - const llvm_ret_ty = try self.dg.llvmType(ret_ty); + const llvm_ret_ty = try self.dg.lowerType(ret_ty); const casted_ptr = if (abi_ret_ty == llvm_ret_ty) ptr else p: { const ptr_abi_ty = abi_ret_ty.pointerType(0); break :p self.builder.buildBitCast(ptr, ptr_abi_ty, ""); @@ -4439,7 +4475,9 @@ pub const FuncGen = struct { .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or + operand_ty.optionalReprIsPayload()) + { break :blk operand_ty; } // We need to emit instructions to check for equality/inequality @@ -4556,7 +4594,7 @@ pub const FuncGen = struct { const is_body = inst_ty.zigTypeTag() == .Fn; if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null; - const raw_llvm_ty = try self.dg.llvmType(inst_ty); + const raw_llvm_ty = try self.dg.lowerType(inst_ty); const llvm_ty = ty: { // If the zig tag type is a function, this represents an actual function body; not @@ -4696,9 +4734,9 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); const array_ty = operand_ty.childType(); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const len = llvm_usize.constInt(array_ty.arrayLen(), .False); - const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); if (!array_ty.hasRuntimeBitsIgnoreComptime()) { return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, ""); } @@ -4723,7 +4761,7 @@ pub const FuncGen = struct { const dest_ty = self.air.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(); - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); const target = self.dg.module.getTarget(); if (intrinsicsAllowed(dest_scalar_ty, target)) { @@ -4774,7 +4812,7 @@ pub const FuncGen = struct { const dest_ty = self.air.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(); - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag @@ -4801,7 +4839,7 @@ pub const FuncGen = struct { compiler_rt_dest_abbrev, }) catch unreachable; - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const param_types = [1]*const llvm.Type{operand_llvm_ty}; const libc_fn = self.getLibcFunction(fn_name, ¶m_types, libc_ret_ty); const params = [1]*const llvm.Value{operand}; @@ -4962,7 +5000,7 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try self.dg.llvmType(field_ty); + 
const elem_llvm_ty = try self.dg.lowerType(field_ty); if (field_ty.zigTypeTag() == .Float) { const elem_bits = @intCast(c_uint, field_ty.bitSize(target)); const same_size_int = self.context.intType(elem_bits); @@ -4994,7 +5032,7 @@ pub const FuncGen = struct { return self.load(field_ptr, field_ptr_ty); }, .Union => { - const llvm_field_ty = try self.dg.llvmType(field_ty); + const llvm_field_ty = try self.dg.lowerType(field_ty); const layout = struct_ty.unionGetLayout(target); const payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, ""); @@ -5021,7 +5059,7 @@ pub const FuncGen = struct { const struct_ty = self.air.getRefType(ty_pl.ty).childType(); const field_offset = struct_ty.structFieldOffset(extra.field_index, target); - const res_ty = try self.dg.llvmType(self.air.getRefType(ty_pl.ty)); + const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); if (field_offset == 0) { return self.builder.buildBitCast(field_ptr, res_ty, ""); } @@ -5351,7 +5389,7 @@ pub const FuncGen = struct { } const ret_ty = self.air.typeOfIndex(inst); - const ret_llvm_ty = try self.dg.llvmType(ret_ty); + const ret_llvm_ty = try self.dg.lowerType(ret_ty); const llvm_fn_ty = llvm.functionType( ret_llvm_ty, llvm_param_types.ptr, @@ -5392,8 +5430,8 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.air.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - if (optional_ty.isPtrLikeOptional()) { - const optional_llvm_ty = try self.dg.llvmType(optional_ty); + if (optional_ty.optionalReprIsPayload()) { + const optional_llvm_ty = try self.dg.lowerType(optional_ty); const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand; return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); } @@ -5430,21 +5468,33 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const err_union_ty = self.air.typeOf(un_op); const payload_ty = err_union_ty.errorUnionPayload(); - const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror)); + const err_set_ty = try self.dg.lowerType(Type.initTag(.anyerror)); const zero = err_set_ty.constNull(); + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + const llvm_i1 = self.context.intType(1); + switch (op) { + .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 + .NE => return llvm_i1.constInt(0, .False), // 0 != 0 + else => unreachable, + } + } + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand; return self.builder.buildICmp(op, loaded, zero, ""); } + const target = self.dg.module.getTarget(); + const err_field_index = errUnionErrorOffset(payload_ty, target); + if (operand_is_ptr or isByRef(err_union_ty)) { - const err_field_ptr = self.builder.buildStructGEP(operand, 0, ""); + const err_field_ptr = self.builder.buildStructGEP(operand, err_field_index, ""); const loaded = self.builder.buildLoad(err_field_ptr, ""); return self.builder.buildICmp(op, loaded, zero, ""); } - const loaded = self.builder.buildExtractValue(operand, 0, ""); + const loaded = self.builder.buildExtractValue(operand, err_field_index, ""); return self.builder.buildICmp(op, loaded, zero, ""); } @@ -5462,10 +5512,10 @@ pub const FuncGen = struct { // a pointer to a zero-bit value. // TODO once we update to LLVM 14 this bitcast won't be necessary. 
- const res_ptr_ty = try self.dg.llvmType(result_ty); + const res_ptr_ty = try self.dg.lowerType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. return operand; } @@ -5490,10 +5540,10 @@ pub const FuncGen = struct { _ = self.builder.buildStore(non_null_bit, operand); // TODO once we update to LLVM 14 this bitcast won't be necessary. - const res_ptr_ty = try self.dg.llvmType(result_ty); + const res_ptr_ty = try self.dg.lowerType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. // Setting to non-null will be done when the payload is set. return operand; @@ -5527,7 +5577,7 @@ pub const FuncGen = struct { const payload_ty = self.air.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null; - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { // Payload value is the same as the optional value. return operand; } @@ -5544,17 +5594,23 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const result_ty = self.air.getRefType(ty_op.ty); + const operand_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + // If the error set has no fields, then the payload and the error + // union are the same value. + if (error_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } + const result_ty = self.air.typeOfIndex(inst); const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; - const target = self.dg.module.getTarget(); - const offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const offset = errUnionPayloadOffset(payload_ty, target); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { if (!operand_is_ptr) return null; // TODO once we update to LLVM 14 this bitcast won't be necessary. 
- const res_ptr_ty = try self.dg.llvmType(result_ty); + const res_ptr_ty = try self.dg.lowerType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } if (operand_is_ptr or isByRef(payload_ty)) { @@ -5574,54 +5630,69 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); - const err_set_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + const err_llvm_ty = try self.dg.lowerType(Type.anyerror); + if (operand_is_ptr) { + return self.builder.buildBitCast(operand, err_llvm_ty.pointerType(0), ""); + } else { + return err_llvm_ty.constInt(0, .False); + } + } - const payload_ty = err_set_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { if (!operand_is_ptr) return operand; return self.builder.buildLoad(operand, ""); } - if (operand_is_ptr or isByRef(err_set_ty)) { - const err_field_ptr = self.builder.buildStructGEP(operand, 0, ""); + const target = self.dg.module.getTarget(); + const offset = errUnionErrorOffset(payload_ty, target); + + if (operand_is_ptr or isByRef(err_union_ty)) { + const err_field_ptr = self.builder.buildStructGEP(operand, offset, ""); return self.builder.buildLoad(err_field_ptr, ""); } - return self.builder.buildExtractValue(operand, 0, ""); + return self.builder.buildExtractValue(operand, offset, ""); } fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const error_set_ty = self.air.typeOf(ty_op.operand).childType(); + const error_union_ty = self.air.typeOf(ty_op.operand).childType(); - const error_ty = error_set_ty.errorUnionSet(); - const payload_ty = error_set_ty.errorUnionPayload(); - const non_error_val = try self.dg.genTypedValue(.{ .ty = error_ty, .val = Value.zero }); + if (error_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + // TODO: write undefined bytes through the pointer here + return operand; + } + const payload_ty = error_union_ty.errorUnionPayload(); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - // We have a pointer to a i1. We need to set it to 1 and then return the same pointer. _ = self.builder.buildStore(non_error_val, operand); return operand; } const index_type = self.context.intType(32); + const target = self.dg.module.getTarget(); { + const error_offset = errUnionErrorOffset(payload_ty, target); // First set the non-error value. const indices: [2]*const llvm.Value = .{ index_type.constNull(), // dereference the pointer - index_type.constNull(), // first field is the payload + index_type.constInt(error_offset, .False), }; const non_null_ptr = self.builder.buildInBoundsGEP(operand, &indices, indices.len, ""); - _ = self.builder.buildStore(non_error_val, non_null_ptr); + const store_inst = self.builder.buildStore(non_error_val, non_null_ptr); + store_inst.setAlignment(Type.anyerror.abiAlignment(target)); } // Then return the payload pointer (only if it is used). 
if (self.liveness.isUnused(inst)) return null; - const target = self.dg.module.getTarget(); - const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const payload_offset = errUnionPayloadOffset(payload_ty, target); const indices: [2]*const llvm.Value = .{ index_type.constNull(), // dereference the pointer - index_type.constInt(payload_offset, .False), // second field is the payload + index_type.constInt(payload_offset, .False), }; return self.builder.buildInBoundsGEP(operand, &indices, indices.len, ""); } @@ -5646,8 +5717,10 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.isPtrLikeOptional()) return operand; - const llvm_optional_ty = try self.dg.llvmType(optional_ty); + if (optional_ty.optionalReprIsPayload()) { + return operand; + } + const llvm_optional_ty = try self.dg.lowerType(optional_ty); if (isByRef(optional_ty)) { const optional_ptr = self.buildAlloca(llvm_optional_ty); const payload_ptr = self.builder.buildStructGEP(optional_ptr, 0, ""); @@ -5669,21 +5742,26 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const payload_ty = self.air.typeOf(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); + if (inst_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } + const payload_ty = self.air.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } - const inst_ty = self.air.typeOfIndex(inst); - const ok_err_code = self.context.intType(16).constNull(); - const err_un_llvm_ty = try self.dg.llvmType(inst_ty); + const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull(); + const err_un_llvm_ty = try self.dg.lowerType(inst_ty); const target = self.dg.module.getTarget(); - const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const payload_offset = errUnionPayloadOffset(payload_ty, target); + const error_offset = errUnionErrorOffset(payload_ty, target); if (isByRef(inst_ty)) { const result_ptr = self.buildAlloca(err_un_llvm_ty); - const err_ptr = self.builder.buildStructGEP(result_ptr, 0, ""); - _ = self.builder.buildStore(ok_err_code, err_ptr); + const err_ptr = self.builder.buildStructGEP(result_ptr, error_offset, ""); + const store_inst = self.builder.buildStore(ok_err_code, err_ptr); + store_inst.setAlignment(Type.anyerror.abiAlignment(target)); const payload_ptr = self.builder.buildStructGEP(result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -5694,7 +5772,7 @@ pub const FuncGen = struct { return result_ptr; } - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, 0, ""); + const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, error_offset, ""); return self.builder.buildInsertValue(partial, operand, payload_offset, ""); } @@ -5708,14 +5786,16 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } - const err_un_llvm_ty = try self.dg.llvmType(err_un_ty); + const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); const target = self.dg.module.getTarget(); - const payload_offset: u8 = if 
(payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const payload_offset = errUnionPayloadOffset(payload_ty, target); + const error_offset = errUnionErrorOffset(payload_ty, target); if (isByRef(err_un_ty)) { const result_ptr = self.buildAlloca(err_un_llvm_ty); - const err_ptr = self.builder.buildStructGEP(result_ptr, 0, ""); - _ = self.builder.buildStore(operand, err_ptr); + const err_ptr = self.builder.buildStructGEP(result_ptr, error_offset, ""); + const store_inst = self.builder.buildStore(operand, err_ptr); + store_inst.setAlignment(Type.anyerror.abiAlignment(target)); const payload_ptr = self.builder.buildStructGEP(result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -5728,7 +5808,7 @@ pub const FuncGen = struct { return result_ptr; } - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, 0, ""); + const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, error_offset, ""); // TODO set payload bytes to undef return partial; } @@ -5791,7 +5871,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const llvm_slice_ty = try self.dg.llvmType(inst_ty); + const llvm_slice_ty = try self.dg.lowerType(inst_ty); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` // but `ptr` is pointing to the global directly. If it's an array, we would want to @@ -5799,7 +5879,7 @@ pub const FuncGen = struct { // This prevents an assertion failure. var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = inst_ty.slicePtrFieldType(&buf); - const ptr_llvm_ty = try self.dg.llvmType(ptr_ty); + const ptr_llvm_ty = try self.dg.lowerType(ptr_ty); const casted_ptr = self.builder.buildBitCast(ptr, ptr_llvm_ty, ""); const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), casted_ptr, 0, ""); return self.builder.buildInsertValue(partial, len, 1, ""); @@ -5965,7 +6045,7 @@ pub const FuncGen = struct { // const d = @divTrunc(a, b); // const r = @rem(a, b); // return if (r == 0) d else d - ((a < 0) ^ (b < 0)); - const result_llvm_ty = try self.dg.llvmType(inst_ty); + const result_llvm_ty = try self.dg.lowerType(inst_ty); const zero = result_llvm_ty.constNull(); const div_trunc = self.builder.buildSDiv(lhs, rhs, ""); const rem = self.builder.buildSRem(lhs, rhs, ""); @@ -6015,7 +6095,7 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const inst_llvm_ty = try self.dg.llvmType(inst_ty); + const inst_llvm_ty = try self.dg.lowerType(inst_ty); const scalar_ty = inst_ty.scalarType(); if (scalar_ty.isRuntimeFloat()) { @@ -6099,8 +6179,8 @@ pub const FuncGen = struct { const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic; - const llvm_lhs_ty = try self.dg.llvmType(lhs_ty); - const llvm_dest_ty = try self.dg.llvmType(dest_ty); + const llvm_lhs_ty = try self.dg.lowerType(lhs_ty); + const llvm_dest_ty = try self.dg.lowerType(dest_ty); const tg = self.dg.module.getTarget(); @@ -6208,7 +6288,7 @@ pub const FuncGen = struct { ) !*const llvm.Value { const target = self.dg.module.getTarget(); const scalar_ty = ty.scalarType(); - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); if 
(intrinsicsAllowed(scalar_ty, target)) { const llvm_predicate: llvm.RealPredicate = switch (pred) { @@ -6308,8 +6388,8 @@ pub const FuncGen = struct { ) !*const llvm.Value { const target = self.dg.module.getTarget(); const scalar_ty = ty.scalarType(); - const llvm_ty = try self.dg.llvmType(ty); - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const llvm_ty = try self.dg.lowerType(ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target); var fn_name_buf: [64]u8 = undefined; @@ -6403,12 +6483,12 @@ pub const FuncGen = struct { const rhs_scalar_ty = rhs_ty.scalarType(); const dest_ty = self.air.typeOfIndex(inst); - const llvm_dest_ty = try self.dg.llvmType(dest_ty); + const llvm_dest_ty = try self.dg.lowerType(dest_ty); const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; @@ -6468,7 +6548,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, ""); @@ -6491,7 +6571,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "") else rhs; return self.builder.buildShl(lhs, casted_rhs, ""); @@ -6513,7 +6593,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; if (lhs_scalar_ty.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, ""); @@ -6536,7 +6616,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; const is_signed_int = lhs_scalar_ty.isSignedInt(); @@ -6564,7 +6644,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.air.typeOfIndex(inst); const dest_info = dest_ty.intInfo(target); - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const operand_info = operand_ty.intInfo(target); @@ -6586,7 +6666,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); return self.builder.buildTrunc(operand, dest_llvm_ty, ""); } @@ -6604,7 +6684,7 @@ pub const FuncGen = struct { if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) { 
return softF80TruncOrExt(self, operand, src_bits, dest_bits); } - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); return self.builder.buildFPTrunc(operand, dest_llvm_ty, ""); } @@ -6622,7 +6702,7 @@ pub const FuncGen = struct { if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) { return softF80TruncOrExt(self, operand, src_bits, dest_bits); } - const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); return self.builder.buildFPExt(operand, dest_llvm_ty, ""); } @@ -6632,7 +6712,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); return self.builder.buildPtrToInt(operand, dest_llvm_ty, ""); } @@ -6640,12 +6720,12 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const inst_ty = self.air.typeOfIndex(inst); + const operand = try self.resolveInst(ty_op.operand); const operand_is_ref = isByRef(operand_ty); const result_is_ref = isByRef(inst_ty); - const llvm_dest_ty = try self.dg.llvmType(inst_ty); + const llvm_dest_ty = try self.dg.lowerType(inst_ty); const target = self.dg.module.getTarget(); if (operand_is_ref and result_is_ref) { @@ -6665,14 +6745,14 @@ pub const FuncGen = struct { const array_ptr = self.buildAlloca(llvm_dest_ty); const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8; if (bitcast_ok) { - const llvm_vector_ty = try self.dg.llvmType(operand_ty); + const llvm_vector_ty = try self.dg.lowerType(operand_ty); const casted_ptr = self.builder.buildBitCast(array_ptr, llvm_vector_ty.pointerType(0), ""); const llvm_store = self.builder.buildStore(operand, casted_ptr); llvm_store.setAlignment(inst_ty.abiAlignment(target)); } else { // If the ABI size of the element type (in bits) does not match its bit size, // a simple bitcast will not work, and we fall back to extractelement. - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(); @@ -6689,7 +6769,7 @@ pub const FuncGen = struct { return array_ptr; } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) { const elem_ty = operand_ty.childType(); - const llvm_vector_ty = try self.dg.llvmType(inst_ty); + const llvm_vector_ty = try self.dg.lowerType(inst_ty); if (!operand_is_ref) { return self.dg.todo("implement bitcast non-ref array to vector", .{}); } @@ -6706,7 +6786,7 @@ pub const FuncGen = struct { } else { // If the ABI size of the element type (in bits) does not match its bit size, // a simple bitcast will not work, and we fall back to extractelement.
- const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(); @@ -6738,7 +6818,7 @@ pub const FuncGen = struct { const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); const result_ptr = self.buildAlloca(llvm_dest_ty); result_ptr.setAlignment(alignment); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), ""); const store_inst = self.builder.buildStore(operand, casted_ptr); store_inst.setAlignment(alignment); @@ -6752,7 +6832,7 @@ pub const FuncGen = struct { const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); const result_ptr = self.buildAlloca(llvm_dest_ty); result_ptr.setAlignment(alignment); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), ""); const store_inst = self.builder.buildStore(operand, casted_ptr); store_inst.setAlignment(alignment); @@ -6826,7 +6906,7 @@ pub const FuncGen = struct { const pointee_type = ptr_ty.childType(); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); - const pointee_llvm_ty = try self.dg.llvmType(pointee_type); + const pointee_llvm_ty = try self.dg.lowerType(pointee_type); const alloca_inst = self.buildAlloca(pointee_llvm_ty); const target = self.dg.module.getTarget(); const alignment = ptr_ty.ptrAlignment(target); @@ -6840,7 +6920,7 @@ pub const FuncGen = struct { const ret_ty = ptr_ty.childType(); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; - const ret_llvm_ty = try self.dg.llvmType(ret_ty); + const ret_llvm_ty = try self.dg.lowerType(ret_ty); const target = self.dg.module.getTarget(); const alloca_inst = self.buildAlloca(ret_llvm_ty); alloca_inst.setAlignment(ptr_ty.ptrAlignment(target)); @@ -6871,7 +6951,7 @@ pub const FuncGen = struct { const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, ""); const fill_char = u8_llvm_ty.constInt(0xaa, .False); const dest_ptr_align = ptr_ty.ptrAlignment(target); - const usize_llvm_ty = try self.dg.llvmType(Type.usize); + const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr()); if (self.dg.module.comp.bin_file.options.valgrind) { @@ -6908,7 +6988,7 @@ pub const FuncGen = struct { const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{}); const params = [_]*const llvm.Value{llvm_i32.constNull()}; const ptr_val = self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, ""); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); } @@ -6926,7 +7006,7 @@ pub const FuncGen = struct { const params = [_]*const llvm.Value{llvm_i32.constNull()}; const ptr_val = self.builder.buildCall(llvm_fn, &params, params.len, .Fast, .Auto, ""); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); return
self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); } @@ -6971,15 +7051,15 @@ pub const FuncGen = struct { var payload = self.builder.buildExtractValue(result, 0, ""); if (opt_abi_ty != null) { - payload = self.builder.buildTrunc(payload, try self.dg.llvmType(operand_ty), ""); + payload = self.builder.buildTrunc(payload, try self.dg.lowerType(operand_ty), ""); } const success_bit = self.builder.buildExtractValue(result, 1, ""); - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, ""); } - const optional_llvm_ty = try self.dg.llvmType(optional_ty); + const optional_llvm_ty = try self.dg.lowerType(optional_ty); const non_null_bit = self.builder.buildNot(success_bit, ""); const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, ""); return self.builder.buildInsertValue(partial, non_null_bit, 1, ""); @@ -7015,7 +7095,7 @@ pub const FuncGen = struct { ordering, single_threaded, ); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); if (is_float) { return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); } else { @@ -7028,7 +7108,7 @@ pub const FuncGen = struct { } // It's a pointer but we need to treat it as an int. - const usize_llvm_ty = try self.dg.llvmType(Type.usize); + const usize_llvm_ty = try self.dg.lowerType(Type.usize); const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), ""); const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( @@ -7038,7 +7118,7 @@ pub const FuncGen = struct { ordering, single_threaded, ); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); } @@ -7057,7 +7137,7 @@ pub const FuncGen = struct { const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); const load_inst = (try self.load(casted_ptr, ptr_ty)).?; load_inst.setOrdering(ordering); - return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), ""); + return self.builder.buildTrunc(load_inst, try self.dg.lowerType(operand_ty), ""); } const load_inst = (try self.load(ptr, ptr_ty)).?; load_inst.setOrdering(ordering); @@ -7198,13 +7278,13 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const llvm_i1 = self.context.intType(1); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() }; const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.air.typeOfIndex(inst); - const result_llvm_ty = try self.dg.llvmType(result_ty); + const result_llvm_ty = try self.dg.lowerType(result_ty); const target = self.dg.module.getTarget(); const bits = operand_ty.intInfo(target).bits; @@ -7226,12 +7306,12 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const params = [_]*const llvm.Value{operand}; - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const
wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.air.typeOfIndex(inst); - const result_llvm_ty = try self.dg.llvmType(result_ty); + const result_llvm_ty = try self.dg.lowerType(result_ty); const target = self.dg.module.getTarget(); const bits = operand_ty.intInfo(target).bits; @@ -7255,7 +7335,7 @@ pub const FuncGen = struct { assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); - var operand_llvm_ty = try self.dg.llvmType(operand_ty); + var operand_llvm_ty = try self.dg.lowerType(operand_ty); if (bits % 16 == 8) { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte @@ -7289,7 +7369,7 @@ pub const FuncGen = struct { const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, .C, .Auto, ""); const result_ty = self.air.typeOfIndex(inst); - const result_llvm_ty = try self.dg.llvmType(result_ty); + const result_llvm_ty = try self.dg.lowerType(result_ty); const result_bits = result_ty.intInfo(target).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); @@ -7332,14 +7412,14 @@ pub const FuncGen = struct { } const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const llvm_ret_ty = try self.dg.llvmType(slice_ty); - const usize_llvm_ty = try self.dg.llvmType(Type.usize); + const llvm_ret_ty = try self.dg.lowerType(slice_ty); + const usize_llvm_ty = try self.dg.lowerType(Type.usize); const target = self.dg.module.getTarget(); const slice_alignment = slice_ty.abiAlignment(target); var int_tag_type_buffer: Type.Payload.Bits = undefined; const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); - const param_types = [_]*const llvm.Type{try self.dg.llvmType(int_tag_ty)}; + const param_types = [_]*const llvm.Type{try self.dg.lowerType(int_tag_ty)}; const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False); const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type); @@ -7396,7 +7476,7 @@ pub const FuncGen = struct { .base = .{ .tag = .enum_field_index }, .data = @intCast(u32, field_index), }; - break :int try self.dg.genTypedValue(.{ + break :int try self.dg.lowerValue(.{ .ty = enum_ty, .val = Value.initPayload(&tag_val_payload.base), }); @@ -7421,8 +7501,8 @@ pub const FuncGen = struct { // Function signature: fn (anyerror) bool - const ret_llvm_ty = try self.dg.llvmType(Type.bool); - const anyerror_llvm_ty = try self.dg.llvmType(Type.anyerror); + const ret_llvm_ty = try self.dg.lowerType(Type.bool); + const anyerror_llvm_ty = try self.dg.lowerType(Type.anyerror); const param_types = [_]*const llvm.Type{anyerror_llvm_ty}; const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False); @@ -7531,7 +7611,7 @@ pub const FuncGen = struct { .Add => switch (scalar_ty.zigTypeTag()) { .Int => return self.builder.buildAddReduce(operand), .Float => { - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const neutral_value = scalar_llvm_ty.constReal(-0.0); return self.builder.buildFPAddReduce(neutral_value, operand); }, @@ -7540,7 +7620,7 @@ pub const FuncGen = struct { .Mul => switch (scalar_ty.zigTypeTag()) { .Int => return self.builder.buildMulReduce(operand), .Float => { - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const neutral_value = scalar_llvm_ty.constReal(1.0); return self.builder.buildFPMulReduce(neutral_value, operand); }, @@ -7556,7
+7636,7 @@ pub const FuncGen = struct { const result_ty = self.air.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); - const llvm_result_ty = try self.dg.llvmType(result_ty); + const llvm_result_ty = try self.dg.lowerType(result_ty); const target = self.dg.module.getTarget(); switch (result_ty.zigTypeTag()) { @@ -7644,7 +7724,7 @@ pub const FuncGen = struct { .Array => { assert(isByRef(result_ty)); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const alloca_inst = self.buildAlloca(llvm_result_ty); alloca_inst.setAlignment(result_ty.abiAlignment(target)); @@ -7679,7 +7759,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.air.typeOfIndex(inst); - const union_llvm_ty = try self.dg.llvmType(union_ty); + const union_llvm_ty = try self.dg.lowerType(union_ty); const target = self.dg.module.getTarget(); const layout = union_ty.unionGetLayout(target); if (layout.payload_size == 0) { @@ -7699,8 +7779,8 @@ pub const FuncGen = struct { const union_obj = union_ty.cast(Type.Payload.Union).?.data; assert(union_obj.haveFieldTypes()); const field = union_obj.fields.values()[extra.field_index]; - const field_llvm_ty = try self.dg.llvmType(field.ty); - const tag_llvm_ty = try self.dg.llvmType(union_obj.tag_ty); + const field_llvm_ty = try self.dg.lowerType(field.ty); + const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty); const field_size = field.ty.abiSize(target); const field_align = field.normalAlignment(target); @@ -7936,7 +8016,7 @@ pub const FuncGen = struct { const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget()); - const llvm_slice_ty = try self.dg.llvmType(slice_ty); + const llvm_slice_ty = try self.dg.lowerType(slice_ty); const llvm_slice_ptr_ty = llvm_slice_ty.pointerType(0); // TODO: Address space const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); @@ -8000,7 +8080,7 @@ pub const FuncGen = struct { // out the relevant bits when accessing the pointee. // Here we perform a bitcast because we want to use the host_size // as the llvm pointer element type. - const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); // TODO this can be removed if we change host_size to be bits instead // of bytes. return self.builder.buildBitCast(struct_ptr, result_llvm_ty, ""); @@ -8015,7 +8095,7 @@ pub const FuncGen = struct { // end of the struct. Treat our struct pointer as an array of two and get // the index to the element at index `1` to get a pointer to the end of // the struct. 
- const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_index = llvm_usize.constInt(1, .False); const indices: [1]*const llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(struct_ptr, &indices, indices.len, ""); @@ -8036,7 +8116,7 @@ ) !?*const llvm.Value { const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = &union_obj.fields.values()[field_index]; - const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); if (!field.ty.hasRuntimeBitsIgnoreComptime()) { return null; } @@ -8075,7 +8155,7 @@ const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); if (info.host_size == 0) { if (isByRef(info.pointee_type)) { - const elem_llvm_ty = try self.dg.llvmType(info.pointee_type); + const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); const result_align = info.pointee_type.abiAlignment(target); const max_align = @maximum(result_align, ptr_alignment); const result_ptr = self.buildAlloca(elem_llvm_ty); @@ -8108,7 +8188,7 @@ const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try self.dg.llvmType(info.pointee_type); + const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); if (isByRef(info.pointee_type)) { const result_align = info.pointee_type.abiAlignment(target); @@ -8546,7 +8626,14 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.Type { if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { - return dg.context.voidType(); + // If the return type is an error set or an error union, then we make this + // an anyerror return type instead, so that it can be coerced into a function + // pointer type which has anyerror as the return type. + if (fn_info.return_type.isError()) { + return dg.lowerType(Type.anyerror); + } else { + return dg.context.voidType(); + } } const target = dg.module.getTarget(); switch (fn_info.cc) { @@ -8554,7 +8641,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm. if (isByRef(fn_info.return_type)) { return dg.context.voidType(); } else { - return dg.llvmType(fn_info.return_type); + return dg.lowerType(fn_info.return_type); } }, .C => { @@ -8575,24 +8662,24 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
else => false, }; switch (target.cpu.arch) { - .mips, .mipsel => return dg.llvmType(fn_info.return_type), + .mips, .mipsel => return dg.lowerType(fn_info.return_type), .x86_64 => switch (target.os.tag) { .windows => switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) { .integer => { if (is_scalar) { - return dg.llvmType(fn_info.return_type); + return dg.lowerType(fn_info.return_type); } else { const abi_size = fn_info.return_type.abiSize(target); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } }, .memory => return dg.context.voidType(), - .sse => return dg.llvmType(fn_info.return_type), + .sse => return dg.lowerType(fn_info.return_type), else => unreachable, }, else => { if (is_scalar) { - return dg.llvmType(fn_info.return_type); + return dg.lowerType(fn_info.return_type); } const classes = x86_64_abi.classifySystemV(fn_info.return_type, target); if (classes[0] == .memory) { @@ -8633,10 +8720,10 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm. }, }, // TODO investigate C ABI for other architectures - else => return dg.llvmType(fn_info.return_type), + else => return dg.lowerType(fn_info.return_type), } }, - else => return dg.llvmType(fn_info.return_type), + else => return dg.lowerType(fn_info.return_type), } } @@ -8991,3 +9078,11 @@ fn buildAllocaInner( return builder.buildAlloca(llvm_ty, ""); } + +fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 { + return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)); +} + +fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 { + return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target)); +} diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index a204dd91ae..61bec1f880 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -498,9 +498,11 @@ pub const DeclState = struct { .ErrorUnion => { const error_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); const abi_size = ty.abiSize(target); - const abi_align = ty.abiAlignment(target); - const payload_off = mem.alignForwardGeneric(u64, error_ty.abiSize(target), abi_align); + const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0; + const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(target); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); @@ -534,7 +536,7 @@ pub const DeclState = struct { try dbg_info_buffer.resize(index + 4); try self.addTypeReloc(atom, error_ty, @intCast(u32, index), null); // DW.AT.data_member_location, DW.FORM.sdata - try dbg_info_buffer.append(0); + try leb128.writeULEB128(dbg_info_buffer.writer(), error_off); // DW.AT.structure_type delimit children try dbg_info_buffer.append(0); @@ -2293,7 +2295,7 @@ fn addDbgInfoErrorSet( // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.sdata - const abi_size = ty.abiSize(target); + const abi_size = Type.anyerror.abiSize(target); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string const name = try ty.nameAllocArena(arena, module); diff --git a/src/type.zig b/src/type.zig index ea65cc8916..145ae4904a 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2317,10 +2317,7 @@ pub const Type = extern union { .const_slice_u8_sentinel_0, .array_u8_sentinel_0, 
.anyerror_void_error_union, - .error_set, - .error_set_single, .error_set_inferred, - .error_set_merged, .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -2361,12 +2358,23 @@ pub const Type = extern union { .fn_void_no_args, .fn_naked_noreturn_no_args, .fn_ccc_void_no_args, + .error_set_single, => return false, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + return names.len > 1; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + return names.len > 1; + }, + // These types have more than one possible value, so the result is the same as // asking whether they are comptime-only types. .anyframe_T, - .optional, .optional_single_mut_pointer, .optional_single_const_pointer, .single_const_pointer, @@ -2388,6 +2396,41 @@ pub const Type = extern union { } }, + .optional => { + var buf: Payload.ElemType = undefined; + const child_ty = ty.optionalChild(&buf); + if (child_ty.isNoReturn()) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) { + return true; + } else if (sema_kit) |sk| { + return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, child_ty)); + } else { + return !comptimeOnly(child_ty); + } + }, + + .error_union => { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. + const data = ty.castTag(.error_union).?.data; + switch (data.error_set.errorSetCardinality()) { + .zero => return hasRuntimeBitsAdvanced(data.payload, ignore_comptime_only, sema_kit), + .one => return !data.payload.isNoReturn(), + .many => { + if (ignore_comptime_only) { + return true; + } else if (sema_kit) |sk| { + return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, ty)); + } else { + return !comptimeOnly(ty); + } + }, + } + }, + .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; if (sema_kit) |sk| { @@ -2467,12 +2510,6 @@ pub const Type = extern union { .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0, - .error_union => { - const payload = ty.castTag(.error_union).?.data; - return (try payload.error_set.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)) or - (try payload.payload.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)); - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types) |field_ty, i| { @@ -2647,13 +2684,22 @@ pub const Type = extern union { }; } - pub fn isNoReturn(self: Type) bool { - const definitely_correct_result = - self.tag_if_small_enough != .bound_fn and - self.zigTypeTag() == .NoReturn; - const fast_result = self.tag_if_small_enough == Tag.noreturn; - assert(fast_result == definitely_correct_result); - return fast_result; + /// TODO add enums with no fields here + pub fn isNoReturn(ty: Type) bool { + switch (ty.tag()) { + .noreturn => return true, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + return names.len == 0; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + return names.len == 0; + }, + else => return false, + } } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. 
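The two hunks above carry the core invariant of this change: an error set with no members behaves like `noreturn` (a value of it can never exist), and an error set with exactly one member needs no runtime bits, because its value is comptime-known. A minimal sketch of the observable consequences, using hypothetical type names; the size expectations follow from the `abiSizeAdvanced` hunks below and mirror the behavior tests added at the end of this diff:

    const expect = @import("std").testing.expect;

    test "single-member error sets need no runtime bits" {
        const E1 = error{Only};
        // error.Only is the only possible value, so E1 itself is zero-bit...
        comptime try expect(@sizeOf(E1) == 0);
        // ...and ?E1 degenerates to a bool-like "null or not" flag.
        comptime try expect(@sizeOf(?E1) == @sizeOf(bool));
        comptime try expect(@alignOf(?E1) == @alignOf(bool));
    }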
@@ -2852,13 +2898,30 @@ pub const Type = extern union { else => unreachable, }, - .error_set, - .error_set_single, + // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, .error_set_inferred, - .error_set_merged, - => return AbiAlignmentAdvanced{ .scalar = 2 }, // TODO revisit this when we have the concept of the error tag type + => return AbiAlignmentAdvanced{ .scalar = 2 }, + + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + if (names.len <= 1) { + return AbiAlignmentAdvanced{ .scalar = 0 }; + } else { + return AbiAlignmentAdvanced{ .scalar = 2 }; + } + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + if (names.len <= 1) { + return AbiAlignmentAdvanced{ .scalar = 0 }; + } else { + return AbiAlignmentAdvanced{ .scalar = 2 }; + } + }, .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(target, strat), @@ -2881,8 +2944,16 @@ pub const Type = extern union { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); - if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr()) { - return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }; + switch (child_type.zigTypeTag()) { + .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + .ErrorSet => switch (child_type.errorSetCardinality()) { + // `?error{}` is comptime-known to be null. + .zero => return AbiAlignmentAdvanced{ .scalar = 0 }, + .one => return AbiAlignmentAdvanced{ .scalar = 1 }, + .many => return abiAlignmentAdvanced(Type.anyerror, target, strat), + }, + .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, + else => {}, } switch (strat) { @@ -2900,31 +2971,35 @@ pub const Type = extern union { }, .error_union => { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. 
const data = ty.castTag(.error_union).?.data; + switch (data.error_set.errorSetCardinality()) { + .zero => return abiAlignmentAdvanced(data.payload, target, strat), + .one => { + if (data.payload.isNoReturn()) { + return AbiAlignmentAdvanced{ .scalar = 0 }; + } + }, + .many => {}, + } + const code_align = abiAlignment(Type.anyerror, target); switch (strat) { .eager, .sema_kit => { - if (!(try data.error_set.hasRuntimeBitsAdvanced(false, sema_kit))) { - return data.payload.abiAlignmentAdvanced(target, strat); - } else if (!(try data.payload.hasRuntimeBitsAdvanced(false, sema_kit))) { - return data.error_set.abiAlignmentAdvanced(target, strat); + if (!(try data.payload.hasRuntimeBitsAdvanced(false, sema_kit))) { + return AbiAlignmentAdvanced{ .scalar = code_align }; } return AbiAlignmentAdvanced{ .scalar = @maximum( + code_align, (try data.payload.abiAlignmentAdvanced(target, strat)).scalar, - (try data.error_set.abiAlignmentAdvanced(target, strat)).scalar, ) }; }, .lazy => |arena| { switch (try data.payload.abiAlignmentAdvanced(target, strat)) { .scalar => |payload_align| { - if (payload_align == 0) { - return data.error_set.abiAlignmentAdvanced(target, strat); - } - switch (try data.error_set.abiAlignmentAdvanced(target, strat)) { - .scalar => |err_set_align| { - return AbiAlignmentAdvanced{ .scalar = @maximum(payload_align, err_set_align) }; - }, - .val => {}, - } + return AbiAlignmentAdvanced{ + .scalar = @maximum(code_align, payload_align), + }; }, .val => {}, } @@ -3018,6 +3093,7 @@ pub const Type = extern union { .@"undefined", .enum_literal, .type_info, + .error_set_single, => return AbiAlignmentAdvanced{ .scalar = 0 }, .noreturn, @@ -3136,6 +3212,7 @@ pub const Type = extern union { .empty_struct_literal, .empty_struct, .void, + .error_set_single, => return AbiSizeAdvanced{ .scalar = 0 }, .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { @@ -3291,14 +3368,30 @@ pub const Type = extern union { }, // TODO revisit this when we have the concept of the error tag type - .error_set, - .error_set_single, .anyerror_void_error_union, .anyerror, .error_set_inferred, - .error_set_merged, => return AbiSizeAdvanced{ .scalar = 2 }, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + if (names.len <= 1) { + return AbiSizeAdvanced{ .scalar = 0 }; + } else { + return AbiSizeAdvanced{ .scalar = 2 }; + } + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + if (names.len <= 1) { + return AbiSizeAdvanced{ .scalar = 0 }; + } else { + return AbiSizeAdvanced{ .scalar = 2 }; + } + }, + .i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) }, .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, @@ -3312,37 +3405,81 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); + + if (child_type.isNoReturn()) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 }; - if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice()) - return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }; + switch (child_type.zigTypeTag()) { + .Pointer => { + const ptr_info = child_type.ptrInfo().data; + const has_null = switch (ptr_info.size) { + .Slice, .C => true, + else => 
ptr_info.@"allowzero", + }; + if (!has_null) { + const ptr_size_bytes = @divExact(target.cpu.arch.ptrBitWidth(), 8); + return AbiSizeAdvanced{ .scalar = ptr_size_bytes }; + } + }, + .ErrorSet => return abiSizeAdvanced(Type.anyerror, target, strat), + else => {}, + } // Optional types are represented as a struct with the child type as the first // field and a boolean as the second. Since the child type's abi alignment is // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal // to the child type's ABI alignment. - return AbiSizeAdvanced{ .scalar = child_type.abiAlignment(target) + child_type.abiSize(target) }; + return AbiSizeAdvanced{ + .scalar = child_type.abiAlignment(target) + child_type.abiSize(target), + }; }, .error_union => { + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. const data = ty.castTag(.error_union).?.data; - if (!data.error_set.hasRuntimeBits() and !data.payload.hasRuntimeBits()) { - return AbiSizeAdvanced{ .scalar = 0 }; - } else if (!data.error_set.hasRuntimeBits()) { - return AbiSizeAdvanced{ .scalar = data.payload.abiSize(target) }; - } else if (!data.payload.hasRuntimeBits()) { - return AbiSizeAdvanced{ .scalar = data.error_set.abiSize(target) }; + // Here we need to distinguish whether the error set is *empty* from whether + // it merely has *one possible value*. In the former case, there + // cannot possibly be an error, so the ABI size is equivalent to the + // payload ABI size. In the latter case, we need to account for the "tag" + // because even if both the payload type and the error set type of an + // error union have no runtime bits, an error union still carries + // 1 bit of data: whether or not the value is an error. + // Zig still uses the error code encoding at runtime, even when only 1 bit + // would suffice. This prevents coercions from needing to branch. + switch (data.error_set.errorSetCardinality()) { + .zero => return abiSizeAdvanced(data.payload, target, strat), + .one => { + if (data.payload.isNoReturn()) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + }, + .many => {}, } - const code_align = abiAlignment(data.error_set, target); + const code_size = abiSize(Type.anyerror, target); + if (!data.payload.hasRuntimeBits()) { + // Same as anyerror. + return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, target); const payload_align = abiAlignment(data.payload, target); - const big_align = @maximum(code_align, payload_align); const payload_size = abiSize(data.payload, target); var size: u64 = 0; - size += abiSize(data.error_set, target); - size = std.mem.alignForwardGeneric(u64, size, payload_align); - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, big_align); + if (code_align > payload_align) { + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + } else { + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + } return AbiSizeAdvanced{ .scalar = size }; }, } @@ -3832,8 +3969,39 @@ return ty.ptrInfo().data.@"allowzero"; } + /// See also `isPtrLikeOptional`.
+ pub fn optionalReprIsPayload(ty: Type) bool { + switch (ty.tag()) { + .optional_single_const_pointer, + .optional_single_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + => return true, + + .optional => { + const child_ty = ty.castTag(.optional).?.data; + switch (child_ty.zigTypeTag()) { + .Pointer => { + const info = child_ty.ptrInfo().data; + switch (info.size) { + .Slice, .C => return false, + .Many, .One => return !info.@"allowzero", + } + }, + .ErrorSet => return true, + else => return false, + } + }, + + .pointer => return ty.castTag(.pointer).?.data.size == .C, + + else => return false, + } + } + /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. + /// See also `optionalReprIsPayload`. pub fn isPtrLikeOptional(self: Type) bool { switch (self.tag()) { .optional_single_const_pointer, .optional_single_mut_pointer, @@ -4166,6 +4334,35 @@ }; } + const ErrorSetCardinality = enum { zero, one, many }; + + pub fn errorSetCardinality(ty: Type) ErrorSetCardinality { + switch (ty.tag()) { + .anyerror => return .many, + .error_set_inferred => return .many, + .error_set_single => return .one, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + switch (names.len) { + 0 => return .zero, + 1 => return .one, + else => return .many, + } + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + switch (names.len) { + 0 => return .zero, + 1 => return .one, + else => return .many, + } + }, + else => unreachable, + } + } + /// Returns true if it is an error set that includes anyerror, false otherwise. /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call.
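`errorSetCardinality` above is the query the lowering code keys on: `.zero` means an error is impossible, `.one` means the error value is comptime-known, and `.many` requires a runtime error code. A brief illustration, assuming the layout rules from the `abiSizeAdvanced` hunk above; the first expectation is asserted verbatim by the new behavior tests, and the second follows from the "same as anyerror" path taken when the payload has no runtime bits:

    const expect = @import("std").testing.expect;

    test "error set cardinality drives error union layout" {
        // .zero: error{}!void cannot hold an error, so it lowers to its payload type.
        comptime try expect(@sizeOf(error{}!void) == @sizeOf(void));
        // .many: a runtime error code is carried even when the payload is zero-bit.
        const Many = error{ A, B };
        comptime try expect(@sizeOf(Many!void) == @sizeOf(anyerror));
    }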
@@ -4658,16 +4855,11 @@ pub const Type = extern union {
             .const_slice,
             .mut_slice,
             .anyopaque,
-            .optional,
             .optional_single_mut_pointer,
             .optional_single_const_pointer,
             .enum_literal,
             .anyerror_void_error_union,
-            .error_union,
-            .error_set,
-            .error_set_single,
             .error_set_inferred,
-            .error_set_merged,
             .@"opaque",
             .var_args_param,
             .manyptr_u8,
@@ -4696,6 +4888,52 @@ pub const Type = extern union {
             .bound_fn,
             => return null,

+            .optional => {
+                var buf: Payload.ElemType = undefined;
+                const child_ty = ty.optionalChild(&buf);
+                if (child_ty.isNoReturn()) {
+                    return Value.@"null";
+                } else {
+                    return null;
+                }
+            },
+
+            .error_union => {
+                const error_ty = ty.errorUnionSet();
+                switch (error_ty.errorSetCardinality()) {
+                    .zero => {
+                        const payload_ty = ty.errorUnionPayload();
+                        if (onePossibleValue(payload_ty)) |_| {
+                            return Value.initTag(.the_only_possible_value);
+                        } else {
+                            return null;
+                        }
+                    },
+                    .one => {
+                        if (ty.errorUnionPayload().isNoReturn()) {
+                            const error_val = onePossibleValue(error_ty).?;
+                            return error_val;
+                        } else {
+                            return null;
+                        }
+                    },
+                    .many => return null,
+                }
+            },
+
+            .error_set_single => return Value.initTag(.the_only_possible_value),
+            .error_set => {
+                const err_set_obj = ty.castTag(.error_set).?.data;
+                if (err_set_obj.names.count() > 1) return null;
+                return Value.initTag(.the_only_possible_value);
+            },
+            .error_set_merged => {
+                const name_map = ty.castTag(.error_set_merged).?.data;
+                if (name_map.count() > 1) return null;
+                return Value.initTag(.the_only_possible_value);
+            },
+
             .@"struct" => {
                 const s = ty.castTag(.@"struct").?.data;
                 assert(s.haveFieldTypes());
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index ada0f3bbf1..312ab1524a 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -121,7 +121,7 @@ test "debug info for optional error set" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;

-    const SomeError = error{Hello};
+    const SomeError = error{ Hello, Hello2 };
     var a_local_variable: ?SomeError = null;
     _ = a_local_variable;
 }
@@ -148,18 +148,46 @@ test "implicit cast to optional to error union to return result loc" {
     //comptime S.entry(); TODO
 }

-test "error: fn returning empty error set can be passed as fn returning any error" {
+test "fn returning empty error set can be passed as fn returning any error" {
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
     entry();
     comptime entry();
 }

+test "fn returning empty error set can be passed as fn returning any error - pointer" {
+    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+    entryPtr();
+    comptime entryPtr();
+}
+
 fn entry() void {
     foo2(bar2);
 }

+fn entryPtr() void {
+    var ptr = &bar2;
+    fooPtr(ptr);
+}
+
 fn foo2(f: fn () anyerror!void) void {
     const x = f();
-    x catch {};
+    x catch {
+        @panic("fail");
+    };
+}
+
+fn fooPtr(f: *const fn () anyerror!void) void {
+    const x = f();
+    x catch {
+        @panic("fail");
+    };
 }

 fn bar2() (error{}!void) {}
@@ -239,7 +267,10 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) !void {
test "comptime err to int of error set with only 1 possible value" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A)); comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A)); @@ -409,9 +440,11 @@ test "return function call to error set from error union function" { } test "optional error set is the same size as error set" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO comptime try expect(@sizeOf(?anyerror) == @sizeOf(anyerror)); + comptime try expect(@alignOf(?anyerror) == @alignOf(anyerror)); const S = struct { fn returnsOptErrSet() ?anyerror { return null; @@ -421,6 +454,65 @@ test "optional error set is the same size as error set" { comptime try expect(S.returnsOptErrSet() == null); } +test "optional error set with only one error is the same size as bool" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + const E = error{only}; + comptime try expect(@sizeOf(?E) == @sizeOf(bool)); + comptime try expect(@alignOf(?E) == @alignOf(bool)); + const S = struct { + fn gimmeNull() ?E { + return null; + } + fn gimmeErr() ?E { + return error.only; + } + }; + try expect(S.gimmeNull() == null); + try expect(error.only == S.gimmeErr().?); + comptime try expect(S.gimmeNull() == null); + comptime try expect(error.only == S.gimmeErr().?); +} + +test "optional empty error set" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + + comptime try expect(@sizeOf(error{}!void) == @sizeOf(void)); + comptime try expect(@alignOf(error{}!void) == @alignOf(void)); + + var x: ?error{} = undefined; + if (x != null) { + @compileError("test failed"); + } +} + +test "empty error set plus zero-bit payload" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + comptime try expect(@sizeOf(error{}!void) == @sizeOf(void)); + comptime try expect(@alignOf(error{}!void) == @alignOf(void)); + + var x: error{}!void = undefined; + if (x) |payload| { + if (payload != {}) { + @compileError("test failed"); + } + } else |_| { + @compileError("test failed"); + } + const S = struct { + fn empty() error{}!void {} + fn inferred() !void { + return empty(); + } + }; + try S.inferred(); +} + test "nested catch" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index e0e787509a..383c32172c 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -425,7 +425,6 @@ test "f64 at compile time is lossy" { } test { - if (builtin.zig_backend != .stage1 and builtin.os.tag == .macos) return error.SkipZigTest; comptime try expect(@as(f128, 1 
 }
@@ -573,28 +572,6 @@ test "inlined loop has array literal with elided runtime scope on first iteratio
     }
 }

-test "call method on bound fn referring to var instance" {
-    if (builtin.zig_backend != .stage1) {
-        // Let's delay solving this one; I want to try to eliminate bound functions from
-        // the language.
-        return error.SkipZigTest; // TODO
-    }
-
-    try expect(bound_fn() == 1237);
-}
-
-const SimpleStruct = struct {
-    field: i32,
-
-    fn method(self: *const SimpleStruct) i32 {
-        return self.field + 3;
-    }
-};
-
-var simple_struct = SimpleStruct{ .field = 1234 };
-
-const bound_fn = simple_struct.method;
-
 test "ptr to local array argument at comptime" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -669,8 +646,6 @@ pub fn TypeWithCompTimeSlice(comptime field_name: []const u8) type {
 }

 test "comptime function with mutable pointer is not memoized" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
     comptime {
         var x: i32 = 1;
         const ptr = &x;
@@ -685,8 +660,6 @@ fn increment(value: *i32) void {
 }

 test "const ptr to comptime mutable data is not memoized" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
     comptime {
         var foo = SingleFieldStruct{ .x = 1 };
         try expect(foo.read_x() == 1);
diff --git a/test/cases/compile_errors/call method on bound fn referring to var instance.zig b/test/cases/compile_errors/call method on bound fn referring to var instance.zig
new file mode 100644
index 0000000000..10ff584124
--- /dev/null
+++ b/test/cases/compile_errors/call method on bound fn referring to var instance.zig
@@ -0,0 +1,20 @@
+export fn entry() void {
+    bad(bound_fn() == 1237);
+}
+const SimpleStruct = struct {
+    field: i32,
+
+    fn method(self: *const SimpleStruct) i32 {
+        return self.field + 3;
+    }
+};
+var simple_struct = SimpleStruct{ .field = 1234 };
+const bound_fn = simple_struct.method;
+fn bad(ok: bool) void {
+    _ = ok;
+}
+// error
+// target=native
+// backend=stage2
+//
+// :12:18: error: unable to resolve comptime value
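Finally, a user-level sketch (not part of the patch; `E`, `alwaysFails`, and `neverFails` are hypothetical names) of the semantics the behavior tests above pin down for zero- and one-element error sets:

    const std = @import("std");
    const expect = std.testing.expect;

    const E = error{Only};

    fn alwaysFails() E!u32 {
        return error.Only;
    }

    fn neverFails() error{}!void {}

    test "zero- and one-element error sets at call sites (sketch)" {
        // E has exactly one possible value, so ?E only needs the null bit.
        comptime try expect(@sizeOf(?E) == @sizeOf(bool));
        const err = if (alwaysFails()) |_| unreachable else |e| e;
        try expect(err == error.Only);

        // A function returning an empty error set coerces to one returning
        // anyerror, which is what the foo2/fooPtr tests exercise.
        const f: fn () anyerror!void = neverFails;
        try f();
    }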