From 3264abe3d8f658e1b7275d2be80e43eddfc098dc Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 22 May 2022 21:51:44 -0700 Subject: [PATCH 01/18] stage2: fixes for error union semantics * Sema: avoid unnecessary safety checks when an error set is empty. * Sema: make zirErrorToInt handle comptime errors that are represented as integers. * Sema: make empty error sets properly integrate with typeHasOnePossibleValue. * Type: correct the ABI alignment and size of error unions which have both zero-bit error set and zero-bit payload. The previous code did not account for the fact that we still need to store a bit for whether there is an error. * LLVM: lower error unions possibly with the payload first or with the error code first, depending on alignment. Previously it always put the error code first and used a padding array. * LLVM: lower functions which have an empty error set as the return type the same as anyerror, so that they can be used where fn()anyerror function pointers are expected. In such functions, Zig will lower ret to returning zero instead of void. As a result, one more behavior test is passing. --- lib/std/debug.zig | 2 +- src/Sema.zig | 63 ++++++++++--- src/codegen/llvm.zig | 202 ++++++++++++++++++++++++++++------------ src/type.zig | 197 ++++++++++++++++++++++++++++++--------- test/behavior/error.zig | 31 +++++- 5 files changed, 370 insertions(+), 125 deletions(-) diff --git a/lib/std/debug.zig b/lib/std/debug.zig index 83667c758b..86ed1c5a65 100644 --- a/lib/std/debug.zig +++ b/lib/std/debug.zig @@ -1798,7 +1798,7 @@ fn resetSegfaultHandler() void { .mask = os.empty_sigset, .flags = 0, }; - // do nothing if an error happens to avoid a double-panic + // To avoid a double-panic, do nothing if an error happens here. updateSegfaultHandler(&act) catch {}; } diff --git a/src/Sema.zig b/src/Sema.zig index d3fca6d2b2..b718912a38 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5899,12 +5899,22 @@ fn zirErrorToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError! if (val.isUndef()) { return sema.addConstUndef(result_ty); } - const payload = try sema.arena.create(Value.Payload.U64); - payload.* = .{ - .base = .{ .tag = .int_u64 }, - .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, - }; - return sema.addConstant(result_ty, Value.initPayload(&payload.base)); + switch (val.tag()) { + .@"error" => { + const payload = try sema.arena.create(Value.Payload.U64); + payload.* = .{ + .base = .{ .tag = .int_u64 }, + .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, + }; + return sema.addConstant(result_ty, Value.initPayload(&payload.base)); + }, + + // This is not a valid combination with the type `anyerror`. + .the_only_possible_value => unreachable, + + // Assume it's already encoded as an integer. + else => return sema.addConstant(result_ty, val), + } } try sema.requireRuntimeBlock(block, src); @@ -6261,19 +6271,24 @@ fn zirErrUnionPayload( }); } + const result_ty = operand_ty.errorUnionPayload(); if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.fail(block, src, "caught unexpected error '{s}'", .{name}); } const data = val.castTag(.eu_payload).?.data; - const result_ty = operand_ty.errorUnionPayload(); return sema.addConstant(result_ty, data); } + try sema.requireRuntimeBlock(block, src); - if (safety_check and block.wantSafety()) { + + // If the error set has no fields then no safety check is needed. 
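+    // (An empty error set has no possible error values, so this unwrap can
+    // never fail and the panic path would be dead code.)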
+ if (safety_check and block.wantSafety() and + operand_ty.errorUnionSet().errorSetCardinality() != .zero) + { try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err, .is_non_err); } - const result_ty = operand_ty.errorUnionPayload(); + return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); } @@ -6311,7 +6326,8 @@ fn analyzeErrUnionPayloadPtr( }); } - const payload_ty = operand_ty.elemType().errorUnionPayload(); + const err_union_ty = operand_ty.elemType(); + const payload_ty = err_union_ty.errorUnionPayload(); const operand_pointer_ty = try Type.ptr(sema.arena, sema.mod, .{ .pointee_type = payload_ty, .mutable = !operand_ty.isConstPtr(), @@ -6351,9 +6367,14 @@ fn analyzeErrUnionPayloadPtr( } try sema.requireRuntimeBlock(block, src); - if (safety_check and block.wantSafety()) { + + // If the error set has no fields then no safety check is needed. + if (safety_check and block.wantSafety() and + err_union_ty.errorUnionSet().errorSetCardinality() != .zero) + { try sema.panicUnwrapError(block, src, operand, .unwrap_errunion_err_ptr, .is_non_err_ptr); } + const air_tag: Air.Inst.Tag = if (initializing) .errunion_payload_ptr_set else @@ -23301,10 +23322,7 @@ pub fn typeHasOnePossibleValue( .enum_literal, .anyerror_void_error_union, .error_union, - .error_set, - .error_set_single, .error_set_inferred, - .error_set_merged, .@"opaque", .var_args_param, .manyptr_u8, @@ -23333,6 +23351,23 @@ pub fn typeHasOnePossibleValue( .bound_fn, => return null, + .error_set_single => { + const name = ty.castTag(.error_set_single).?.data; + return try Value.Tag.@"error".create(sema.arena, .{ .name = name }); + }, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + if (names.len > 1) return null; + return try Value.Tag.@"error".create(sema.arena, .{ .name = names[0] }); + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + if (names.len > 1) return null; + return try Value.Tag.@"error".create(sema.arena, .{ .name = names[0] }); + }, + .@"struct" => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); const s = resolved_ty.castTag(.@"struct").?.data; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ef33f39f55..95d12dff3a 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2451,20 +2451,22 @@ pub const DeclGen = struct { .ErrorUnion => { const error_type = t.errorUnionSet(); const payload_type = t.errorUnionPayload(); - const llvm_error_type = try dg.llvmType(error_type); - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { - return llvm_error_type; + if (error_type.errorSetCardinality() == .zero) { + return dg.llvmType(payload_type); } + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + return try dg.llvmType(Type.anyerror); + } + const llvm_error_type = try dg.llvmType(error_type); const llvm_payload_type = try dg.llvmType(payload_type); const payload_align = payload_type.abiAlignment(target); - const error_size = error_type.abiSize(target); - if (payload_align > error_size) { - const pad_type = dg.context.intType(8).arrayType(@intCast(u32, payload_align - error_size)); - const fields: [3]*const llvm.Type = .{ llvm_error_type, pad_type, llvm_payload_type }; + const error_align = Type.anyerror.abiAlignment(target); + if (error_align > payload_align) { + const fields: [2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type }; return dg.context.structType(&fields, fields.len, .False); } else { - const fields: 
[2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type }; + const fields: [2]*const llvm.Type = .{ llvm_payload_type, llvm_error_type }; return dg.context.structType(&fields, fields.len, .False); } }, @@ -3103,6 +3105,10 @@ pub const DeclGen = struct { .ErrorUnion => { const error_type = tv.ty.errorUnionSet(); const payload_type = tv.ty.errorUnionPayload(); + if (error_type.errorSetCardinality() == .zero) { + const payload_val = tv.val.castTag(.eu_payload).?.data; + return dg.genTypedValue(.{ .ty = payload_type, .val = payload_val }); + } const is_pl = tv.val.errorUnionIsPayload(); if (!payload_type.hasRuntimeBitsIgnoreComptime()) { @@ -3110,28 +3116,24 @@ pub const DeclGen = struct { const err_val = if (!is_pl) tv.val else Value.initTag(.zero); return dg.genTypedValue(.{ .ty = error_type, .val = err_val }); } - var len: u8 = 2; - var fields: [3]*const llvm.Value = .{ - try dg.genTypedValue(.{ - .ty = error_type, - .val = if (is_pl) Value.initTag(.zero) else tv.val, - }), - try dg.genTypedValue(.{ - .ty = payload_type, - .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), - }), - undefined, - }; const payload_align = payload_type.abiAlignment(target); - const error_size = error_type.abiSize(target); - if (payload_align > error_size) { - fields[2] = fields[1]; - const pad_type = dg.context.intType(8).arrayType(@intCast(u32, payload_align - error_size)); - fields[1] = pad_type.getUndef(); - len += 1; + const error_align = Type.anyerror.abiAlignment(target); + const llvm_error_value = try dg.genTypedValue(.{ + .ty = error_type, + .val = if (is_pl) Value.initTag(.zero) else tv.val, + }); + const llvm_payload_value = try dg.genTypedValue(.{ + .ty = payload_type, + .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), + }); + if (error_align > payload_align) { + const fields: [2]*const llvm.Value = .{ llvm_error_value, llvm_payload_value }; + return dg.context.constStruct(&fields, fields.len, .False); + } else { + const fields: [2]*const llvm.Value = .{ llvm_payload_value, llvm_error_value }; + return dg.context.constStruct(&fields, fields.len, .False); } - return dg.context.constStruct(&fields, len, .False); }, .Struct => { const llvm_struct_ty = try dg.llvmType(tv.ty); @@ -4338,11 +4340,19 @@ pub const FuncGen = struct { _ = self.builder.buildRetVoid(); return null; } + const fn_info = self.dg.decl.ty.fnInfo(); if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { - _ = self.builder.buildRetVoid(); + if (fn_info.return_type.isError()) { + // Functions with an empty error set are emitted with an error code + // return type and return zero so they can be function pointers coerced + // to functions that return anyerror. 
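+                // (A caller invoking this function through an anyerror-returning
+                // pointer reads the returned error code; zero means "no error".)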
+ const err_int = try self.dg.llvmType(Type.anyerror); + _ = self.builder.buildRet(err_int.constInt(0, .False)); + } else { + _ = self.builder.buildRetVoid(); + } return null; } - const fn_info = self.dg.decl.ty.fnInfo(); const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); const operand = try self.resolveInst(un_op); const llvm_ret_ty = operand.typeOf(); @@ -4369,13 +4379,25 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime() or self.ret_ptr != null) { + const fn_info = self.dg.decl.ty.fnInfo(); + if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (fn_info.return_type.isError()) { + // Functions with an empty error set are emitted with an error code + // return type and return zero so they can be function pointers coerced + // to functions that return anyerror. + const err_int = try self.dg.llvmType(Type.anyerror); + _ = self.builder.buildRet(err_int.constInt(0, .False)); + } else { + _ = self.builder.buildRetVoid(); + } + return null; + } + if (self.ret_ptr != null) { _ = self.builder.buildRetVoid(); return null; } const ptr = try self.resolveInst(un_op); const target = self.dg.module.getTarget(); - const fn_info = self.dg.decl.ty.fnInfo(); const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); const llvm_ret_ty = try self.dg.llvmType(ret_ty); const casted_ptr = if (abi_ret_ty == llvm_ret_ty) ptr else p: { @@ -5433,18 +5455,30 @@ pub const FuncGen = struct { const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror)); const zero = err_set_ty.constNull(); + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + const llvm_i1 = self.context.intType(1); + switch (op) { + .EQ => return llvm_i1.constInt(1, .False), // 0 == 0 + .NE => return llvm_i1.constInt(0, .False), // 0 != 0 + else => unreachable, + } + } + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand; return self.builder.buildICmp(op, loaded, zero, ""); } + const target = self.dg.module.getTarget(); + const err_field_index = errUnionErrorOffset(payload_ty, target); + if (operand_is_ptr or isByRef(err_union_ty)) { - const err_field_ptr = self.builder.buildStructGEP(operand, 0, ""); + const err_field_ptr = self.builder.buildStructGEP(operand, err_field_index, ""); const loaded = self.builder.buildLoad(err_field_ptr, ""); return self.builder.buildICmp(op, loaded, zero, ""); } - const loaded = self.builder.buildExtractValue(operand, 0, ""); + const loaded = self.builder.buildExtractValue(operand, err_field_index, ""); return self.builder.buildICmp(op, loaded, zero, ""); } @@ -5544,11 +5578,17 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const result_ty = self.air.getRefType(ty_op.ty); + const operand_ty = self.air.typeOf(ty_op.operand); + const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + // If the error set has no fields, then the payload and the error + // union are the same value. 
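+        // (No error code field is lowered for such a union, so the operand can
+        // be returned unchanged, without any GEP or extractvalue.)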
+ if (error_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } + const result_ty = self.air.typeOfIndex(inst); const payload_ty = if (operand_is_ptr) result_ty.childType() else result_ty; - const target = self.dg.module.getTarget(); - const offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const offset = errUnionPayloadOffset(payload_ty, target); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { if (!operand_is_ptr) return null; @@ -5574,54 +5614,70 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); - const err_set_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + const err_llvm_ty = try self.dg.llvmType(Type.anyerror); + if (operand_is_ptr) { + return self.builder.buildBitCast(operand, err_llvm_ty.pointerType(0), ""); + } else { + return err_llvm_ty.constInt(0, .False); + } + } - const payload_ty = err_set_ty.errorUnionPayload(); + const payload_ty = err_union_ty.errorUnionPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { if (!operand_is_ptr) return operand; return self.builder.buildLoad(operand, ""); } - if (operand_is_ptr or isByRef(err_set_ty)) { - const err_field_ptr = self.builder.buildStructGEP(operand, 0, ""); + const target = self.dg.module.getTarget(); + const offset = errUnionErrorOffset(payload_ty, target); + + if (operand_is_ptr or isByRef(err_union_ty)) { + const err_field_ptr = self.builder.buildStructGEP(operand, offset, ""); return self.builder.buildLoad(err_field_ptr, ""); } - return self.builder.buildExtractValue(operand, 0, ""); + return self.builder.buildExtractValue(operand, offset, ""); } fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const error_set_ty = self.air.typeOf(ty_op.operand).childType(); + const error_union_ty = self.air.typeOf(ty_op.operand).childType(); - const error_ty = error_set_ty.errorUnionSet(); - const payload_ty = error_set_ty.errorUnionPayload(); + const error_ty = error_union_ty.errorUnionSet(); + if (error_ty.errorSetCardinality() == .zero) { + // TODO: write undefined bytes through the pointer here + return operand; + } + const payload_ty = error_union_ty.errorUnionPayload(); const non_error_val = try self.dg.genTypedValue(.{ .ty = error_ty, .val = Value.zero }); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - // We have a pointer to a i1. We need to set it to 1 and then return the same pointer. _ = self.builder.buildStore(non_error_val, operand); return operand; } const index_type = self.context.intType(32); + const target = self.dg.module.getTarget(); { + const error_offset = errUnionErrorOffset(payload_ty, target); // First set the non-error value. 
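+            // (Zero is the sentinel for "no error"; it is stored below with the
+            // error code's ABI alignment.)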
const indices: [2]*const llvm.Value = .{ index_type.constNull(), // dereference the pointer - index_type.constNull(), // first field is the payload + index_type.constInt(error_offset, .False), }; const non_null_ptr = self.builder.buildInBoundsGEP(operand, &indices, indices.len, ""); - _ = self.builder.buildStore(non_error_val, non_null_ptr); + const store_inst = self.builder.buildStore(non_error_val, non_null_ptr); + store_inst.setAlignment(Type.anyerror.abiAlignment(target)); } // Then return the payload pointer (only if it is used). if (self.liveness.isUnused(inst)) return null; - const target = self.dg.module.getTarget(); - const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const payload_offset = errUnionPayloadOffset(payload_ty, target); const indices: [2]*const llvm.Value = .{ index_type.constNull(), // dereference the pointer - index_type.constInt(payload_offset, .False), // second field is the payload + index_type.constInt(payload_offset, .False), }; return self.builder.buildInBoundsGEP(operand, &indices, indices.len, ""); } @@ -5669,21 +5725,26 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const payload_ty = self.air.typeOf(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); const operand = try self.resolveInst(ty_op.operand); + if (inst_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } + const payload_ty = self.air.typeOf(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } - const inst_ty = self.air.typeOfIndex(inst); - const ok_err_code = self.context.intType(16).constNull(); + const ok_err_code = (try self.dg.llvmType(Type.anyerror)).constNull(); const err_un_llvm_ty = try self.dg.llvmType(inst_ty); const target = self.dg.module.getTarget(); - const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const payload_offset = errUnionPayloadOffset(payload_ty, target); + const error_offset = errUnionErrorOffset(payload_ty, target); if (isByRef(inst_ty)) { const result_ptr = self.buildAlloca(err_un_llvm_ty); - const err_ptr = self.builder.buildStructGEP(result_ptr, 0, ""); - _ = self.builder.buildStore(ok_err_code, err_ptr); + const err_ptr = self.builder.buildStructGEP(result_ptr, error_offset, ""); + const store_inst = self.builder.buildStore(ok_err_code, err_ptr); + store_inst.setAlignment(Type.anyerror.abiAlignment(target)); const payload_ptr = self.builder.buildStructGEP(result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -5694,7 +5755,7 @@ pub const FuncGen = struct { return result_ptr; } - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, 0, ""); + const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), ok_err_code, error_offset, ""); return self.builder.buildInsertValue(partial, operand, payload_offset, ""); } @@ -5711,11 +5772,13 @@ pub const FuncGen = struct { const err_un_llvm_ty = try self.dg.llvmType(err_un_ty); const target = self.dg.module.getTarget(); - const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1; + const payload_offset = errUnionPayloadOffset(payload_ty, target); + const error_offset = errUnionErrorOffset(payload_ty, target); if (isByRef(err_un_ty)) { const result_ptr = self.buildAlloca(err_un_llvm_ty); - const err_ptr 
= self.builder.buildStructGEP(result_ptr, 0, ""); - _ = self.builder.buildStore(operand, err_ptr); + const err_ptr = self.builder.buildStructGEP(result_ptr, error_offset, ""); + const store_inst = self.builder.buildStore(operand, err_ptr); + store_inst.setAlignment(Type.anyerror.abiAlignment(target)); const payload_ptr = self.builder.buildStructGEP(result_ptr, payload_offset, ""); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -5728,7 +5791,7 @@ pub const FuncGen = struct { return result_ptr; } - const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, 0, ""); + const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, error_offset, ""); // TODO set payload bytes to undef return partial; } @@ -8546,7 +8609,14 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool /// be effectively bitcasted to the actual return type. fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.Type { if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) { - return dg.context.voidType(); + // If the return type is an error set or an error union, then we make this + // anyerror return type instead, so that it can be coerced into a function + // pointer type which has anyerror as the return type. + if (fn_info.return_type.isError()) { + return dg.llvmType(Type.anyerror); + } else { + return dg.context.voidType(); + } } const target = dg.module.getTarget(); switch (fn_info.cc) { @@ -8991,3 +9061,11 @@ fn buildAllocaInner( return builder.buildAlloca(llvm_ty, ""); } + +fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u1 { + return @boolToInt(Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)); +} + +fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u1 { + return @boolToInt(Type.anyerror.abiAlignment(target) <= payload_ty.abiAlignment(target)); +} diff --git a/src/type.zig b/src/type.zig index ea65cc8916..4b8a41915f 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2317,10 +2317,7 @@ pub const Type = extern union { .const_slice_u8_sentinel_0, .array_u8_sentinel_0, .anyerror_void_error_union, - .error_set, - .error_set_single, .error_set_inferred, - .error_set_merged, .manyptr_u8, .manyptr_const_u8, .manyptr_const_u8_sentinel_0, @@ -2361,8 +2358,20 @@ pub const Type = extern union { .fn_void_no_args, .fn_naked_noreturn_no_args, .fn_ccc_void_no_args, + .error_set_single, => return false, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + return names.len > 1; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + return names.len > 1; + }, + // These types have more than one possible value, so the result is the same as // asking whether they are comptime-only types. .anyframe_T, @@ -2388,6 +2397,21 @@ pub const Type = extern union { } }, + .error_union => { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. 
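+            // (All three of hasRuntimeBitsAdvanced, abiAlignmentAdvanced, and
+            // abiSizeAdvanced now special-case an empty error set by deferring
+            // to the payload type.)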
+ const data = ty.castTag(.error_union).?.data; + if (data.error_set.errorSetCardinality() == .zero) { + return hasRuntimeBitsAdvanced(data.payload, ignore_comptime_only, sema_kit); + } else if (ignore_comptime_only) { + return true; + } else if (sema_kit) |sk| { + return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, ty)); + } else { + return !comptimeOnly(ty); + } + }, + .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; if (sema_kit) |sk| { @@ -2467,12 +2491,6 @@ pub const Type = extern union { .int_signed, .int_unsigned => return ty.cast(Payload.Bits).?.data != 0, - .error_union => { - const payload = ty.castTag(.error_union).?.data; - return (try payload.error_set.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)) or - (try payload.payload.hasRuntimeBitsAdvanced(ignore_comptime_only, sema_kit)); - }, - .tuple, .anon_struct => { const tuple = ty.tupleFields(); for (tuple.types) |field_ty, i| { @@ -2852,13 +2870,30 @@ pub const Type = extern union { else => unreachable, }, - .error_set, - .error_set_single, + // TODO revisit this when we have the concept of the error tag type .anyerror_void_error_union, .anyerror, .error_set_inferred, - .error_set_merged, - => return AbiAlignmentAdvanced{ .scalar = 2 }, // TODO revisit this when we have the concept of the error tag type + => return AbiAlignmentAdvanced{ .scalar = 2 }, + + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + if (names.len <= 1) { + return AbiAlignmentAdvanced{ .scalar = 0 }; + } else { + return AbiAlignmentAdvanced{ .scalar = 2 }; + } + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + if (names.len <= 1) { + return AbiAlignmentAdvanced{ .scalar = 0 }; + } else { + return AbiAlignmentAdvanced{ .scalar = 2 }; + } + }, .array, .array_sentinel => return ty.elemType().abiAlignmentAdvanced(target, strat), @@ -2900,31 +2935,29 @@ pub const Type = extern union { }, .error_union => { + // This code needs to be kept in sync with the equivalent switch prong + // in abiSizeAdvanced. 
const data = ty.castTag(.error_union).?.data; + if (data.error_set.errorSetCardinality() == .zero) { + return abiAlignmentAdvanced(data.payload, target, strat); + } + const code_align = abiAlignment(Type.anyerror, target); switch (strat) { .eager, .sema_kit => { - if (!(try data.error_set.hasRuntimeBitsAdvanced(false, sema_kit))) { - return data.payload.abiAlignmentAdvanced(target, strat); - } else if (!(try data.payload.hasRuntimeBitsAdvanced(false, sema_kit))) { - return data.error_set.abiAlignmentAdvanced(target, strat); + if (!(try data.payload.hasRuntimeBitsAdvanced(false, sema_kit))) { + return AbiAlignmentAdvanced{ .scalar = code_align }; } return AbiAlignmentAdvanced{ .scalar = @maximum( + code_align, (try data.payload.abiAlignmentAdvanced(target, strat)).scalar, - (try data.error_set.abiAlignmentAdvanced(target, strat)).scalar, ) }; }, .lazy => |arena| { switch (try data.payload.abiAlignmentAdvanced(target, strat)) { .scalar => |payload_align| { - if (payload_align == 0) { - return data.error_set.abiAlignmentAdvanced(target, strat); - } - switch (try data.error_set.abiAlignmentAdvanced(target, strat)) { - .scalar => |err_set_align| { - return AbiAlignmentAdvanced{ .scalar = @maximum(payload_align, err_set_align) }; - }, - .val => {}, - } + return AbiAlignmentAdvanced{ + .scalar = @maximum(code_align, payload_align), + }; }, .val => {}, } @@ -3018,6 +3051,7 @@ pub const Type = extern union { .@"undefined", .enum_literal, .type_info, + .error_set_single, => return AbiAlignmentAdvanced{ .scalar = 0 }, .noreturn, @@ -3136,6 +3170,7 @@ pub const Type = extern union { .empty_struct_literal, .empty_struct, .void, + .error_set_single, => return AbiSizeAdvanced{ .scalar = 0 }, .@"struct", .tuple, .anon_struct => switch (ty.containerLayout()) { @@ -3291,14 +3326,30 @@ pub const Type = extern union { }, // TODO revisit this when we have the concept of the error tag type - .error_set, - .error_set_single, .anyerror_void_error_union, .anyerror, .error_set_inferred, - .error_set_merged, => return AbiSizeAdvanced{ .scalar = 2 }, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + if (names.len <= 1) { + return AbiSizeAdvanced{ .scalar = 0 }; + } else { + return AbiSizeAdvanced{ .scalar = 2 }; + } + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + if (names.len <= 1) { + return AbiSizeAdvanced{ .scalar = 0 }; + } else { + return AbiSizeAdvanced{ .scalar = 2 }; + } + }, + .i16, .u16 => return AbiSizeAdvanced{ .scalar = intAbiSize(16, target) }, .i32, .u32 => return AbiSizeAdvanced{ .scalar = intAbiSize(32, target) }, .i64, .u64 => return AbiSizeAdvanced{ .scalar = intAbiSize(64, target) }, @@ -3325,24 +3376,42 @@ pub const Type = extern union { }, .error_union => { + // This code needs to be kept in sync with the equivalent switch prong + // in abiAlignmentAdvanced. const data = ty.castTag(.error_union).?.data; - if (!data.error_set.hasRuntimeBits() and !data.payload.hasRuntimeBits()) { - return AbiSizeAdvanced{ .scalar = 0 }; - } else if (!data.error_set.hasRuntimeBits()) { - return AbiSizeAdvanced{ .scalar = data.payload.abiSize(target) }; - } else if (!data.payload.hasRuntimeBits()) { - return AbiSizeAdvanced{ .scalar = data.error_set.abiSize(target) }; + // Here we need to care whether or not the error set is *empty* or whether + // it only has *one possible value*. 
In the former case, it means there + // cannot possibly be an error, meaning the ABI size is equivalent to the + // payload ABI size. In the latter case, we need to account for the "tag" + // because even if both the payload type and the error set type of an + // error union have no runtime bits, an error union still has + // 1 bit of data which is whether or not the value is an error. + // Zig still uses the error code encoding at runtime, even when only 1 bit + // would suffice. This prevents coercions from needing to branch. + if (data.error_set.errorSetCardinality() == .zero) { + return abiSizeAdvanced(data.payload, target, strat); } - const code_align = abiAlignment(data.error_set, target); + const code_size = abiSize(Type.anyerror, target); + if (!data.payload.hasRuntimeBits()) { + // Same as anyerror. + return AbiSizeAdvanced{ .scalar = code_size }; + } + const code_align = abiAlignment(Type.anyerror, target); const payload_align = abiAlignment(data.payload, target); - const big_align = @maximum(code_align, payload_align); const payload_size = abiSize(data.payload, target); var size: u64 = 0; - size += abiSize(data.error_set, target); - size = std.mem.alignForwardGeneric(u64, size, payload_align); - size += payload_size; - size = std.mem.alignForwardGeneric(u64, size, big_align); + if (code_align > payload_align) { + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + } else { + size += payload_size; + size = std.mem.alignForwardGeneric(u64, size, code_align); + size += code_size; + size = std.mem.alignForwardGeneric(u64, size, payload_align); + } return AbiSizeAdvanced{ .scalar = size }; }, } @@ -4166,6 +4235,35 @@ pub const Type = extern union { }; } + const ErrorSetCardinality = enum { zero, one, many }; + + pub fn errorSetCardinality(ty: Type) ErrorSetCardinality { + switch (ty.tag()) { + .anyerror => return .many, + .error_set_inferred => return .many, + .error_set_single => return .one, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + switch (names.len) { + 0 => return .zero, + 1 => return .one, + else => return .many, + } + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + switch (names.len) { + 0 => return .zero, + 1 => return .one, + else => return .many, + } + }, + else => unreachable, + } + } + /// Returns true if it is an error set that includes anyerror, false otherwise. /// Note that the result may be a false negative if the type did not get error set /// resolution prior to this call. 
@@ -4664,10 +4762,7 @@ pub const Type = extern union {
             .enum_literal,
             .anyerror_void_error_union,
             .error_union,
-            .error_set,
-            .error_set_single,
             .error_set_inferred,
-            .error_set_merged,
             .@"opaque",
             .var_args_param,
             .manyptr_u8,
@@ -4696,6 +4791,18 @@ pub const Type = extern union {
             .bound_fn,
             => return null,
 
+            .error_set_single => return Value.initTag(.the_only_possible_value),
+            .error_set => {
+                const err_set_obj = ty.castTag(.error_set).?.data;
+                if (err_set_obj.names.count() > 1) return null;
+                return Value.initTag(.the_only_possible_value);
+            },
+            .error_set_merged => {
+                const name_map = ty.castTag(.error_set_merged).?.data;
+                if (name_map.count() > 1) return null;
+                return Value.initTag(.the_only_possible_value);
+            },
+
             .@"struct" => {
                 const s = ty.castTag(.@"struct").?.data;
                 assert(s.haveFieldTypes());
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index ada0f3bbf1..376d1bdf09 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -148,18 +148,39 @@ test "implicit cast to optional to error union to return result loc" {
     //comptime S.entry(); TODO
 }
 
-test "error: fn returning empty error set can be passed as fn returning any error" {
+test "fn returning empty error set can be passed as fn returning any error" {
     entry();
     comptime entry();
 }
 
+test "fn returning empty error set can be passed as fn returning any error - pointer" {
+    if (builtin.zig_backend == .stage1) return error.SkipZigTest;
+
+    entryPtr();
+    comptime entryPtr();
+}
+
 fn entry() void {
     foo2(bar2);
 }
 
+fn entryPtr() void {
+    var ptr = &bar2;
+    fooPtr(ptr);
+}
+
 fn foo2(f: fn () anyerror!void) void {
     const x = f();
-    x catch {};
+    x catch {
+        @panic("fail");
+    };
+}
+
+fn fooPtr(f: *const fn () anyerror!void) void {
+    const x = f();
+    x catch {
+        @panic("fail");
+    };
 }
 
 fn bar2() (error{}!void) {}
@@ -239,7 +260,11 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) !void {
 }
 
 test "comptime err to int of error set with only 1 possible value" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
     comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));

From c90a97f9be9ffef858b0e450de5006f61a12fafd Mon Sep 17 00:00:00 2001
From: Luuk de Gram
Date: Mon, 23 May 2022 18:24:03 +0200
Subject: [PATCH 02/18] codegen: Order error union fields per alignment

Based on the alignment of the payload, the native backends now lower the
error union's fields (error set & payload) in the correct order. For
example, ErrorA!u8 lowers the error set's value first and then the
payload, whereas ErrorA!u32 lowers the payload first.
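To illustrate the ordering rule with plain structs (a sketch, not part of
this commit; the struct names are invented and u16 stands in for the 2-byte
error code):

    const std = @import("std");

    // anyerror!u8: the error code (align 2) dominates, so it comes first.
    const ErrorFirst = extern struct { code: u16, payload: u8 };
    // anyerror!u32: the payload (align 4) dominates, so it comes first.
    const PayloadFirst = extern struct { payload: u32, code: u16 };

    test "field order follows alignment" {
        try std.testing.expectEqual(@as(usize, 0), @offsetOf(ErrorFirst, "code"));
        try std.testing.expectEqual(@as(usize, 0), @offsetOf(PayloadFirst, "payload"));
    }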
--- src/codegen.zig | 50 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 8 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index bd556baa5e..81b303ab82 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -702,16 +702,50 @@ pub fn generateSymbol( .ErrorUnion => { const error_ty = typed_value.ty.errorUnionSet(); const payload_ty = typed_value.ty.errorUnionPayload(); + + if (error_ty.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return generateSymbol(bin_file, src_loc, .{ + .ty = payload_ty, + .val = payload_val, + }, code, debug_output, reloc_info); + } + const is_payload = typed_value.val.errorUnionIsPayload(); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + const err_val = if (!is_payload) typed_value.val else Value.initTag(.zero); + return generateSymbol(bin_file, src_loc, .{ + .ty = error_ty, + .val = err_val, + }, code, debug_output, reloc_info); + } + + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); const abi_align = typed_value.ty.abiAlignment(target); - { - const error_val = if (!is_payload) typed_value.val else Value.initTag(.zero); - const begin = code.items.len; + // error value first when its type is larger than the error union's payload + if (error_align > payload_align) { switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, - .val = error_val, + .val = if (is_payload) Value.initTag(.zero) else typed_value.val, + }, code, debug_output, reloc_info)) { + .appended => {}, + .externally_managed => |external_slice| { + code.appendSliceAssumeCapacity(external_slice); + }, + .fail => |em| return Result{ .fail = em }, + } + } + + // emit payload part of the error union + { + const begin = code.items.len; + const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef); + switch (try generateSymbol(bin_file, src_loc, .{ + .ty = payload_ty, + .val = payload_val, }, code, debug_output, reloc_info)) { .appended => {}, .externally_managed => |external_slice| { @@ -728,12 +762,12 @@ pub fn generateSymbol( } } - if (payload_ty.hasRuntimeBits()) { + // Payload size is larger than error set, so emit our error set last + if (error_align < payload_align) { const begin = code.items.len; - const payload_val = if (typed_value.val.castTag(.eu_payload)) |val| val.data else Value.initTag(.undef); switch (try generateSymbol(bin_file, src_loc, .{ - .ty = payload_ty, - .val = payload_val, + .ty = error_ty, + .val = if (is_payload) Value.initTag(.zero) else typed_value.val, }, code, debug_output, reloc_info)) { .appended => {}, .externally_managed => |external_slice| { From 3a059ebe4c84a1e541bb3b2ccee2e7cc25686a4d Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 23 May 2022 22:06:27 +0200 Subject: [PATCH 03/18] wasm: Fixes for error union semantics --- src/arch/wasm/CodeGen.zig | 158 ++++++++++++++++++++++++++------------ src/codegen.zig | 6 +- test/behavior/error.zig | 1 - 3 files changed, 112 insertions(+), 53 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a35589f043..b74651859c 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -636,7 +636,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue { // means we must generate it from a constant. 
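+    // (Error values must still be materialized even when the type has no
+    // runtime bits, because callers receive an error code.)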
const val = self.air.value(ref).?; const ty = self.air.typeOf(ref); - if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt()) { + if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) { gop.value_ptr.* = WValue{ .none = {} }; return gop.value_ptr.*; } @@ -804,6 +804,8 @@ fn genFunctype(gpa: Allocator, fn_info: Type.Payload.Function.Data, target: std. } else { try returns.append(typeToValtype(fn_info.return_type, target)); } + } else if (fn_info.return_type.isError()) { + try returns.append(.i32); } // param types @@ -1373,10 +1375,15 @@ fn isByRef(ty: Type, target: std.Target) bool { .Int => return ty.intInfo(target).bits > 64, .Float => return ty.floatBits(target) > 64, .ErrorUnion => { - const has_tag = ty.errorUnionSet().hasRuntimeBitsIgnoreComptime(); - const has_pl = ty.errorUnionPayload().hasRuntimeBitsIgnoreComptime(); - if (!has_tag or !has_pl) return false; - return ty.hasRuntimeBitsIgnoreComptime(); + const err_ty = ty.errorUnionSet(); + const pl_ty = ty.errorUnionPayload(); + if (err_ty.errorSetCardinality() == .zero) { + return isByRef(pl_ty, target); + } + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + return false; + } + return true; }, .Optional => { if (ty.isPtrLikeOptional()) return false; @@ -1624,13 +1631,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const ret_ty = self.decl.ty.fnReturnType(); + const fn_info = self.decl.ty.fnInfo(); + const ret_ty = fn_info.return_type; // result must be stored in the stack and we return a pointer // to the stack instead if (self.return_value != .none) { - try self.store(self.return_value, operand, self.decl.ty.fnReturnType(), 0); - } else if (self.decl.ty.fnInfo().cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { + try self.store(self.return_value, operand, ret_ty, 0); + } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) { switch (ret_ty.zigTypeTag()) { // Aggregate types can be lowered as a singular value .Struct, .Union => { @@ -1650,7 +1658,11 @@ fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { else => try self.emitWValue(operand), } } else { - try self.emitWValue(operand); + if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) { + try self.addImm32(0); + } else { + try self.emitWValue(operand); + } } try self.restoreStackPointer(); try self.addTag(.@"return"); @@ -1675,7 +1687,13 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.air.typeOf(un_op).childType(); - if (!ret_ty.hasRuntimeBitsIgnoreComptime()) return WValue.none; + if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (ret_ty.isError()) { + try self.addImm32(0); + } else { + return WValue.none; + } + } if (!firstParamSRet(self.decl.ty.fnInfo(), self.target)) { const result = try self.load(operand, ret_ty, 0); @@ -1723,8 +1741,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
const sret = if (first_param_sret) blk: { const sret_local = try self.allocStack(ret_ty); - const ptr_offset = try self.buildPointerOffset(sret_local, 0, .new); - try self.emitWValue(ptr_offset); + try self.lowerToStack(sret_local); break :blk sret_local; } else WValue{ .none = {} }; @@ -1754,7 +1771,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. try self.addLabel(.call_indirect, fn_type_index); } - if (self.liveness.isUnused(inst) or !ret_ty.hasRuntimeBitsIgnoreComptime()) { + if (self.liveness.isUnused(inst) or (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError())) { return WValue.none; } else if (ret_ty.isNoReturn()) { try self.addTag(.@"unreachable"); @@ -1796,8 +1813,11 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro .ErrorUnion => { const err_ty = ty.errorUnionSet(); const pl_ty = ty.errorUnionPayload(); + if (err_ty.errorSetCardinality() == .zero) { + return self.store(lhs, rhs, pl_ty, 0); + } if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { - return self.store(lhs, rhs, err_ty, 0); + return self.store(lhs, rhs, Type.anyerror, 0); } const len = @intCast(u32, ty.abiSize(self.target)); @@ -2256,6 +2276,7 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue { const target = self.target; switch (ty.zigTypeTag()) { + .Void => return WValue{ .none = {} }, .Int => { const int_info = ty.intInfo(self.target); switch (int_info.signedness) { @@ -2324,6 +2345,10 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue { }, .ErrorUnion => { const error_type = ty.errorUnionSet(); + if (error_type.errorSetCardinality() == .zero) { + const pl_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); + return self.lowerConstant(pl_val, ty.errorUnionPayload()); + } const is_pl = val.errorUnionIsPayload(); const err_val = if (!is_pl) val else Value.initTag(.zero); return self.lowerConstant(err_val, error_type); @@ -2892,12 +2917,19 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W const err_ty = self.air.typeOf(un_op); const pl_ty = err_ty.errorUnionPayload(); - // load the error tag value + if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + switch (opcode) { + .i32_ne => return WValue{ .imm32 = 0 }, + .i32_eq => return WValue{ .imm32 = 1 }, + else => unreachable, + } + } + try self.emitWValue(operand); if (pl_ty.hasRuntimeBitsIgnoreComptime()) { try self.addMemArg(.i32_load16_u, .{ - .offset = operand.offset(), - .alignment = err_ty.errorUnionSet().abiAlignment(self.target), + .offset = operand.offset() + errUnionErrorOffset(pl_ty, self.target), + .alignment = Type.anyerror.abiAlignment(self.target), }); } @@ -2905,7 +2937,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W try self.addImm32(0); try self.addTag(Mir.Inst.Tag.fromOpcode(opcode)); - const is_err_tmp = try self.allocLocal(Type.initTag(.i32)); // result is always an i32 + const is_err_tmp = try self.allocLocal(Type.i32); try self.addLabel(.local_set, is_err_tmp.local); return is_err_tmp; } @@ -2917,14 +2949,18 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) const op_ty = self.air.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; - const err_align = err_ty.abiAlignment(self.target); - const set_size = 
err_ty.errorUnionSet().abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); - if (op_is_ptr or isByRef(payload_ty, self.target)) { - return self.buildPointerOffset(operand, offset, .new); + + if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; } - return self.load(operand, payload_ty, @intCast(u32, offset)); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; + + const pl_offset = errUnionPayloadOffset(payload_ty, self.target); + if (op_is_ptr or isByRef(payload_ty, self.target)) { + return self.buildPointerOffset(operand, pl_offset, .new); + } + return self.load(operand, payload_ty, pl_offset); } fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!WValue { @@ -2935,11 +2971,16 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In const op_ty = self.air.typeOf(ty_op.operand); const err_ty = if (op_is_ptr) op_ty.childType() else op_ty; const payload_ty = err_ty.errorUnionPayload(); + + if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + return WValue{ .imm32 = 0 }; + } + if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } - return self.load(operand, err_ty.errorUnionSet(), 0); + return self.load(operand, Type.anyerror, errUnionErrorOffset(payload_ty, self.target)); } fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -2947,22 +2988,26 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); + const err_ty = self.air.typeOfIndex(inst); - const op_ty = self.air.typeOf(ty_op.operand); - if (!op_ty.hasRuntimeBitsIgnoreComptime()) return operand; - const err_union_ty = self.air.getRefType(ty_op.ty); - const err_align = err_union_ty.abiAlignment(self.target); - const set_size = err_union_ty.errorUnionSet().abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); + if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } - const err_union = try self.allocStack(err_union_ty); - const payload_ptr = try self.buildPointerOffset(err_union, offset, .new); - try self.store(payload_ptr, operand, op_ty, 0); + const pl_ty = self.air.typeOf(ty_op.operand); + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + return operand; + } + + const err_union = try self.allocStack(err_ty); + const payload_ptr = try self.buildPointerOffset(err_union, errUnionPayloadOffset(pl_ty, self.target), .new); + try self.store(payload_ptr, operand, pl_ty, 0); // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. 
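+    // (The error code lives at errUnionErrorOffset within the union's stack
+    // slot; see the helpers at the bottom of this file.)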
try self.emitWValue(err_union); try self.addImm32(0); - try self.addMemArg(.i32_store16, .{ .offset = err_union.offset(), .alignment = 2 }); + const err_val_offset = errUnionErrorOffset(pl_ty, self.target); + try self.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); return err_union; } @@ -2973,17 +3018,18 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); const err_ty = self.air.getRefType(ty_op.ty); + const pl_ty = err_ty.errorUnionPayload(); - if (!err_ty.errorUnionPayload().hasRuntimeBitsIgnoreComptime()) return operand; + if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { + return operand; + } const err_union = try self.allocStack(err_ty); - try self.store(err_union, operand, err_ty.errorUnionSet(), 0); + // store error value + try self.store(err_union, operand, Type.anyerror, errUnionErrorOffset(pl_ty, self.target)); // write 'undefined' to the payload - const err_align = err_ty.abiAlignment(self.target); - const set_size = err_ty.errorUnionSet().abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); - const payload_ptr = try self.buildPointerOffset(err_union, offset, .new); + const payload_ptr = try self.buildPointerOffset(err_union, errUnionPayloadOffset(pl_ty, self.target), .new); const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(self.target)); try self.memset(payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaaaaaaaa }); @@ -3927,12 +3973,16 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!WValue { fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const err_set_ty = self.air.typeOf(ty_op.operand).childType(); - const err_ty = err_set_ty.errorUnionSet(); const payload_ty = err_set_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); // set error-tag to '0' to annotate error union is non-error - try self.store(operand, .{ .imm32 = 0 }, err_ty, 0); + try self.store( + operand, + .{ .imm32 = 0 }, + Type.anyerror, + errUnionErrorOffset(payload_ty, self.target), + ); if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; @@ -3940,11 +3990,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue return operand; } - const err_align = err_set_ty.abiAlignment(self.target); - const set_size = err_ty.abiSize(self.target); - const offset = mem.alignForwardGeneric(u64, set_size, err_align); - - return self.buildPointerOffset(operand, @intCast(u32, offset), .new); + return self.buildPointerOffset(operand, errUnionPayloadOffset(payload_ty, self.target), .new); } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -4572,3 +4618,17 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !WValue { } }); return WValue{ .none = {} }; } + +fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u32 { + if (Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)) { + return @intCast(u32, Type.anyerror.abiSize(target)); + } + return 0; +} + +fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u32 { + if (Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)) { + return 0; + } + return @intCast(u32, payload_ty.abiSize(target)); +} diff --git a/src/codegen.zig b/src/codegen.zig index 81b303ab82..0f411dc481 100644 --- a/src/codegen.zig +++ 
b/src/codegen.zig @@ -714,7 +714,7 @@ pub fn generateSymbol( const is_payload = typed_value.val.errorUnionIsPayload(); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const err_val = if (!is_payload) typed_value.val else Value.initTag(.zero); + const err_val = if (is_payload) Value.initTag(.zero) else typed_value.val; return generateSymbol(bin_file, src_loc, .{ .ty = error_ty, .val = err_val, @@ -763,7 +763,7 @@ pub fn generateSymbol( } // Payload size is larger than error set, so emit our error set last - if (error_align < payload_align) { + if (error_align <= payload_align) { const begin = code.items.len; switch (try generateSymbol(bin_file, src_loc, .{ .ty = error_ty, @@ -794,7 +794,7 @@ pub fn generateSymbol( try code.writer().writeInt(u32, kv.value, endian); }, else => { - try code.writer().writeByteNTimes(0, @intCast(usize, typed_value.ty.abiSize(target))); + try code.writer().writeByteNTimes(0, @intCast(usize, Type.anyerror.abiSize(target))); }, } return Result{ .appended = {} }; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 376d1bdf09..459ffb12d0 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -260,7 +260,6 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) !void { } test "comptime err to int of error set with only 1 possible value" { - if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO From c97c7f9e3bade44136f2bdf8ec4015f1b1b8303f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 23 May 2022 14:36:21 -0700 Subject: [PATCH 04/18] C backend: update to new error union semantics --- src/codegen/c.zig | 169 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 117 insertions(+), 52 deletions(-) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 5f61f8586e..63082d46be 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -749,6 +749,12 @@ pub const DeclGen = struct { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); + if (error_type.errorSetCardinality() == .zero) { + // We use the payload directly as the type. + const payload_val = val.castTag(.eu_payload).?.data; + return dg.renderValue(writer, payload_type, payload_val, location); + } + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. 
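+                // (A payload value is rendered as error code 0.)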
const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val; @@ -894,10 +900,12 @@ pub const DeclGen = struct { try w.writeAll("ZIG_COLD "); } } - const return_ty = dg.decl.ty.fnReturnType(); - if (return_ty.hasRuntimeBits()) { - try dg.renderType(w, return_ty); - } else if (return_ty.zigTypeTag() == .NoReturn) { + const fn_info = dg.decl.ty.fnInfo(); + if (fn_info.return_type.hasRuntimeBits()) { + try dg.renderType(w, fn_info.return_type); + } else if (fn_info.return_type.isError()) { + try dg.renderType(w, Type.anyerror); + } else if (fn_info.return_type.zigTypeTag() == .NoReturn) { try w.writeAll("zig_noreturn void"); } else { try w.writeAll("void"); @@ -905,22 +913,19 @@ pub const DeclGen = struct { try w.writeAll(" "); try dg.renderDeclName(w, dg.decl_index); try w.writeAll("("); - const param_len = dg.decl.ty.fnParamLen(); - var index: usize = 0; var params_written: usize = 0; - while (index < param_len) : (index += 1) { - const param_type = dg.decl.ty.fnParamType(index); + for (fn_info.param_types) |param_type, index| { if (!param_type.hasRuntimeBitsIgnoreComptime()) continue; if (params_written > 0) { try w.writeAll(", "); } const name = CValue{ .arg = index }; - try dg.renderTypeAndName(w, dg.decl.ty.fnParamType(index), name, .Mut, 0); + try dg.renderTypeAndName(w, param_type, name, .Mut, 0); params_written += 1; } - if (dg.decl.ty.fnIsVarArgs()) { + if (fn_info.is_var_args) { if (params_written != 0) try w.writeAll(", "); try w.writeAll("..."); } else if (params_written == 0) { @@ -1156,26 +1161,36 @@ pub const DeclGen = struct { } fn renderErrorUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 { - const child_type = t.errorUnionPayload(); - const err_set_type = t.errorUnionSet(); + const payload_ty = t.errorUnionPayload(); + const error_ty = t.errorUnionSet(); var buffer = std.ArrayList(u8).init(dg.typedefs.allocator); defer buffer.deinit(); const bw = buffer.writer(); - try bw.writeAll("typedef struct { "); const payload_name = CValue{ .bytes = "payload" }; - try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0); - try bw.writeAll("; uint16_t error; } "); + const target = dg.module.getTarget(); + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); + if (error_align > payload_align) { + try bw.writeAll("typedef struct { "); + try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0); + try bw.writeAll("; uint16_t error; } "); + } else { + try bw.writeAll("typedef struct { uint16_t error; "); + try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0); + try bw.writeAll("; } "); + } + const name_index = buffer.items.len; - if (err_set_type.castTag(.error_set_inferred)) |inf_err_set_payload| { + if (error_ty.castTag(.error_set_inferred)) |inf_err_set_payload| { const func = inf_err_set_payload.data.func; try bw.writeAll("zig_E_"); try dg.renderDeclName(bw, func.owner_decl); try bw.writeAll(";\n"); } else { try bw.print("zig_E_{s}_{s};\n", .{ - typeToCIdentifier(err_set_type, dg.module), typeToCIdentifier(child_type, dg.module), + typeToCIdentifier(error_ty, dg.module), typeToCIdentifier(payload_ty, dg.module), }); } @@ -1359,12 +1374,19 @@ pub const DeclGen = struct { return w.writeAll(name); }, .ErrorSet => { - comptime assert(Type.initTag(.anyerror).abiSize(builtin.target) == 2); + comptime assert(Type.anyerror.abiSize(builtin.target) == 2); return w.writeAll("uint16_t"); }, .ErrorUnion => { - if (t.errorUnionPayload().abiSize(target) == 0) { - 
return dg.renderType(w, t.errorUnionSet()); + const error_ty = t.errorUnionSet(); + const payload_ty = t.errorUnionPayload(); + + if (error_ty.errorSetCardinality() == .zero) { + return dg.renderType(w, payload_ty); + } + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return dg.renderType(w, Type.anyerror); } const name = dg.getTypedefName(t) orelse @@ -1901,8 +1923,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .array_elem_val => try airArrayElemVal(f, inst), .unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst, ""), - .unwrap_errunion_err => try airUnwrapErrUnionErr(f, inst), .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(f, inst, "&"), + .unwrap_errunion_err => try airUnwrapErrUnionErr(f, inst), .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(f, inst), .wrap_errunion_payload => try airWrapErrUnionPay(f, inst), .wrap_errunion_err => try airWrapErrUnionErr(f, inst), @@ -2120,11 +2142,14 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { fn airRet(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); - if (f.air.typeOf(un_op).isFnOrHasRuntimeBitsIgnoreComptime()) { + const ret_ty = f.air.typeOf(un_op); + if (ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) { const operand = try f.resolveInst(un_op); try writer.writeAll("return "); try f.writeCValue(writer, operand); try writer.writeAll(";\n"); + } else if (ret_ty.isError()) { + try writer.writeAll("return 0;"); } else { try writer.writeAll("return;\n"); } @@ -2136,13 +2161,16 @@ fn airRetLoad(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const ptr_ty = f.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + if (ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) { + const ptr = try f.resolveInst(un_op); + try writer.writeAll("return *"); + try f.writeCValue(writer, ptr); + try writer.writeAll(";\n"); + } else if (ret_ty.isError()) { + try writer.writeAll("return 0;\n"); + } else { try writer.writeAll("return;\n"); } - const ptr = try f.resolveInst(un_op); - try writer.writeAll("return *"); - try f.writeCValue(writer, ptr); - try writer.writeAll(";\n"); return CValue.none; } @@ -2713,19 +2741,20 @@ fn airCall( .Pointer => callee_ty.childType(), else => unreachable, }; - const ret_ty = fn_ty.fnReturnType(); - const unused_result = f.liveness.isUnused(inst); const writer = f.object.writer(); - var result_local: CValue = .none; - if (unused_result) { - if (ret_ty.hasRuntimeBits()) { - try writer.print("(void)", .{}); + const result_local: CValue = r: { + if (f.liveness.isUnused(inst)) { + if (loweredFnRetTyHasBits(fn_ty)) { + try writer.print("(void)", .{}); + } + break :r .none; + } else { + const local = try f.allocLocal(fn_ty.fnReturnType(), .Const); + try writer.writeAll(" = "); + break :r local; } - } else { - result_local = try f.allocLocal(ret_ty, .Const); - try writer.writeAll(" = "); - } + }; callee: { known: { @@ -3307,7 +3336,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { return local; } -// *(E!T) -> E NOT *E +/// *(E!T) -> E +/// Note that the result is never a pointer. 
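+/// For a pointer operand, the error code is read through the pointer.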
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; @@ -3319,7 +3349,11 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.air.typeOf(ty_op.operand); if (operand_ty.zigTypeTag() == .Pointer) { - if (!operand_ty.childType().errorUnionPayload().hasRuntimeBits()) { + const err_union_ty = operand_ty.childType(); + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + return CValue{ .bytes = "0" }; + } + if (!err_union_ty.errorUnionPayload().hasRuntimeBits()) { return operand; } const local = try f.allocLocal(inst_ty, .Const); @@ -3328,6 +3362,9 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try writer.writeAll(";\n"); return local; } + if (operand_ty.errorUnionSet().errorSetCardinality() == .zero) { + return CValue{ .bytes = "0" }; + } if (!operand_ty.errorUnionPayload().hasRuntimeBits()) { return operand; } @@ -3343,7 +3380,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { return local; } -fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: []const u8) !CValue { +fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: [*:0]const u8) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; @@ -3351,17 +3388,19 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: []cons const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); const operand_ty = f.air.typeOf(ty_op.operand); + const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer; + const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; + + if (error_union_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } - const error_union_ty = if (operand_ty.zigTypeTag() == .Pointer) - operand_ty.childType() - else - operand_ty; if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) { return CValue.none; } const inst_ty = f.air.typeOfIndex(inst); - const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; + const maybe_deref = if (operand_is_ptr) "->" else "."; const local = try f.allocLocal(inst_ty, .Const); try writer.print(" = {s}(", .{maybe_addrof}); @@ -3421,6 +3460,11 @@ fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); + if (error_ty.errorSetCardinality() == .zero) { + // TODO: write undefined bytes through the pointer here + return operand; + } + // First, set the non-error value. 
if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { try f.writeCValueDeref(writer, operand); @@ -3464,6 +3508,9 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); const inst_ty = f.air.typeOfIndex(inst); + if (inst_ty.errorUnionSet().errorSetCardinality() == .zero) { + return operand; + } const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .error = 0, .payload = "); try f.writeCValue(writer, operand); @@ -3486,16 +3533,23 @@ fn airIsErr( const operand_ty = f.air.typeOf(un_op); const local = try f.allocLocal(Type.initTag(.bool), .Const); const payload_ty = operand_ty.errorUnionPayload(); + const error_ty = operand_ty.errorUnionSet(); + try writer.writeAll(" = "); - if (is_ptr) { - try f.writeCValueDeref(writer, operand); + + if (error_ty.errorSetCardinality() == .zero) { + try writer.print("0 {s} 0;\n", .{op_str}); } else { - try f.writeCValue(writer, operand); + if (is_ptr) { + try f.writeCValueDeref(writer, operand); + } else { + try f.writeCValue(writer, operand); + } + if (payload_ty.hasRuntimeBits()) { + try writer.writeAll(".error"); + } + try writer.print(" {s} 0;\n", .{op_str}); } - if (payload_ty.hasRuntimeBits()) { - try writer.writeAll(".error"); - } - try writer.print(" {s} 0;\n", .{op_str}); return local; } @@ -4129,3 +4183,14 @@ fn intMin(ty: Type, target: std.Target, buf: []u8) []const u8 { }, } } + +fn loweredFnRetTyHasBits(fn_ty: Type) bool { + const ret_ty = fn_ty.fnReturnType(); + if (ret_ty.hasRuntimeBitsIgnoreComptime()) { + return true; + } + if (ret_ty.isError()) { + return true; + } + return false; +} From 02e9d9b43b3b1cd9a4858a1f2bff302057dc2ee2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 23 May 2022 18:48:10 -0700 Subject: [PATCH 05/18] stage2: make `?anyerror` represented the same as `anyerror` I was able to get the backend implementation working on LLVM and the C backend, but I'm going to ask for some help on the other backends. 
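Illustrative sketch (not part of the patch, and assuming a compiler with this change applied): error codes are nonzero integers, so the all-zero value is free to encode `null` and `?anyerror` needs no extra flag bit.

const std = @import("std");
const expect = std.testing.expect;

test "?anyerror adds no flag bit" {
    // Same size and alignment as the bare error set.
    comptime try expect(@sizeOf(?anyerror) == @sizeOf(anyerror));
    comptime try expect(@alignOf(?anyerror) == @alignOf(anyerror));

    // null lowers to the integer 0; real error codes are always nonzero,
    // so the two can never collide.
    var opt: ?anyerror = null;
    try expect(opt == null);
    opt = error.Oops;
    try expect(opt.? == error.Oops);
}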
--- src/arch/wasm/CodeGen.zig | 23 +++++++++------- src/codegen.zig | 2 +- src/codegen/c.zig | 13 +++++---- src/codegen/llvm.zig | 34 +++++++++++++++-------- src/type.zig | 58 +++++++++++++++++++++++++++++++++++---- test/behavior/error.zig | 3 +- 6 files changed, 98 insertions(+), 35 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index b74651859c..6d0f3a9d23 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1386,7 +1386,7 @@ fn isByRef(ty: Type, target: std.Target) bool { return true; }, .Optional => { - if (ty.isPtrLikeOptional()) return false; + if (ty.optionalReprIsPayload()) return false; var buf: Type.Payload.ElemType = undefined; return ty.optionalChild(&buf).hasRuntimeBitsIgnoreComptime(); }, @@ -1832,6 +1832,9 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro if (!pl_ty.hasRuntimeBitsIgnoreComptime()) { return self.store(lhs, rhs, Type.u8, 0); } + if (pl_ty.zigTypeTag() == .ErrorSet) { + return self.store(lhs, rhs, Type.anyerror, 0); + } const len = @intCast(u32, ty.abiSize(self.target)); return self.memcpy(lhs, rhs, .{ .imm32 = len }); @@ -2198,7 +2201,7 @@ fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WV const parent_ptr = try self.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty); var buf: Type.Payload.ElemType = undefined; const payload_ty = payload_ptr.container_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.optionalReprIsPayload()) { return parent_ptr; } @@ -2353,7 +2356,7 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue { const err_val = if (!is_pl) val else Value.initTag(.zero); return self.lowerConstant(err_val, error_type); }, - .Optional => if (ty.isPtrLikeOptional()) { + .Optional => if (ty.optionalReprIsPayload()) { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); if (val.castTag(.opt_payload)) |payload| { @@ -2392,7 +2395,7 @@ fn emitUndefined(self: *Self, ty: Type) InnerError!WValue { .Optional => { var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (ty.isPtrLikeOptional()) { + if (ty.optionalReprIsPayload()) { return self.emitUndefined(pl_ty); } return WValue{ .imm32 = 0xaaaaaaaa }; @@ -2542,7 +2545,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner } fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue { - if (ty.zigTypeTag() == .Optional and !ty.isPtrLikeOptional()) { + if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); if (payload_ty.hasRuntimeBitsIgnoreComptime()) { @@ -3120,7 +3123,7 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: en fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue { try self.emitWValue(operand); - if (!optional_ty.isPtrLikeOptional()) { + if (!optional_ty.optionalReprIsPayload()) { var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); // When payload is zero-bits, we can treat operand as a value, rather than @@ -3146,7 +3149,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const opt_ty = self.air.typeOf(ty_op.operand); const payload_ty = 
self.air.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; - if (opt_ty.isPtrLikeOptional()) return operand; + if (opt_ty.optionalReprIsPayload()) return operand; const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target); @@ -3166,7 +3169,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) { return operand; } @@ -3184,7 +3187,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()}); } - if (opt_ty.isPtrLikeOptional()) { + if (opt_ty.optionalReprIsPayload()) { return operand; } @@ -3215,7 +3218,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const op_ty = self.air.typeOfIndex(inst); - if (op_ty.isPtrLikeOptional()) { + if (op_ty.optionalReprIsPayload()) { return operand; } const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) catch { diff --git a/src/codegen.zig b/src/codegen.zig index 0f411dc481..eea8095a62 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -654,7 +654,7 @@ pub fn generateSymbol( return Result{ .appended = {} }; } - if (typed_value.ty.isPtrLikeOptional()) { + if (typed_value.ty.optionalReprIsPayload()) { if (typed_value.val.castTag(.opt_payload)) |payload| { switch (try generateSymbol(bin_file, src_loc, .{ .ty = payload_type, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 63082d46be..1b6708c1cf 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -712,7 +712,7 @@ pub const DeclGen = struct { .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_type = ty.optionalChild(&opt_buf); - if (ty.isPtrLikeOptional()) { + if (ty.optionalReprIsPayload()) { return dg.renderValue(writer, payload_type, val, location); } if (payload_type.abiSize(target) == 0) { @@ -1360,7 +1360,7 @@ pub const DeclGen = struct { var opt_buf: Type.Payload.ElemType = undefined; const child_type = t.optionalChild(&opt_buf); - if (t.isPtrLikeOptional()) { + if (t.optionalReprIsPayload()) { return dg.renderType(w, child_type); } @@ -3161,6 +3161,8 @@ fn airIsNull( if (ty.isPtrLikeOptional()) { // operand is a regular pointer, test `operand !=/== NULL` try writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator }); + } else if (payload_type.zigTypeTag() == .ErrorSet) { + try writer.print("){s} {s} 0;\n", .{ deref_suffix, operator }); } else if (payload_type.abiSize(target) == 0) { try writer.print("){s} {s} true;\n", .{ deref_suffix, operator }); } else { @@ -3183,7 +3185,7 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { else operand_ty; - if (opt_ty.isPtrLikeOptional()) { + if (opt_ty.optionalReprIsPayload()) { // the operand is just a regular pointer, no need to do anything special. // *?*T -> **T and ?*T -> *T are **T -> **T and *T -> *T in C return operand; @@ -3209,7 +3211,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { const opt_ty = operand_ty.elemType(); - if (opt_ty.isPtrLikeOptional()) { + if (opt_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. 
// Setting to non-null will be done when the payload is set. return operand; @@ -3419,8 +3421,7 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); const inst_ty = f.air.typeOfIndex(inst); - if (inst_ty.isPtrLikeOptional()) { - // the operand is just a regular pointer, no need to do anything special. + if (inst_ty.optionalReprIsPayload()) { return operand; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 95d12dff3a..e76b2941c2 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1390,7 +1390,7 @@ pub const Object = struct { gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_ty); return di_ty; } - if (ty.isPtrLikeOptional()) { + if (ty.optionalReprIsPayload()) { const ptr_di_ty = try o.lowerDebugType(child_ty, resolve); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(ptr_di_ty), .{ .mod = o.module }); @@ -1472,6 +1472,12 @@ pub const Object = struct { .ErrorUnion => { const err_set_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); + if (err_set_ty.errorSetCardinality() == .zero) { + const payload_di_ty = try o.lowerDebugType(payload_ty, .full); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(payload_di_ty), .{ .mod = o.module }); + return payload_di_ty; + } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { const err_set_di_ty = try o.lowerDebugType(err_set_ty, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. @@ -2439,7 +2445,7 @@ pub const DeclGen = struct { return dg.context.intType(1); } const payload_llvm_ty = try dg.llvmType(child_ty); - if (t.isPtrLikeOptional()) { + if (t.optionalReprIsPayload()) { return payload_llvm_ty; } @@ -3058,7 +3064,7 @@ pub const DeclGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return non_null_bit; } - if (tv.ty.isPtrLikeOptional()) { + if (tv.ty.optionalReprIsPayload()) { if (tv.val.castTag(.opt_payload)) |payload| { return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data }); } else if (is_pl) { @@ -3557,7 +3563,9 @@ pub const DeclGen = struct { const payload_ty = opt_payload_ptr.container_ty.optionalChild(&buf); bitcast_needed = !payload_ty.eql(ptr_child_ty, dg.module); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or + payload_ty.optionalReprIsPayload()) + { // In this case, we represent pointer to optional the same as pointer // to the payload. 
break :blk parent_llvm_ptr; @@ -4461,7 +4469,9 @@ pub const FuncGen = struct { .Int, .Bool, .Pointer, .ErrorSet => scalar_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasRuntimeBitsIgnoreComptime() or operand_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime() or + operand_ty.optionalReprIsPayload()) + { break :blk operand_ty; } // We need to emit instructions to check for equality/inequality @@ -5414,7 +5424,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const operand_ty = self.air.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { const optional_llvm_ty = try self.dg.llvmType(optional_ty); const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand; return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); @@ -5499,7 +5509,7 @@ pub const FuncGen = struct { const res_ptr_ty = try self.dg.llvmType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. return operand; } @@ -5527,7 +5537,7 @@ pub const FuncGen = struct { const res_ptr_ty = try self.dg.llvmType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { // The payload and the optional are the same value. // Setting to non-null will be done when the payload is set. return operand; @@ -5561,7 +5571,7 @@ pub const FuncGen = struct { const payload_ty = self.air.typeOfIndex(inst); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return null; - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { // Payload value is the same as the optional value. 
return operand; } @@ -5702,7 +5712,9 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); - if (optional_ty.isPtrLikeOptional()) return operand; + if (optional_ty.optionalReprIsPayload()) { + return operand; + } const llvm_optional_ty = try self.dg.llvmType(optional_ty); if (isByRef(optional_ty)) { const optional_ptr = self.buildAlloca(llvm_optional_ty); @@ -7038,7 +7050,7 @@ pub const FuncGen = struct { } const success_bit = self.builder.buildExtractValue(result, 1, ""); - if (optional_ty.isPtrLikeOptional()) { + if (optional_ty.optionalReprIsPayload()) { return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, ""); } diff --git a/src/type.zig b/src/type.zig index 4b8a41915f..1c59cf9e59 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2916,8 +2916,10 @@ pub const Type = extern union { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); - if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr()) { - return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }; + switch (child_type.zigTypeTag()) { + .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, + .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat), + else => {}, } switch (strat) { @@ -3365,14 +3367,29 @@ pub const Type = extern union { const child_type = ty.optionalChild(&buf); if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 }; - if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice()) - return AbiSizeAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }; + switch (child_type.zigTypeTag()) { + .Pointer => { + const ptr_info = child_type.ptrInfo().data; + const has_null = switch (ptr_info.size) { + .Slice, .C => true, + else => ptr_info.@"allowzero", + }; + if (!has_null) { + const ptr_size_bytes = @divExact(target.cpu.arch.ptrBitWidth(), 8); + return AbiSizeAdvanced{ .scalar = ptr_size_bytes }; + } + }, + .ErrorSet => return abiSizeAdvanced(Type.anyerror, target, strat), + else => {}, + } // Optional types are represented as a struct with the child type as the first // field and a boolean as the second. Since the child type's abi alignment is // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal // to the child type's ABI alignment. - return AbiSizeAdvanced{ .scalar = child_type.abiAlignment(target) + child_type.abiSize(target) }; + return AbiSizeAdvanced{ + .scalar = child_type.abiAlignment(target) + child_type.abiSize(target), + }; }, .error_union => { @@ -3901,8 +3918,39 @@ pub const Type = extern union { return ty.ptrInfo().data.@"allowzero"; } + /// See also `isPtrLikeOptional`. 
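+    /// When this returns true, null is encoded as the payload's all-zero value (a null pointer or the 0 error code), so no separate flag field is stored.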
+ pub fn optionalReprIsPayload(ty: Type) bool { + switch (ty.tag()) { + .optional_single_const_pointer, + .optional_single_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + => return true, + + .optional => { + const child_ty = ty.castTag(.optional).?.data; + switch (child_ty.zigTypeTag()) { + .Pointer => { + const info = child_ty.ptrInfo().data; + switch (info.size) { + .Slice, .C => return false, + .Many, .One => return !info.@"allowzero", + } + }, + .ErrorSet => return true, + else => return false, + } + }, + + .pointer => return ty.castTag(.pointer).?.data.size == .C, + + else => return false, + } + } + /// Returns true if the type is optional and would be lowered to a single pointer /// address value, using 0 for null. Note that this returns true for C pointers. + /// See also `optionalReprIsPayload`. pub fn isPtrLikeOptional(self: Type) bool { switch (self.tag()) { .optional_single_const_pointer, diff --git a/test/behavior/error.zig index 459ffb12d0..1b2a67bd57 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -433,9 +433,8 @@ test "return function call to error set from error union function" { } test "optional error set is the same size as error set" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO - comptime try expect(@sizeOf(?anyerror) == @sizeOf(anyerror)); + comptime try expect(@alignOf(?anyerror) == @alignOf(anyerror)); const S = struct { fn returnsOptErrSet() ?anyerror { return null; From 1bf7a6dff5e39aeeeefe2016a423d16f73ba2263 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 23 May 2022 19:12:30 -0700 Subject: [PATCH 06/18] enable passing behavior test This was disabled for macOS but I just tested it on my M1 and it works fine. --- test/behavior/eval.zig | 1 - 1 file changed, 1 deletion(-) diff --git a/test/behavior/eval.zig index e0e787509a..2d53122706 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -425,7 +425,6 @@ test "f64 at compile time is lossy" { } test { - if (builtin.zig_backend != .stage1 and builtin.os.tag == .macos) return error.SkipZigTest; comptime try expect(@as(f128, 1 << 113) == 10384593717069655257060992658440192); } From 7db39384f7384b73c51dd9cab5b23ad0a3699fd0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 23 May 2022 19:49:04 -0700 Subject: [PATCH 07/18] move bound function behavior test to compile error test --- test/behavior/eval.zig | 22 ------------------- ... on bound fn referring to var instance.zig | 20 +++++++++++++++++ 2 files changed, 20 insertions(+), 22 deletions(-) create mode 100644 test/cases/compile_errors/call method on bound fn referring to var instance.zig diff --git a/test/behavior/eval.zig index 2d53122706..3ffa0a3a12 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -572,28 +572,6 @@ test "inlined loop has array literal with elided runtime scope on first iteratio } } -test "call method on bound fn referring to var instance" { - if (builtin.zig_backend != .stage1) { - // Let's delay solving this one; I want to try to eliminate bound functions from - // the language.
- return error.SkipZigTest; // TODO - } - - try expect(bound_fn() == 1237); -} - -const SimpleStruct = struct { - field: i32, - - fn method(self: *const SimpleStruct) i32 { - return self.field + 3; - } -}; - -var simple_struct = SimpleStruct{ .field = 1234 }; - -const bound_fn = simple_struct.method; - test "ptr to local array argument at comptime" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO diff --git a/test/cases/compile_errors/call method on bound fn referring to var instance.zig b/test/cases/compile_errors/call method on bound fn referring to var instance.zig new file mode 100644 index 0000000000..10ff584124 --- /dev/null +++ b/test/cases/compile_errors/call method on bound fn referring to var instance.zig @@ -0,0 +1,20 @@ +export fn entry() void { + bad(bound_fn() == 1237); +} +const SimpleStruct = struct { + field: i32, + + fn method(self: *const SimpleStruct) i32 { + return self.field + 3; + } +}; +var simple_struct = SimpleStruct{ .field = 1234 }; +const bound_fn = simple_struct.method; +fn bad(ok: bool) void { + _ = ok; +} +// error +// target=native +// backend=stage2 +// +// :12:18: error: unable to resolve comptime value From cd59b8277d018a3418267b9fa0c219debdff5ca3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 23 May 2022 21:05:24 -0700 Subject: [PATCH 08/18] LLVM: rename two functions llvmType -> lowerType genTypedValue -> lowerValue --- src/codegen/llvm.zig | 366 +++++++++++++++++++++---------------------- 1 file changed, 183 insertions(+), 183 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e76b2941c2..cf0188b060 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -745,7 +745,7 @@ pub const Object = struct { const param = llvm_func.getParam(llvm_arg_i); llvm_arg_i += 1; - const param_llvm_ty = try dg.llvmType(param_ty); + const param_llvm_ty = try dg.lowerType(param_ty); const abi_size = @intCast(c_uint, param_ty.abiSize(target)); const int_llvm_ty = dg.context.intType(abi_size * 8); const int_ptr_llvm_ty = int_llvm_ty.pointerType(0); @@ -775,7 +775,7 @@ pub const Object = struct { .Struct => { const fields = param_ty.structFields().values(); if (is_by_ref) { - const param_llvm_ty = try dg.llvmType(param_ty); + const param_llvm_ty = try dg.lowerType(param_ty); const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty); arg_ptr.setAlignment(param_ty.abiAlignment(target)); @@ -2100,7 +2100,7 @@ pub const DeclGen = struct { break :init_val decl.val; }; if (init_val.tag() != .unreachable_value) { - const llvm_init = try dg.genTypedValue(.{ .ty = decl.ty, .val = init_val }); + const llvm_init = try dg.lowerValue(.{ .ty = decl.ty, .val = init_val }); if (global.globalGetValueType() == llvm_init.typeOf()) { global.setInitializer(llvm_init); } else { @@ -2171,7 +2171,7 @@ pub const DeclGen = struct { const target = dg.module.getTarget(); const sret = firstParamSRet(fn_info, target); - const fn_type = try dg.llvmType(zig_fn_type); + const fn_type = try dg.lowerType(zig_fn_type); const fqn = try decl.getFullyQualifiedName(dg.module); defer dg.gpa.free(fqn); @@ -2198,7 +2198,7 @@ pub const DeclGen = struct { dg.addArgAttr(llvm_fn, 0, "nonnull"); // Sret pointers must not be address 0 dg.addArgAttr(llvm_fn, 0, "noalias"); - const raw_llvm_ret_ty = try dg.llvmType(fn_info.return_type); + const raw_llvm_ret_ty = try dg.lowerType(fn_info.return_type); llvm_fn.addSretAttr(0, raw_llvm_ret_ty); } @@ -2291,7 +2291,7 @@ 
pub const DeclGen = struct { const fqn = try decl.getFullyQualifiedName(dg.module); defer dg.gpa.free(fqn); - const llvm_type = try dg.llvmType(decl.ty); + const llvm_type = try dg.lowerType(decl.ty); const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(llvm_type, fqn, llvm_addrspace); gop.value_ptr.* = llvm_global; @@ -2345,15 +2345,15 @@ pub const DeclGen = struct { } fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool { - // Once `llvmType` succeeds, successive calls to it with the same Zig type - // are guaranteed to succeed. So if a call to `llvmType` fails here it means + // Once `lowerType` succeeds, successive calls to it with the same Zig type + // are guaranteed to succeed. So if a call to `lowerType` fails here it means // it is the first time lowering the type, which means the value can't possible // have that type. - const llvm_ty = dg.llvmType(ty) catch return true; + const llvm_ty = dg.lowerType(ty) catch return true; return val.typeOf() != llvm_ty; } - fn llvmType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type { + fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type { const gpa = dg.gpa; const target = dg.module.getTarget(); switch (t.zigTypeTag()) { @@ -2385,8 +2385,8 @@ pub const DeclGen = struct { const ptr_type = t.slicePtrFieldType(&buf); const fields: [2]*const llvm.Type = .{ - try dg.llvmType(ptr_type), - try dg.llvmType(Type.usize), + try dg.lowerType(ptr_type), + try dg.lowerType(Type.usize), }; return dg.context.structType(&fields, fields.len, .False); } @@ -2402,7 +2402,7 @@ pub const DeclGen = struct { else => elem_ty.hasRuntimeBitsIgnoreComptime(), }; const llvm_elem_ty = if (lower_elem_ty) - try dg.llvmType(elem_ty) + try dg.lowerType(elem_ty) else dg.context.intType(8); return llvm_elem_ty.pointerType(llvm_addrspace); @@ -2430,12 +2430,12 @@ pub const DeclGen = struct { .Array => { const elem_ty = t.childType(); assert(elem_ty.onePossibleValue() == null); - const elem_llvm_ty = try dg.llvmType(elem_ty); + const elem_llvm_ty = try dg.lowerType(elem_ty); const total_len = t.arrayLen() + @boolToInt(t.sentinel() != null); return elem_llvm_ty.arrayType(@intCast(c_uint, total_len)); }, .Vector => { - const elem_type = try dg.llvmType(t.childType()); + const elem_type = try dg.lowerType(t.childType()); return elem_type.vectorType(t.vectorLen()); }, .Optional => { @@ -2444,7 +2444,7 @@ pub const DeclGen = struct { if (!child_ty.hasRuntimeBitsIgnoreComptime()) { return dg.context.intType(1); } - const payload_llvm_ty = try dg.llvmType(child_ty); + const payload_llvm_ty = try dg.lowerType(child_ty); if (t.optionalReprIsPayload()) { return payload_llvm_ty; } @@ -2458,13 +2458,13 @@ pub const DeclGen = struct { const error_type = t.errorUnionSet(); const payload_type = t.errorUnionPayload(); if (error_type.errorSetCardinality() == .zero) { - return dg.llvmType(payload_type); + return dg.lowerType(payload_type); } if (!payload_type.hasRuntimeBitsIgnoreComptime()) { - return try dg.llvmType(Type.anyerror); + return try dg.lowerType(Type.anyerror); } - const llvm_error_type = try dg.llvmType(error_type); - const llvm_payload_type = try dg.llvmType(payload_type); + const llvm_error_type = try dg.lowerType(error_type); + const llvm_payload_type = try dg.lowerType(payload_type); const payload_align = payload_type.abiAlignment(target); const error_align = Type.anyerror.abiAlignment(target); @@ -2515,7 +2515,7 @@ pub const DeclGen = struct { const llvm_array_ty 
= dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); try llvm_field_types.append(gpa, llvm_array_ty); } - const field_llvm_ty = try dg.llvmType(field_ty); + const field_llvm_ty = try dg.lowerType(field_ty); try llvm_field_types.append(gpa, field_llvm_ty); offset += field_ty.abiSize(target); @@ -2544,7 +2544,7 @@ pub const DeclGen = struct { if (struct_obj.layout == .Packed) { var buf: Type.Payload.Bits = undefined; const int_ty = struct_obj.packedIntegerType(target, &buf); - const int_llvm_ty = try dg.llvmType(int_ty); + const int_llvm_ty = try dg.lowerType(int_ty); gop.value_ptr.* = int_llvm_ty; return int_llvm_ty; } @@ -2579,7 +2579,7 @@ pub const DeclGen = struct { const llvm_array_ty = dg.context.intType(8).arrayType(@intCast(c_uint, padding_len)); try llvm_field_types.append(gpa, llvm_array_ty); } - const field_llvm_ty = try dg.llvmType(field.ty); + const field_llvm_ty = try dg.lowerType(field.ty); try llvm_field_types.append(gpa, field_llvm_ty); offset += field.ty.abiSize(target); @@ -2614,7 +2614,7 @@ pub const DeclGen = struct { const union_obj = t.cast(Type.Payload.Union).?.data; if (layout.payload_size == 0) { - const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + const enum_tag_llvm_ty = try dg.lowerType(union_obj.tag_ty); gop.value_ptr.* = enum_tag_llvm_ty; return enum_tag_llvm_ty; } @@ -2626,7 +2626,7 @@ pub const DeclGen = struct { gop.value_ptr.* = llvm_union_ty; // must be done before any recursive calls const aligned_field = union_obj.fields.values()[layout.most_aligned_field]; - const llvm_aligned_field_ty = try dg.llvmType(aligned_field.ty); + const llvm_aligned_field_ty = try dg.lowerType(aligned_field.ty); const llvm_payload_ty = t: { if (layout.most_aligned_field_size == layout.payload_size) { @@ -2645,7 +2645,7 @@ pub const DeclGen = struct { llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False); return llvm_union_ty; } - const enum_tag_llvm_ty = try dg.llvmType(union_obj.tag_ty); + const enum_tag_llvm_ty = try dg.lowerType(union_obj.tag_ty); // Put the tag before or after the payload depending on which one's // alignment is greater. 
@@ -2667,7 +2667,7 @@ pub const DeclGen = struct { llvm_union_ty.structSetBody(&llvm_fields, llvm_fields_len, .False); return llvm_union_ty; }, - .Fn => return llvmTypeFn(dg, t), + .Fn => return lowerTypeFn(dg, t), .ComptimeInt => unreachable, .ComptimeFloat => unreachable, .Type => unreachable, @@ -2682,7 +2682,7 @@ pub const DeclGen = struct { } } - fn llvmTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*const llvm.Type { + fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*const llvm.Type { const target = dg.module.getTarget(); const fn_info = fn_ty.fnInfo(); const llvm_ret_ty = try lowerFnRetTy(dg, fn_info); @@ -2691,7 +2691,7 @@ pub const DeclGen = struct { defer llvm_params.deinit(); if (firstParamSRet(fn_info, target)) { - const llvm_sret_ty = try dg.llvmType(fn_info.return_type); + const llvm_sret_ty = try dg.lowerType(fn_info.return_type); try llvm_params.append(llvm_sret_ty.pointerType(0)); } @@ -2703,7 +2703,7 @@ pub const DeclGen = struct { .data = dg.object.getStackTraceType(), }; const ptr_ty = Type.initPayload(&ptr_ty_payload.base); - try llvm_params.append(try dg.llvmType(ptr_ty)); + try llvm_params.append(try dg.lowerType(ptr_ty)); } var it = iterateParamTypes(dg, fn_info); @@ -2711,11 +2711,11 @@ pub const DeclGen = struct { .no_bits => continue, .byval => { const param_ty = fn_info.param_types[it.zig_index - 1]; - try llvm_params.append(try dg.llvmType(param_ty)); + try llvm_params.append(try dg.lowerType(param_ty)); }, .byref => { const param_ty = fn_info.param_types[it.zig_index - 1]; - const raw_llvm_ty = try dg.llvmType(param_ty); + const raw_llvm_ty = try dg.lowerType(param_ty); try llvm_params.append(raw_llvm_ty.pointerType(0)); }, .abi_sized_int => { @@ -2757,7 +2757,7 @@ pub const DeclGen = struct { // one field; in this case keep the type information // to avoid the potentially costly ptrtoint/bitcast. 
if (bits_used == 0 and field_abi_bits == int_bits) { - const llvm_field_ty = try dg.llvmType(field.ty); + const llvm_field_ty = try dg.lowerType(field.ty); llvm_params.appendAssumeCapacity(llvm_field_ty); field_i += 1; if (field_i >= fields.len) { @@ -2795,16 +2795,16 @@ pub const DeclGen = struct { ); } - fn genTypedValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value { + fn lowerValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value { if (tv.val.isUndef()) { - const llvm_type = try dg.llvmType(tv.ty); + const llvm_type = try dg.lowerType(tv.ty); return llvm_type.getUndef(); } const target = dg.module.getTarget(); switch (tv.ty.zigTypeTag()) { .Bool => { - const llvm_type = try dg.llvmType(tv.ty); + const llvm_type = try dg.lowerType(tv.ty); return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); }, // TODO this duplicates code with Pointer but they should share the handling @@ -2865,7 +2865,7 @@ pub const DeclGen = struct { return unsigned_val; }, .Float => { - const llvm_ty = try dg.llvmType(tv.ty); + const llvm_ty = try dg.lowerType(tv.ty); switch (tv.ty.floatBits(target)) { 16, 32, 64 => return llvm_ty.constReal(tv.val.toFloat(f64)), 80 => { @@ -2902,7 +2902,7 @@ pub const DeclGen = struct { const decl = dg.module.declPtr(decl_index); dg.module.markDeclAlive(decl); const val = try dg.resolveGlobalDecl(decl_index); - const llvm_var_type = try dg.llvmType(tv.ty); + const llvm_var_type = try dg.lowerType(tv.ty); const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace"); const llvm_type = llvm_var_type.pointerType(llvm_addrspace); return val.constBitCast(llvm_type); @@ -2911,11 +2911,11 @@ pub const DeclGen = struct { const slice = tv.val.castTag(.slice).?.data; var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*const llvm.Value = .{ - try dg.genTypedValue(.{ + try dg.lowerValue(.{ .ty = tv.ty.slicePtrFieldType(&buf), .val = slice.ptr, }), - try dg.genTypedValue(.{ + try dg.lowerValue(.{ .ty = Type.usize, .val = slice.len, }), @@ -2923,15 +2923,15 @@ pub const DeclGen = struct { return dg.context.constStruct(&fields, fields.len, .False); }, .int_u64, .one, .int_big_positive => { - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(target), .False); - return llvm_int.constIntToPtr(try dg.llvmType(tv.ty)); + return llvm_int.constIntToPtr(try dg.lowerType(tv.ty)); }, .field_ptr, .opt_payload_ptr, .eu_payload_ptr, .elem_ptr => { return dg.lowerParentPtr(tv.val, tv.ty.childType()); }, .null_value, .zero => { - const llvm_type = try dg.llvmType(tv.ty); + const llvm_type = try dg.lowerType(tv.ty); return llvm_type.constNull(); }, else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ @@ -2986,7 +2986,7 @@ pub const DeclGen = struct { defer gpa.free(llvm_elems); var need_unnamed = false; for (elem_vals[0..len]) |elem_val, i| { - llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val }); + llvm_elems[i] = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_val }); need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[i]); } if (need_unnamed) { @@ -2996,7 +2996,7 @@ pub const DeclGen = struct { .True, ); } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); + const llvm_elem_ty = try dg.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, @intCast(c_uint, llvm_elems.len), @@ -3016,13 +3016,13 @@ pub const DeclGen = struct { var need_unnamed = false; if (len != 0) { for 
(llvm_elems[0..len]) |*elem| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val }); + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); } need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[0]); } if (sentinel) |sent| { - llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent }); + llvm_elems[len] = try dg.lowerValue(.{ .ty = elem_ty, .val = sent }); need_unnamed = need_unnamed or dg.isUnnamedType(elem_ty, llvm_elems[len]); } @@ -3033,7 +3033,7 @@ pub const DeclGen = struct { .True, ); } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); + const llvm_elem_ty = try dg.lowerType(elem_ty); return llvm_elem_ty.constArray( llvm_elems.ptr, @intCast(c_uint, llvm_elems.len), @@ -3043,13 +3043,13 @@ pub const DeclGen = struct { .empty_array_sentinel => { const elem_ty = tv.ty.elemType(); const sent_val = tv.ty.sentinel().?; - const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val }); + const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val }); const llvm_elems: [1]*const llvm.Value = .{sentinel}; const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]); if (need_unnamed) { return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True); } else { - const llvm_elem_ty = try dg.llvmType(elem_ty); + const llvm_elem_ty = try dg.lowerType(elem_ty); return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len); } }, @@ -3066,17 +3066,17 @@ pub const DeclGen = struct { } if (tv.ty.optionalReprIsPayload()) { if (tv.val.castTag(.opt_payload)) |payload| { - return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data }); + return dg.lowerValue(.{ .ty = payload_ty, .val = payload.data }); } else if (is_pl) { - return dg.genTypedValue(.{ .ty = payload_ty, .val = tv.val }); + return dg.lowerValue(.{ .ty = payload_ty, .val = tv.val }); } else { - const llvm_ty = try dg.llvmType(tv.ty); + const llvm_ty = try dg.lowerType(tv.ty); return llvm_ty.constNull(); } } assert(payload_ty.zigTypeTag() != .Fn); const fields: [2]*const llvm.Value = .{ - try dg.genTypedValue(.{ + try dg.lowerValue(.{ .ty = payload_ty, .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef), }), @@ -3095,7 +3095,7 @@ pub const DeclGen = struct { return dg.resolveLlvmFunction(fn_decl_index); }, .ErrorSet => { - const llvm_ty = try dg.llvmType(tv.ty); + const llvm_ty = try dg.lowerType(tv.ty); switch (tv.val.tag()) { .@"error" => { const err_name = tv.val.castTag(.@"error").?.data.name; @@ -3113,23 +3113,23 @@ pub const DeclGen = struct { const payload_type = tv.ty.errorUnionPayload(); if (error_type.errorSetCardinality() == .zero) { const payload_val = tv.val.castTag(.eu_payload).?.data; - return dg.genTypedValue(.{ .ty = payload_type, .val = payload_val }); + return dg.lowerValue(.{ .ty = payload_type, .val = payload_val }); } const is_pl = tv.val.errorUnionIsPayload(); if (!payload_type.hasRuntimeBitsIgnoreComptime()) { // We use the error type directly as the type. 
const err_val = if (!is_pl) tv.val else Value.initTag(.zero); - return dg.genTypedValue(.{ .ty = error_type, .val = err_val }); + return dg.lowerValue(.{ .ty = error_type, .val = err_val }); } const payload_align = payload_type.abiAlignment(target); const error_align = Type.anyerror.abiAlignment(target); - const llvm_error_value = try dg.genTypedValue(.{ + const llvm_error_value = try dg.lowerValue(.{ .ty = error_type, .val = if (is_pl) Value.initTag(.zero) else tv.val, }); - const llvm_payload_value = try dg.genTypedValue(.{ + const llvm_payload_value = try dg.lowerValue(.{ .ty = payload_type, .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef), }); @@ -3142,7 +3142,7 @@ pub const DeclGen = struct { } }, .Struct => { - const llvm_struct_ty = try dg.llvmType(tv.ty); + const llvm_struct_ty = try dg.lowerType(tv.ty); const field_vals = tv.val.castTag(.aggregate).?.data; const gpa = dg.gpa; @@ -3175,7 +3175,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } - const field_llvm_val = try dg.genTypedValue(.{ + const field_llvm_val = try dg.lowerValue(.{ .ty = field_ty, .val = field_vals[i], }); @@ -3223,7 +3223,7 @@ pub const DeclGen = struct { const field = fields[i]; if (!field.ty.hasRuntimeBitsIgnoreComptime()) continue; - const non_int_val = try dg.genTypedValue(.{ + const non_int_val = try dg.lowerValue(.{ .ty = field.ty, .val = field_val, }); @@ -3267,7 +3267,7 @@ pub const DeclGen = struct { llvm_fields.appendAssumeCapacity(llvm_array_ty.getUndef()); } - const field_llvm_val = try dg.genTypedValue(.{ + const field_llvm_val = try dg.lowerValue(.{ .ty = field.ty, .val = field_vals[i], }); @@ -3302,13 +3302,13 @@ pub const DeclGen = struct { } }, .Union => { - const llvm_union_ty = try dg.llvmType(tv.ty); + const llvm_union_ty = try dg.lowerType(tv.ty); const tag_and_val = tv.val.castTag(.@"union").?.data; const layout = tv.ty.unionGetLayout(target); if (layout.payload_size == 0) { - return genTypedValue(dg, .{ + return lowerValue(dg, .{ .ty = tv.ty.unionTagType().?, .val = tag_and_val.tag, }); @@ -3322,7 +3322,7 @@ pub const DeclGen = struct { const padding_len = @intCast(c_uint, layout.payload_size); break :p dg.context.intType(8).arrayType(padding_len).getUndef(); } - const field = try genTypedValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); + const field = try lowerValue(dg, .{ .ty = field_ty, .val = tag_and_val.val }); const field_size = field_ty.abiSize(target); if (field_size == layout.payload_size) { break :p field; @@ -3348,7 +3348,7 @@ pub const DeclGen = struct { return llvm_union_ty.constNamedStruct(&fields, fields.len); } } - const llvm_tag_value = try genTypedValue(dg, .{ + const llvm_tag_value = try lowerValue(dg, .{ .ty = tv.ty.unionTagType().?, .val = tag_and_val.tag, }); @@ -3385,7 +3385,7 @@ pub const DeclGen = struct { .data = bytes[i], }; - elem.* = try dg.genTypedValue(.{ + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = Value.initPayload(&byte_payload.base), }); @@ -3405,7 +3405,7 @@ pub const DeclGen = struct { const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len); defer dg.gpa.free(llvm_elems); for (llvm_elems) |*elem, i| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] }); + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] }); } return llvm.constVector( llvm_elems.ptr, @@ -3420,7 +3420,7 @@ pub const DeclGen = struct { const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len); defer dg.gpa.free(llvm_elems); for (llvm_elems) 
|*elem| { - elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val }); + elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val }); } return llvm.constVector( llvm_elems.ptr, @@ -3470,7 +3470,7 @@ pub const DeclGen = struct { if (ptr_child_ty.eql(decl.ty, dg.module)) { return llvm_ptr; } else { - return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0)); } } @@ -3492,15 +3492,15 @@ pub const DeclGen = struct { }, .int_i64 => { const int = ptr_val.castTag(.int_i64).?.data; - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(@bitCast(u64, int), .False); - return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0)); }, .int_u64 => { const int = ptr_val.castTag(.int_u64).?.data; - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const llvm_int = llvm_usize.constInt(int, .False); - return llvm_int.constIntToPtr((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_int.constIntToPtr((try dg.lowerType(ptr_child_ty)).pointerType(0)); }, .field_ptr => blk: { const field_ptr = ptr_val.castTag(.field_ptr).?.data; @@ -3549,7 +3549,7 @@ pub const DeclGen = struct { const parent_llvm_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty); bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module); - const llvm_usize = try dg.llvmType(Type.usize); + const llvm_usize = try dg.lowerType(Type.usize); const indices: [1]*const llvm.Value = .{ llvm_usize.constInt(elem_ptr.index, .False), }; @@ -3602,7 +3602,7 @@ pub const DeclGen = struct { else => unreachable, }; if (bitcast_needed) { - return llvm_ptr.constBitCast((try dg.llvmType(ptr_child_ty)).pointerType(0)); + return llvm_ptr.constBitCast((try dg.lowerType(ptr_child_ty)).pointerType(0)); } else { return llvm_ptr; } @@ -3621,11 +3621,11 @@ pub const DeclGen = struct { .data = tv.val.sliceLen(self.module), }; const fields: [2]*const llvm.Value = .{ - try self.genTypedValue(.{ + try self.lowerValue(.{ .ty = ptr_ty, .val = tv.val, }), - try self.genTypedValue(.{ + try self.lowerValue(.{ .ty = Type.usize, .val = Value.initPayload(&slice_len.base), }), @@ -3657,7 +3657,7 @@ pub const DeclGen = struct { else try self.resolveGlobalDecl(decl_index); - const llvm_type = try self.llvmType(tv.ty); + const llvm_type = try self.lowerType(tv.ty); if (tv.ty.zigTypeTag() == .Int) { return llvm_val.constPtrToInt(llvm_type); } else { @@ -3672,8 +3672,8 @@ pub const DeclGen = struct { // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. 
- const llvm_usize = try dg.llvmType(Type.usize); - const llvm_ptr_ty = try dg.llvmType(ptr_ty); + const llvm_usize = try dg.lowerType(Type.usize); + const llvm_ptr_ty = try dg.lowerType(ptr_ty); if (alignment != 0) { return llvm_usize.constInt(alignment, .False).constIntToPtr(llvm_ptr_ty); } @@ -3852,7 +3852,7 @@ pub const FuncGen = struct { const val = self.air.value(inst).?; const ty = self.air.typeOf(inst); - const llvm_val = try self.dg.genTypedValue(.{ .ty = ty, .val = val }); + const llvm_val = try self.dg.lowerValue(.{ .ty = ty, .val = val }); if (!isByRef(ty)) { gop.value_ptr.* = llvm_val; return llvm_val; @@ -3870,7 +3870,7 @@ pub const FuncGen = struct { // Because of LLVM limitations for lowering certain types such as unions, // the type of global constants might not match the type it is supposed to // be, and so we must bitcast the pointer at the usage sites. - const wanted_llvm_ty = try self.dg.llvmType(ty); + const wanted_llvm_ty = try self.dg.lowerType(ty); const wanted_llvm_ptr_ty = wanted_llvm_ty.pointerType(0); const casted_ptr = global.constBitCast(wanted_llvm_ptr_ty); gop.value_ptr.* = casted_ptr; @@ -4094,7 +4094,7 @@ pub const FuncGen = struct { defer llvm_args.deinit(); const ret_ptr = if (!sret) null else blk: { - const llvm_ret_ty = try self.dg.llvmType(return_type); + const llvm_ret_ty = try self.dg.lowerType(return_type); const ret_ptr = self.buildAlloca(llvm_ret_ty); ret_ptr.setAlignment(return_type.abiAlignment(target)); try llvm_args.append(ret_ptr); @@ -4126,7 +4126,7 @@ pub const FuncGen = struct { // which is always lowered to an LLVM type of `*i8`. // 2. The argument is a global which does act as a pointer, however // a bitcast is needed in order for the LLVM types to match. - const llvm_param_ty = try self.dg.llvmType(param_ty); + const llvm_param_ty = try self.dg.lowerType(param_ty); const casted_ptr = self.builder.buildBitCast(llvm_arg, llvm_param_ty, ""); try llvm_args.append(casted_ptr); } else { @@ -4173,7 +4173,7 @@ pub const FuncGen = struct { ); const int_ptr = self.buildAlloca(int_llvm_ty); int_ptr.setAlignment(alignment); - const param_llvm_ty = try self.dg.llvmType(param_ty); + const param_llvm_ty = try self.dg.lowerType(param_ty); const casted_ptr = self.builder.buildBitCast(int_ptr, param_llvm_ty.pointerType(0), ""); const store_inst = self.builder.buildStore(llvm_arg, casted_ptr); store_inst.setAlignment(alignment); @@ -4284,7 +4284,7 @@ pub const FuncGen = struct { return null; } - const llvm_ret_ty = try self.dg.llvmType(return_type); + const llvm_ret_ty = try self.dg.lowerType(return_type); if (ret_ptr) |rp| { call.setCallSret(llvm_ret_ty); @@ -4354,7 +4354,7 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. - const err_int = try self.dg.llvmType(Type.anyerror); + const err_int = try self.dg.lowerType(Type.anyerror); _ = self.builder.buildRet(err_int.constInt(0, .False)); } else { _ = self.builder.buildRetVoid(); @@ -4393,7 +4393,7 @@ pub const FuncGen = struct { // Functions with an empty error set are emitted with an error code // return type and return zero so they can be function pointers coerced // to functions that return anyerror. 
- const err_int = try self.dg.llvmType(Type.anyerror); + const err_int = try self.dg.lowerType(Type.anyerror); _ = self.builder.buildRet(err_int.constInt(0, .False)); } else { _ = self.builder.buildRetVoid(); @@ -4407,7 +4407,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(un_op); const target = self.dg.module.getTarget(); const abi_ret_ty = try lowerFnRetTy(self.dg, fn_info); - const llvm_ret_ty = try self.dg.llvmType(ret_ty); + const llvm_ret_ty = try self.dg.lowerType(ret_ty); const casted_ptr = if (abi_ret_ty == llvm_ret_ty) ptr else p: { const ptr_abi_ty = abi_ret_ty.pointerType(0); break :p self.builder.buildBitCast(ptr, ptr_abi_ty, ""); @@ -4588,7 +4588,7 @@ pub const FuncGen = struct { const is_body = inst_ty.zigTypeTag() == .Fn; if (!is_body and !inst_ty.hasRuntimeBitsIgnoreComptime()) return null; - const raw_llvm_ty = try self.dg.llvmType(inst_ty); + const raw_llvm_ty = try self.dg.lowerType(inst_ty); const llvm_ty = ty: { // If the zig tag type is a function, this represents an actual function body; not @@ -4728,9 +4728,9 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_ty = self.air.typeOf(ty_op.operand); const array_ty = operand_ty.childType(); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const len = llvm_usize.constInt(array_ty.arrayLen(), .False); - const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const slice_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); if (!array_ty.hasRuntimeBitsIgnoreComptime()) { return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, ""); } @@ -4755,7 +4755,7 @@ pub const FuncGen = struct { const dest_ty = self.air.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(); - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); const target = self.dg.module.getTarget(); if (intrinsicsAllowed(dest_scalar_ty, target)) { @@ -4806,7 +4806,7 @@ pub const FuncGen = struct { const dest_ty = self.air.typeOfIndex(inst); const dest_scalar_ty = dest_ty.scalarType(); - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); if (intrinsicsAllowed(operand_scalar_ty, target)) { // TODO set fast math flag @@ -4833,7 +4833,7 @@ pub const FuncGen = struct { compiler_rt_dest_abbrev, }) catch unreachable; - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const param_types = [1]*const llvm.Type{operand_llvm_ty}; const libc_fn = self.getLibcFunction(fn_name, ¶m_types, libc_ret_ty); const params = [1]*const llvm.Value{operand}; @@ -4994,7 +4994,7 @@ pub const FuncGen = struct { const containing_int = struct_llvm_val; const shift_amt = containing_int.typeOf().constInt(bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try self.dg.llvmType(field_ty); + const elem_llvm_ty = try self.dg.lowerType(field_ty); if (field_ty.zigTypeTag() == .Float) { const elem_bits = @intCast(c_uint, field_ty.bitSize(target)); const same_size_int = self.context.intType(elem_bits); @@ -5026,7 +5026,7 @@ pub const FuncGen = struct { return self.load(field_ptr, field_ptr_ty); }, .Union => { - const llvm_field_ty = try self.dg.llvmType(field_ty); + const llvm_field_ty = try self.dg.lowerType(field_ty); const layout = struct_ty.unionGetLayout(target); const 
payload_index = @boolToInt(layout.tag_align >= layout.payload_align); const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, ""); @@ -5053,7 +5053,7 @@ pub const FuncGen = struct { const struct_ty = self.air.getRefType(ty_pl.ty).childType(); const field_offset = struct_ty.structFieldOffset(extra.field_index, target); - const res_ty = try self.dg.llvmType(self.air.getRefType(ty_pl.ty)); + const res_ty = try self.dg.lowerType(self.air.getRefType(ty_pl.ty)); if (field_offset == 0) { return self.builder.buildBitCast(field_ptr, res_ty, ""); } @@ -5383,7 +5383,7 @@ pub const FuncGen = struct { } const ret_ty = self.air.typeOfIndex(inst); - const ret_llvm_ty = try self.dg.llvmType(ret_ty); + const ret_llvm_ty = try self.dg.lowerType(ret_ty); const llvm_fn_ty = llvm.functionType( ret_llvm_ty, llvm_param_types.ptr, @@ -5425,7 +5425,7 @@ pub const FuncGen = struct { const operand_ty = self.air.typeOf(un_op); const optional_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; if (optional_ty.optionalReprIsPayload()) { - const optional_llvm_ty = try self.dg.llvmType(optional_ty); + const optional_llvm_ty = try self.dg.lowerType(optional_ty); const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand; return self.builder.buildICmp(pred, loaded, optional_llvm_ty.constNull(), ""); } @@ -5462,7 +5462,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(un_op); const err_union_ty = self.air.typeOf(un_op); const payload_ty = err_union_ty.errorUnionPayload(); - const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror)); + const err_set_ty = try self.dg.lowerType(Type.initTag(.anyerror)); const zero = err_set_ty.constNull(); if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { @@ -5506,7 +5506,7 @@ pub const FuncGen = struct { // a pointer to a zero-bit value. // TODO once we update to LLVM 14 this bitcast won't be necessary. - const res_ptr_ty = try self.dg.llvmType(result_ty); + const res_ptr_ty = try self.dg.lowerType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } if (optional_ty.optionalReprIsPayload()) { @@ -5534,7 +5534,7 @@ pub const FuncGen = struct { _ = self.builder.buildStore(non_null_bit, operand); // TODO once we update to LLVM 14 this bitcast won't be necessary. - const res_ptr_ty = try self.dg.llvmType(result_ty); + const res_ptr_ty = try self.dg.lowerType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } if (optional_ty.optionalReprIsPayload()) { @@ -5604,7 +5604,7 @@ pub const FuncGen = struct { if (!operand_is_ptr) return null; // TODO once we update to LLVM 14 this bitcast won't be necessary. 
- const res_ptr_ty = try self.dg.llvmType(result_ty); + const res_ptr_ty = try self.dg.lowerType(result_ty); return self.builder.buildBitCast(operand, res_ptr_ty, ""); } if (operand_is_ptr or isByRef(payload_ty)) { @@ -5626,7 +5626,7 @@ pub const FuncGen = struct { const operand_ty = self.air.typeOf(ty_op.operand); const err_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty; if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { - const err_llvm_ty = try self.dg.llvmType(Type.anyerror); + const err_llvm_ty = try self.dg.lowerType(Type.anyerror); if (operand_is_ptr) { return self.builder.buildBitCast(operand, err_llvm_ty.pointerType(0), ""); } else { @@ -5662,7 +5662,7 @@ pub const FuncGen = struct { return operand; } const payload_ty = error_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.genTypedValue(.{ .ty = error_ty, .val = Value.zero }); + const non_error_val = try self.dg.lowerValue(.{ .ty = error_ty, .val = Value.zero }); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { _ = self.builder.buildStore(non_error_val, operand); return operand; @@ -5715,7 +5715,7 @@ pub const FuncGen = struct { if (optional_ty.optionalReprIsPayload()) { return operand; } - const llvm_optional_ty = try self.dg.llvmType(optional_ty); + const llvm_optional_ty = try self.dg.lowerType(optional_ty); if (isByRef(optional_ty)) { const optional_ptr = self.buildAlloca(llvm_optional_ty); const payload_ptr = self.builder.buildStructGEP(optional_ptr, 0, ""); @@ -5746,8 +5746,8 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } - const ok_err_code = (try self.dg.llvmType(Type.anyerror)).constNull(); - const err_un_llvm_ty = try self.dg.llvmType(inst_ty); + const ok_err_code = (try self.dg.lowerType(Type.anyerror)).constNull(); + const err_un_llvm_ty = try self.dg.lowerType(inst_ty); const target = self.dg.module.getTarget(); const payload_offset = errUnionPayloadOffset(payload_ty, target); @@ -5781,7 +5781,7 @@ pub const FuncGen = struct { if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return operand; } - const err_un_llvm_ty = try self.dg.llvmType(err_un_ty); + const err_un_llvm_ty = try self.dg.lowerType(err_un_ty); const target = self.dg.module.getTarget(); const payload_offset = errUnionPayloadOffset(payload_ty, target); @@ -5866,7 +5866,7 @@ pub const FuncGen = struct { const ptr = try self.resolveInst(bin_op.lhs); const len = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const llvm_slice_ty = try self.dg.llvmType(inst_ty); + const llvm_slice_ty = try self.dg.lowerType(inst_ty); // In case of slicing a global, the result type looks something like `{ i8*, i64 }` // but `ptr` is pointing to the global directly. If it's an array, we would want to @@ -5874,7 +5874,7 @@ pub const FuncGen = struct { // This prevents an assertion failure. 
var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = inst_ty.slicePtrFieldType(&buf); - const ptr_llvm_ty = try self.dg.llvmType(ptr_ty); + const ptr_llvm_ty = try self.dg.lowerType(ptr_ty); const casted_ptr = self.builder.buildBitCast(ptr, ptr_llvm_ty, ""); const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), casted_ptr, 0, ""); return self.builder.buildInsertValue(partial, len, 1, ""); @@ -6040,7 +6040,7 @@ pub const FuncGen = struct { // const d = @divTrunc(a, b); // const r = @rem(a, b); // return if (r == 0) d else d - ((a < 0) ^ (b < 0)); - const result_llvm_ty = try self.dg.llvmType(inst_ty); + const result_llvm_ty = try self.dg.lowerType(inst_ty); const zero = result_llvm_ty.constNull(); const div_trunc = self.builder.buildSDiv(lhs, rhs, ""); const rem = self.builder.buildSRem(lhs, rhs, ""); @@ -6090,7 +6090,7 @@ pub const FuncGen = struct { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - const inst_llvm_ty = try self.dg.llvmType(inst_ty); + const inst_llvm_ty = try self.dg.lowerType(inst_ty); const scalar_ty = inst_ty.scalarType(); if (scalar_ty.isRuntimeFloat()) { @@ -6174,8 +6174,8 @@ pub const FuncGen = struct { const intrinsic_name = if (scalar_ty.isSignedInt()) signed_intrinsic else unsigned_intrinsic; - const llvm_lhs_ty = try self.dg.llvmType(lhs_ty); - const llvm_dest_ty = try self.dg.llvmType(dest_ty); + const llvm_lhs_ty = try self.dg.lowerType(lhs_ty); + const llvm_dest_ty = try self.dg.lowerType(dest_ty); const tg = self.dg.module.getTarget(); @@ -6283,7 +6283,7 @@ pub const FuncGen = struct { ) !*const llvm.Value { const target = self.dg.module.getTarget(); const scalar_ty = ty.scalarType(); - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); if (intrinsicsAllowed(scalar_ty, target)) { const llvm_predicate: llvm.RealPredicate = switch (pred) { @@ -6383,8 +6383,8 @@ pub const FuncGen = struct { ) !*const llvm.Value { const target = self.dg.module.getTarget(); const scalar_ty = ty.scalarType(); - const llvm_ty = try self.dg.llvmType(ty); - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const llvm_ty = try self.dg.lowerType(ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const intrinsics_allowed = op != .tan and intrinsicsAllowed(scalar_ty, target); var fn_name_buf: [64]u8 = undefined; @@ -6478,12 +6478,12 @@ pub const FuncGen = struct { const rhs_scalar_ty = rhs_ty.scalarType(); const dest_ty = self.air.typeOfIndex(inst); - const llvm_dest_ty = try self.dg.llvmType(dest_ty); + const llvm_dest_ty = try self.dg.lowerType(dest_ty); const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; @@ -6543,7 +6543,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; if (lhs_scalar_ty.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, ""); @@ -6566,7 +6566,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, 
try self.dg.llvmType(lhs_type), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_type), "") else rhs; return self.builder.buildShl(lhs, casted_rhs, ""); @@ -6588,7 +6588,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; if (lhs_scalar_ty.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, ""); @@ -6611,7 +6611,7 @@ pub const FuncGen = struct { const tg = self.dg.module.getTarget(); const casted_rhs = if (rhs_scalar_ty.bitSize(tg) < lhs_scalar_ty.bitSize(tg)) - self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_ty), "") + self.builder.buildZExt(rhs, try self.dg.lowerType(lhs_ty), "") else rhs; const is_signed_int = lhs_scalar_ty.isSignedInt(); @@ -6639,7 +6639,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const dest_ty = self.air.typeOfIndex(inst); const dest_info = dest_ty.intInfo(target); - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const operand_info = operand_ty.intInfo(target); @@ -6661,7 +6661,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = try self.resolveInst(ty_op.operand); - const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); return self.builder.buildTrunc(operand, dest_llvm_ty, ""); } @@ -6679,7 +6679,7 @@ pub const FuncGen = struct { if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) { return softF80TruncOrExt(self, operand, src_bits, dest_bits); } - const dest_llvm_ty = try self.dg.llvmType(dest_ty); + const dest_llvm_ty = try self.dg.lowerType(dest_ty); return self.builder.buildFPTrunc(operand, dest_llvm_ty, ""); } @@ -6697,7 +6697,7 @@ pub const FuncGen = struct { if (!backendSupportsF80(target) and (src_bits == 80 or dest_bits == 80)) { return softF80TruncOrExt(self, operand, src_bits, dest_bits); } - const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); return self.builder.buildFPExt(operand, dest_llvm_ty, ""); } @@ -6707,7 +6707,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const dest_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); return self.builder.buildPtrToInt(operand, dest_llvm_ty, ""); } @@ -6720,7 +6720,7 @@ pub const FuncGen = struct { const inst_ty = self.air.typeOfIndex(inst); const operand_is_ref = isByRef(operand_ty); const result_is_ref = isByRef(inst_ty); - const llvm_dest_ty = try self.dg.llvmType(inst_ty); + const llvm_dest_ty = try self.dg.lowerType(inst_ty); const target = self.dg.module.getTarget(); if (operand_is_ref and result_is_ref) { @@ -6740,14 +6740,14 @@ pub const FuncGen = struct { const array_ptr = self.buildAlloca(llvm_dest_ty); const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8; if (bitcast_ok) { - const llvm_vector_ty = try self.dg.llvmType(operand_ty); + const llvm_vector_ty = try 
self.dg.lowerType(operand_ty); const casted_ptr = self.builder.buildBitCast(array_ptr, llvm_vector_ty.pointerType(0), ""); const llvm_store = self.builder.buildStore(operand, casted_ptr); llvm_store.setAlignment(inst_ty.abiAlignment(target)); } else { // If the bit size of the element type does not exactly match its ABI size, // a simple bitcast will not work, and we fall back to extractelement. - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(); @@ -6764,7 +6764,7 @@ pub const FuncGen = struct { return array_ptr; } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) { const elem_ty = operand_ty.childType(); - const llvm_vector_ty = try self.dg.llvmType(inst_ty); + const llvm_vector_ty = try self.dg.lowerType(inst_ty); if (!operand_is_ref) { return self.dg.todo("implement bitcast non-ref array to vector", .{}); } @@ -6781,7 +6781,7 @@ pub const FuncGen = struct { } else { // If the bit size of the element type does not exactly match its ABI size, // a simple bitcast will not work, and we fall back to extractelement. - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_u32 = self.context.intType(32); const zero = llvm_usize.constNull(); const vector_len = operand_ty.arrayLen(); @@ -6813,7 +6813,7 @@ pub const FuncGen = struct { const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); const result_ptr = self.buildAlloca(llvm_dest_ty); result_ptr.setAlignment(alignment); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), ""); const store_inst = self.builder.buildStore(operand, casted_ptr); store_inst.setAlignment(alignment); @@ -6827,7 +6827,7 @@ pub const FuncGen = struct { const alignment = @maximum(operand_ty.abiAlignment(target), inst_ty.abiAlignment(target)); const result_ptr = self.buildAlloca(llvm_dest_ty); result_ptr.setAlignment(alignment); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const casted_ptr = self.builder.buildBitCast(result_ptr, operand_llvm_ty.pointerType(0), ""); const store_inst = self.builder.buildStore(operand, casted_ptr); store_inst.setAlignment(alignment); @@ -6901,7 +6901,7 @@ pub const FuncGen = struct { const pointee_type = ptr_ty.childType(); if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); - const pointee_llvm_ty = try self.dg.llvmType(pointee_type); + const pointee_llvm_ty = try self.dg.lowerType(pointee_type); const alloca_inst = self.buildAlloca(pointee_llvm_ty); const target = self.dg.module.getTarget(); const alignment = ptr_ty.ptrAlignment(target); @@ -6915,7 +6915,7 @@ pub const FuncGen = struct { const ret_ty = ptr_ty.childType(); if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty); if (self.ret_ptr) |ret_ptr| return ret_ptr; - const ret_llvm_ty = try self.dg.llvmType(ret_ty); + const ret_llvm_ty = try self.dg.lowerType(ret_ty); const target = self.dg.module.getTarget(); const alloca_inst = self.buildAlloca(ret_llvm_ty); alloca_inst.setAlignment(ptr_ty.ptrAlignment(target)); @@ -6946,7 +6946,7 @@ pub const FuncGen = struct {
const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, ""); const fill_char = u8_llvm_ty.constInt(0xaa, .False); const dest_ptr_align = ptr_ty.ptrAlignment(target); - const usize_llvm_ty = try self.dg.llvmType(Type.usize); + const usize_llvm_ty = try self.dg.lowerType(Type.usize); const len = usize_llvm_ty.constInt(operand_size, .False); _ = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align, ptr_ty.isVolatilePtr()); if (self.dg.module.comp.bin_file.options.valgrind) { @@ -6983,7 +6983,7 @@ pub const FuncGen = struct { const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{}); const params = [_]*const llvm.Value{llvm_i32.constNull()}; const ptr_val = self.builder.buildCall(llvm_fn, ¶ms, params.len, .Fast, .Auto, ""); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); } @@ -7001,7 +7001,7 @@ pub const FuncGen = struct { const params = [_]*const llvm.Value{llvm_i32.constNull()}; const ptr_val = self.builder.buildCall(llvm_fn, ¶ms, params.len, .Fast, .Auto, ""); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); return self.builder.buildPtrToInt(ptr_val, llvm_usize, ""); } @@ -7046,7 +7046,7 @@ pub const FuncGen = struct { var payload = self.builder.buildExtractValue(result, 0, ""); if (opt_abi_ty != null) { - payload = self.builder.buildTrunc(payload, try self.dg.llvmType(operand_ty), ""); + payload = self.builder.buildTrunc(payload, try self.dg.lowerType(operand_ty), ""); } const success_bit = self.builder.buildExtractValue(result, 1, ""); @@ -7054,7 +7054,7 @@ pub const FuncGen = struct { return self.builder.buildSelect(success_bit, payload.typeOf().constNull(), payload, ""); } - const optional_llvm_ty = try self.dg.llvmType(optional_ty); + const optional_llvm_ty = try self.dg.lowerType(optional_ty); const non_null_bit = self.builder.buildNot(success_bit, ""); const partial = self.builder.buildInsertValue(optional_llvm_ty.getUndef(), payload, 0, ""); return self.builder.buildInsertValue(partial, non_null_bit, 1, ""); @@ -7090,7 +7090,7 @@ pub const FuncGen = struct { ordering, single_threaded, ); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); if (is_float) { return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); } else { @@ -7103,7 +7103,7 @@ pub const FuncGen = struct { } // It's a pointer but we need to treat it as an int. 
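 // (Illustrative IR shape for this path, assuming a 64-bit target; LLVM's
 // `atomicrmw` does not accept pointer operands, so the value round-trips
 // through usize:
 //     %p = bitcast i8** %ptr to i64*
 //     %v = ptrtoint i8* %operand to i64
 //     %r = atomicrmw <op> i64* %p, i64 %v <ordering>
 //     %result = inttoptr i64 %r to i8*
 // )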
- const usize_llvm_ty = try self.dg.llvmType(Type.usize); + const usize_llvm_ty = try self.dg.lowerType(Type.usize); const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), ""); const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); const uncasted_result = self.builder.buildAtomicRmw( @@ -7113,7 +7113,7 @@ pub const FuncGen = struct { ordering, single_threaded, ); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); } @@ -7132,7 +7132,7 @@ pub const FuncGen = struct { const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); const load_inst = (try self.load(casted_ptr, ptr_ty)).?; load_inst.setOrdering(ordering); - return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), ""); + return self.builder.buildTrunc(load_inst, try self.dg.lowerType(operand_ty), ""); } const load_inst = (try self.load(ptr, ptr_ty)).?; load_inst.setOrdering(ordering); @@ -7273,13 +7273,13 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const llvm_i1 = self.context.intType(1); - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() }; const wrong_size_result = self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, ""); const result_ty = self.air.typeOfIndex(inst); - const result_llvm_ty = try self.dg.llvmType(result_ty); + const result_llvm_ty = try self.dg.lowerType(result_ty); const target = self.dg.module.getTarget(); const bits = operand_ty.intInfo(target).bits; @@ -7301,12 +7301,12 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const params = [_]*const llvm.Value{operand}; - const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const operand_llvm_ty = try self.dg.lowerType(operand_ty); const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty}); const wrong_size_result = self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, ""); const result_ty = self.air.typeOfIndex(inst); - const result_llvm_ty = try self.dg.llvmType(result_ty); + const result_llvm_ty = try self.dg.lowerType(result_ty); const target = self.dg.module.getTarget(); const bits = operand_ty.intInfo(target).bits; @@ -7330,7 +7330,7 @@ pub const FuncGen = struct { assert(bits % 8 == 0); var operand = try self.resolveInst(ty_op.operand); - var operand_llvm_ty = try self.dg.llvmType(operand_ty); + var operand_llvm_ty = try self.dg.lowerType(operand_ty); if (bits % 16 == 8) { // If not an even byte-multiple, we need zero-extend + shift-left 1 byte @@ -7364,7 +7364,7 @@ pub const FuncGen = struct { const wrong_size_result = self.builder.buildCall(fn_val, ¶ms, params.len, .C, .Auto, ""); const result_ty = self.air.typeOfIndex(inst); - const result_llvm_ty = try self.dg.llvmType(result_ty); + const result_llvm_ty = try self.dg.lowerType(result_ty); const result_bits = result_ty.intInfo(target).bits; if (bits > result_bits) { return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); @@ -7407,14 +7407,14 @@ pub const FuncGen = struct { } const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); - const llvm_ret_ty = try self.dg.llvmType(slice_ty); - const usize_llvm_ty = try self.dg.llvmType(Type.usize); + const llvm_ret_ty = 
try self.dg.lowerType(slice_ty); + const usize_llvm_ty = try self.dg.lowerType(Type.usize); const target = self.dg.module.getTarget(); const slice_alignment = slice_ty.abiAlignment(target); var int_tag_type_buffer: Type.Payload.Bits = undefined; const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer); - const param_types = [_]*const llvm.Type{try self.dg.llvmType(int_tag_ty)}; + const param_types = [_]*const llvm.Type{try self.dg.lowerType(int_tag_ty)}; const fn_type = llvm.functionType(llvm_ret_ty, ¶m_types, param_types.len, .False); const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type); @@ -7471,7 +7471,7 @@ pub const FuncGen = struct { .base = .{ .tag = .enum_field_index }, .data = @intCast(u32, field_index), }; - break :int try self.dg.genTypedValue(.{ + break :int try self.dg.lowerValue(.{ .ty = enum_ty, .val = Value.initPayload(&tag_val_payload.base), }); @@ -7496,8 +7496,8 @@ pub const FuncGen = struct { // Function signature: fn (anyerror) bool - const ret_llvm_ty = try self.dg.llvmType(Type.bool); - const anyerror_llvm_ty = try self.dg.llvmType(Type.anyerror); + const ret_llvm_ty = try self.dg.lowerType(Type.bool); + const anyerror_llvm_ty = try self.dg.lowerType(Type.anyerror); const param_types = [_]*const llvm.Type{anyerror_llvm_ty}; const fn_type = llvm.functionType(ret_llvm_ty, ¶m_types, param_types.len, .False); @@ -7606,7 +7606,7 @@ pub const FuncGen = struct { .Add => switch (scalar_ty.zigTypeTag()) { .Int => return self.builder.buildAddReduce(operand), .Float => { - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const neutral_value = scalar_llvm_ty.constReal(-0.0); return self.builder.buildFPAddReduce(neutral_value, operand); }, @@ -7615,7 +7615,7 @@ pub const FuncGen = struct { .Mul => switch (scalar_ty.zigTypeTag()) { .Int => return self.builder.buildMulReduce(operand), .Float => { - const scalar_llvm_ty = try self.dg.llvmType(scalar_ty); + const scalar_llvm_ty = try self.dg.lowerType(scalar_ty); const neutral_value = scalar_llvm_ty.constReal(1.0); return self.builder.buildFPMulReduce(neutral_value, operand); }, @@ -7631,7 +7631,7 @@ pub const FuncGen = struct { const result_ty = self.air.typeOfIndex(inst); const len = @intCast(usize, result_ty.arrayLen()); const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]); - const llvm_result_ty = try self.dg.llvmType(result_ty); + const llvm_result_ty = try self.dg.lowerType(result_ty); const target = self.dg.module.getTarget(); switch (result_ty.zigTypeTag()) { @@ -7719,7 +7719,7 @@ pub const FuncGen = struct { .Array => { assert(isByRef(result_ty)); - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const alloca_inst = self.buildAlloca(llvm_result_ty); alloca_inst.setAlignment(result_ty.abiAlignment(target)); @@ -7754,7 +7754,7 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data; const union_ty = self.air.typeOfIndex(inst); - const union_llvm_ty = try self.dg.llvmType(union_ty); + const union_llvm_ty = try self.dg.lowerType(union_ty); const target = self.dg.module.getTarget(); const layout = union_ty.unionGetLayout(target); if (layout.payload_size == 0) { @@ -7774,8 +7774,8 @@ pub const FuncGen = struct { const union_obj = union_ty.cast(Type.Payload.Union).?.data; assert(union_obj.haveFieldTypes()); const field = 
union_obj.fields.values()[extra.field_index]; - const field_llvm_ty = try self.dg.llvmType(field.ty); - const tag_llvm_ty = try self.dg.llvmType(union_obj.tag_ty); + const field_llvm_ty = try self.dg.lowerType(field.ty); + const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty); const field_size = field.ty.abiSize(target); const field_align = field.normalAlignment(target); @@ -8011,7 +8011,7 @@ pub const FuncGen = struct { const slice_ty = Type.initTag(.const_slice_u8_sentinel_0); const slice_alignment = slice_ty.abiAlignment(self.dg.module.getTarget()); - const llvm_slice_ty = try self.dg.llvmType(slice_ty); + const llvm_slice_ty = try self.dg.lowerType(slice_ty); const llvm_slice_ptr_ty = llvm_slice_ty.pointerType(0); // TODO: Address space const error_name_table_global = self.dg.object.llvm_module.addGlobal(llvm_slice_ptr_ty, "__zig_err_name_table"); @@ -8075,7 +8075,7 @@ pub const FuncGen = struct { // out the relevant bits when accessing the pointee. // Here we perform a bitcast because we want to use the host_size // as the llvm pointer element type. - const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); // TODO this can be removed if we change host_size to be bits instead // of bytes. return self.builder.buildBitCast(struct_ptr, result_llvm_ty, ""); @@ -8090,7 +8090,7 @@ pub const FuncGen = struct { // end of the struct. Treat our struct pointer as an array of two and get // the index to the element at index `1` to get a pointer to the end of // the struct. - const llvm_usize = try self.dg.llvmType(Type.usize); + const llvm_usize = try self.dg.lowerType(Type.usize); const llvm_index = llvm_usize.constInt(1, .False); const indices: [1]*const llvm.Value = .{llvm_index}; return self.builder.buildInBoundsGEP(struct_ptr, &indices, indices.len, ""); @@ -8111,7 +8111,7 @@ pub const FuncGen = struct { ) !?*const llvm.Value { const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = &union_obj.fields.values()[field_index]; - const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst)); if (!field.ty.hasRuntimeBitsIgnoreComptime()) { return null; } @@ -8150,7 +8150,7 @@ pub const FuncGen = struct { const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr()); if (info.host_size == 0) { if (isByRef(info.pointee_type)) { - const elem_llvm_ty = try self.dg.llvmType(info.pointee_type); + const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); const result_align = info.pointee_type.abiAlignment(target); const max_align = @maximum(result_align, ptr_alignment); const result_ptr = self.buildAlloca(elem_llvm_ty); @@ -8183,7 +8183,7 @@ pub const FuncGen = struct { const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target)); const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False); const shifted_value = self.builder.buildLShr(containing_int, shift_amt, ""); - const elem_llvm_ty = try self.dg.llvmType(info.pointee_type); + const elem_llvm_ty = try self.dg.lowerType(info.pointee_type); if (isByRef(info.pointee_type)) { const result_align = info.pointee_type.abiAlignment(target); @@ -8625,7 +8625,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm. // anyerror return type instead, so that it can be coerced into a function // pointer type which has anyerror as the return type. 
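 // (Illustration with hypothetical names: given `fn cannotFail() error{}!void`,
 // passing `cannotFail` where a `fn () anyerror!void` is expected, as the
 // behavior tests later in this series do, is only sound if both signatures
 // share one return ABI. Lowering the empty-error-set return as anyerror
 // achieves that; `ret` then returns the error code 0, meaning "no error".)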
if (fn_info.return_type.isError()) { - return dg.llvmType(Type.anyerror); + return dg.lowerType(Type.anyerror); } else { return dg.context.voidType(); } @@ -8636,7 +8636,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm. if (isByRef(fn_info.return_type)) { return dg.context.voidType(); } else { - return dg.llvmType(fn_info.return_type); + return dg.lowerType(fn_info.return_type); } }, .C => { @@ -8657,24 +8657,24 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm. else => false, }; switch (target.cpu.arch) { - .mips, .mipsel => return dg.llvmType(fn_info.return_type), + .mips, .mipsel => return dg.lowerType(fn_info.return_type), .x86_64 => switch (target.os.tag) { .windows => switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) { .integer => { if (is_scalar) { - return dg.llvmType(fn_info.return_type); + return dg.lowerType(fn_info.return_type); } else { const abi_size = fn_info.return_type.abiSize(target); return dg.context.intType(@intCast(c_uint, abi_size * 8)); } }, .memory => return dg.context.voidType(), - .sse => return dg.llvmType(fn_info.return_type), + .sse => return dg.lowerType(fn_info.return_type), else => unreachable, }, else => { if (is_scalar) { - return dg.llvmType(fn_info.return_type); + return dg.lowerType(fn_info.return_type); } const classes = x86_64_abi.classifySystemV(fn_info.return_type, target); if (classes[0] == .memory) { @@ -8715,10 +8715,10 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm. }, }, // TODO investigate C ABI for other architectures - else => return dg.llvmType(fn_info.return_type), + else => return dg.lowerType(fn_info.return_type), } }, - else => return dg.llvmType(fn_info.return_type), + else => return dg.lowerType(fn_info.return_type), } } From b42100c70fc306c6d6f69a55e9225a9a91e363ef Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 23 May 2022 22:10:50 +0200 Subject: [PATCH 09/18] dwarf: update abbrev info generation for new error union layout --- src/arch/x86_64/CodeGen.zig | 1 - src/link/Dwarf.zig | 10 ++++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index eeb4cab04f..68c8d3449b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -4377,7 +4377,6 @@ fn genVarDbgInfo( fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { switch (self.debug_output) { .dwarf => |dw| { - assert(ty.hasRuntimeBits()); const dbg_info = &dw.dbg_info; const index = dbg_info.items.len; try dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index a204dd91ae..61bec1f880 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -498,9 +498,11 @@ pub const DeclState = struct { .ErrorUnion => { const error_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); const abi_size = ty.abiSize(target); - const abi_align = ty.abiAlignment(target); - const payload_off = mem.alignForwardGeneric(u64, error_ty.abiSize(target), abi_align); + const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0; + const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(target); // DW.AT.structure_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type)); @@ -534,7 +536,7 @@ pub const DeclState = struct { try 
dbg_info_buffer.resize(index + 4); try self.addTypeReloc(atom, error_ty, @intCast(u32, index), null); // DW.AT.data_member_location, DW.FORM.sdata - try dbg_info_buffer.append(0); + try leb128.writeULEB128(dbg_info_buffer.writer(), error_off); // DW.AT.structure_type delimit children try dbg_info_buffer.append(0); @@ -2293,7 +2295,7 @@ fn addDbgInfoErrorSet( // DW.AT.enumeration_type try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type)); // DW.AT.byte_size, DW.FORM.sdata - const abi_size = ty.abiSize(target); + const abi_size = Type.anyerror.abiSize(target); try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size); // DW.AT.name, DW.FORM.string const name = try ty.nameAllocArena(arena, module); From 41f517e5f506500c4e3f0bea53d73db0a1daf456 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 23 May 2022 23:07:12 +0200 Subject: [PATCH 10/18] x64: update for new error union layout --- src/arch/x86_64/CodeGen.zig | 248 +++++++++++++++++++++++++----------- src/codegen.zig | 5 +- 2 files changed, 175 insertions(+), 78 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 68c8d3449b..dc2f55f6ef 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -854,7 +854,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { const ptr_ty = self.air.typeOfIndex(inst); const elem_ty = ptr_ty.elemType(); - if (!elem_ty.hasRuntimeBits()) { + if (!elem_ty.hasRuntimeBitsIgnoreComptime()) { return self.allocMem(inst, @sizeOf(usize), @alignOf(usize)); } @@ -1786,21 +1786,34 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const err_ty = err_union_ty.errorUnionSet(); const payload_ty = err_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - const operand_lock: ?RegisterLock = switch (operand) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); const result: MCValue = result: { - if (!payload_ty.hasRuntimeBits()) break :result operand; + if (err_ty.errorSetCardinality() == .zero) { + break :result MCValue{ .immediate = 0 }; + } + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result operand; + } + + const err_off = errUnionErrOffset(err_union_ty, self.target.*); switch (operand) { .stack_offset => |off| { - break :result MCValue{ .stack_offset = off }; + const offset = off - @intCast(i32, err_off); + break :result MCValue{ .stack_offset = offset }; }, - .register => { + .register => |reg| { // TODO reuse operand - break :result try self.copyToRegisterWithInstTracking(inst, err_ty, operand); + const lock = self.register_manager.lockRegAssumeUnused(reg); + defer self.register_manager.unlockReg(lock); + const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); + if (err_off > 0) { + const shift = @intCast(u6, err_off * 8); + try self.genShiftBinOpMir(.shr, err_union_ty, result.register, .{ .immediate = shift }); + } else { + try self.truncateRegister(Type.anyerror, result.register); + } + break :result result; }, else => return self.fail("TODO implement unwrap_err_err for {}", .{operand}), } @@ -1815,32 +1828,37 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { } const err_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = err_union_ty.errorUnionPayload(); + const err_ty = err_union_ty.errorUnionSet(); + const operand = try self.resolveInst(ty_op.operand); + const result: MCValue = result: { - if 
(!payload_ty.hasRuntimeBits()) break :result MCValue.none; + if (err_ty.errorSetCardinality() == .zero) { + // TODO check if we can reuse + break :result operand; + } - const operand = try self.resolveInst(ty_op.operand); - const operand_lock: ?RegisterLock = switch (operand) { - .register => |reg| self.register_manager.lockRegAssumeUnused(reg), - else => null, - }; - defer if (operand_lock) |lock| self.register_manager.unlockReg(lock); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result MCValue.none; + } - const abi_align = err_union_ty.abiAlignment(self.target.*); - const err_ty = err_union_ty.errorUnionSet(); - const err_abi_size = mem.alignForwardGeneric(u32, @intCast(u32, err_ty.abiSize(self.target.*)), abi_align); + const payload_off = errUnionPayloadOffset(err_union_ty, self.target.*); switch (operand) { .stack_offset => |off| { - const offset = off - @intCast(i32, err_abi_size); + const offset = off - @intCast(i32, payload_off); break :result MCValue{ .stack_offset = offset }; }, - .register => { + .register => |reg| { // TODO reuse operand - const shift = @intCast(u6, err_abi_size * @sizeOf(usize)); + const lock = self.register_manager.lockRegAssumeUnused(reg); + defer self.register_manager.unlockReg(lock); const result = try self.copyToRegisterWithInstTracking(inst, err_union_ty, operand); - try self.genShiftBinOpMir(.shr, Type.usize, result.register, .{ .immediate = shift }); - break :result MCValue{ - .register = registerAlias(result.register, @intCast(u32, payload_ty.abiSize(self.target.*))), - }; + if (payload_off > 0) { + const shift = @intCast(u6, payload_off * 8); + try self.genShiftBinOpMir(.shr, err_union_ty, result.register, .{ .immediate = shift }); + } else { + try self.truncateRegister(payload_ty, result.register); + } + break :result result; }, else => return self.fail("TODO implement unwrap_err_payload for {}", .{operand}), } @@ -1935,24 +1953,37 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// T to E!T fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; + if (self.liveness.isUnused(inst)) { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } + const error_union_ty = self.air.getRefType(ty_op.ty); const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - assert(payload_ty.hasRuntimeBits()); - const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); - const abi_align = error_union_ty.abiAlignment(self.target.*); - const err_abi_size = @intCast(u32, error_ty.abiSize(self.target.*)); - const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); - const offset = mem.alignForwardGeneric(u32, err_abi_size, abi_align); - try self.genSetStack(error_ty, stack_offset, .{ .immediate = 0 }, .{}); - try self.genSetStack(payload_ty, stack_offset - @intCast(i32, offset), operand, .{}); + const result: MCValue = result: { + if (error_ty.errorSetCardinality() == .zero) { + break :result operand; + } - return self.finishAir(inst, .{ .stack_offset = stack_offset }, .{ ty_op.operand, .none, .none }); + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result operand; + } + + const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); + const abi_align = error_union_ty.abiAlignment(self.target.*); + const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); + const payload_off 
= errUnionPayloadOffset(error_union_ty, self.target.*); + const err_off = errUnionErrOffset(error_union_ty, self.target.*); + try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), operand, .{}); + try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), .{ .immediate = 0 }, .{}); + + break :result MCValue{ .stack_offset = stack_offset }; + }; + + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T @@ -1962,19 +1993,22 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); } const error_union_ty = self.air.getRefType(ty_op.ty); - const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - const err = try self.resolveInst(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + const result: MCValue = result: { - if (!payload_ty.hasRuntimeBits()) break :result err; + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + break :result operand; + } const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); - const err_abi_size = @intCast(u32, error_ty.abiSize(self.target.*)); const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); - const offset = mem.alignForwardGeneric(u32, err_abi_size, abi_align); - try self.genSetStack(error_ty, stack_offset, err, .{}); - try self.genSetStack(payload_ty, stack_offset - @intCast(i32, offset), .undef, .{}); + const payload_off = errUnionPayloadOffset(error_union_ty, self.target.*); + const err_off = errUnionErrOffset(error_union_ty, self.target.*); + try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), operand, .{}); + try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), .undef, .{}); + break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2535,7 +2569,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasRuntimeBits()) + if (!elem_ty.hasRuntimeBitsIgnoreComptime()) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -4102,6 +4136,9 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { const operand = try self.resolveInst(un_op); const ret_ty = self.fn_type.fnReturnType(); switch (self.ret_mcv) { + .immediate => { + assert(ret_ty.isError()); + }, .stack_offset => { const reg = try self.copyToTmpRegister(Type.usize, self.ret_mcv); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); @@ -4134,6 +4171,9 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const ptr_ty = self.air.typeOf(un_op); const elem_ty = ptr_ty.elemType(); switch (self.ret_mcv) { + .immediate => { + assert(elem_ty.isError()); + }, .stack_offset => { const reg = try self.copyToTmpRegister(Type.usize, self.ret_mcv); const reg_lock = self.register_manager.lockRegAssumeUnused(reg); @@ -4603,7 +4643,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValu const cmp_ty: Type = if (!ty.isPtrLikeOptional()) blk: { var buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&buf); - break :blk if (payload_ty.hasRuntimeBits()) Type.bool else ty; + break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime()) Type.bool else ty; } else ty; try self.genBinOpMir(.cmp, cmp_ty, operand, 
MCValue{ .immediate = 0 }); @@ -4619,25 +4659,36 @@ fn isNonNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCV fn isErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { const err_type = ty.errorUnionSet(); - const payload_type = ty.errorUnionPayload(); - if (!err_type.hasRuntimeBits()) { + + if (err_type.errorSetCardinality() == .zero) { return MCValue{ .immediate = 0 }; // always false } try self.spillCompareFlagsIfOccupied(); self.compare_flags_inst = inst; - if (!payload_type.hasRuntimeBits()) { - if (err_type.abiSize(self.target.*) <= 8) { - try self.genBinOpMir(.cmp, err_type, operand, MCValue{ .immediate = 0 }); - return MCValue{ .compare_flags_unsigned = .gt }; - } else { - return self.fail("TODO isErr for errors with size larger than register size", .{}); - } - } else { - try self.genBinOpMir(.cmp, err_type, operand, MCValue{ .immediate = 0 }); - return MCValue{ .compare_flags_unsigned = .gt }; + const err_off = errUnionErrOffset(ty, self.target.*); + switch (operand) { + .stack_offset => |off| { + const offset = off - @intCast(i32, err_off); + try self.genBinOpMir(.cmp, Type.anyerror, .{ .stack_offset = offset }, .{ .immediate = 0 }); + }, + .register => |reg| { + const maybe_lock = self.register_manager.lockReg(reg); + defer if (maybe_lock) |lock| self.register_manager.unlockReg(lock); + const tmp_reg = try self.copyToTmpRegister(ty, operand); + if (err_off > 0) { + const shift = @intCast(u6, err_off * 8); + try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{ .immediate = shift }); + } else { + try self.truncateRegister(Type.anyerror, tmp_reg); + } + try self.genBinOpMir(.cmp, Type.anyerror, .{ .register = tmp_reg }, .{ .immediate = 0 }); + }, + else => return self.fail("TODO implement isErr for {}", .{operand}), } + + return MCValue{ .compare_flags_unsigned = .gt }; } fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue { @@ -5460,6 +5511,21 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl .immediate => |x_big| { const base_reg = opts.dest_stack_base orelse .rbp; switch (abi_size) { + 0 => { + assert(ty.isError()); + const payload = try self.addExtra(Mir.ImmPair{ + .dest_off = @bitCast(u32, -stack_offset), + .operand = @truncate(u32, x_big), + }); + _ = try self.addInst(.{ + .tag = .mov_mem_imm, + .ops = Mir.Inst.Ops.encode(.{ + .reg1 = base_reg, + .flags = 0b00, + }), + .data = .{ .payload = payload }, + }); + }, 1, 2, 4 => { const payload = try self.addExtra(Mir.ImmPair{ .dest_off = @bitCast(u32, -stack_offset), @@ -6642,7 +6708,7 @@ pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { + if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -6650,7 +6716,7 @@ pub fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. 
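 // (The added `isError()` check below is deliberate: zero-bit error types such
 // as `error{}!void` still take part in the error-code ABI, see the
 // `.immediate = 0` return-value handling elsewhere in this patch, so they must
 // keep a tracked MCValue instead of collapsing to `.none`.)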
const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -6779,6 +6845,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { const target = self.target.*; switch (typed_value.ty.zigTypeTag()) { + .Void => return MCValue{ .none = {} }, .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => {}, else => { @@ -6840,26 +6907,35 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } }, .ErrorSet => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; + switch (typed_value.val.tag()) { + .@"error" => { + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return MCValue{ .immediate = 0 }; + }, + } }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(); const payload_type = typed_value.ty.errorUnionPayload(); - if (typed_value.val.castTag(.eu_payload)) |_| { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return MCValue{ .immediate = 0 }; - } - } else { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); - } + if (error_type.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return self.genTypedValue(.{ .ty = payload_type, .val = payload_val }); + } + + const is_pl = typed_value.val.errorUnionIsPayload(); + + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
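 // (Sketch of the encoding this implements: with a zero-bit payload the whole
 // union is just the error code. Assuming `error.A` was assigned global error
 // index 1, `@as(error{A}!void, error.A)` lowers to the immediate 1, while
 // `@as(error{A}!void, {})` lowers to the immediate 0.)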
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + return self.genTypedValue(.{ .ty = error_type, .val = err_val }); } }, @@ -6867,7 +6943,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { .ComptimeFloat => unreachable, .Type => unreachable, .EnumLiteral => unreachable, - .Void => unreachable, .NoReturn => unreachable, .Undefined => unreachable, .Null => unreachable, @@ -6921,11 +6996,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // Return values if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { result.return_value = .{ .none = {} }; } else { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - if (ret_ty_size <= 8) { + if (ret_ty_size == 0) { + assert(ret_ty.isError()); + result.return_value = .{ .immediate = 0 }; + } else if (ret_ty_size <= 8) { const aliased_reg = registerAlias(c_abi_int_return_regs[0], ret_ty_size); result.return_value = .{ .register = aliased_reg }; } else { @@ -7105,3 +7183,19 @@ fn intrinsicsAllowed(target: Target, ty: Type) bool { fn hasAvxSupport(target: Target) bool { return Target.x86.featureSetHasAny(target.cpu.features, .{ .avx, .avx2 }); } + +fn errUnionPayloadOffset(ty: Type, target: std.Target) u64 { + const payload_ty = ty.errorUnionPayload(); + return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) + Type.anyerror.abiSize(target) + else + 0; +} + +fn errUnionErrOffset(ty: Type, target: std.Target) u64 { + const payload_ty = ty.errorUnionPayload(); + return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) + 0 + else + payload_ty.abiSize(target); +} diff --git a/src/codegen.zig b/src/codegen.zig index eea8095a62..4f400fa7fc 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -442,7 +442,10 @@ pub fn generateSymbol( .Int => { const info = typed_value.ty.intInfo(target); if (info.bits <= 8) { - const x = @intCast(u8, typed_value.val.toUnsignedInt(target)); + const x: u8 = switch (info.signedness) { + .unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)), + .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt())), + }; try code.append(x); return Result{ .appended = {} }; } From c043d57cabdc4db20a55a9877ec607c81d15442f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 24 May 2022 17:35:02 +0200 Subject: [PATCH 11/18] x64,arm,aarch64: omit unsupported tests for now --- test/behavior/error.zig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 1b2a67bd57..18cfb03457 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -149,12 +149,19 @@ test "implicit cast to optional to error union to return result loc" { } test "fn returning empty error set can be passed as fn returning any error" { + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + entry(); comptime entry(); } test "fn returning empty error set can be passed as fn returning any error - pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if 
(builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO entryPtr(); comptime entryPtr(); From 8c49420928b29271429cc09b5d5f1447a942f8d6 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 24 May 2022 19:23:33 +0200 Subject: [PATCH 12/18] aarch64: update for new error union layout --- src/arch/aarch64/CodeGen.zig | 114 ++++++++++++++++++++++------------- src/arch/x86_64/CodeGen.zig | 25 ++------ src/codegen.zig | 16 +++++ test/behavior/error.zig | 2 + 4 files changed, 95 insertions(+), 62 deletions(-) diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index f4f2b1e5e5..5f358efb09 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -3,6 +3,7 @@ const builtin = @import("builtin"); const mem = std.mem; const math = std.math; const assert = std.debug.assert; +const codegen = @import("../../codegen.zig"); const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); @@ -22,12 +23,14 @@ const leb128 = std.leb; const log = std.log.scoped(.codegen); const build_options = @import("build_options"); -const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; -const FnResult = @import("../../codegen.zig").FnResult; -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const GenerateSymbolError = codegen.GenerateSymbolError; +const FnResult = codegen.FnResult; +const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrOffset = codegen.errUnionErrOffset; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const Register = bits.Register; @@ -3272,7 +3275,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
fn ret(self: *Self, mcv: MCValue) !void { const ret_ty = self.fn_type.fnReturnType(); - try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); + switch (self.ret_mcv) { + .immediate => { + assert(ret_ty.isError()); + }, + else => { + try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); + }, + } // Just add space for an instruction, patch this later const index = try self.addInst(.{ .tag = .nop, @@ -3601,30 +3611,39 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!error_type.hasRuntimeBits()) { + if (error_type.errorSetCardinality() == .zero) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasRuntimeBits()) { - if (error_type.abiSize(self.target.*) <= 8) { - const reg_mcv: MCValue = switch (operand) { - .register => operand, - else => .{ .register = try self.copyToTmpRegister(error_type, operand) }, - }; + } + const err_off = errUnionErrOffset(ty, self.target.*); + switch (operand) { + .stack_offset => |off| { + const offset = off - @intCast(u32, err_off); + const tmp_reg = try self.copyToTmpRegister(Type.anyerror, .{ .stack_offset = offset }); _ = try self.addInst(.{ .tag = .cmp_immediate, .data = .{ .r_imm12_sh = .{ - .rn = reg_mcv.register, + .rn = tmp_reg, .imm12 = 0, } }, }); - - return MCValue{ .compare_flags_unsigned = .gt }; - } else { - return self.fail("TODO isErr for errors with size > 8", .{}); - } - } else { - return self.fail("TODO isErr for non-empty payloads", .{}); + }, + .register => |reg| { + if (err_off > 0 or payload_type.hasRuntimeBitsIgnoreComptime()) { + return self.fail("TODO implement isErr for register operand with payload bits", .{}); + } + _ = try self.addInst(.{ + .tag = .cmp_immediate, + .data = .{ .r_imm12_sh = .{ + .rn = reg, + .imm12 = 0, + } }, + }); + }, + else => return self.fail("TODO implement isErr for {}", .{operand}), } + + return MCValue{ .compare_flags_unsigned = .gt }; } fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue { @@ -4483,7 +4502,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { + if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -4491,7 +4510,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. 
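 // (Same reasoning as in the x86_64 backend above: zero-bit error values keep
 // a tracked MCValue rather than collapsing to `.none`.)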
const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -4674,32 +4693,38 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } }, .ErrorSet => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; + switch (typed_value.val.tag()) { + .@"error" => { + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return MCValue{ .immediate = 0 }; + }, + } }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(); const payload_type = typed_value.ty.errorUnionPayload(); - if (typed_value.val.castTag(.eu_payload)) |pl| { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return MCValue{ .immediate = 0 }; - } - - _ = pl; - return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty.fmtDebug()}); - } else { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); - } - - return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty.fmtDebug()}); + if (error_type.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return self.genTypedValue(.{ .ty = payload_type, .val = payload_val }); } + + const is_pl = typed_value.val.errorUnionIsPayload(); + + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + return self.genTypedValue(.{ .ty = error_type, .val = err_val }); + } + + return self.lowerUnnamedConst(typed_value); }, .Struct => { return self.lowerUnnamedConst(typed_value); @@ -4796,13 +4821,16 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, .Unspecified, .C => { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - if (ret_ty_size <= 8) { + if (ret_ty_size == 0) { + assert(ret_ty.isError()); + result.return_value = .{ .immediate = 0 }; + } else if (ret_ty_size <= 8) { result.return_value = .{ .register = registerAlias(c_abi_int_return_regs[0], ret_ty_size) }; } else { return self.fail("TODO support more return types for ARM backend", .{}); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dc2f55f6ef..ba550f6d82 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2,6 +2,7 @@ const std = @import("std"); const build_options = @import("build_options"); const builtin = @import("builtin"); const assert = std.debug.assert; +const codegen = @import("../../codegen.zig"); const leb128 = std.leb; const link = @import("../../link.zig"); const log = std.log.scoped(.codegen); @@ -12,11 +13,11 @@ const trace = @import("../../tracy.zig").trace; const Air = @import("../../Air.zig"); const Allocator = mem.Allocator; const Compilation = @import("../../Compilation.zig"); -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const DebugInfoOutput = codegen.DebugInfoOutput; const DW = std.dwarf; const ErrorMsg = Module.ErrorMsg; -const FnResult = @import("../../codegen.zig").FnResult; -const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; +const FnResult = codegen.FnResult; +const GenerateSymbolError = codegen.GenerateSymbolError; const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Mir = @import("Mir.zig"); @@ -28,6 +29,8 @@ const Value = @import("../../value.zig").Value; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrOffset = codegen.errUnionErrOffset; const callee_preserved_regs = abi.callee_preserved_regs; const caller_preserved_regs = abi.caller_preserved_regs; @@ -7183,19 +7186,3 @@ fn intrinsicsAllowed(target: Target, ty: Type) bool { fn hasAvxSupport(target: Target) bool { return Target.x86.featureSetHasAny(target.cpu.features, .{ .avx, .avx2 }); } - -fn errUnionPayloadOffset(ty: Type, target: std.Target) u64 { - const payload_ty = ty.errorUnionPayload(); - return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) - Type.anyerror.abiSize(target) - else - 0; -} - -fn errUnionErrOffset(ty: Type, target: std.Target) u64 { - const payload_ty = ty.errorUnionPayload(); - return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) - 0 - else - payload_ty.abiSize(target); -} diff --git a/src/codegen.zig b/src/codegen.zig index 4f400fa7fc..86f2613b5f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -890,3 +890,19 @@ fn lowerDeclRef( return Result{ .appended = {} }; } + +pub fn errUnionPayloadOffset(ty: Type, target: std.Target) u64 { + 
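+ // The error code and the payload are ordered by alignment: whichever field
+ // has the larger alignment goes first (the error code wins ties).
+ // Illustrative offsets on a 64-bit target, where anyerror is a u16
+ // (size 2, align 2):
+ //   anyerror!u64 -> payload at offset 0, error code at offset 8
+ //   anyerror!u8  -> error code at offset 0, payload at offset 2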
const payload_ty = ty.errorUnionPayload(); + return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) + Type.anyerror.abiSize(target) + else + 0; +} + +pub fn errUnionErrOffset(ty: Type, target: std.Target) u64 { + const payload_ty = ty.errorUnionPayload(); + return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) + 0 + else + payload_ty.abiSize(target); +} diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 18cfb03457..230c2540dc 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -440,6 +440,8 @@ test "return function call to error set from error union function" { } test "optional error set is the same size as error set" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + comptime try expect(@sizeOf(?anyerror) == @sizeOf(anyerror)); comptime try expect(@alignOf(?anyerror) == @alignOf(anyerror)); const S = struct { From 26376c9fda910e28a686d3f772dbda4319abc16d Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 24 May 2022 19:48:51 +0200 Subject: [PATCH 13/18] wasm: use errUnionPayloadOffset and errUnionErrOffset from codegen.zig --- src/arch/wasm/CodeGen.zig | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 6d0f3a9d23..cfa2c8bb4e 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -22,6 +22,8 @@ const Liveness = @import("../../Liveness.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrOffset = codegen.errUnionErrOffset; /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -2931,7 +2933,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W try self.emitWValue(operand); if (pl_ty.hasRuntimeBitsIgnoreComptime()) { try self.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + errUnionErrorOffset(pl_ty, self.target), + .offset = operand.offset() + @intCast(u32, errUnionErrOffset(pl_ty, self.target)), .alignment = Type.anyerror.abiAlignment(self.target), }); } @@ -2959,7 +2961,7 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) if (!payload_ty.hasRuntimeBitsIgnoreComptime()) return WValue{ .none = {} }; - const pl_offset = errUnionPayloadOffset(payload_ty, self.target); + const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target)); if (op_is_ptr or isByRef(payload_ty, self.target)) { return self.buildPointerOffset(operand, pl_offset, .new); } @@ -2983,7 +2985,7 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In return operand; } - return self.load(operand, Type.anyerror, errUnionErrorOffset(payload_ty, self.target)); + return self.load(operand, Type.anyerror, @intCast(u32, errUnionErrOffset(payload_ty, self.target))); } fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -3003,13 +3005,13 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { } const err_union = try self.allocStack(err_ty); - const payload_ptr = try self.buildPointerOffset(err_union, errUnionPayloadOffset(pl_ty, self.target), .new); + const payload_ptr = try self.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, self.target)), .new); try self.store(payload_ptr, operand, pl_ty, 0); // ensure we also write 
'0' to the error part, so any present stack value gets overwritten by it. try self.emitWValue(err_union); try self.addImm32(0); - const err_val_offset = errUnionErrorOffset(pl_ty, self.target); + const err_val_offset = @intCast(u32, errUnionErrOffset(pl_ty, self.target)); try self.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); return err_union; @@ -3029,10 +3031,10 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const err_union = try self.allocStack(err_ty); // store error value - try self.store(err_union, operand, Type.anyerror, errUnionErrorOffset(pl_ty, self.target)); + try self.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrOffset(pl_ty, self.target))); // write 'undefined' to the payload - const payload_ptr = try self.buildPointerOffset(err_union, errUnionPayloadOffset(pl_ty, self.target), .new); + const payload_ptr = try self.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, self.target)), .new); const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(self.target)); try self.memset(payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaaaaaaaa }); @@ -3984,7 +3986,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue operand, .{ .imm32 = 0 }, Type.anyerror, - errUnionErrorOffset(payload_ty, self.target), + @intCast(u32, errUnionErrOffset(payload_ty, self.target)), ); if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; @@ -3993,7 +3995,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue return operand; } - return self.buildPointerOffset(operand, errUnionPayloadOffset(payload_ty, self.target), .new); + return self.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, self.target)), .new); } fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -4621,17 +4623,3 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !WValue { } }); return WValue{ .none = {} }; } - -fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u32 { - if (Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)) { - return @intCast(u32, Type.anyerror.abiSize(target)); - } - return 0; -} - -fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u32 { - if (Type.anyerror.abiAlignment(target) > payload_ty.abiAlignment(target)) { - return 0; - } - return @intCast(u32, payload_ty.abiSize(target)); -} From c847a462ae11e0d483ad877b3ecc9ec291c29bb3 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Tue, 24 May 2022 20:47:45 +0200 Subject: [PATCH 14/18] stage2 ARM: update to new union layout --- src/arch/arm/CodeGen.zig | 101 +++++++++++++++++++++++++-------------- test/behavior/error.zig | 1 + 2 files changed, 65 insertions(+), 37 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 75fe8f6403..3d69e4022b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -3,6 +3,7 @@ const builtin = @import("builtin"); const mem = std.mem; const math = std.math; const assert = std.debug.assert; +const codegen = @import("../../codegen.zig"); const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); @@ -22,12 +23,14 @@ const leb128 = std.leb; const log = std.log.scoped(.codegen); const build_options = @import("build_options"); -const FnResult = @import("../../codegen.zig").FnResult; -const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; -const 
DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const FnResult = codegen.FnResult; +const GenerateSymbolError = codegen.GenerateSymbolError; +const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const errUnionPayloadOffset = codegen.errUnionPayloadOffset; +const errUnionErrOffset = codegen.errUnionErrOffset; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const Register = bits.Register; @@ -1763,19 +1766,26 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { /// Given an error union, returns the error fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { + const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) return error_union_mcv; + if (err_ty.errorSetCardinality() == .zero) { + return MCValue{ .immediate = 0 }; + } + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return error_union_mcv; + } + const err_offset = @intCast(u32, errUnionErrOffset(error_union_ty, self.target.*)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionErr for registers", .{}), .stack_argument_offset => |off| { - return MCValue{ .stack_argument_offset = off }; + return MCValue{ .stack_argument_offset = off - err_offset }; }, .stack_offset => |off| { - return MCValue{ .stack_offset = off }; + return MCValue{ .stack_offset = off - err_offset }; }, .memory => |addr| { - return MCValue{ .memory = addr }; + return MCValue{ .memory = addr + err_offset }; }, else => unreachable, // invalid MCValue for an error union } @@ -1793,24 +1803,26 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { /// Given an error union, returns the payload fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { + const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasRuntimeBits()) return MCValue.none; - - const error_ty = error_union_ty.errorUnionSet(); - const error_size = @intCast(u32, error_ty.abiSize(self.target.*)); - const eu_align = @intCast(u32, error_union_ty.abiAlignment(self.target.*)); - const offset = std.mem.alignForwardGeneric(u32, error_size, eu_align); + if (err_ty.errorSetCardinality() == .zero) { + return error_union_mcv; + } + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return MCValue.none; + } + const payload_offset = @intCast(u32, errUnionPayloadOffset(error_union_ty, self.target.*)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), .stack_argument_offset => |off| { - return MCValue{ .stack_argument_offset = off - offset }; + return MCValue{ .stack_argument_offset = off - payload_offset }; }, .stack_offset => |off| { - return MCValue{ .stack_offset = off - offset }; + return MCValue{ .stack_offset = off - payload_offset }; }, .memory => |addr| { - return MCValue{ .memory = addr - offset }; + return MCValue{ .memory = addr + payload_offset }; }, else => unreachable, // invalid MCValue for an error union } @@ -3478,6 +3490,9 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void { switch (self.ret_mcv) { .none => {}, + .immediate => { + assert(ret_ty.isError()); + }, .register => |reg| { // Return result by value try self.genSetReg(ret_ty, reg, operand); @@ -3867,7 +3882,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const error_type = 
ty.errorUnionSet(); const error_int_type = Type.initTag(.u16); - if (!error_type.hasRuntimeBits()) { + if (error_type.errorSetCardinality() == .zero) { return MCValue{ .immediate = 0 }; // always false } @@ -4975,7 +4990,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasRuntimeBits()) { + if (!tv.ty.hasRuntimeBitsIgnoreComptime() and !tv.ty.isError()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -4983,7 +4998,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasRuntimeBits()) + if (!inst_ty.hasRuntimeBitsIgnoreComptime() and !inst_ty.isError()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -5147,26 +5162,35 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } }, .ErrorSet => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; + switch (typed_value.val.tag()) { + .@"error" => { + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; + }, + else => { + // In this case we are rendering an error union which has a 0 bits payload. + return MCValue{ .immediate = 0 }; + }, + } }, .ErrorUnion => { const error_type = typed_value.ty.errorUnionSet(); const payload_type = typed_value.ty.errorUnionPayload(); - if (typed_value.val.castTag(.eu_payload)) |_| { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return MCValue{ .immediate = 0 }; - } - } else { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); - } + if (error_type.errorSetCardinality() == .zero) { + const payload_val = typed_value.val.castTag(.eu_payload).?.data; + return self.genTypedValue(.{ .ty = payload_type, .val = payload_val }); + } + + const is_pl = typed_value.val.errorUnionIsPayload(); + + if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + // We use the error type directly as the type. 
+ const err_val = if (!is_pl) typed_value.val else Value.initTag(.zero); + return self.genTypedValue(.{ .ty = error_type, .val = err_val }); } }, @@ -5231,7 +5255,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime()) { result.return_value = .{ .none = {} }; } else { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); @@ -5278,11 +5302,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { .Unspecified => { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { + } else if (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError()) { result.return_value = .{ .none = {} }; } else { const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - if (ret_ty_size <= 4) { + if (ret_ty_size == 0) { + assert(ret_ty.isError()); + result.return_value = .{ .immediate = 0 }; + } else if (ret_ty_size <= 4) { result.return_value = .{ .register = .r0 }; } else { // The result is returned by reference, not by diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 230c2540dc..83a9384d71 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -441,6 +441,7 @@ test "return function call to error set from error union function" { test "optional error set is the same size as error set" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO comptime try expect(@sizeOf(?anyerror) == @sizeOf(anyerror)); comptime try expect(@alignOf(?anyerror) == @alignOf(anyerror)); From c711c788f0a840f45d0d7423efe2f946b47caafb Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 24 May 2022 15:10:18 -0700 Subject: [PATCH 15/18] stage2: fixes for error unions, optionals, errors * `?E` where E is an error set with only one field now lowers the same as `bool`. * Fix implementation of errUnionErrOffset and errUnionPayloadOffset to properly compute the offset of each field. Also name them the same as the corresponding LLVM functions and have the same function signature, to avoid confusion. This fixes a bug where wasm was passing the error union type instead of the payload type. * Fix C backend handling of optionals with zero-bit payload types. * C backend: separate out airOptionalPayload and airOptionalPayloadPtr which reduces branching and cleans up control flow. * Make Type.isNoReturn return true for error sets with no fields. * Make `?error{}` have only one possible value (null). 
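
To make the corrected offset rule concrete, here is a standalone
sketch (not part of this diff; payloadOffset/errorOffset are local
names) that mirrors the new errUnionPayloadOffset/errUnionErrorOffset
logic from codegen.zig using plain integers instead of Type, with a
worked example for a target where anyerror lowers to a u16 (size 2,
align 2):

    const std = @import("std");

    // The field with the greater alignment goes first; the other
    // field is placed after it, rounded up to its own alignment.
    fn payloadOffset(payload_align: u64, err_size: u64, err_align: u64) u64 {
        return if (payload_align >= err_align)
            0
        else
            std.mem.alignForwardGeneric(u64, err_size, payload_align);
    }

    fn errorOffset(payload_size: u64, payload_align: u64, err_align: u64) u64 {
        return if (payload_align >= err_align)
            std.mem.alignForwardGeneric(u64, payload_size, err_align)
        else
            0;
    }

    test "anyerror!u64: payload first, error code at offset 8" {
        try std.testing.expectEqual(@as(u64, 0), payloadOffset(8, 2, 2));
        try std.testing.expectEqual(@as(u64, 8), errorOffset(8, 8, 2));
    }

    test "anyerror!u8: error code first, payload at offset 2" {
        try std.testing.expectEqual(@as(u64, 2), payloadOffset(1, 2, 2));
        try std.testing.expectEqual(@as(u64, 0), errorOffset(1, 1, 2));
    }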
--- src/Sema.zig | 11 ++++- src/arch/aarch64/CodeGen.zig | 4 +- src/arch/arm/CodeGen.zig | 6 +-- src/arch/wasm/CodeGen.zig | 18 ++++---- src/arch/x86_64/CodeGen.zig | 16 +++---- src/codegen.zig | 28 +++++++----- src/codegen/c.zig | 88 +++++++++++++++++++++++------------- src/type.zig | 64 ++++++++++++++++++++++---- test/behavior/error.zig | 34 +++++++++++++- 9 files changed, 192 insertions(+), 77 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index b718912a38..b0c3c17483 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -23316,7 +23316,6 @@ pub fn typeHasOnePossibleValue( .const_slice, .mut_slice, .anyopaque, - .optional, .optional_single_mut_pointer, .optional_single_const_pointer, .enum_literal, @@ -23351,6 +23350,16 @@ pub fn typeHasOnePossibleValue( .bound_fn, => return null, + .optional => { + var buf: Type.Payload.ElemType = undefined; + const child_ty = ty.optionalChild(&buf); + if (child_ty.isNoReturn()) { + return Value.@"null"; + } else { + return null; + } + }, + .error_set_single => { const name = ty.castTag(.error_set_single).?.data; return try Value.Tag.@"error".create(sema.arena, .{ .name = name }); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 5f358efb09..2a71f3138a 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -30,7 +30,7 @@ const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); const errUnionPayloadOffset = codegen.errUnionPayloadOffset; -const errUnionErrOffset = codegen.errUnionErrOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const Register = bits.Register; @@ -3615,7 +3615,7 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { return MCValue{ .immediate = 0 }; // always false } - const err_off = errUnionErrOffset(ty, self.target.*); + const err_off = errUnionErrorOffset(payload_type, self.target.*); switch (operand) { .stack_offset => |off| { const offset = off - @intCast(u32, err_off); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 3d69e4022b..b7682a5b9a 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -30,7 +30,7 @@ const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); const errUnionPayloadOffset = codegen.errUnionPayloadOffset; -const errUnionErrOffset = codegen.errUnionErrOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; const RegisterManager = abi.RegisterManager; const RegisterLock = RegisterManager.RegisterLock; const Register = bits.Register; @@ -1775,7 +1775,7 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV return error_union_mcv; } - const err_offset = @intCast(u32, errUnionErrOffset(error_union_ty, self.target.*)); + const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionErr for registers", .{}), .stack_argument_offset => |off| { @@ -1812,7 +1812,7 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) return MCValue.none; } - const payload_offset = @intCast(u32, errUnionPayloadOffset(error_union_ty, self.target.*)); + const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); switch (error_union_mcv) { .register => return self.fail("TODO errUnionPayload for registers", .{}), 
.stack_argument_offset => |off| { diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index cfa2c8bb4e..1eddb7441b 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -23,7 +23,7 @@ const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const abi = @import("abi.zig"); const errUnionPayloadOffset = codegen.errUnionPayloadOffset; -const errUnionErrOffset = codegen.errUnionErrOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -2919,10 +2919,10 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); - const err_ty = self.air.typeOf(un_op); - const pl_ty = err_ty.errorUnionPayload(); + const err_union_ty = self.air.typeOf(un_op); + const pl_ty = err_union_ty.errorUnionPayload(); - if (err_ty.errorUnionSet().errorSetCardinality() == .zero) { + if (err_union_ty.errorUnionSet().errorSetCardinality() == .zero) { switch (opcode) { .i32_ne => return WValue{ .imm32 = 0 }, .i32_eq => return WValue{ .imm32 = 1 }, @@ -2933,7 +2933,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W try self.emitWValue(operand); if (pl_ty.hasRuntimeBitsIgnoreComptime()) { try self.addMemArg(.i32_load16_u, .{ - .offset = operand.offset() + @intCast(u32, errUnionErrOffset(pl_ty, self.target)), + .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, self.target)), .alignment = Type.anyerror.abiAlignment(self.target), }); } @@ -2985,7 +2985,7 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In return operand; } - return self.load(operand, Type.anyerror, @intCast(u32, errUnionErrOffset(payload_ty, self.target))); + return self.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, self.target))); } fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { @@ -3011,7 +3011,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { // ensure we also write '0' to the error part, so any present stack value gets overwritten by it. 
try self.emitWValue(err_union); try self.addImm32(0); - const err_val_offset = @intCast(u32, errUnionErrOffset(pl_ty, self.target)); + const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, self.target)); try self.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 }); return err_union; @@ -3031,7 +3031,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const err_union = try self.allocStack(err_ty); // store error value - try self.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrOffset(pl_ty, self.target))); + try self.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, self.target))); // write 'undefined' to the payload const payload_ptr = try self.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, self.target)), .new); @@ -3986,7 +3986,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue operand, .{ .imm32 = 0 }, Type.anyerror, - @intCast(u32, errUnionErrOffset(payload_ty, self.target)), + @intCast(u32, errUnionErrorOffset(payload_ty, self.target)), ); if (self.liveness.isUnused(inst)) return WValue{ .none = {} }; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ba550f6d82..5c69f78724 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -30,7 +30,7 @@ const Value = @import("../../value.zig").Value; const bits = @import("bits.zig"); const abi = @import("abi.zig"); const errUnionPayloadOffset = codegen.errUnionPayloadOffset; -const errUnionErrOffset = codegen.errUnionErrOffset; +const errUnionErrorOffset = codegen.errUnionErrorOffset; const callee_preserved_regs = abi.callee_preserved_regs; const caller_preserved_regs = abi.caller_preserved_regs; @@ -1799,7 +1799,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { break :result operand; } - const err_off = errUnionErrOffset(err_union_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, self.target.*); switch (operand) { .stack_offset => |off| { const offset = off - @intCast(i32, err_off); @@ -1844,7 +1844,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue.none; } - const payload_off = errUnionPayloadOffset(err_union_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); switch (operand) { .stack_offset => |off| { const offset = off - @intCast(i32, payload_off); @@ -1978,8 +1978,8 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align)); - const payload_off = errUnionPayloadOffset(error_union_ty, self.target.*); - const err_off = errUnionErrOffset(error_union_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, self.target.*); try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), operand, .{}); try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), .{ .immediate = 0 }, .{}); @@ -2007,8 +2007,8 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); const stack_offset = @intCast(i32, try self.allocMem(inst, 
abi_size, abi_align)); - const payload_off = errUnionPayloadOffset(error_union_ty, self.target.*); - const err_off = errUnionErrOffset(error_union_ty, self.target.*); + const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); + const err_off = errUnionErrorOffset(payload_ty, self.target.*); try self.genSetStack(Type.anyerror, stack_offset - @intCast(i32, err_off), operand, .{}); try self.genSetStack(payload_ty, stack_offset - @intCast(i32, payload_off), .undef, .{}); @@ -4670,7 +4670,7 @@ fn isErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue try self.spillCompareFlagsIfOccupied(); self.compare_flags_inst = inst; - const err_off = errUnionErrOffset(ty, self.target.*); + const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*); switch (operand) { .stack_offset => |off| { const offset = off - @intCast(i32, err_off); diff --git a/src/codegen.zig b/src/codegen.zig index 86f2613b5f..fbe462959e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -891,18 +891,22 @@ fn lowerDeclRef( return Result{ .appended = {} }; } -pub fn errUnionPayloadOffset(ty: Type, target: std.Target) u64 { - const payload_ty = ty.errorUnionPayload(); - return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) - Type.anyerror.abiSize(target) - else - 0; +pub fn errUnionPayloadOffset(payload_ty: Type, target: std.Target) u64 { + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); + if (payload_align >= error_align) { + return 0; + } else { + return mem.alignForwardGeneric(u64, Type.anyerror.abiSize(target), payload_align); + } } -pub fn errUnionErrOffset(ty: Type, target: std.Target) u64 { - const payload_ty = ty.errorUnionPayload(); - return if (Type.anyerror.abiAlignment(target) >= payload_ty.abiAlignment(target)) - 0 - else - payload_ty.abiSize(target); +pub fn errUnionErrorOffset(payload_ty: Type, target: std.Target) u64 { + const payload_align = payload_ty.abiAlignment(target); + const error_align = Type.anyerror.abiAlignment(target); + if (payload_align >= error_align) { + return mem.alignForwardGeneric(u64, payload_ty.abiSize(target), error_align); + } else { + return 0; + } } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1b6708c1cf..1e45090648 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -711,21 +711,24 @@ pub const DeclGen = struct { .Bool => return writer.print("{}", .{val.toBool()}), .Optional => { var opt_buf: Type.Payload.ElemType = undefined; - const payload_type = ty.optionalChild(&opt_buf); - if (ty.optionalReprIsPayload()) { - return dg.renderValue(writer, payload_type, val, location); - } - if (payload_type.abiSize(target) == 0) { + const payload_ty = ty.optionalChild(&opt_buf); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { const is_null = val.castTag(.opt_payload) == null; return writer.print("{}", .{is_null}); } + + if (ty.optionalReprIsPayload()) { + return dg.renderValue(writer, payload_ty, val, location); + } + try writer.writeByte('('); try dg.renderTypecast(writer, ty); try writer.writeAll("){"); if (val.castTag(.opt_payload)) |pl| { const payload_val = pl.data; try writer.writeAll(" .is_null = false, .payload = "); - try dg.renderValue(writer, payload_type, payload_val, location); + try dg.renderValue(writer, payload_ty, payload_val, location); try writer.writeAll(" }"); } else { try writer.writeAll(" .is_null = true }"); @@ -1360,12 +1363,12 @@ pub const DeclGen = struct { var opt_buf: Type.Payload.ElemType = undefined; 
const child_type = t.optionalChild(&opt_buf); - if (t.optionalReprIsPayload()) { - return dg.renderType(w, child_type); + if (!child_type.hasRuntimeBitsIgnoreComptime()) { + return w.writeAll("bool"); } - if (child_type.abiSize(target) == 0) { - return w.writeAll("bool"); + if (t.optionalReprIsPayload()) { + return dg.renderType(w, child_type); } const name = dg.getTypedefName(t) orelse @@ -1816,8 +1819,9 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .not => try airNot (f, inst), .optional_payload => try airOptionalPayload(f, inst), - .optional_payload_ptr => try airOptionalPayload(f, inst), + .optional_payload_ptr => try airOptionalPayloadPtr(f, inst), .optional_payload_ptr_set => try airOptionalPayloadPtrSet(f, inst), + .wrap_optional => try airWrapOptional(f, inst), .is_err => try airIsErr(f, inst, false, "!="), .is_non_err => try airIsErr(f, inst, false, "=="), @@ -1846,7 +1850,6 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .cond_br => try airCondBr(f, inst), .br => try airBr(f, inst), .switch_br => try airSwitchBr(f, inst), - .wrap_optional => try airWrapOptional(f, inst), .struct_field_ptr => try airStructFieldPtr(f, inst), .array_to_slice => try airArrayToSlice(f, inst), .cmpxchg_weak => try airCmpxchg(f, inst, "weak"), @@ -3145,7 +3148,6 @@ fn airIsNull( const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); const operand = try f.resolveInst(un_op); - const target = f.object.dg.module.getTarget(); const local = try f.allocLocal(Type.initTag(.bool), .Const); try writer.writeAll(" = ("); @@ -3153,18 +3155,18 @@ fn airIsNull( const ty = f.air.typeOf(un_op); var opt_buf: Type.Payload.ElemType = undefined; - const payload_type = if (ty.zigTypeTag() == .Pointer) + const payload_ty = if (ty.zigTypeTag() == .Pointer) ty.childType().optionalChild(&opt_buf) else ty.optionalChild(&opt_buf); - if (ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + try writer.print("){s} {s} true;\n", .{ deref_suffix, operator }); + } else if (ty.isPtrLikeOptional()) { // operand is a regular pointer, test `operand !=/== NULL` try writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator }); - } else if (payload_type.zigTypeTag() == .ErrorSet) { + } else if (payload_ty.zigTypeTag() == .ErrorSet) { try writer.print("){s} {s} 0;\n", .{ deref_suffix, operator }); - } else if (payload_type.abiSize(target) == 0) { - try writer.print("){s} {s} true;\n", .{ deref_suffix, operator }); } else { try writer.print("){s}.is_null {s} true;\n", .{ deref_suffix, operator }); } @@ -3172,18 +3174,46 @@ fn airIsNull( } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { - if (f.liveness.isUnused(inst)) - return CValue.none; + if (f.liveness.isUnused(inst)) return CValue.none; const ty_op = f.air.instructions.items(.data)[inst].ty_op; const writer = f.object.writer(); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.air.typeOf(ty_op.operand); + const opt_ty = f.air.typeOf(ty_op.operand); - const opt_ty = if (operand_ty.zigTypeTag() == .Pointer) - operand_ty.elemType() - else - operand_ty; + var buf: Type.Payload.ElemType = undefined; + const payload_ty = opt_ty.optionalChild(&buf); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return CValue.none; + } + + if (opt_ty.optionalReprIsPayload()) { + return operand; + } + + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); + try writer.writeAll(" = ("); + 
try f.writeCValue(writer, operand); + try writer.writeAll(").payload;\n"); + return local; +} + +fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; + + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + const ptr_ty = f.air.typeOf(ty_op.operand); + const opt_ty = ptr_ty.childType(); + var buf: Type.Payload.ElemType = undefined; + const payload_ty = opt_ty.optionalChild(&buf); + + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { + return operand; + } if (opt_ty.optionalReprIsPayload()) { // the operand is just a regular pointer, no need to do anything special. @@ -3192,14 +3222,10 @@ fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { } const inst_ty = f.air.typeOfIndex(inst); - const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; - const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else ""; - const local = try f.allocLocal(inst_ty, .Const); - try writer.print(" = {s}(", .{maybe_addrof}); + try writer.writeAll(" = &("); try f.writeCValue(writer, operand); - - try writer.print("){s}payload;\n", .{maybe_deref}); + try writer.writeAll(")->payload;\n"); return local; } diff --git a/src/type.zig b/src/type.zig index 1c59cf9e59..4325d6d772 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2375,7 +2375,6 @@ pub const Type = extern union { // These types have more than one possible value, so the result is the same as // asking whether they are comptime-only types. .anyframe_T, - .optional, .optional_single_mut_pointer, .optional_single_const_pointer, .single_const_pointer, @@ -2397,6 +2396,22 @@ pub const Type = extern union { } }, + .optional => { + var buf: Payload.ElemType = undefined; + const child_ty = ty.optionalChild(&buf); + if (child_ty.isNoReturn()) { + // Then the optional is comptime-known to be null. + return false; + } + if (ignore_comptime_only) { + return true; + } else if (sema_kit) |sk| { + return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, child_ty)); + } else { + return !comptimeOnly(child_ty); + } + }, + .error_union => { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. @@ -2665,13 +2680,22 @@ pub const Type = extern union { }; } - pub fn isNoReturn(self: Type) bool { - const definitely_correct_result = - self.tag_if_small_enough != .bound_fn and - self.zigTypeTag() == .NoReturn; - const fast_result = self.tag_if_small_enough == Tag.noreturn; - assert(fast_result == definitely_correct_result); - return fast_result; + /// TODO add enums with no fields here + pub fn isNoReturn(ty: Type) bool { + switch (ty.tag()) { + .noreturn => return true, + .error_set => { + const err_set_obj = ty.castTag(.error_set).?.data; + const names = err_set_obj.names.keys(); + return names.len == 0; + }, + .error_set_merged => { + const name_map = ty.castTag(.error_set_merged).?.data; + const names = name_map.keys(); + return names.len == 0; + }, + else => return false, + } } /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit. @@ -2918,7 +2942,13 @@ pub const Type = extern union { switch (child_type.zigTypeTag()) { .Pointer => return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) }, - .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, target, strat), + .ErrorSet => switch (child_type.errorSetCardinality()) { + // `?error{}` is comptime-known to be null. 
+ .zero => return AbiAlignmentAdvanced{ .scalar = 0 }, + .one => return AbiAlignmentAdvanced{ .scalar = 1 }, + .many => return abiAlignmentAdvanced(Type.anyerror, target, strat), + }, + .NoReturn => return AbiAlignmentAdvanced{ .scalar = 0 }, else => {}, } @@ -3365,6 +3395,11 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); + + if (child_type.isNoReturn()) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + if (!child_type.hasRuntimeBits()) return AbiSizeAdvanced{ .scalar = 1 }; switch (child_type.zigTypeTag()) { @@ -4804,7 +4839,6 @@ pub const Type = extern union { .const_slice, .mut_slice, .anyopaque, - .optional, .optional_single_mut_pointer, .optional_single_const_pointer, .enum_literal, @@ -4839,6 +4873,16 @@ pub const Type = extern union { .bound_fn, => return null, + .optional => { + var buf: Payload.ElemType = undefined; + const child_ty = ty.optionalChild(&buf); + if (child_ty.isNoReturn()) { + return Value.@"null"; + } else { + return null; + } + }, + .error_set_single => return Value.initTag(.the_only_possible_value), .error_set => { const err_set_obj = ty.castTag(.error_set).?.data; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 83a9384d71..4f316aeab2 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -121,7 +121,7 @@ test "debug info for optional error set" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - const SomeError = error{Hello}; + const SomeError = error{ Hello, Hello2 }; var a_local_variable: ?SomeError = null; _ = a_local_variable; } @@ -454,6 +454,38 @@ test "optional error set is the same size as error set" { comptime try expect(S.returnsOptErrSet() == null); } +test "optional error set with only one error is the same size as bool" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + const E = error{only}; + comptime try expect(@sizeOf(?E) == @sizeOf(bool)); + comptime try expect(@alignOf(?E) == @alignOf(bool)); + const S = struct { + fn gimmeNull() ?E { + return null; + } + fn gimmeErr() ?E { + return error.only; + } + }; + try expect(S.gimmeNull() == null); + try expect(error.only == S.gimmeErr().?); + comptime try expect(S.gimmeNull() == null); + comptime try expect(error.only == S.gimmeErr().?); +} + +test "optional empty error set" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + + const T = ?error{}; + var t: T = undefined; + if (t != null) { + @compileError("test failed"); + } +} + test "nested catch" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO From 1f16b07d6fe43f96287b6cca8e8b58996199481f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 24 May 2022 17:53:04 -0700 Subject: [PATCH 16/18] stage2: treat `error{}!void` as a zero-bit type --- src/Sema.zig | 24 ++++++- src/codegen/llvm.zig | 151 +++++++++++++++++++++------------------- src/type.zig | 65 +++++++++++++---- test/behavior/error.zig | 33 ++++++++- 4 files changed, 183 insertions(+), 90 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index b0c3c17483..e625539286 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -23320,7 +23320,6 @@ pub fn typeHasOnePossibleValue( 
.optional_single_const_pointer, .enum_literal, .anyerror_void_error_union, - .error_union, .error_set_inferred, .@"opaque", .var_args_param, @@ -23360,6 +23359,29 @@ pub fn typeHasOnePossibleValue( } }, + .error_union => { + const error_ty = ty.errorUnionSet(); + switch (error_ty.errorSetCardinality()) { + .zero => { + const payload_ty = ty.errorUnionPayload(); + if (try typeHasOnePossibleValue(sema, block, src, payload_ty)) |payload_val| { + return try Value.Tag.eu_payload.create(sema.arena, payload_val); + } else { + return null; + } + }, + .one => { + if (ty.errorUnionPayload().isNoReturn()) { + const error_val = (try typeHasOnePossibleValue(sema, block, src, error_ty)).?; + return error_val; + } else { + return null; + } + }, + .many => return null, + } + }, + .error_set_single => { const name = ty.castTag(.error_set_single).?.data; return try Value.Tag.@"error".create(sema.arena, .{ .name = name }); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cf0188b060..ec71297c10 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1470,16 +1470,25 @@ pub const Object = struct { return full_di_ty; }, .ErrorUnion => { - const err_set_ty = ty.errorUnionSet(); const payload_ty = ty.errorUnionPayload(); - if (err_set_ty.errorSetCardinality() == .zero) { - const payload_di_ty = try o.lowerDebugType(payload_ty, .full); - // The recursive call to `lowerDebugType` means we can't use `gop` anymore. - try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(payload_di_ty), .{ .mod = o.module }); - return payload_di_ty; + switch (ty.errorUnionSet().errorSetCardinality()) { + .zero => { + const payload_di_ty = try o.lowerDebugType(payload_ty, .full); + // The recursive call to `lowerDebugType` means we can't use `gop` anymore. + try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(payload_di_ty), .{ .mod = o.module }); + return payload_di_ty; + }, + .one => { + if (payload_ty.isNoReturn()) { + const di_type = dib.createBasicType("void", 0, DW.ATE.signed); + gop.value_ptr.* = AnnotatedDITypePtr.initFull(di_type); + return di_type; + } + }, + .many => {}, } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - const err_set_di_ty = try o.lowerDebugType(err_set_ty, .full); + const err_set_di_ty = try o.lowerDebugType(Type.anyerror, .full); // The recursive call to `lowerDebugType` means we can't use `gop` anymore. 
try o.di_type_map.putContext(gpa, ty, AnnotatedDITypePtr.initFull(err_set_di_ty), .{ .mod = o.module }); return err_set_di_ty; @@ -1502,56 +1511,51 @@ pub const Object = struct { break :blk fwd_decl; }; - const err_set_size = err_set_ty.abiSize(target); - const err_set_align = err_set_ty.abiAlignment(target); + const error_size = Type.anyerror.abiSize(target); + const error_align = Type.anyerror.abiAlignment(target); const payload_size = payload_ty.abiSize(target); const payload_align = payload_ty.abiAlignment(target); - var offset: u64 = 0; - offset += err_set_size; - offset = std.mem.alignForwardGeneric(u64, offset, payload_align); - const payload_offset = offset; - - var len: u8 = 2; - var fields: [3]*llvm.DIType = .{ - dib.createMemberType( - fwd_decl.toScope(), - "tag", - di_file, - line, - err_set_size * 8, // size in bits - err_set_align * 8, // align in bits - 0, // offset in bits - 0, // flags - try o.lowerDebugType(err_set_ty, .full), - ), - dib.createMemberType( - fwd_decl.toScope(), - "value", - di_file, - line, - payload_size * 8, // size in bits - payload_align * 8, // align in bits - payload_offset * 8, // offset in bits - 0, // flags - try o.lowerDebugType(payload_ty, .full), - ), - undefined, - }; - - const error_size = Type.anyerror.abiSize(target); - if (payload_align > error_size) { - fields[2] = fields[1]; - const pad_len = @intCast(u32, payload_align - error_size); - fields[1] = dib.createArrayType( - pad_len * 8, - 8, - try o.lowerDebugType(Type.u8, .full), - @intCast(c_int, pad_len), - ); - len += 1; + var error_index: u32 = undefined; + var payload_index: u32 = undefined; + var error_offset: u64 = undefined; + var payload_offset: u64 = undefined; + if (error_align > payload_align) { + error_index = 0; + payload_index = 1; + error_offset = 0; + payload_offset = std.mem.alignForwardGeneric(u64, error_size, payload_align); + } else { + payload_index = 0; + error_index = 1; + payload_offset = 0; + error_offset = std.mem.alignForwardGeneric(u64, payload_size, error_align); } + var fields: [2]*llvm.DIType = undefined; + fields[error_index] = dib.createMemberType( + fwd_decl.toScope(), + "tag", + di_file, + line, + error_size * 8, // size in bits + error_align * 8, // align in bits + error_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(Type.anyerror, .full), + ); + fields[payload_index] = dib.createMemberType( + fwd_decl.toScope(), + "value", + di_file, + line, + payload_size * 8, // size in bits + payload_align * 8, // align in bits + payload_offset * 8, // offset in bits + 0, // flags + try o.lowerDebugType(payload_ty, .full), + ); + const full_di_ty = dib.createStructType( compile_unit_scope, name.ptr, @@ -1562,7 +1566,7 @@ pub const Object = struct { 0, // flags null, // derived from &fields, - len, + fields.len, 0, // run time lang null, // vtable holder "", // unique id @@ -2455,18 +2459,23 @@ pub const DeclGen = struct { return dg.context.structType(&fields, fields.len, .False); }, .ErrorUnion => { - const error_type = t.errorUnionSet(); - const payload_type = t.errorUnionPayload(); - if (error_type.errorSetCardinality() == .zero) { - return dg.lowerType(payload_type); + const payload_ty = t.errorUnionPayload(); + switch (t.errorUnionSet().errorSetCardinality()) { + .zero => return dg.lowerType(payload_ty), + .one => { + if (payload_ty.isNoReturn()) { + return dg.context.voidType(); + } + }, + .many => {}, } - if (!payload_type.hasRuntimeBitsIgnoreComptime()) { + if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return try 
dg.lowerType(Type.anyerror); } - const llvm_error_type = try dg.lowerType(error_type); - const llvm_payload_type = try dg.lowerType(payload_type); + const llvm_error_type = try dg.lowerType(Type.anyerror); + const llvm_payload_type = try dg.lowerType(payload_ty); - const payload_align = payload_type.abiAlignment(target); + const payload_align = payload_ty.abiAlignment(target); const error_align = Type.anyerror.abiAlignment(target); if (error_align > payload_align) { const fields: [2]*const llvm.Type = .{ llvm_error_type, llvm_payload_type }; @@ -2476,9 +2485,7 @@ pub const DeclGen = struct { return dg.context.structType(&fields, fields.len, .False); } }, - .ErrorSet => { - return dg.context.intType(16); - }, + .ErrorSet => return dg.context.intType(16), .Struct => { const gop = try dg.object.type_map.getOrPutContext(gpa, t, .{ .mod = dg.module }); if (gop.found_existing) return gop.value_ptr.*; @@ -3095,7 +3102,7 @@ pub const DeclGen = struct { return dg.resolveLlvmFunction(fn_decl_index); }, .ErrorSet => { - const llvm_ty = try dg.lowerType(tv.ty); + const llvm_ty = try dg.lowerType(Type.anyerror); switch (tv.val.tag()) { .@"error" => { const err_name = tv.val.castTag(.@"error").?.data.name; @@ -3109,9 +3116,8 @@ pub const DeclGen = struct { } }, .ErrorUnion => { - const error_type = tv.ty.errorUnionSet(); const payload_type = tv.ty.errorUnionPayload(); - if (error_type.errorSetCardinality() == .zero) { + if (tv.ty.errorUnionSet().errorSetCardinality() == .zero) { const payload_val = tv.val.castTag(.eu_payload).?.data; return dg.lowerValue(.{ .ty = payload_type, .val = payload_val }); } @@ -3120,13 +3126,13 @@ pub const DeclGen = struct { if (!payload_type.hasRuntimeBitsIgnoreComptime()) { // We use the error type directly as the type. const err_val = if (!is_pl) tv.val else Value.initTag(.zero); - return dg.lowerValue(.{ .ty = error_type, .val = err_val }); + return dg.lowerValue(.{ .ty = Type.anyerror, .val = err_val }); } const payload_align = payload_type.abiAlignment(target); const error_align = Type.anyerror.abiAlignment(target); const llvm_error_value = try dg.lowerValue(.{ - .ty = error_type, + .ty = Type.anyerror, .val = if (is_pl) Value.initTag(.zero) else tv.val, }); const llvm_payload_value = try dg.lowerValue(.{ @@ -5656,13 +5662,12 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const error_union_ty = self.air.typeOf(ty_op.operand).childType(); - const error_ty = error_union_ty.errorUnionSet(); - if (error_ty.errorSetCardinality() == .zero) { + if (error_union_ty.errorUnionSet().errorSetCardinality() == .zero) { // TODO: write undefined bytes through the pointer here return operand; } const payload_ty = error_union_ty.errorUnionPayload(); - const non_error_val = try self.dg.lowerValue(.{ .ty = error_ty, .val = Value.zero }); + const non_error_val = try self.dg.lowerValue(.{ .ty = Type.anyerror, .val = Value.zero }); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { _ = self.builder.buildStore(non_error_val, operand); return operand; @@ -6715,9 +6720,9 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand = try self.resolveInst(ty_op.operand); const operand_ty = self.air.typeOf(ty_op.operand); const inst_ty = self.air.typeOfIndex(inst); + const operand = try self.resolveInst(ty_op.operand); const operand_is_ref = isByRef(operand_ty); const result_is_ref = isByRef(inst_ty); const llvm_dest_ty = try self.dg.lowerType(inst_ty); diff --git 
a/src/type.zig b/src/type.zig index 4325d6d772..145ae4904a 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2416,14 +2416,18 @@ pub const Type = extern union { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. const data = ty.castTag(.error_union).?.data; - if (data.error_set.errorSetCardinality() == .zero) { - return hasRuntimeBitsAdvanced(data.payload, ignore_comptime_only, sema_kit); - } else if (ignore_comptime_only) { - return true; - } else if (sema_kit) |sk| { - return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, ty)); - } else { - return !comptimeOnly(ty); + switch (data.error_set.errorSetCardinality()) { + .zero => return hasRuntimeBitsAdvanced(data.payload, ignore_comptime_only, sema_kit), + .one => return !data.payload.isNoReturn(), + .many => { + if (ignore_comptime_only) { + return true; + } else if (sema_kit) |sk| { + return !(try sk.sema.typeRequiresComptime(sk.block, sk.src, ty)); + } else { + return !comptimeOnly(ty); + } + }, } }, @@ -2970,8 +2974,14 @@ pub const Type = extern union { // This code needs to be kept in sync with the equivalent switch prong // in abiSizeAdvanced. const data = ty.castTag(.error_union).?.data; - if (data.error_set.errorSetCardinality() == .zero) { - return abiAlignmentAdvanced(data.payload, target, strat); + switch (data.error_set.errorSetCardinality()) { + .zero => return abiAlignmentAdvanced(data.payload, target, strat), + .one => { + if (data.payload.isNoReturn()) { + return AbiAlignmentAdvanced{ .scalar = 0 }; + } + }, + .many => {}, } const code_align = abiAlignment(Type.anyerror, target); switch (strat) { @@ -3440,8 +3450,14 @@ pub const Type = extern union { // 1 bit of data which is whether or not the value is an error. // Zig still uses the error code encoding at runtime, even when only 1 bit // would suffice. This prevents coercions from needing to branch. 
- if (data.error_set.errorSetCardinality() == .zero) { - return abiSizeAdvanced(data.payload, target, strat); + switch (data.error_set.errorSetCardinality()) { + .zero => return abiSizeAdvanced(data.payload, target, strat), + .one => { + if (data.payload.isNoReturn()) { + return AbiSizeAdvanced{ .scalar = 0 }; + } + }, + .many => {}, } const code_size = abiSize(Type.anyerror, target); if (!data.payload.hasRuntimeBits()) { @@ -4843,7 +4859,6 @@ pub const Type = extern union { .optional_single_const_pointer, .enum_literal, .anyerror_void_error_union, - .error_union, .error_set_inferred, .@"opaque", .var_args_param, @@ -4883,6 +4898,30 @@ pub const Type = extern union { } }, + .error_union => { + const error_ty = ty.errorUnionSet(); + switch (error_ty.errorSetCardinality()) { + .zero => { + const payload_ty = ty.errorUnionPayload(); + if (onePossibleValue(payload_ty)) |payload_val| { + _ = payload_val; + return Value.initTag(.the_only_possible_value); + } else { + return null; + } + }, + .one => { + if (ty.errorUnionPayload().isNoReturn()) { + const error_val = onePossibleValue(error_ty).?; + return error_val; + } else { + return null; + } + }, + .many => return null, + } + }, + .error_set_single => return Value.initTag(.the_only_possible_value), .error_set => { const err_set_obj = ty.castTag(.error_set).?.data; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 4f316aeab2..312ab1524a 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -479,13 +479,40 @@ test "optional error set with only one error is the same size as bool" { test "optional empty error set" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; - const T = ?error{}; - var t: T = undefined; - if (t != null) { + comptime try expect(@sizeOf(error{}!void) == @sizeOf(void)); + comptime try expect(@alignOf(error{}!void) == @alignOf(void)); + + var x: ?error{} = undefined; + if (x != null) { @compileError("test failed"); } } +test "empty error set plus zero-bit payload" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + comptime try expect(@sizeOf(error{}!void) == @sizeOf(void)); + comptime try expect(@alignOf(error{}!void) == @alignOf(void)); + + var x: error{}!void = undefined; + if (x) |payload| { + if (payload != {}) { + @compileError("test failed"); + } + } else |_| { + @compileError("test failed"); + } + const S = struct { + fn empty() error{}!void {} + fn inferred() !void { + return empty(); + } + }; + try S.inferred(); +} + test "nested catch" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO From a4ff94804cfcdee49fb9c70812c15ff7d2829ee5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 24 May 2022 18:20:03 -0700 Subject: [PATCH 17/18] Sema: additional check for one-possible-value types in analyzeLoad This is needed because pointers to zero-bit types are not necessarily comptime known, but when doing a load, only the element type having one possible value is relevant. 
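
For example (a minimal illustration in the spirit of the behavior
tests, not part of this commit), the load below can be resolved to a
constant even though the pointer operand is runtime-known, because the
element type `error{}!void` has exactly one possible value:

    test "load through pointer to one-possible-value type" {
        var x: error{}!void = undefined;
        const ptr = &x;
        // `ptr` is not comptime-known, but the loaded type has only
        // one possible value, so analyzeLoad emits a constant.
        try ptr.*;
    }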
--- src/Sema.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index e625539286..cf9b5aa57f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -20950,6 +20950,11 @@ fn analyzeLoad( .Pointer => ptr_ty.childType(), else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty.fmt(sema.mod)}), }; + + if (try sema.typeHasOnePossibleValue(block, src, elem_ty)) |opv| { + return sema.addConstant(elem_ty, opv); + } + if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| { return sema.addConstant(elem_ty, elem_val); From 60af42705d62417c73a13481e60b0861423e77fe Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 24 May 2022 18:21:34 -0700 Subject: [PATCH 18/18] mark two behavior tests as passing --- test/behavior/eval.zig | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 3ffa0a3a12..383c32172c 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -646,8 +646,6 @@ pub fn TypeWithCompTimeSlice(comptime field_name: []const u8) type { } test "comptime function with mutable pointer is not memoized" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO - comptime { var x: i32 = 1; const ptr = &x; @@ -662,8 +660,6 @@ fn increment(value: *i32) void { } test "const ptr to comptime mutable data is not memoized" { - if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO - comptime { var foo = SingleFieldStruct{ .x = 1 }; try expect(foo.read_x() == 1);