From 3f4569bf187bfe296323aee6fbb59ab374041243 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Sun, 19 Mar 2023 22:43:59 -0400
Subject: [PATCH] codegen: fix backend breakage due to optional layout change

---
 src/arch/aarch64/CodeGen.zig | 82 ++++++++++++++++++++----------------
 src/arch/wasm/CodeGen.zig    | 49 ++++++++-------------
 test/behavior/error.zig      |  1 +
 3 files changed, 65 insertions(+), 67 deletions(-)

diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index e20cf900af..e2e2ce9ead 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -3011,41 +3011,16 @@ fn optionalPayload(self: *Self, inst: Air.Inst.Index, mcv: MCValue, optional_ty:
         return MCValue{ .register = reg };
     }

-    const offset = @intCast(u32, optional_ty.abiSize(self.target.*) - payload_ty.abiSize(self.target.*));
     switch (mcv) {
-        .register => |source_reg| {
+        .register => {
             // TODO should we reuse the operand here?
             const raw_reg = try self.register_manager.allocReg(inst, gp);
             const dest_reg = raw_reg.toX();

-            const shift = @intCast(u6, offset * 8);
-            if (shift == 0) {
-                try self.genSetReg(payload_ty, dest_reg, mcv);
-            } else {
-                _ = try self.addInst(.{
-                    .tag = if (payload_ty.isSignedInt())
-                        Mir.Inst.Tag.asr_immediate
-                    else
-                        Mir.Inst.Tag.lsr_immediate,
-                    .data = .{ .rr_shift = .{
-                        .rd = dest_reg,
-                        .rn = source_reg.toX(),
-                        .shift = shift,
-                    } },
-                });
-            }
-
+            try self.genSetReg(payload_ty, dest_reg, mcv);
             return MCValue{ .register = self.registerAlias(dest_reg, payload_ty) };
         },
-        .stack_argument_offset => |off| {
-            return MCValue{ .stack_argument_offset = off + offset };
-        },
-        .stack_offset => |off| {
-            return MCValue{ .stack_offset = off - offset };
-        },
-        .memory => |addr| {
-            return MCValue{ .memory = addr + offset };
-        },
+        .stack_argument_offset, .stack_offset, .memory => return mcv,
         else => unreachable, // invalid MCValue for an error union
     }
 }
@@ -3289,12 +3264,11 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
         const optional_abi_size = @intCast(u32, optional_ty.abiSize(self.target.*));
         const optional_abi_align = optional_ty.abiAlignment(self.target.*);
-        const payload_abi_size = @intCast(u32, payload_ty.abiSize(self.target.*));
-        const offset = optional_abi_size - payload_abi_size;
+        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));

         const stack_offset = try self.allocMem(optional_abi_size, optional_abi_align, inst);
-        try self.genSetStack(Type.bool, stack_offset, .{ .immediate = 1 });
-        try self.genSetStack(payload_ty, stack_offset - @intCast(u32, offset), operand);
+        try self.genSetStack(payload_ty, stack_offset, operand);
+        try self.genSetStack(Type.bool, stack_offset - offset, .{ .immediate = 1 });

         break :result MCValue{ .stack_offset = stack_offset };
     };

@@ -4834,13 +4808,49 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
 }

 fn isNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
-    const sentinel_ty: Type = if (!operand_ty.isPtrLikeOptional()) blk: {
+    const sentinel: struct { ty: Type, bind: ReadArg.Bind } = if (!operand_ty.isPtrLikeOptional()) blk: {
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = operand_ty.optionalChild(&buf);
-        break :blk if (payload_ty.hasRuntimeBitsIgnoreComptime()) Type.bool else operand_ty;
-    } else operand_ty;
+        if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+            break :blk .{ .ty = operand_ty, .bind = operand_bind };
+
+        const offset = @intCast(u32, payload_ty.abiSize(self.target.*));
+        const operand_mcv = try operand_bind.resolveToMcv(self);
+        const new_mcv: MCValue = switch (operand_mcv) {
+            .register => |source_reg| new: {
+                // TODO should we reuse the operand here?
+                const raw_reg = try self.register_manager.allocReg(null, gp);
+                const dest_reg = raw_reg.toX();
+
+                const shift = @intCast(u6, offset * 8);
+                if (shift == 0) {
+                    try self.genSetReg(payload_ty, dest_reg, operand_mcv);
+                } else {
+                    _ = try self.addInst(.{
+                        .tag = if (payload_ty.isSignedInt())
+                            Mir.Inst.Tag.asr_immediate
+                        else
+                            Mir.Inst.Tag.lsr_immediate,
+                        .data = .{ .rr_shift = .{
+                            .rd = dest_reg,
+                            .rn = source_reg.toX(),
+                            .shift = shift,
+                        } },
+                    });
+                }
+
+                break :new .{ .register = self.registerAlias(dest_reg, payload_ty) };
+            },
+            .stack_argument_offset => |off| .{ .stack_argument_offset = off + offset },
+            .stack_offset => |off| .{ .stack_offset = off - offset },
+            .memory => |addr| .{ .memory = addr + offset },
+            else => unreachable, // invalid MCValue for an optional
+        };
+
+        break :blk .{ .ty = Type.bool, .bind = .{ .mcv = new_mcv } };
+    } else .{ .ty = operand_ty, .bind = operand_bind };
     const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } };
-    return self.cmp(operand_bind, imm_bind, sentinel_ty, .eq);
+    return self.cmp(sentinel.bind, imm_bind, sentinel.ty, .eq);
 }

 fn isNonNull(self: *Self, operand_bind: ReadArg.Bind, operand_ty: Type) !MCValue {
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index c05f07a602..9af66eb40c 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2715,20 +2715,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError
         },
         .opt_payload_ptr => {
             const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
-            const parent_ptr = try func.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty);
-            var buf: Type.Payload.ElemType = undefined;
-            const payload_ty = payload_ptr.container_ty.optionalChild(&buf);
-            if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.optionalReprIsPayload()) {
-                return parent_ptr;
-            }
-
-            const abi_size = payload_ptr.container_ty.abiSize(func.target);
-            const offset = abi_size - payload_ty.abiSize(func.target);
-
-            return WValue{ .memory_offset = .{
-                .pointer = parent_ptr.memory,
-                .offset = @intCast(u32, offset),
-            } };
+            return func.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty);
         },
         else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}),
     }
@@ -2889,7 +2876,7 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
             }
         } else {
             const is_pl = val.tag() == .opt_payload;
-            return WValue{ .imm32 = if (is_pl) @as(u32, 1) else 0 };
+            return WValue{ .imm32 = @boolToInt(is_pl) };
         },
         .Struct => {
             const struct_obj = ty.castTag(.@"struct").?.data;
@@ -3882,7 +3869,11 @@ fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcod
         // When payload is zero-bits, we can treat operand as a value, rather than
         // a pointer to the stack value
         if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
-            try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
+            const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
+                const module = func.bin_file.base.options.module.?;
+                return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(module)});
+            };
+            try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
         }
     } else if (payload_ty.isSlice()) {
         switch (func.arch()) {
@@ -3911,13 +3902,11 @@ fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const operand = try func.resolveInst(ty_op.operand);
         if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand);

-        const offset = opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target);
-
         if (isByRef(payload_ty, func.target)) {
-            break :result try func.buildPointerOffset(operand, offset, .new);
+            break :result try func.buildPointerOffset(operand, 0, .new);
         }

-        const payload = try func.load(operand, payload_ty, @intCast(u32, offset));
+        const payload = try func.load(operand, payload_ty, 0);
         break :result try payload.toLocal(func, payload_ty);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
@@ -3936,8 +3925,7 @@ fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             break :result func.reuseOperand(ty_op.operand, operand);
         }

-        const offset = opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target);
-        break :result try func.buildPointerOffset(operand, offset, .new);
+        break :result try func.buildPointerOffset(operand, 0, .new);
     };
     func.finishAir(inst, result, &.{ty_op.operand});
 }
@@ -3956,16 +3944,16 @@ fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
         return func.finishAir(inst, operand, &.{ty_op.operand});
     }

-    const offset = std.math.cast(u32, opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target)) orelse {
+    const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
         const module = func.bin_file.base.options.module.?;
         return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
     };

     try func.emitWValue(operand);
     try func.addImm32(1);
-    try func.addMemArg(.i32_store8, .{ .offset = operand.offset(), .alignment = 1 });
+    try func.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 });

-    const result = try func.buildPointerOffset(operand, offset, .new);
+    const result = try func.buildPointerOffset(operand, 0, .new);
     return func.finishAir(inst, result, &.{ty_op.operand});
 }

@@ -3988,7 +3976,7 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         if (op_ty.optionalReprIsPayload()) {
             break :result func.reuseOperand(ty_op.operand, operand);
         }
-        const offset = std.math.cast(u32, op_ty.abiSize(func.target) - payload_ty.abiSize(func.target)) orelse {
+        const offset = std.math.cast(u32, payload_ty.abiSize(func.target)) orelse {
             const module = func.bin_file.base.options.module.?;
             return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
         };
@@ -3997,9 +3985,9 @@ fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         const result_ptr = try func.allocStack(op_ty);
         try func.emitWValue(result_ptr);
         try func.addImm32(1);
-        try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset(), .alignment = 1 });
+        try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 });

-        const payload_ptr = try func.buildPointerOffset(result_ptr, offset, .new);
+        const payload_ptr = try func.buildPointerOffset(result_ptr, 0, .new);
         try func.store(payload_ptr, operand, payload_ty, 0);
         break :result result_ptr;
     };
@@ -4719,7 +4707,6 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
     assert(op == .eq or op == .neq);
     var buf: Type.Payload.ElemType = undefined;
     const payload_ty = operand_ty.optionalChild(&buf);
-    const offset = @intCast(u32, operand_ty.abiSize(func.target) - payload_ty.abiSize(func.target));

     // We store the final result in here that will be validated
     // if the optional is truly equal.
@@ -4732,8 +4719,8 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
         try func.addTag(.i32_ne); // inverse so we can exit early
         try func.addLabel(.br_if, 0);

-        _ = try func.load(lhs, payload_ty, offset);
-        _ = try func.load(rhs, payload_ty, offset);
+        _ = try func.load(lhs, payload_ty, 0);
+        _ = try func.load(rhs, payload_ty, 0);
         const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
         try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
         try func.addLabel(.br_if, 0);
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index 8119a10028..a708971a49 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -874,6 +874,7 @@ test "field access of anyerror results in smaller error set" {
 }

 test "optional error union return type" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

     const S = struct {
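
Every hunk above encodes the same layout change: a non-pointer-like optional `?T` used to store its null flag first, putting the payload at `abiSize(?T) - abiSize(T)`; it now stores the payload at offset 0 with a one-byte flag at `abiSize(T)`. That is why each payload load drops to offset 0 and each flag store/load gains `+ offset`. Below is a minimal sketch of the new layout, using a hypothetical `OptionalLayout` helper for illustration only; the compiler derives these offsets from `abiSize` and does not declare any such struct:

    const std = @import("std");

    // Illustration only: approximates the post-change layout of a
    // non-pointer-like `?T` whose payload has runtime bits.
    fn OptionalLayout(comptime T: type) type {
        return extern struct {
            payload: T, // offset 0: payload loads need no offset adjustment
            is_some: u8, // offset abiSize(T): the flag byte isNull loads
        };
    }

    test "payload first, null flag after the payload" {
        const Opt = OptionalLayout(u32);
        try std.testing.expectEqual(0, @offsetOf(Opt, "payload"));
        try std.testing.expectEqual(@sizeOf(u32), @offsetOf(Opt, "is_some"));
    }

The aarch64 `isNull` register path is the same rule applied to a value held in a register rather than memory: the flag sits `offset` bytes above the payload, so it is extracted with a right shift by `offset * 8` bits instead of a load at `base + offset`.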