diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index ba176ecb1e..07807d0850 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -4472,16 +4472,11 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
             }
         },
         .ptr_stack_offset => |off| {
-            // TODO: maybe addressing from sp instead of fp
-            const imm12 = math.cast(u12, off) orelse
-                return self.fail("TODO larger stack offsets", .{});
-
             _ = try self.addInst(.{
-                .tag = .sub_immediate,
-                .data = .{ .rr_imm12_sh = .{
-                    .rd = reg,
-                    .rn = .x29,
-                    .imm12 = imm12,
+                .tag = .ldr_ptr_stack,
+                .data = .{ .load_store_stack = .{
+                    .rt = reg,
+                    .offset = @intCast(u32, off),
                 } },
             });
         },
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig
index 54e40c776f..febe29d9a9 100644
--- a/src/arch/aarch64/Emit.zig
+++ b/src/arch/aarch64/Emit.zig
@@ -150,6 +150,7 @@ pub fn emitMir(
             .ldp => try emit.mirLoadStoreRegisterPair(inst),
             .stp => try emit.mirLoadStoreRegisterPair(inst),
 
+            .ldr_ptr_stack => try emit.mirLoadStoreStack(inst),
             .ldr_stack => try emit.mirLoadStoreStack(inst),
             .ldrb_stack => try emit.mirLoadStoreStack(inst),
             .ldrh_stack => try emit.mirLoadStoreStack(inst),
@@ -159,8 +160,8 @@ pub fn emitMir(
             .strb_stack => try emit.mirLoadStoreStack(inst),
             .strh_stack => try emit.mirLoadStoreStack(inst),
 
-            .ldr_stack_argument => try emit.mirLoadStackArgument(inst),
             .ldr_ptr_stack_argument => try emit.mirLoadStackArgument(inst),
+            .ldr_stack_argument => try emit.mirLoadStackArgument(inst),
             .ldrb_stack_argument => try emit.mirLoadStackArgument(inst),
             .ldrh_stack_argument => try emit.mirLoadStackArgument(inst),
             .ldrsb_stack_argument => try emit.mirLoadStackArgument(inst),
@@ -1003,23 +1004,43 @@ fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
     const rt = load_store_stack.rt;
     const raw_offset = emit.stack_size - load_store_stack.offset;
 
-    const offset = switch (tag) {
-        .ldrb_stack, .ldrsb_stack, .strb_stack => blk: {
-            if (math.cast(u12, raw_offset)) |imm| {
-                break :blk Instruction.LoadStoreOffset.imm(imm);
-            } else {
+    switch (tag) {
+        .ldr_ptr_stack => {
+            const offset = if (math.cast(u12, raw_offset)) |imm| imm else {
+                return emit.fail("TODO load stack argument ptr with larger offset", .{});
+            };
+
+            switch (tag) {
+                .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(rt, .sp, offset, false)),
+                else => unreachable,
+            }
+        },
+        .ldrb_stack, .ldrsb_stack, .strb_stack => {
+            const offset = if (math.cast(u12, raw_offset)) |imm| Instruction.LoadStoreOffset.imm(imm) else {
                 return emit.fail("TODO load/store stack byte with larger offset", .{});
+            };
+
+            switch (tag) {
+                .ldrb_stack => try emit.writeInstruction(Instruction.ldrb(rt, .sp, offset)),
+                .ldrsb_stack => try emit.writeInstruction(Instruction.ldrsb(rt, .sp, offset)),
+                .strb_stack => try emit.writeInstruction(Instruction.strb(rt, .sp, offset)),
+                else => unreachable,
             }
         },
-        .ldrh_stack, .ldrsh_stack, .strh_stack => blk: {
+        .ldrh_stack, .ldrsh_stack, .strh_stack => {
             assert(std.mem.isAlignedGeneric(u32, raw_offset, 2)); // misaligned stack entry
-            if (math.cast(u12, @divExact(raw_offset, 2))) |imm| {
-                break :blk Instruction.LoadStoreOffset.imm(imm);
-            } else {
+            const offset = if (math.cast(u12, @divExact(raw_offset, 2))) |imm| Instruction.LoadStoreOffset.imm(imm) else {
                 return emit.fail("TODO load/store stack halfword with larger offset", .{});
+            };
+
+            switch (tag) {
+                .ldrh_stack => try emit.writeInstruction(Instruction.ldrh(rt, .sp, offset)),
+                .ldrsh_stack => try emit.writeInstruction(Instruction.ldrsh(rt, .sp, offset)),
+                .strh_stack => try emit.writeInstruction(Instruction.strh(rt, .sp, offset)),
+                else => unreachable,
             }
         },
-        .ldr_stack, .str_stack => blk: {
+        .ldr_stack, .str_stack => {
             const alignment: u32 = switch (rt.size()) {
                 32 => 4,
                 64 => 8,
@@ -1027,25 +1048,17 @@ fn mirLoadStoreStack(emit: *Emit, inst: Mir.Inst.Index) !void {
             };
             assert(std.mem.isAlignedGeneric(u32, raw_offset, alignment)); // misaligned stack entry
-            if (math.cast(u12, @divExact(raw_offset, alignment))) |imm| {
-                break :blk Instruction.LoadStoreOffset.imm(imm);
-            } else {
+            const offset = if (math.cast(u12, @divExact(raw_offset, alignment))) |imm| Instruction.LoadStoreOffset.imm(imm) else {
                 return emit.fail("TODO load/store stack with larger offset", .{});
+            };
+
+            switch (tag) {
+                .ldr_stack => try emit.writeInstruction(Instruction.ldr(rt, .sp, offset)),
+                .str_stack => try emit.writeInstruction(Instruction.str(rt, .sp, offset)),
+                else => unreachable,
             }
         },
         else => unreachable,
-    };
-
-    switch (tag) {
-        .ldr_stack => try emit.writeInstruction(Instruction.ldr(rt, .sp, offset)),
-        .ldrb_stack => try emit.writeInstruction(Instruction.ldrb(rt, .sp, offset)),
-        .ldrh_stack => try emit.writeInstruction(Instruction.ldrh(rt, .sp, offset)),
-        .ldrsb_stack => try emit.writeInstruction(Instruction.ldrsb(rt, .sp, offset)),
-        .ldrsh_stack => try emit.writeInstruction(Instruction.ldrsh(rt, .sp, offset)),
-        .str_stack => try emit.writeInstruction(Instruction.str(rt, .sp, offset)),
-        .strb_stack => try emit.writeInstruction(Instruction.strb(rt, .sp, offset)),
-        .strh_stack => try emit.writeInstruction(Instruction.strh(rt, .sp, offset)),
-        else => unreachable,
     }
 }
diff --git a/src/arch/aarch64/Mir.zig b/src/arch/aarch64/Mir.zig
index 9106050904..927e4c9893 100644
--- a/src/arch/aarch64/Mir.zig
+++ b/src/arch/aarch64/Mir.zig
@@ -92,6 +92,8 @@ pub const Inst = struct {
         load_memory_ptr_direct,
         /// Load Pair of Registers
         ldp,
+        /// Pseudo-instruction: Load pointer to stack item
+        ldr_ptr_stack,
         /// Pseudo-instruction: Load pointer to stack argument
         ldr_ptr_stack_argument,
         /// Pseudo-instruction: Load from stack