From f14831ec73d7dd87a770f902fb53d1ede486e524 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 5 Mar 2023 18:47:00 +0100 Subject: [PATCH] x86_64: truncate immediates --- src/arch/x86_64/CodeGen.zig | 109 +++++++++++++++++++++++++++++------ src/arch/x86_64/Emit.zig | 16 ++--- src/arch/x86_64/Encoding.zig | 6 +- src/arch/x86_64/Mir.zig | 4 +- src/arch/x86_64/encoder.zig | 24 +++----- 5 files changed, 114 insertions(+), 45 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index c108ad6f32..ef599092de 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -385,6 +385,24 @@ pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 { return self.addExtraAssumeCapacity(extra); } +fn extraData(self: *Self, comptime T: type, index: u32) struct { data: T, end: u32 } { + const fields = std.meta.fields(T); + var i: u32 = index; + var result: T = undefined; + inline for (fields) |field| { + @field(result, field.name) = switch (field.type) { + u32 => self.mir_extra.items[i], + i32 => @bitCast(i32, self.mir_extra.items[i]), + else => @compileError("bad field type"), + }; + i += 1; + } + return .{ + .data = result, + .end = i, + }; +} + pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 { const fields = std.meta.fields(@TypeOf(extra)); const result = @intCast(u32, self.mir_extra.items.len); @@ -2759,9 +2777,15 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type 1, 2, 4 => { // TODO this is wasteful! // introduce new MIR tag specifically for mov [reg + 0], imm + const operand = switch (abi_size) { + 1 => @truncate(u8, imm), + 2 => @truncate(u16, imm), + 4 => @truncate(u32, imm), + else => unreachable, + }; const payload = try self.addExtra(Mir.ImmPair{ .dest_off = 0, - .operand = @truncate(u32, imm), + .operand = operand, }); _ = try self.addInst(.{ .tag = .mov_mem_imm, @@ -2872,10 +2896,17 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type return self.fail("TODO saving imm to memory for abi_size {}", .{abi_size}); } + const operand = switch (abi_size) { + 1 => @truncate(u8, imm), + 2 => @truncate(u16, imm), + 4 => @truncate(u32, imm), + 8 => @truncate(u32, imm), + else => unreachable, + }; const payload = try self.addExtra(Mir.ImmPair{ .dest_off = 0, // TODO check if this logic is correct - .operand = @truncate(u32, imm), + .operand = operand, }); const flags: u2 = switch (abi_size) { 1 => 0b00, @@ -3600,7 +3631,13 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu _ = try self.addInst(.{ .tag = mir_tag, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size) }), - .data = .{ .imm = @truncate(u32, imm) }, + .data = .{ .imm = switch (abi_size) { + 1 => @truncate(u8, imm), + 2 => @truncate(u16, imm), + 4 => @truncate(u32, imm), + 8 => @truncate(u32, imm), + else => unreachable, + } }, }); }, .memory, @@ -3671,9 +3708,16 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu 8 => 0b11, else => unreachable, }; + const operand = switch (abi_size) { + 1 => @truncate(u8, imm), + 2 => @truncate(u16, imm), + 4 => @truncate(u32, imm), + 8 => @truncate(u32, imm), + else => unreachable, + }; const payload = try self.addExtra(Mir.ImmPair{ .dest_off = -off, - .operand = @truncate(u32, imm), + .operand = operand, }); _ = try self.addInst(.{ .tag = tag, @@ -4855,7 +4899,13 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u _ = try self.addInst(.{ .tag = .xor, .ops = 
Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size) }), - .data = .{ .imm = @intCast(u32, imm) }, + .data = .{ .imm = switch (abi_size) { + 1 => @truncate(u8, imm), + 2 => @truncate(u16, imm), + 4 => @truncate(u32, imm), + 8 => @truncate(u32, imm), + else => unreachable, + } }, }); }, .register => |reg| { @@ -5366,20 +5416,27 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE // We have a positive stack offset value but we want a twos complement negative // offset from rbp, which is at the top of the stack frame. // mov [rbp+offset], immediate + const operand = switch (abi_size) { + 1 => @truncate(u8, imm), + 2 => @truncate(u16, imm), + 4 => @truncate(u32, imm), + else => unreachable, + }; + const flags: u2 = switch (abi_size) { + 1 => 0b00, + 2 => 0b01, + 4 => 0b10, + else => unreachable, + }; const payload = try self.addExtra(Mir.ImmPair{ .dest_off = -stack_offset, - .operand = @truncate(u32, imm), + .operand = operand, }); _ = try self.addInst(.{ .tag = .mov_mem_imm, .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp, - .flags = switch (abi_size) { - 1 => 0b00, - 2 => 0b01, - 4 => 0b10, - else => unreachable, - }, + .flags = flags, }), .data = .{ .payload = payload }, }); @@ -5518,7 +5575,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl assert(ty.isError()); const payload = try self.addExtra(Mir.ImmPair{ .dest_off = -stack_offset, - .operand = @truncate(u32, x_big), + .operand = @truncate(u8, x_big), }); _ = try self.addInst(.{ .tag = .mov_mem_imm, @@ -5530,9 +5587,15 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl }); }, 1, 2, 4 => { + const operand = switch (abi_size) { + 1 => @truncate(u8, x_big), + 2 => @truncate(u16, x_big), + 4 => @truncate(u32, x_big), + else => unreachable, + }; const payload = try self.addExtra(Mir.ImmPair{ .dest_off = -stack_offset, - .operand = @truncate(u32, x_big), + .operand = operand, }); _ = try self.addInst(.{ .tag = .mov_mem_imm, @@ -5932,7 +5995,7 @@ fn genInlineMemset( const loop_start = try self.addInst(.{ .tag = .cmp, .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }), - .data = .{ .imm = @bitCast(u32, @as(i32, -1)) }, + .data = .{ .imm = @bitCast(u8, @as(i8, -1)) }, }); // je end @@ -6037,7 +6100,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void _ = try self.addInst(.{ .tag = .mov, .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }), - .data = .{ .imm = @truncate(u32, x) }, + .data = .{ .imm = switch (abi_size) { + 1 => @truncate(u8, x), + 2 => @truncate(u16, x), + 4 => @truncate(u32, x), + 8 => @truncate(u32, x), + else => unreachable, + } }, }); return; } @@ -6204,7 +6273,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .reg1 = registerAlias(reg, abi_size), .flags = 0b01, }), - .data = .{ .imm = @truncate(u32, x) }, + .data = .{ .imm = switch (abi_size) { + 1 => @truncate(u8, x), + 2 => @truncate(u16, x), + 4 => @truncate(u32, x), + 8 => @truncate(u32, x), + else => unreachable, + } }, }); } else { // If this is RAX, we can use a direct load. 
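The CodeGen.zig hunks above all repeat the same switch on abi_size: the 64-bit immediate is truncated to the operand's ABI size so only the meaningful low bits survive, then widened back into the u32 payload that MIR stores (Zig coerces the narrower arms up to u32). A minimal sketch of that recurring pattern, assuming the same Zig version as the patch (two-argument @truncate); truncateImm is an illustrative helper, not something the patch adds:

const std = @import("std");

fn truncateImm(abi_size: u32, imm: u64) u32 {
    return switch (abi_size) {
        1 => @truncate(u8, imm), // keep only the low byte
        2 => @truncate(u16, imm), // low word
        4, 8 => @truncate(u32, imm), // low dword; the MIR payload is only 32 bits wide
        else => unreachable,
    };
}

test "upper bits are dropped per operand size" {
    const imm: u64 = 0xaaaa_bbbb_cccc_ddee;
    try std.testing.expectEqual(@as(u32, 0xee), truncateImm(1, imm));
    try std.testing.expectEqual(@as(u32, 0xddee), truncateImm(2, imm));
    try std.testing.expectEqual(@as(u32, 0xcccc_ddee), truncateImm(4, imm));
}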
diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 1c540adc9d..5d52b87d87 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -236,12 +236,12 @@ fn encode(emit: *Emit, mnemonic: Instruction.Mnemonic, ops: struct { op3: Instruction.Operand = .none, op4: Instruction.Operand = .none, }) InnerError!void { - const inst = try Instruction.new(mnemonic, .{ + const inst = Instruction.new(mnemonic, .{ .op1 = ops.op1, .op2 = ops.op2, .op3 = ops.op3, .op4 = ops.op4, - }); + }) catch unreachable; return inst.encode(emit.code.writer()); } @@ -624,7 +624,7 @@ fn mirArithScaleSrc(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst. const payload = emit.mir.instructions.items(.data)[inst].payload; const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode(); const scale_index = Memory.ScaleIndex{ - .scale = scale, + .scale = @as(u4, 1) << scale, .index = index_reg_disp.index, }; return emit.encode(mnemonic, .{ @@ -643,7 +643,7 @@ fn mirArithScaleDst(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst. const payload = emit.mir.instructions.items(.data)[inst].payload; const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode(); const scale_index = Memory.ScaleIndex{ - .scale = scale, + .scale = @as(u4, 1) << scale, .index = index_reg_disp.index, }; assert(ops.reg2 != .none); @@ -663,7 +663,7 @@ fn mirArithScaleImm(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst. const payload = emit.mir.instructions.items(.data)[inst].payload; const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode(); const scale_index = Memory.ScaleIndex{ - .scale = scale, + .scale = @as(u4, 1) << scale, .index = index_reg_disp_imm.index, }; return emit.encode(mnemonic, .{ @@ -688,7 +688,7 @@ fn mirArithMemIndexImm(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.In 0b11 => .qword, }; const scale_index = Memory.ScaleIndex{ - .scale = 0, + .scale = 1, .index = index_reg_disp_imm.index, }; return emit.encode(mnemonic, .{ @@ -777,7 +777,7 @@ fn mirMovabs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { } else emit.mir.instructions.items(.data)[inst].imm; return emit.encode(.mov, .{ .op1 = .{ .reg = ops.reg1 }, - .op2 = .{ .imm = @bitCast(i64, imm) }, + .op2 = .{ .imm = imm }, }); }, 0b01 => { @@ -983,7 +983,7 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode(); const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null; const scale_index = Memory.ScaleIndex{ - .scale = 0, + .scale = 1, .index = index_reg_disp.index, }; return emit.encode(.lea, .{ diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig index 2cccded7ec..7cf8910924 100644 --- a/src/arch/x86_64/Encoding.zig +++ b/src/arch/x86_64/Encoding.zig @@ -390,9 +390,9 @@ pub const Op = enum { .imm => |imm| { if (imm == 1) return .unity; - if (math.cast(i8, imm)) |_| return .imm8; - if (math.cast(i16, imm)) |_| return .imm16; - if (math.cast(i32, imm)) |_| return .imm32; + if (math.cast(u8, imm)) |_| return .imm8; + if (math.cast(u16, imm)) |_| return .imm16; + if (math.cast(u32, imm)) |_| return .imm32; return .imm64; }, } diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index b3be08e86b..4124592627 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -594,9 +594,9 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void { mir.* = undefined; } -pub fn extraData(mir: 
Mir, comptime T: type, index: usize) struct { data: T, end: usize } { +pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end: u32 } { const fields = std.meta.fields(T); - var i: usize = index; + var i: u32 = index; var result: T = undefined; inline for (fields) |field| { @field(result, field.name) = switch (field.type) { diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig index 3daffc7ad2..eefc7fd6e2 100644 --- a/src/arch/x86_64/encoder.zig +++ b/src/arch/x86_64/encoder.zig @@ -22,7 +22,7 @@ pub const Instruction = struct { none, reg: Register, mem: Memory, - imm: i64, + imm: u64, /// Returns the bitsize of the operand. /// Asserts the operand is either register or memory. @@ -47,6 +47,7 @@ pub const Instruction = struct { } pub fn fmtPrint(op: Operand, enc_op: Encoding.Op, writer: anytype) !void { + _ = enc_op; switch (op) { .none => {}, .reg => |reg| try writer.writeAll(@tagName(reg)), @@ -92,14 +93,7 @@ pub const Instruction = struct { .moffs => |moffs| try writer.print("{s}:0x{x}", .{ @tagName(moffs.seg), moffs.offset }), }, .imm => |imm| { - if (enc_op == .imm64) { - return writer.print("0x{x}", .{@bitCast(u64, imm)}); - } - const imm_abs = try std.math.absInt(imm); - if (sign(imm) < 0) { - try writer.writeByte('-'); - } - try writer.print("0x{x}", .{imm_abs}); + try writer.print("0x{x}", .{imm}); }, } } @@ -117,7 +111,7 @@ pub const Instruction = struct { .op3 = args.op3, .op4 = args.op4, }) orelse return error.InvalidInstruction; - std.log.debug("{}", .{encoding}); + std.log.warn("{}", .{encoding}); return .{ .op1 = args.op1, .op2 = args.op2, @@ -386,12 +380,12 @@ pub const Instruction = struct { } } - fn encodeImm(imm: i64, kind: Encoding.Op, encoder: anytype) !void { + fn encodeImm(imm: u64, kind: Encoding.Op, encoder: anytype) !void { switch (kind) { - .imm8, .rel8 => try encoder.imm8(@truncate(i8, imm)), - .imm16, .rel16 => try encoder.imm16(@truncate(i16, imm)), - .imm32, .rel32 => try encoder.imm32(@truncate(i32, imm)), - .imm64 => try encoder.imm64(@bitCast(u64, imm)), + .imm8, .rel8 => try encoder.imm8(@bitCast(i8, @truncate(u8, imm))), + .imm16, .rel16 => try encoder.imm16(@bitCast(i16, @truncate(u16, imm))), + .imm32, .rel32 => try encoder.imm32(@bitCast(i32, @truncate(u32, imm))), + .imm64 => try encoder.imm64(imm), else => unreachable, } }
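With Instruction.Operand.imm switched from i64 to u64, operand-class selection in Encoding.zig now checks whether the unsigned value fits each width, and encodeImm reintroduces signedness only at emission time by truncating and bit-casting down to the width the encoder helpers take. A small sketch of the classification side, relying on the same optional-returning std.math.cast the patch uses; ImmClass and classify are stand-in names, not the real Encoding.Op API:

const std = @import("std");
const math = std.math;

const ImmClass = enum { unity, imm8, imm16, imm32, imm64 };

fn classify(imm: u64) ImmClass {
    if (imm == 1) return .unity;
    if (math.cast(u8, imm)) |_| return .imm8;
    if (math.cast(u16, imm)) |_| return .imm16;
    if (math.cast(u32, imm)) |_| return .imm32;
    return .imm64;
}

test "classification works on the unsigned bit pattern" {
    try std.testing.expectEqual(ImmClass.imm8, classify(0xff));
    // -1 that was truncated to 32 bits arrives as 0xffff_ffff and selects imm32.
    try std.testing.expectEqual(ImmClass.imm32, classify(@bitCast(u32, @as(i32, -1))));
    try std.testing.expectEqual(ImmClass.imm64, classify(0x1_0000_0000));
}

This seems to be why the CodeGen changes earlier in the patch truncate before building MIR: once immediates are carried unsigned, a small negative constant that is not cut down to its operand width would look like a huge 64-bit value and be classified as imm64.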
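Separately, the Emit.zig hunks treat Memory.ScaleIndex.scale as the actual multiplier rather than the raw 2-bit SIB exponent, which is why call sites now pass @as(u4, 1) << scale and hunks that used to pass the exponent 0 now pass the multiplier 1. A sketch of just that mapping (sibScale is an illustrative name, not part of the patch):

const std = @import("std");

fn sibScale(exponent: u2) u4 {
    // The SIB byte stores the scale as a 2-bit exponent; the multiplier is 1 << exponent.
    return @as(u4, 1) << exponent;
}

test "SIB exponent maps to scale factor" {
    try std.testing.expectEqual(@as(u4, 1), sibScale(0b00));
    try std.testing.expectEqual(@as(u4, 2), sibScale(0b01));
    try std.testing.expectEqual(@as(u4, 4), sibScale(0b10));
    try std.testing.expectEqual(@as(u4, 8), sibScale(0b11));
}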