From 15100a2db84d976c2e1c97a8bf50cf26ec4968b3 Mon Sep 17 00:00:00 2001
From: Shengchen Kan
Date: Thu, 18 May 2023 17:26:00 +0800
Subject: [PATCH] [X86][MC] Move the code about MOV encoding optimization to
 X86EncodingOptimization.cpp, NFCI

---
 .../X86/MCTargetDesc/X86EncodingOptimization.cpp | 60 +++++++++++++++
 .../X86/MCTargetDesc/X86EncodingOptimization.h   |  1 +
 llvm/lib/Target/X86/X86MCInstLower.cpp           | 86 +---------------------
 3 files changed, 64 insertions(+), 83 deletions(-)

diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
index 8894304..6d61d54 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.cpp
@@ -12,8 +12,10 @@
 #include "X86EncodingOptimization.h"
 #include "X86BaseInfo.h"
+#include "llvm/MC/MCExpr.h"
 #include "llvm/MC/MCInst.h"
 #include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Support/Casting.h"
 
 using namespace llvm;
 
@@ -288,3 +290,61 @@ bool X86::optimizeINCDEC(MCInst &MI, bool In64BitMode) {
   MI.setOpcode(NewOpc);
   return true;
 }
+
+/// Simplify things like MOV32rm to MOV32o32a.
+bool X86::optimizeMOV(MCInst &MI, bool In64BitMode) {
+  // Don't make these simplifications in 64-bit mode; other assemblers don't
+  // perform them because they make the code larger.
+  if (In64BitMode)
+    return false;
+  unsigned NewOpc;
+  // We don't currently select the correct instruction form for instructions
+  // which have a short %eax, etc. form. Handle this by custom lowering, for
+  // now.
+  //
+  // Note, we are currently not handling the following instructions:
+  // MOV64ao8, MOV64o8a
+  // XCHG16ar, XCHG32ar, XCHG64ar
+  switch (MI.getOpcode()) {
+  default:
+    return false;
+    FROM_TO(MOV8mr_NOREX, MOV8o32a)
+    FROM_TO(MOV8mr, MOV8o32a)
+    FROM_TO(MOV8rm_NOREX, MOV8ao32)
+    FROM_TO(MOV8rm, MOV8ao32)
+    FROM_TO(MOV16mr, MOV16o32a)
+    FROM_TO(MOV16rm, MOV16ao32)
+    FROM_TO(MOV32mr, MOV32o32a)
+    FROM_TO(MOV32rm, MOV32ao32)
+  }
+  bool IsStore = MI.getOperand(0).isReg() && MI.getOperand(1).isReg();
+  unsigned AddrBase = IsStore;
+  unsigned RegOp = IsStore ? 0 : 5;
+  unsigned AddrOp = AddrBase + 3;
+  // Check whether the destination register can be fixed.
+  unsigned Reg = MI.getOperand(RegOp).getReg();
+  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
+    return false;
+  // Check whether this is an absolute address.
+  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
+  // to do this here.
+  bool Absolute = true;
+  if (MI.getOperand(AddrOp).isExpr()) {
+    const MCExpr *MCE = MI.getOperand(AddrOp).getExpr();
+    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
+      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
+        Absolute = false;
+  }
+  if (Absolute && (MI.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
+                   MI.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
+                   MI.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
+    return false;
+  // If so, rewrite the instruction.
+  MCOperand Saved = MI.getOperand(AddrOp);
+  MCOperand Seg = MI.getOperand(AddrBase + X86::AddrSegmentReg);
+  MI.clear();
+  MI.setOpcode(NewOpc);
+  MI.addOperand(Saved);
+  MI.addOperand(Seg);
+  return true;
+}
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
index 1177d2c7..b1908a6 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86EncodingOptimization.h
@@ -21,6 +21,7 @@ bool optimizeShiftRotateWithImmediateOne(MCInst &MI);
 bool optimizeVPCMPWithImmediateOneOrSix(MCInst &MI);
 bool optimizeMOVSX(MCInst &MI);
 bool optimizeINCDEC(MCInst &MI, bool In64BitMode);
+bool optimizeMOV(MCInst &MI, bool In64BitMode);
 } // namespace X86
 } // namespace llvm
 #endif
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index ff3b41b..db5bf15 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -343,58 +343,6 @@ static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
   Inst.addOperand(Saved);
 }
 
-/// Simplify things like MOV32rm to MOV32o32a.
-static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
-                                  unsigned Opcode) {
-  // Don't make these simplifications in 64-bit mode; other assemblers don't
-  // perform them because they make the code larger.
-  if (Printer.getSubtarget().is64Bit())
-    return;
-
-  bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg();
-  unsigned AddrBase = IsStore;
-  unsigned RegOp = IsStore ? 0 : 5;
-  unsigned AddrOp = AddrBase + 3;
-  assert(
-      Inst.getNumOperands() == 6 && Inst.getOperand(RegOp).isReg() &&
-      Inst.getOperand(AddrBase + X86::AddrBaseReg).isReg() &&
-      Inst.getOperand(AddrBase + X86::AddrScaleAmt).isImm() &&
-      Inst.getOperand(AddrBase + X86::AddrIndexReg).isReg() &&
-      Inst.getOperand(AddrBase + X86::AddrSegmentReg).isReg() &&
-      (Inst.getOperand(AddrOp).isExpr() || Inst.getOperand(AddrOp).isImm()) &&
-      "Unexpected instruction!");
-
-  // Check whether the destination register can be fixed.
-  unsigned Reg = Inst.getOperand(RegOp).getReg();
-  if (Reg != X86::AL && Reg != X86::AX && Reg != X86::EAX && Reg != X86::RAX)
-    return;
-
-  // Check whether this is an absolute address.
-  // FIXME: We know TLVP symbol refs aren't, but there should be a better way
-  // to do this here.
-  bool Absolute = true;
-  if (Inst.getOperand(AddrOp).isExpr()) {
-    const MCExpr *MCE = Inst.getOperand(AddrOp).getExpr();
-    if (const MCSymbolRefExpr *SRE = dyn_cast<MCSymbolRefExpr>(MCE))
-      if (SRE->getKind() == MCSymbolRefExpr::VK_TLVP)
-        Absolute = false;
-  }
-
-  if (Absolute &&
-      (Inst.getOperand(AddrBase + X86::AddrBaseReg).getReg() != 0 ||
-       Inst.getOperand(AddrBase + X86::AddrScaleAmt).getImm() != 1 ||
-       Inst.getOperand(AddrBase + X86::AddrIndexReg).getReg() != 0))
-    return;
-
-  // If so, rewrite the instruction.
-  MCOperand Saved = Inst.getOperand(AddrOp);
-  MCOperand Seg = Inst.getOperand(AddrBase + X86::AddrSegmentReg);
-  Inst = MCInst();
-  Inst.setOpcode(Opcode);
-  Inst.addOperand(Saved);
-  Inst.addOperand(Seg);
-}
-
 static unsigned getRetOpcode(const X86Subtarget &Subtarget) {
   return Subtarget.is64Bit() ? X86::RET64 : X86::RET32;
 }
@@ -490,6 +438,9 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
   if (X86::optimizeINCDEC(OutMI, In64BitMode))
     return;
 
+  if (X86::optimizeMOV(OutMI, In64BitMode))
+    return;
+
   // Handle a few special cases to eliminate operand modifiers.
   switch (OutMI.getOpcode()) {
   case X86::LEA64_32r:
@@ -581,37 +532,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
     OutMI.setOpcode(convertTailJumpOpcode(OutMI.getOpcode()));
     break;
 
-  // We don't currently select the correct instruction form for instructions
-  // which have a short %eax, etc. form. Handle this by custom lowering, for
-  // now.
-  //
-  // Note, we are currently not handling the following instructions:
-  // MOV64ao8, MOV64o8a
-  // XCHG16ar, XCHG32ar, XCHG64ar
-  case X86::MOV8mr_NOREX:
-  case X86::MOV8mr:
-  case X86::MOV8rm_NOREX:
-  case X86::MOV8rm:
-  case X86::MOV16mr:
-  case X86::MOV16rm:
-  case X86::MOV32mr:
-  case X86::MOV32rm: {
-    unsigned NewOpc;
-    switch (OutMI.getOpcode()) {
-    default: llvm_unreachable("Invalid opcode");
-    case X86::MOV8mr_NOREX:
-    case X86::MOV8mr:   NewOpc = X86::MOV8o32a; break;
-    case X86::MOV8rm_NOREX:
-    case X86::MOV8rm:   NewOpc = X86::MOV8ao32; break;
-    case X86::MOV16mr:  NewOpc = X86::MOV16o32a; break;
-    case X86::MOV16rm:  NewOpc = X86::MOV16ao32; break;
-    case X86::MOV32mr:  NewOpc = X86::MOV32o32a; break;
-    case X86::MOV32rm:  NewOpc = X86::MOV32ao32; break;
-    }
-    SimplifyShortMoveForm(AsmPrinter, OutMI, NewOpc);
-    break;
-  }
-
   case X86::ADC8ri: case X86::ADC16ri: case X86::ADC32ri: case X86::ADC64ri32:
   case X86::ADD8ri: case X86::ADD16ri: case X86::ADD32ri: case X86::ADD64ri32:
   case X86::AND8ri: case X86::AND16ri: case X86::AND32ri: case X86::AND64ri32:
-- 
2.7.4
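
Note on the moved logic: optimizeMOV rewrites MOV8/16/32 loads and stores
that go through AL/AX/EAX and reference a bare absolute address (no base or
index register, no TLVP symbol ref) into the two-operand accumulator/offset
forms such as MOV32ao32/MOV32o32a. Below is a minimal standalone sketch of
how the refactored entry point is driven; the lowerExample function, the
operand values, and the exact include paths are illustrative assumptions,
not code from the patch:

    // Sketch: hand-build a 32-bit load from an absolute address and run the
    // helper on it, mirroring what X86MCInstLower::Lower now does.
    #include "MCTargetDesc/X86BaseInfo.h"           // X86::AddrBaseReg, ...
    #include "MCTargetDesc/X86EncodingOptimization.h"
    #include "MCTargetDesc/X86MCTargetDesc.h"       // X86::MOV32rm, X86::EAX
    #include "llvm/MC/MCInst.h"
    #include <cassert>

    using namespace llvm;

    static void lowerExample() {
      MCInst MI;
      MI.setOpcode(X86::MOV32rm); // mov eax, dword ptr [0x1234]
      MI.addOperand(MCOperand::createReg(X86::EAX));        // destination
      MI.addOperand(MCOperand::createReg(X86::NoRegister)); // base
      MI.addOperand(MCOperand::createImm(1));               // scale
      MI.addOperand(MCOperand::createReg(X86::NoRegister)); // index
      MI.addOperand(MCOperand::createImm(0x1234));          // absolute disp
      MI.addOperand(MCOperand::createReg(X86::NoRegister)); // segment
      // In 32-bit mode the helper rewrites MI in place to the short form,
      // keeping only the displacement and segment operands; in 64-bit mode
      // it returns false and leaves MI untouched.
      if (X86::optimizeMOV(MI, /*In64BitMode=*/false)) {
        assert(MI.getOpcode() == X86::MOV32ao32);
        assert(MI.getNumOperands() == 2);
      }
    }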
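On the design choice preserved by the move: the short forms replace the
ModRM byte with a plain address offset, so in 32-bit mode "mov eax,
[0x1234]" shrinks from 6 bytes (8B 05 + imm32) to 5 (A1 + imm32). In 64-bit
mode the same A1 encoding takes a full 64-bit offset (9 bytes total), which
is why optimizeMOV, like the SimplifyShortMoveForm it replaces and like
other assemblers, bails out early when In64BitMode is set.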