From: Amir Ayupov
Date: Fri, 21 Jan 2022 07:34:53 +0000 (-0800)
Subject: [X86][NFC] Move table from getRelaxedOpcodeArith into its own class
X-Git-Tag: upstream/15.0.7~13847
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=999fa9f68788d51c109b41b9d194dd3ff9a306f5;p=platform%2Fupstream%2Fllvm.git

[X86][NFC] Move table from getRelaxedOpcodeArith into its own class

Move the table out and prepare the code to reuse it for the reverse
mapping. This follows the example of the memory folding/unfolding tables
in X86InstrFoldTables.cpp.

This is a preparation step for unifying `llvm::X86::getRelaxedOpcodeArith`
and `getShortArithOpcode` in BOLT's X86MCPlusBuilder.cpp.

Addresses https://lists.llvm.org/pipermail/llvm-dev/2022-January/154526.html

Reviewed By: skan, MaskRay

Differential Revision: https://reviews.llvm.org/D121402
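As a rough sketch of how the forward and reverse mappings are intended to
compose once both directions exist (illustrative only, not part of the patch;
it uses the X86::getRelaxedOpcodeArith and X86::getShortOpcodeArith entry
points introduced below):

  // A short-immediate arithmetic opcode relaxes to its full-immediate form,
  // and the short form table maps the relaxed form back.
  unsigned Relaxed = X86::getRelaxedOpcodeArith(X86::ADD32ri8); // X86::ADD32ri
  unsigned Short = X86::getShortOpcodeArith(Relaxed);           // X86::ADD32ri8
  // Opcodes without a table entry pass through unchanged.
  assert(X86::getRelaxedOpcodeArith(X86::MOV32ri) == X86::MOV32ri);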
---

diff --git a/llvm/lib/Target/X86/MCTargetDesc/CMakeLists.txt b/llvm/lib/Target/X86/MCTargetDesc/CMakeLists.txt
index 0f5030e..cb76702 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/CMakeLists.txt
+++ b/llvm/lib/Target/X86/MCTargetDesc/CMakeLists.txt
@@ -3,6 +3,7 @@ add_llvm_component_library(LLVMX86Desc
   X86IntelInstPrinter.cpp
   X86InstComments.cpp
   X86InstPrinterCommon.cpp
+  X86InstrRelaxTables.cpp
   X86ShuffleDecode.cpp
   X86AsmBackend.cpp
   X86MCTargetDesc.cpp
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 3df48b4..8555f8f 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -8,6 +8,7 @@
 #include "MCTargetDesc/X86BaseInfo.h"
 #include "MCTargetDesc/X86FixupKinds.h"
+#include "MCTargetDesc/X86InstrRelaxTables.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/BinaryFormat/ELF.h"
 #include "llvm/BinaryFormat/MachO.h"
@@ -222,87 +223,7 @@ static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool Is16BitMode) {
 static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
   unsigned Op = Inst.getOpcode();
-  switch (Op) {
-  default:
-    return Op;
-
-    // IMUL
-  case X86::IMUL16rri8: return X86::IMUL16rri;
-  case X86::IMUL16rmi8: return X86::IMUL16rmi;
-  case X86::IMUL32rri8: return X86::IMUL32rri;
-  case X86::IMUL32rmi8: return X86::IMUL32rmi;
-  case X86::IMUL64rri8: return X86::IMUL64rri32;
-  case X86::IMUL64rmi8: return X86::IMUL64rmi32;
-
-    // AND
-  case X86::AND16ri8: return X86::AND16ri;
-  case X86::AND16mi8: return X86::AND16mi;
-  case X86::AND32ri8: return X86::AND32ri;
-  case X86::AND32mi8: return X86::AND32mi;
-  case X86::AND64ri8: return X86::AND64ri32;
-  case X86::AND64mi8: return X86::AND64mi32;
-
-    // OR
-  case X86::OR16ri8: return X86::OR16ri;
-  case X86::OR16mi8: return X86::OR16mi;
-  case X86::OR32ri8: return X86::OR32ri;
-  case X86::OR32mi8: return X86::OR32mi;
-  case X86::OR64ri8: return X86::OR64ri32;
-  case X86::OR64mi8: return X86::OR64mi32;
-
-    // XOR
-  case X86::XOR16ri8: return X86::XOR16ri;
-  case X86::XOR16mi8: return X86::XOR16mi;
-  case X86::XOR32ri8: return X86::XOR32ri;
-  case X86::XOR32mi8: return X86::XOR32mi;
-  case X86::XOR64ri8: return X86::XOR64ri32;
-  case X86::XOR64mi8: return X86::XOR64mi32;
-
-    // ADD
-  case X86::ADD16ri8: return X86::ADD16ri;
-  case X86::ADD16mi8: return X86::ADD16mi;
-  case X86::ADD32ri8: return X86::ADD32ri;
-  case X86::ADD32mi8: return X86::ADD32mi;
-  case X86::ADD64ri8: return X86::ADD64ri32;
-  case X86::ADD64mi8: return X86::ADD64mi32;
-
-    // ADC
-  case X86::ADC16ri8: return X86::ADC16ri;
-  case X86::ADC16mi8: return X86::ADC16mi;
-  case X86::ADC32ri8: return X86::ADC32ri;
-  case X86::ADC32mi8: return X86::ADC32mi;
-  case X86::ADC64ri8: return X86::ADC64ri32;
-  case X86::ADC64mi8: return X86::ADC64mi32;
-
-    // SUB
-  case X86::SUB16ri8: return X86::SUB16ri;
-  case X86::SUB16mi8: return X86::SUB16mi;
-  case X86::SUB32ri8: return X86::SUB32ri;
-  case X86::SUB32mi8: return X86::SUB32mi;
-  case X86::SUB64ri8: return X86::SUB64ri32;
-  case X86::SUB64mi8: return X86::SUB64mi32;
-
-    // SBB
-  case X86::SBB16ri8: return X86::SBB16ri;
-  case X86::SBB16mi8: return X86::SBB16mi;
-  case X86::SBB32ri8: return X86::SBB32ri;
-  case X86::SBB32mi8: return X86::SBB32mi;
-  case X86::SBB64ri8: return X86::SBB64ri32;
-  case X86::SBB64mi8: return X86::SBB64mi32;
-
-    // CMP
-  case X86::CMP16ri8: return X86::CMP16ri;
-  case X86::CMP16mi8: return X86::CMP16mi;
-  case X86::CMP32ri8: return X86::CMP32ri;
-  case X86::CMP32mi8: return X86::CMP32mi;
-  case X86::CMP64ri8: return X86::CMP64ri32;
-  case X86::CMP64mi8: return X86::CMP64mi32;
-
-    // PUSH
-  case X86::PUSH32i8: return X86::PUSHi32;
-  case X86::PUSH16i8: return X86::PUSHi16;
-  case X86::PUSH64i8: return X86::PUSH64i32;
-  }
+  return X86::getRelaxedOpcodeArith(Op);
 }
 
 static unsigned getRelaxedOpcode(const MCInst &Inst, bool Is16BitMode) {
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.cpp
new file mode 100644
index 0000000..901082c
--- /dev/null
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.cpp
@@ -0,0 +1,165 @@
+//===- X86InstrRelaxTables.cpp - X86 Instruction Relaxation Tables -*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the X86 instruction relaxation tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86InstrRelaxTables.h"
+#include "X86InstrInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ManagedStatic.h"
+#include <atomic>
+
+using namespace llvm;
+
+// These tables are sorted by their ShortOp value, allowing them to be binary
+// searched at runtime without the need for additional storage. The enum values
+// are currently emitted in X86GenInstrInfo.inc in alphabetical order, which
+// makes sorting these tables a simple matter of alphabetizing them.
+static const X86InstrRelaxTableEntry InstrRelaxTable[] = {
+  // ADC
+  { X86::ADC16mi8, X86::ADC16mi },
+  { X86::ADC16ri8, X86::ADC16ri },
+  { X86::ADC32mi8, X86::ADC32mi },
+  { X86::ADC32ri8, X86::ADC32ri },
+  { X86::ADC64mi8, X86::ADC64mi32 },
+  { X86::ADC64ri8, X86::ADC64ri32 },
+  // ADD
+  { X86::ADD16mi8, X86::ADD16mi },
+  { X86::ADD16ri8, X86::ADD16ri },
+  { X86::ADD32mi8, X86::ADD32mi },
+  { X86::ADD32ri8, X86::ADD32ri },
+  { X86::ADD64mi8, X86::ADD64mi32 },
+  { X86::ADD64ri8, X86::ADD64ri32 },
+  // AND
+  { X86::AND16mi8, X86::AND16mi },
+  { X86::AND16ri8, X86::AND16ri },
+  { X86::AND32mi8, X86::AND32mi },
+  { X86::AND32ri8, X86::AND32ri },
+  { X86::AND64mi8, X86::AND64mi32 },
+  { X86::AND64ri8, X86::AND64ri32 },
+  // CMP
+  { X86::CMP16mi8, X86::CMP16mi },
+  { X86::CMP16ri8, X86::CMP16ri },
+  { X86::CMP32mi8, X86::CMP32mi },
+  { X86::CMP32ri8, X86::CMP32ri },
+  { X86::CMP64mi8, X86::CMP64mi32 },
+  { X86::CMP64ri8, X86::CMP64ri32 },
+  // IMUL
+  { X86::IMUL16rmi8, X86::IMUL16rmi },
+  { X86::IMUL16rri8, X86::IMUL16rri },
+  { X86::IMUL32rmi8, X86::IMUL32rmi },
+  { X86::IMUL32rri8, X86::IMUL32rri },
+  { X86::IMUL64rmi8, X86::IMUL64rmi32 },
+  { X86::IMUL64rri8, X86::IMUL64rri32 },
+  // OR
+  { X86::OR16mi8, X86::OR16mi },
+  { X86::OR16ri8, X86::OR16ri },
+  { X86::OR32mi8, X86::OR32mi },
+  { X86::OR32ri8, X86::OR32ri },
+  { X86::OR64mi8, X86::OR64mi32 },
+  { X86::OR64ri8, X86::OR64ri32 },
+  // PUSH
+  { X86::PUSH16i8, X86::PUSHi16 },
+  { X86::PUSH32i8, X86::PUSHi32 },
+  { X86::PUSH64i8, X86::PUSH64i32 },
+  // SBB
+  { X86::SBB16mi8, X86::SBB16mi },
+  { X86::SBB16ri8, X86::SBB16ri },
+  { X86::SBB32mi8, X86::SBB32mi },
+  { X86::SBB32ri8, X86::SBB32ri },
+  { X86::SBB64mi8, X86::SBB64mi32 },
+  { X86::SBB64ri8, X86::SBB64ri32 },
+  // SUB
+  { X86::SUB16mi8, X86::SUB16mi },
+  { X86::SUB16ri8, X86::SUB16ri },
+  { X86::SUB32mi8, X86::SUB32mi },
+  { X86::SUB32ri8, X86::SUB32ri },
+  { X86::SUB64mi8, X86::SUB64mi32 },
+  { X86::SUB64ri8, X86::SUB64ri32 },
+  // XOR
+  { X86::XOR16mi8, X86::XOR16mi },
+  { X86::XOR16ri8, X86::XOR16ri },
+  { X86::XOR32mi8, X86::XOR32mi },
+  { X86::XOR32ri8, X86::XOR32ri },
+  { X86::XOR64mi8, X86::XOR64mi32 },
+  { X86::XOR64ri8, X86::XOR64ri32 },
+};
+
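+// Binary-search the given table, which must be sorted by KeyOp, for ShortOp.
+// In debug builds, verify exactly once that the static relax table really is
+// sorted and contains no duplicate entries before relying on the search.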
+static const X86InstrRelaxTableEntry *
+lookupRelaxTableImpl(ArrayRef<X86InstrRelaxTableEntry> Table,
+                     unsigned ShortOp) {
+#ifndef NDEBUG
+  // Make sure the tables are sorted.
+  static std::atomic<bool> RelaxTableChecked(false);
+  if (!RelaxTableChecked.load(std::memory_order_relaxed)) {
+    assert(llvm::is_sorted(InstrRelaxTable) &&
+           std::adjacent_find(std::begin(InstrRelaxTable),
+                              std::end(InstrRelaxTable)) ==
+               std::end(InstrRelaxTable) &&
+           "InstrRelaxTable is not sorted and unique!");
+    RelaxTableChecked.store(true, std::memory_order_relaxed);
+  }
+#endif
+
+  const X86InstrRelaxTableEntry *Data = llvm::lower_bound(Table, ShortOp);
+  if (Data != Table.end() && Data->KeyOp == ShortOp)
+    return Data;
+  return nullptr;
+}
+
+const X86InstrRelaxTableEntry *llvm::lookupRelaxTable(unsigned ShortOp) {
+  return lookupRelaxTableImpl(InstrRelaxTable, ShortOp);
+}
+
+namespace {
+
+// This class stores the short form table. It is instantiated as a
+// ManagedStatic to lazily initialize the table on first use.
+struct X86ShortFormTable {
+  // Stores relaxation table entries sorted by relaxed form opcode.
+  SmallVector<X86InstrRelaxTableEntry, 0> Table;
+
+  X86ShortFormTable() {
+    for (const X86InstrRelaxTableEntry &Entry : InstrRelaxTable)
+      Table.push_back({Entry.DstOp, Entry.KeyOp});
+
+    llvm::sort(Table);
+
+    // Now that it's sorted, ensure it's unique.
+    assert(std::adjacent_find(Table.begin(), Table.end()) == Table.end() &&
+           "Short form table is not unique!");
+  }
+};
+} // namespace
+
+static ManagedStatic<X86ShortFormTable> ShortTable;
+
+const X86InstrRelaxTableEntry *llvm::lookupShortTable(unsigned RelaxOp) {
+  auto &Table = ShortTable->Table;
+  auto I = llvm::lower_bound(Table, RelaxOp);
+  if (I != Table.end() && I->KeyOp == RelaxOp)
+    return &*I;
+  return nullptr;
+}
+
+namespace llvm {
+
+/// Get the short instruction opcode for a given relaxed opcode.
+unsigned X86::getShortOpcodeArith(unsigned RelaxOp) {
+  if (const X86InstrRelaxTableEntry *I = lookupShortTable(RelaxOp))
+    return I->DstOp;
+  return RelaxOp;
+}
+
+/// Get the relaxed instruction opcode for a given short opcode.
+unsigned X86::getRelaxedOpcodeArith(unsigned ShortOp) {
+  if (const X86InstrRelaxTableEntry *I = lookupRelaxTable(ShortOp))
+    return I->DstOp;
+  return ShortOp;
+}
+} // namespace llvm
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.h b/llvm/lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.h
new file mode 100644
index 0000000..0551c18
--- /dev/null
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstrRelaxTables.h
@@ -0,0 +1,54 @@
+//===-- X86InstrRelaxTables.h - X86 Instruction Relaxation Tables -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the interface to query the X86 instruction relaxation
+// tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_X86_X86INSTRRELAXTABLES_H
+#define LLVM_LIB_TARGET_X86_X86INSTRRELAXTABLES_H
+
+#include <cstdint>
+
+namespace llvm {
+
+// This struct is used for both the relax table and the short table. The KeyOp
+// is used to determine the sorting order.
+struct X86InstrRelaxTableEntry {
+  uint16_t KeyOp;
+  uint16_t DstOp;
+
+  bool operator<(const X86InstrRelaxTableEntry &RHS) const {
+    return KeyOp < RHS.KeyOp;
+  }
+  bool operator==(const X86InstrRelaxTableEntry &RHS) const {
+    return KeyOp == RHS.KeyOp;
+  }
+  friend bool operator<(const X86InstrRelaxTableEntry &TE, unsigned Opcode) {
+    return TE.KeyOp < Opcode;
+  }
+};
+
+/// Look up the relaxed form table entry for a given \p ShortOp.
+const X86InstrRelaxTableEntry *lookupRelaxTable(unsigned ShortOp);
+
+/// Look up the short form table entry for a given \p RelaxOp.
+const X86InstrRelaxTableEntry *lookupShortTable(unsigned RelaxOp);
+
+namespace X86 {
+
+/// Get the short instruction opcode for a given relaxed opcode.
+unsigned getShortOpcodeArith(unsigned RelaxOp);
+
+/// Get the relaxed instruction opcode for a given short opcode.
+unsigned getRelaxedOpcodeArith(unsigned ShortOp);
+} // namespace X86
+} // namespace llvm
+
+#endif
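A note on the comparator design above: llvm::lower_bound compares table
entries directly against a bare opcode, which is what the heterogeneous friend
operator<(entry, opcode) enables. A minimal, self-contained sketch of the same
idiom using only the standard library (illustrative, not code from the patch):

  #include <algorithm>
  #include <cassert>
  #include <cstdint>
  #include <iterator>

  // Same shape as X86InstrRelaxTableEntry: sorted by KeyOp, searched by a
  // plain opcode value rather than by another Entry.
  struct Entry {
    uint16_t KeyOp;
    uint16_t DstOp;
    friend bool operator<(const Entry &TE, unsigned Opcode) {
      return TE.KeyOp < Opcode;
    }
  };

  int main() {
    // The table must be sorted by KeyOp for the binary search to be valid.
    static const Entry Table[] = {{10, 100}, {20, 200}, {30, 300}};
    const Entry *I = std::lower_bound(std::begin(Table), std::end(Table), 20u);
    assert(I != std::end(Table) && I->KeyOp == 20 && I->DstOp == 200);
    return 0;
  }

The mixed-type comparison lets lower_bound take the opcode as the search key
directly, avoiding the construction of a dummy Entry at every call site.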