FunctionPass *createSelectOptimizePass();
FunctionPass *createCallBrPass();
+
+ /// Lowers KCFI operand bundles for indirect calls.
+ FunctionPass *createKCFIPass();
} // End llvm namespace
#endif
llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
}
+ //===--------------------------------------------------------------------===//
+ /// \name KCFI check lowering.
+ /// @{
+
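+ /// Emits a KCFI check for the indirect call at \p MBBI and returns the
+ /// emitted check instruction so the caller can bundle it with the call.
+ /// Targets that return true from supportKCFIBundles() should override
+ /// this hook.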
+ virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator &MBBI,
+ const TargetInstrInfo *TII) const {
+ llvm_unreachable("KCFI is not supported on this target");
+ }
+
+ /// @}
+
/// Inserts in the IR a target-specific intrinsic specifying a fence.
/// It is called by AtomicExpandPass before expanding an
/// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
void initializeIntervalPartitionPass(PassRegistry&);
void initializeJMCInstrumenterPass(PassRegistry&);
void initializeJumpThreadingPass(PassRegistry&);
+void initializeKCFIPass(PassRegistry &);
void initializeLCSSAVerificationPassPass(PassRegistry&);
void initializeLCSSAWrapperPassPass(PassRegistry&);
void initializeLazyBlockFrequencyInfoPassPass(PassRegistry&);
(void) llvm::createInstSimplifyLegacyPass();
(void) llvm::createInstructionCombiningPass();
(void) llvm::createJMCInstrumenterPass();
+ (void) llvm::createKCFIPass();
(void) llvm::createLCSSAPass();
(void) llvm::createLICMPass();
(void) llvm::createLoopSinkPass();
InterleavedLoadCombinePass.cpp
IntrinsicLowering.cpp
JMCInstrumenter.cpp
+ KCFI.cpp
LatencyPriorityQueue.cpp
LazyMachineBlockFrequencyInfo.cpp
LexicalScopes.cpp
-//===---- AArch64KCFI.cpp - Implements KCFI -------------------------------===//
+//===---- KCFI.cpp - Implements KCFI --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
-// This file implements KCFI indirect call checking.
+// This pass implements KCFI indirect call check lowering.
//
//===----------------------------------------------------------------------===//
-#include "AArch64.h"
-#include "AArch64InstrInfo.h"
-#include "AArch64Subtarget.h"
-#include "AArch64TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/InitializePasses.h"
using namespace llvm;
-#define DEBUG_TYPE "aarch64-kcfi"
-#define AARCH64_KCFI_PASS_NAME "Insert KCFI indirect call checks"
+#define DEBUG_TYPE "kcfi"
+#define KCFI_PASS_NAME "Insert KCFI indirect call checks"
STATISTIC(NumKCFIChecksAdded, "Number of indirect call checks added");
namespace {
-class AArch64KCFI : public MachineFunctionPass {
+class KCFI : public MachineFunctionPass {
public:
static char ID;
- AArch64KCFI() : MachineFunctionPass(ID) {}
+ KCFI() : MachineFunctionPass(ID) {}
- StringRef getPassName() const override { return AARCH64_KCFI_PASS_NAME; }
+ StringRef getPassName() const override { return KCFI_PASS_NAME; }
bool runOnMachineFunction(MachineFunction &MF) override;
private:
/// Machine instruction info used throughout the class.
- const AArch64InstrInfo *TII = nullptr;
+ const TargetInstrInfo *TII = nullptr;
+
+ /// Target lowering for arch-specific parts.
+ const TargetLowering *TLI = nullptr;
/// Emits a KCFI check before an indirect call.
/// \returns true if the check was added and false otherwise.
bool emitCheck(MachineBasicBlock &MBB,
               MachineBasicBlock::instr_iterator I) const;
};
-char AArch64KCFI::ID = 0;
+char KCFI::ID = 0;
} // end anonymous namespace
-INITIALIZE_PASS(AArch64KCFI, DEBUG_TYPE, AARCH64_KCFI_PASS_NAME, false, false)
+INITIALIZE_PASS(KCFI, DEBUG_TYPE, KCFI_PASS_NAME, false, false)
-FunctionPass *llvm::createAArch64KCFIPass() { return new AArch64KCFI(); }
+FunctionPass *llvm::createKCFIPass() { return new KCFI(); }
-bool AArch64KCFI::emitCheck(MachineBasicBlock &MBB,
- MachineBasicBlock::instr_iterator MBBI) const {
+bool KCFI::emitCheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator MBBI) const {
assert(TII && "Target instruction info was not initialized");
+ assert(TLI && "Target lowering was not initialized");
// If the call instruction is bundled, we can only emit a check safely if
// it's the first instruction in the bundle.
if (MBBI->isBundled() && !std::prev(MBBI)->isBundle())
report_fatal_error("Cannot emit a KCFI check for a bundled call");
- switch (MBBI->getOpcode()) {
- case AArch64::BLR:
- case AArch64::BLRNoIP:
- case AArch64::TCRETURNri:
- case AArch64::TCRETURNriBTI:
- break;
- default:
- llvm_unreachable("Unexpected CFI call opcode");
- }
-
- MachineOperand &Target = MBBI->getOperand(0);
- assert(Target.isReg() && "Invalid target operand for an indirect call");
- Target.setIsRenamable(false);
+ // Emit a KCFI check before the call instruction at MBBI. The target's
+ // EmitKCFICheck implementation is responsible for unfolding memory
+ // operands where applicable.
+ MachineInstr *Check = TLI->EmitKCFICheck(MBB, MBBI, TII);
- MachineInstr *Check =
- BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(AArch64::KCFI_CHECK))
- .addReg(Target.getReg())
- .addImm(MBBI->getCFIType())
- .getInstr();
+ // Clear the original call's CFI type.
+ assert(MBBI->isCall() && "Unexpected instruction type");
MBBI->setCFIType(*MBB.getParent(), 0);
// If not already bundled, bundle the check and the call to prevent
// further changes.
if (!MBBI->isBundled())
  finalizeBundle(MBB, Check->getIterator(), std::next(MBBI->getIterator()));

++NumKCFIChecksAdded;
return true;
}
-bool AArch64KCFI::runOnMachineFunction(MachineFunction &MF) {
+bool KCFI::runOnMachineFunction(MachineFunction &MF) {
const Module *M = MF.getMMI().getModule();
if (!M->getModuleFlag("kcfi"))
return false;
- const auto &SubTarget = MF.getSubtarget<AArch64Subtarget>();
+ const auto &SubTarget = MF.getSubtarget();
TII = SubTarget.getInstrInfo();
+ TLI = SubTarget.getTargetLowering();
bool Changed = false;
for (MachineBasicBlock &MBB : MF) {
+ // Use instr_iterator because we don't want to skip bundles.
for (MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),
MIE = MBB.instr_end();
MII != MIE; ++MII) {
FunctionPass *createAArch64SLSHardeningPass();
FunctionPass *createAArch64IndirectThunks();
FunctionPass *createAArch64SpeculationHardeningPass();
-FunctionPass *createAArch64KCFIPass();
FunctionPass *createAArch64LoadStoreOptimizationPass();
ModulePass *createAArch64LowerHomogeneousPrologEpilogPass();
FunctionPass *createAArch64SIMDInstrOptPass();
void initializeAArch64DeadRegisterDefinitionsPass(PassRegistry&);
void initializeAArch64ExpandPseudoPass(PassRegistry &);
void initializeAArch64GlobalsTaggingPass(PassRegistry &);
-void initializeAArch64KCFIPass(PassRegistry &);
void initializeAArch64LoadStoreOptPass(PassRegistry&);
void initializeAArch64LowerHomogeneousPrologEpilogPass(PassRegistry &);
void initializeAArch64MIPeepholeOptPass(PassRegistry &);
return TargetLowering::shouldConvertFpToSat(Op, FPVT, VT);
}
+MachineInstr *
+AArch64TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator &MBBI,
+ const TargetInstrInfo *TII) const {
+ assert(MBBI->isCall() && MBBI->getCFIType() &&
+ "Invalid call instruction for a KCFI check");
+
+ switch (MBBI->getOpcode()) {
+ case AArch64::BLR:
+ case AArch64::BLRNoIP:
+ case AArch64::TCRETURNri:
+ case AArch64::TCRETURNriBTI:
+ break;
+ default:
+ llvm_unreachable("Unexpected CFI call opcode");
+ }
+
+ MachineOperand &Target = MBBI->getOperand(0);
+ assert(Target.isReg() && "Invalid target operand for an indirect call");
+ Target.setIsRenamable(false);
+
+ return BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(AArch64::KCFI_CHECK))
+ .addReg(Target.getReg())
+ .addImm(MBBI->getCFIType())
+ .getInstr();
+}
+
bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
}
bool supportKCFIBundles() const override { return true; }
+ MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator &MBBI,
+ const TargetInstrInfo *TII) const override;
+
/// Enable aggressive FMA fusion on targets that want it.
bool enableAggressiveFMAFusion(EVT VT) const override;
initializeAArch64ConditionOptimizerPass(*PR);
initializeAArch64DeadRegisterDefinitionsPass(*PR);
initializeAArch64ExpandPseudoPass(*PR);
- initializeAArch64KCFIPass(*PR);
initializeAArch64LoadStoreOptPass(*PR);
initializeAArch64MIPeepholeOptPass(*PR);
initializeAArch64SIMDInstrOptPass(*PR);
initializeFalkorHWPFFixPass(*PR);
initializeFalkorMarkStridedAccessesLegacyPass(*PR);
initializeLDTLSCleanupPass(*PR);
+ initializeKCFIPass(*PR);
initializeSMEABIPass(*PR);
initializeSVEIntrinsicOptsPass(*PR);
initializeAArch64SpeculationHardeningPass(*PR);
addPass(createAArch64LoadStoreOptimizationPass());
}
// Emit KCFI checks for indirect calls.
- addPass(createAArch64KCFIPass());
+ addPass(createKCFIPass());
// The AArch64SpeculationHardeningPass destroys dominator tree and natural
// loop info, which is needed for the FalkorHWPFFixPass and also later on.
AArch64ISelDAGToDAG.cpp
AArch64ISelLowering.cpp
AArch64InstrInfo.cpp
- AArch64KCFI.cpp
AArch64LoadStoreOptimizer.cpp
AArch64LowerHomogeneousPrologEpilog.cpp
AArch64MachineFunctionInfo.cpp
X86InstrFoldTables.cpp
X86InstrInfo.cpp
X86EvexToVex.cpp
- X86KCFI.cpp
X86LegalizerInfo.cpp
X86LoadValueInjectionLoadHardening.cpp
X86LoadValueInjectionRetHardening.cpp
/// destinations as part of CET IBT mechanism.
FunctionPass *createX86IndirectBranchTrackingPass();
-/// This pass inserts KCFI checks before indirect calls.
-FunctionPass *createX86KCFIPass();
-
/// Return a pass that pads short functions with NOOPs.
/// This will prevent a stall when returning on the Atom.
FunctionPass *createX86PadShortFunctions();
void initializeX86FastTileConfigPass(PassRegistry &);
void initializeX86FixupSetCCPassPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
-void initializeX86KCFIPass(PassRegistry &);
void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
void initializeX86LowerAMXIntrinsicsLegacyPassPass(PassRegistry &);
return Subtarget.is64Bit();
}
+MachineInstr *
+X86TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator &MBBI,
+ const TargetInstrInfo *TII) const {
+ assert(MBBI->isCall() && MBBI->getCFIType() &&
+ "Invalid call instruction for a KCFI check");
+
+ MachineFunction &MF = *MBB.getParent();
+ // If the call target is a memory operand, unfold it and use R11 for the
+ // call, so KCFI_CHECK won't have to recompute the address.
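+ // For example, a CALL64m such as `callq *8(%rdi)` is rewritten to
+ //   movq 8(%rdi), %r11
+ //   callq *%r11
+ // and the KCFI_CHECK is then emitted against %r11.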
+ switch (MBBI->getOpcode()) {
+ case X86::CALL64m:
+ case X86::CALL64m_NT:
+ case X86::TAILJMPm64:
+ case X86::TAILJMPm64_REX: {
+ MachineBasicBlock::instr_iterator OrigCall = MBBI;
+ SmallVector<MachineInstr *, 2> NewMIs;
+ if (!TII->unfoldMemoryOperand(MF, *OrigCall, X86::R11, /*UnfoldLoad=*/true,
+ /*UnfoldStore=*/false, NewMIs))
+ report_fatal_error("Failed to unfold memory operand for a KCFI check");
+ for (auto *NewMI : NewMIs)
+ MBBI = MBB.insert(OrigCall, NewMI);
+ assert(MBBI->isCall() &&
+ "Unexpected instruction after memory operand unfolding");
+ if (OrigCall->shouldUpdateCallSiteInfo())
+ MF.moveCallSiteInfo(&*OrigCall, &*MBBI);
+ MBBI->setCFIType(MF, OrigCall->getCFIType());
+ OrigCall->eraseFromParent();
+ break;
+ }
+ default:
+ break;
+ }
+
+ MachineOperand &Target = MBBI->getOperand(0);
+ Register TargetReg;
+ switch (MBBI->getOpcode()) {
+ case X86::CALL64r:
+ case X86::CALL64r_NT:
+ case X86::TAILJMPr64:
+ case X86::TAILJMPr64_REX:
+ assert(Target.isReg() && "Unexpected target operand for an indirect call");
+ Target.setIsRenamable(false);
+ TargetReg = Target.getReg();
+ break;
+ case X86::CALL64pcrel32:
+ case X86::TAILJMPd64:
+ assert(Target.isSymbol() && "Unexpected target operand for a direct call");
+ // X86TargetLowering::EmitLoweredIndirectThunk always uses r11 for
+ // 64-bit indirect thunk calls.
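+ // (e.g. thunks such as __llvm_retpoline_r11 or __llvm_lvi_thunk_r11).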
+ assert(StringRef(Target.getSymbolName()).endswith("_r11") &&
+ "Unexpected register for an indirect thunk call");
+ TargetReg = X86::R11;
+ break;
+ default:
+ llvm_unreachable("Unexpected CFI call opcode");
+ break;
+ }
+
+ return BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(X86::KCFI_CHECK))
+ .addReg(TargetReg)
+ .addImm(MBBI->getCFIType())
+ .getInstr();
+}
+
/// Returns true if stack probing through a function call is requested.
bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
return !getStackProbeSymbolName(MF).empty();
bool supportKCFIBundles() const override { return true; }
+ MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator &MBBI,
+ const TargetInstrInfo *TII) const override;
+
bool hasStackProbeSymbol(const MachineFunction &MF) const override;
bool hasInlineStackProbe(const MachineFunction &MF) const override;
StringRef getStackProbeSymbolName(const MachineFunction &MF) const override;
return nullptr;
// Don't fold loads into indirect calls that need a KCFI check as we'll
- // have to unfold these in X86KCFIPass anyway.
+ // have to unfold these in X86TargetLowering::EmitKCFICheck anyway.
if (MI.isCall() && MI.getCFIType())
return nullptr;
+++ /dev/null
-//===---- X86KCFI.cpp - Implements KCFI -----------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file implements KCFI indirect call checking.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86.h"
-#include "X86InstrInfo.h"
-#include "X86Subtarget.h"
-#include "X86TargetMachine.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineInstrBundle.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "x86-kcfi"
-#define X86_KCFI_PASS_NAME "Insert KCFI indirect call checks"
-
-STATISTIC(NumKCFIChecksAdded, "Number of indirect call checks added");
-
-namespace {
-class X86KCFI : public MachineFunctionPass {
-public:
- static char ID;
-
- X86KCFI() : MachineFunctionPass(ID) {}
-
- StringRef getPassName() const override { return X86_KCFI_PASS_NAME; }
- bool runOnMachineFunction(MachineFunction &MF) override;
-
-private:
- /// Machine instruction info used throughout the class.
- const X86InstrInfo *TII = nullptr;
-
- /// Emits a KCFI check before an indirect call.
- /// \returns true if the check was added and false otherwise.
- bool emitCheck(MachineBasicBlock &MBB,
- MachineBasicBlock::instr_iterator I) const;
-};
-
-char X86KCFI::ID = 0;
-} // end anonymous namespace
-
-INITIALIZE_PASS(X86KCFI, DEBUG_TYPE, X86_KCFI_PASS_NAME, false, false)
-
-FunctionPass *llvm::createX86KCFIPass() { return new X86KCFI(); }
-
-bool X86KCFI::emitCheck(MachineBasicBlock &MBB,
- MachineBasicBlock::instr_iterator MBBI) const {
- assert(TII && "Target instruction info was not initialized");
-
- // If the call instruction is bundled, we can only emit a check safely if
- // it's the first instruction in the bundle.
- if (MBBI->isBundled() && !std::prev(MBBI)->isBundle())
- report_fatal_error("Cannot emit a KCFI check for a bundled call");
-
- MachineFunction &MF = *MBB.getParent();
- // If the call target is a memory operand, unfold it and use R11 for the
- // call, so KCFI_CHECK won't have to recompute the address.
- switch (MBBI->getOpcode()) {
- case X86::CALL64m:
- case X86::CALL64m_NT:
- case X86::TAILJMPm64:
- case X86::TAILJMPm64_REX: {
- MachineBasicBlock::instr_iterator OrigCall = MBBI;
- SmallVector<MachineInstr *, 2> NewMIs;
- if (!TII->unfoldMemoryOperand(MF, *OrigCall, X86::R11, /*UnfoldLoad=*/true,
- /*UnfoldStore=*/false, NewMIs))
- report_fatal_error("Failed to unfold memory operand for a KCFI check");
- for (auto *NewMI : NewMIs)
- MBBI = MBB.insert(OrigCall, NewMI);
- assert(MBBI->isCall() &&
- "Unexpected instruction after memory operand unfolding");
- if (OrigCall->shouldUpdateCallSiteInfo())
- MF.moveCallSiteInfo(&*OrigCall, &*MBBI);
- MBBI->setCFIType(MF, OrigCall->getCFIType());
- OrigCall->eraseFromParent();
- break;
- }
- default:
- break;
- }
-
- MachineInstr *Check =
- BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(X86::KCFI_CHECK))
- .getInstr();
- MachineOperand &Target = MBBI->getOperand(0);
- switch (MBBI->getOpcode()) {
- case X86::CALL64r:
- case X86::CALL64r_NT:
- case X86::TAILJMPr64:
- case X86::TAILJMPr64_REX:
- assert(Target.isReg() && "Unexpected target operand for an indirect call");
- Check->addOperand(MachineOperand::CreateReg(Target.getReg(), false));
- Target.setIsRenamable(false);
- break;
- case X86::CALL64pcrel32:
- case X86::TAILJMPd64:
- assert(Target.isSymbol() && "Unexpected target operand for a direct call");
- // X86TargetLowering::EmitLoweredIndirectThunk always uses r11 for
- // 64-bit indirect thunk calls.
- assert(StringRef(Target.getSymbolName()).endswith("_r11") &&
- "Unexpected register for an indirect thunk call");
- Check->addOperand(MachineOperand::CreateReg(X86::R11, false));
- break;
- default:
- llvm_unreachable("Unexpected CFI call opcode");
- }
-
- Check->addOperand(MachineOperand::CreateImm(MBBI->getCFIType()));
- MBBI->setCFIType(MF, 0);
-
- // If not already bundled, bundle the check and the call to prevent
- // further changes.
- if (!MBBI->isBundled())
- finalizeBundle(MBB, Check->getIterator(), std::next(MBBI->getIterator()));
-
- ++NumKCFIChecksAdded;
- return true;
-}
-
-bool X86KCFI::runOnMachineFunction(MachineFunction &MF) {
- const Module *M = MF.getMMI().getModule();
- if (!M->getModuleFlag("kcfi"))
- return false;
-
- const auto &SubTarget = MF.getSubtarget<X86Subtarget>();
- TII = SubTarget.getInstrInfo();
-
- bool Changed = false;
- for (MachineBasicBlock &MBB : MF) {
- for (MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),
- MIE = MBB.instr_end();
- MII != MIE; ++MII) {
- if (MII->isCall() && MII->getCFIType())
- Changed |= emitCheck(MBB, MII);
- }
- }
-
- return Changed;
-}
initializeX86TileConfigPass(PR);
initializeX86FastPreTileConfigPass(PR);
initializeX86FastTileConfigPass(PR);
- initializeX86KCFIPass(PR);
+ initializeKCFIPass(PR);
initializeX86LowerTileCopyPass(PR);
initializeX86ExpandPseudoPass(PR);
initializeX86ExecutionDomainFixPass(PR);
void X86PassConfig::addPreSched2() {
addPass(createX86ExpandPseudoPass());
- addPass(createX86KCFIPass());
+ addPass(createKCFIPass());
}
void X86PassConfig::addPreEmitPass() {
; RUN: llc -mtriple=aarch64-- -verify-machineinstrs < %s | FileCheck %s --check-prefix=ASM
; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL
-; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -stop-after=aarch64-kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
+; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
; ASM: .word 12345678
define void @f1(ptr noundef %x) !kcfi_type !2 {
; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -mattr=harden-sls-blr -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL-SLS
; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -mattr=harden-sls-blr -stop-after=finalize-isel -global-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL-SLS
-; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -stop-after=aarch64-kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
-; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -mattr=harden-sls-blr -stop-after=aarch64-kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI-SLS
+; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
+; RUN: llc -mtriple=aarch64-- -verify-machineinstrs -mattr=harden-sls-blr -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI-SLS
; ASM: .word 12345678
define void @f1(ptr noundef %x) !kcfi_type !1 {
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs < %s | FileCheck %s --check-prefix=ASM
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs -stop-after=x86-kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
; ASM: .p2align 4, 0x90
; ASM: .type __cfi_f1,@function