From: Petar Jovanovic Date: Wed, 1 Nov 2017 23:05:52 +0000 (+0000) Subject: Revert "Correct dwarf unwind information in function epilogue for X86" X-Git-Tag: llvmorg-6.0.0-rc1~4381 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bb5c84fb5747ddaf867a5da2ddb6a37a0dbb5dd5;p=platform%2Fupstream%2Fllvm.git Revert "Correct dwarf unwind information in function epilogue for X86" This reverts r317100 as it introduced sanitizer-x86_64-linux-autoconf buildbot failure (build #15606). llvm-svn: 317136 --- diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h index 30b74d2..8e6b157 100644 --- a/llvm/include/llvm/CodeGen/Passes.h +++ b/llvm/include/llvm/CodeGen/Passes.h @@ -417,9 +417,6 @@ namespace llvm { /// shuffles. FunctionPass *createExpandReductionsPass(); - /// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp - FunctionPass *createCFIInstrInserter(); - } // End llvm namespace #endif diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index abf69e2..c3ad8fe 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -85,7 +85,6 @@ void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&); void initializeCFGPrinterLegacyPassPass(PassRegistry&); void initializeCFGSimplifyPassPass(PassRegistry&); void initializeCFGViewerLegacyPassPass(PassRegistry&); -void initializeCFIInstrInserterPass(PassRegistry&); void initializeCFLAndersAAWrapperPassPass(PassRegistry&); void initializeCFLSteensAAWrapperPassPass(PassRegistry&); void initializeCallGraphDOTPrinterPass(PassRegistry&); diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td index 927da1c..048bd1f 100644 --- a/llvm/include/llvm/Target/Target.td +++ b/llvm/include/llvm/Target/Target.td @@ -902,7 +902,7 @@ def CFI_INSTRUCTION : Instruction { let InOperandList = (ins i32imm:$id); let AsmString = ""; let hasCtrlDep = 1; - let isNotDuplicable = 0; + let isNotDuplicable = 1; } def EH_LABEL : Instruction { let OutOperandList = (outs); diff --git a/llvm/include/llvm/Target/TargetFrameLowering.h b/llvm/include/llvm/Target/TargetFrameLowering.h index e144116..31017cb 100644 --- a/llvm/include/llvm/Target/TargetFrameLowering.h +++ b/llvm/include/llvm/Target/TargetFrameLowering.h @@ -341,14 +341,6 @@ public: return false; return true; } - - /// Return initial CFA offset value i.e. the one valid at the beginning of the - /// function (before any stack operations). - virtual int getInitialCFAOffset(const MachineFunction &MF) const; - - /// Return initial CFA register value i.e. the one valid at the beginning of - /// the function (before any stack operations). - virtual unsigned getInitialCFARegister(const MachineFunction &MF) const; }; } // End llvm namespace diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp index f7c46ac..40cb0c0 100644 --- a/llvm/lib/CodeGen/BranchFolding.cpp +++ b/llvm/lib/CodeGen/BranchFolding.cpp @@ -296,11 +296,6 @@ static unsigned HashEndOfMBB(const MachineBasicBlock &MBB) { return HashMachineInstr(*I); } -// Whether MI should be counted as an instruction when calculating common tail. -static bool countsAsInstruction(const MachineInstr &MI) { - return !(MI.isDebugValue() || MI.isCFIInstruction()); -} - /// ComputeCommonTailLength - Given two machine basic blocks, compute the number /// of instructions they actually have in common together at their end. Return /// iterators for the first shared instruction in each block. 
@@ -315,9 +310,9 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1, while (I1 != MBB1->begin() && I2 != MBB2->begin()) { --I1; --I2; // Skip debugging pseudos; necessary to avoid changing the code. - while (!countsAsInstruction(*I1)) { + while (I1->isDebugValue()) { if (I1==MBB1->begin()) { - while (!countsAsInstruction(*I2)) { + while (I2->isDebugValue()) { if (I2==MBB2->begin()) // I1==DBG at begin; I2==DBG at begin return TailLen; @@ -330,7 +325,7 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1, --I1; } // I1==first (untested) non-DBG preceding known match - while (!countsAsInstruction(*I2)) { + while (I2->isDebugValue()) { if (I2==MBB2->begin()) { ++I1; // I1==non-DBG, or first of DBGs not at begin; I2==DBG at begin @@ -373,35 +368,6 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1, } ++I1; } - - // Ensure that I1 and I2 do not point to a CFI_INSTRUCTION. This can happen if - // I1 and I2 are non-identical when compared and then one or both of them ends - // up pointing to a CFI instruction after being incremented. For example: - /* - BB1: - ... - INSTRUCTION_A - ADD32ri8 <- last common instruction - ... - BB2: - ... - INSTRUCTION_B - CFI_INSTRUCTION - ADD32ri8 <- last common instruction - ... - */ - // When INSTRUCTION_A and INSTRUCTION_B are compared as not equal, after - // incrementing the iterators, I1 will point to ADD, however I2 will point to - // the CFI instruction. Later on, this leads to BB2 being 'hacked off' at the - // wrong place (in ReplaceTailWithBranchTo()) which results in losing this CFI - // instruction. - while (I1 != MBB1->end() && I1->isCFIInstruction()) { - ++I1; - } - - while (I2 != MBB2->end() && I2->isCFIInstruction()) { - ++I2; - } return TailLen; } @@ -488,7 +454,7 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I, MachineBasicBlock::iterator E) { unsigned Time = 0; for (; I != E; ++I) { - if (!countsAsInstruction(*I)) + if (I->isDebugValue()) continue; if (I->isCall()) Time += 10; @@ -848,12 +814,12 @@ mergeOperations(MachineBasicBlock::iterator MBBIStartPos, assert(MBBI != MBBIE && "Reached BB end within common tail length!"); (void)MBBIE; - if (!countsAsInstruction(*MBBI)) { + if (MBBI->isDebugValue()) { ++MBBI; continue; } - while ((MBBICommon != MBBIECommon) && !countsAsInstruction(*MBBICommon)) + while ((MBBICommon != MBBIECommon) && MBBICommon->isDebugValue()) ++MBBICommon; assert(MBBICommon != MBBIECommon && @@ -893,7 +859,7 @@ void BranchFolder::mergeCommonTails(unsigned commonTailIndex) { } for (auto &MI : *MBB) { - if (!countsAsInstruction(MI)) + if (MI.isDebugValue()) continue; DebugLoc DL = MI.getDebugLoc(); for (unsigned int i = 0 ; i < NextCommonInsts.size() ; i++) { @@ -903,7 +869,7 @@ void BranchFolder::mergeCommonTails(unsigned commonTailIndex) { auto &Pos = NextCommonInsts[i]; assert(Pos != SameTails[i].getBlock()->end() && "Reached BB end within common tail"); - while (!countsAsInstruction(*Pos)) { + while (Pos->isDebugValue()) { ++Pos; assert(Pos != SameTails[i].getBlock()->end() && "Reached BB end within common tail"); diff --git a/llvm/lib/CodeGen/CFIInstrInserter.cpp b/llvm/lib/CodeGen/CFIInstrInserter.cpp deleted file mode 100644 index 7760a3b..0000000 --- a/llvm/lib/CodeGen/CFIInstrInserter.cpp +++ /dev/null @@ -1,319 +0,0 @@ -//===------ CFIInstrInserter.cpp - Insert additional CFI instructions -----===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. 
See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -/// \file This pass verifies incoming and outgoing CFA information of basic -/// blocks. CFA information is information about offset and register set by CFI -/// directives, valid at the start and end of a basic block. This pass checks -/// that outgoing information of predecessors matches incoming information of -/// their successors. Then it checks if blocks have correct CFA calculation rule -/// set and inserts additional CFI instruction at their beginnings if they -/// don't. CFI instructions are inserted if basic blocks have incorrect offset -/// or register set by previous blocks, as a result of a non-linear layout of -/// blocks in a function. -//===----------------------------------------------------------------------===// - -#include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineModuleInfo.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/Target/TargetFrameLowering.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetSubtargetInfo.h" -using namespace llvm; - -namespace { -class CFIInstrInserter : public MachineFunctionPass { - public: - static char ID; - - CFIInstrInserter() : MachineFunctionPass(ID) { - initializeCFIInstrInserterPass(*PassRegistry::getPassRegistry()); - } - - void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.setPreservesAll(); - MachineFunctionPass::getAnalysisUsage(AU); - } - - bool runOnMachineFunction(MachineFunction &MF) override { - - if (!MF.getMMI().hasDebugInfo() && - !MF.getFunction()->needsUnwindTableEntry()) - return false; - - MBBVector.resize(MF.getNumBlockIDs()); - calculateCFAInfo(MF); -#ifndef NDEBUG - unsigned ErrorNum = verify(MF); - if (ErrorNum) - report_fatal_error("Found " + Twine(ErrorNum) + - " in/out CFI information errors."); -#endif - bool insertedCFI = insertCFIInstrs(MF); - MBBVector.clear(); - return insertedCFI; - } - - private: - struct MBBCFAInfo { - MachineBasicBlock *MBB; - /// Value of cfa offset valid at basic block entry. - int IncomingCFAOffset = -1; - /// Value of cfa offset valid at basic block exit. - int OutgoingCFAOffset = -1; - /// Value of cfa register valid at basic block entry. - unsigned IncomingCFARegister = 0; - /// Value of cfa register valid at basic block exit. - unsigned OutgoingCFARegister = 0; - /// If in/out cfa offset and register values for this block have already - /// been set or not. - bool Processed = false; - }; - - /// Contains cfa offset and register values valid at entry and exit of basic - /// blocks. - SmallVector MBBVector; - - /// Calculate cfa offset and register values valid at entry and exit for all - /// basic blocks in a function. - void calculateCFAInfo(MachineFunction &MF); - /// Calculate cfa offset and register values valid at basic block exit by - /// checking the block for CFI instructions. Block's incoming CFA info remains - /// the same. - void calculateOutgoingCFAInfo(struct MBBCFAInfo &MBBInfo); - /// Update in/out cfa offset and register values for successors of the basic - /// block. - void updateSuccCFAInfo(struct MBBCFAInfo &MBBInfo); - - /// Check if incoming CFA information of a basic block matches outgoing CFA - /// information of the previous block. If it doesn't, insert CFI instruction - /// at the beginning of the block that corrects the CFA calculation rule for - /// that block. 
- bool insertCFIInstrs(MachineFunction &MF); - /// Return the cfa offset value that should be set at the beginning of a MBB - /// if needed. The negated value is needed when creating CFI instructions that - /// set absolute offset. - int getCorrectCFAOffset(MachineBasicBlock *MBB) { - return -MBBVector[MBB->getNumber()].IncomingCFAOffset; - } - - void report(const char *msg, MachineBasicBlock &MBB); - /// Go through each MBB in a function and check that outgoing offset and - /// register of its predecessors match incoming offset and register of that - /// MBB, as well as that incoming offset and register of its successors match - /// outgoing offset and register of the MBB. - unsigned verify(MachineFunction &MF); -}; -} - -char CFIInstrInserter::ID = 0; -INITIALIZE_PASS(CFIInstrInserter, "cfi-instr-inserter", - "Check CFA info and insert CFI instructions if needed", false, - false) -FunctionPass *llvm::createCFIInstrInserter() { return new CFIInstrInserter(); } - -void CFIInstrInserter::calculateCFAInfo(MachineFunction &MF) { - // Initial CFA offset value i.e. the one valid at the beginning of the - // function. - int InitialOffset = - MF.getSubtarget().getFrameLowering()->getInitialCFAOffset(MF); - // Initial CFA register value i.e. the one valid at the beginning of the - // function. - unsigned InitialRegister = - MF.getSubtarget().getFrameLowering()->getInitialCFARegister(MF); - - // Initialize MBBMap. - for (MachineBasicBlock &MBB : MF) { - struct MBBCFAInfo MBBInfo; - MBBInfo.MBB = &MBB; - MBBInfo.IncomingCFAOffset = InitialOffset; - MBBInfo.OutgoingCFAOffset = InitialOffset; - MBBInfo.IncomingCFARegister = InitialRegister; - MBBInfo.OutgoingCFARegister = InitialRegister; - MBBVector[MBB.getNumber()] = MBBInfo; - } - - // Set in/out cfa info for all blocks in the function. This traversal is based - // on the assumption that the first block in the function is the entry block - // i.e. that it has initial cfa offset and register values as incoming CFA - // information. - for (MachineBasicBlock &MBB : MF) { - if (MBBVector[MBB.getNumber()].Processed) continue; - calculateOutgoingCFAInfo(MBBVector[MBB.getNumber()]); - updateSuccCFAInfo(MBBVector[MBB.getNumber()]); - } -} - -void CFIInstrInserter::calculateOutgoingCFAInfo(struct MBBCFAInfo &MBBInfo) { - // Outgoing cfa offset set by the block. - int SetOffset = MBBInfo.IncomingCFAOffset; - // Outgoing cfa register set by the block. - unsigned SetRegister = MBBInfo.IncomingCFARegister; - const std::vector<MCCFIInstruction> &Instrs = - MBBInfo.MBB->getParent()->getFrameInstructions(); - - // Determine cfa offset and register set by the block. - for (MachineInstr &MI : - make_range(MBBInfo.MBB->instr_begin(), MBBInfo.MBB->instr_end())) { - if (MI.isCFIInstruction()) { - unsigned CFIIndex = MI.getOperand(0).getCFIIndex(); - const MCCFIInstruction &CFI = Instrs[CFIIndex]; - if (CFI.getOperation() == MCCFIInstruction::OpDefCfaRegister) { - SetRegister = CFI.getRegister(); - } else if (CFI.getOperation() == MCCFIInstruction::OpDefCfaOffset) { - SetOffset = CFI.getOffset(); - } else if (CFI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset) { - SetOffset += CFI.getOffset(); - } else if (CFI.getOperation() == MCCFIInstruction::OpDefCfa) { - SetRegister = CFI.getRegister(); - SetOffset = CFI.getOffset(); - } - } - } - - MBBInfo.Processed = true; - - // Update outgoing CFA info. 
- MBBInfo.OutgoingCFAOffset = SetOffset; - MBBInfo.OutgoingCFARegister = SetRegister; -} - -void CFIInstrInserter::updateSuccCFAInfo(struct MBBCFAInfo &MBBInfo) { - - for (MachineBasicBlock *Succ : MBBInfo.MBB->successors()) { - struct MBBCFAInfo &SuccInfo = MBBVector[Succ->getNumber()]; - if (SuccInfo.Processed) continue; - SuccInfo.IncomingCFAOffset = MBBInfo.OutgoingCFAOffset; - SuccInfo.IncomingCFARegister = MBBInfo.OutgoingCFARegister; - calculateOutgoingCFAInfo(SuccInfo); - updateSuccCFAInfo(SuccInfo); - } -} - -bool CFIInstrInserter::insertCFIInstrs(MachineFunction &MF) { - - const struct MBBCFAInfo *PrevMBBInfo = &MBBVector[MF.front().getNumber()]; - const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); - bool InsertedCFIInstr = false; - - for (MachineBasicBlock &MBB : MF) { - // Skip the first MBB in a function - if (MBB.getNumber() == MF.front().getNumber()) continue; - - const struct MBBCFAInfo& MBBInfo = MBBVector[MBB.getNumber()]; - auto MBBI = MBBInfo.MBB->begin(); - DebugLoc DL = MBBInfo.MBB->findDebugLoc(MBBI); - - if (PrevMBBInfo->OutgoingCFAOffset != MBBInfo.IncomingCFAOffset) { - // If both outgoing offset and register of a previous block don't match - // incoming offset and register of this block, add a def_cfa instruction - // with the correct offset and register for this block. - if (PrevMBBInfo->OutgoingCFARegister != MBBInfo.IncomingCFARegister) { - unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa( - nullptr, MBBInfo.IncomingCFARegister, getCorrectCFAOffset(&MBB))); - BuildMI(*MBBInfo.MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) - .addCFIIndex(CFIIndex); - // If outgoing offset of a previous block doesn't match incoming offset - // of this block, add a def_cfa_offset instruction with the correct - // offset for this block. - } else { - unsigned CFIIndex = - MF.addFrameInst(MCCFIInstruction::createDefCfaOffset( - nullptr, getCorrectCFAOffset(&MBB))); - BuildMI(*MBBInfo.MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) - .addCFIIndex(CFIIndex); - } - InsertedCFIInstr = true; - // If outgoing register of a previous block doesn't match incoming - // register of this block, add a def_cfa_register instruction with the - // correct register for this block. 
- } else if (PrevMBBInfo->OutgoingCFARegister != MBBInfo.IncomingCFARegister) { - unsigned CFIIndex = - MF.addFrameInst(MCCFIInstruction::createDefCfaRegister( - nullptr, MBBInfo.IncomingCFARegister)); - BuildMI(*MBBInfo.MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) - .addCFIIndex(CFIIndex); - InsertedCFIInstr = true; - } - PrevMBBInfo = &MBBInfo; - } - return InsertedCFIInstr; -} - -void CFIInstrInserter::report(const char *msg, MachineBasicBlock &MBB) { - errs() << '\n'; - errs() << "*** " << msg << " ***\n" - << "- function: " << MBB.getParent()->getName() << "\n"; - errs() << "- basic block: BB#" << MBB.getNumber() << ' ' << MBB.getName() - << " (" << (const void *)&MBB << ')'; - errs() << '\n'; -} - -unsigned CFIInstrInserter::verify(MachineFunction &MF) { - unsigned ErrorNum = 0; - for (MachineBasicBlock &CurrMBB : MF) { - const struct MBBCFAInfo& CurrMBBInfo = MBBVector[CurrMBB.getNumber()]; - for (MachineBasicBlock *Pred : CurrMBB.predecessors()) { - const struct MBBCFAInfo& PredMBBInfo = MBBVector[Pred->getNumber()]; - // Check that outgoing offset values of predecessors match the incoming - // offset value of CurrMBB - if (PredMBBInfo.OutgoingCFAOffset != CurrMBBInfo.IncomingCFAOffset) { - report("The outgoing offset of a predecessor is inconsistent.", - CurrMBB); - errs() << "Predecessor BB#" << Pred->getNumber() - << " has outgoing offset (" << PredMBBInfo.OutgoingCFAOffset - << "), while BB#" << CurrMBB.getNumber() - << " has incoming offset (" << CurrMBBInfo.IncomingCFAOffset - << ").\n"; - ErrorNum++; - } - // Check that outgoing register values of predecessors match the incoming - // register value of CurrMBB - if (PredMBBInfo.OutgoingCFARegister != CurrMBBInfo.IncomingCFARegister) { - report("The outgoing register of a predecessor is inconsistent.", - CurrMBB); - errs() << "Predecessor BB#" << Pred->getNumber() - << " has outgoing register (" << PredMBBInfo.OutgoingCFARegister - << "), while BB#" << CurrMBB.getNumber() - << " has incoming register (" << CurrMBBInfo.IncomingCFARegister - << ").\n"; - ErrorNum++; - } - } - - for (MachineBasicBlock *Succ : CurrMBB.successors()) { - const struct MBBCFAInfo& SuccMBBInfo = MBBVector[Succ->getNumber()]; - // Check that incoming offset values of successors match the outgoing - // offset value of CurrMBB - if (SuccMBBInfo.IncomingCFAOffset != CurrMBBInfo.OutgoingCFAOffset) { - report("The incoming offset of a successor is inconsistent.", CurrMBB); - errs() << "Successor BB#" << Succ->getNumber() - << " has incoming offset (" << SuccMBBInfo.IncomingCFAOffset - << "), while BB#" << CurrMBB.getNumber() - << " has outgoing offset (" << CurrMBBInfo.OutgoingCFAOffset - << ").\n"; - ErrorNum++; - } - // Check that incoming register values of successors match the outgoing - // register value of CurrMBB - if (SuccMBBInfo.IncomingCFARegister != CurrMBBInfo.OutgoingCFARegister) { - report("The incoming register of a successor is inconsistent.", - CurrMBB); - errs() << "Successor BB#" << Succ->getNumber() - << " has incoming register (" << SuccMBBInfo.IncomingCFARegister - << "), while BB#" << CurrMBB.getNumber() - << " has outgoing register (" << CurrMBBInfo.OutgoingCFARegister - << ").\n"; - ErrorNum++; - } - } - } - return ErrorNum; -} diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt index 44da41a..7ec7fda 100644 --- a/llvm/lib/CodeGen/CMakeLists.txt +++ b/llvm/lib/CodeGen/CMakeLists.txt @@ -9,7 +9,6 @@ add_llvm_library(LLVMCodeGen BuiltinGCs.cpp CalcSpillWeights.cpp CallingConvLower.cpp - 
CFIInstrInserter.cpp CodeGen.cpp CodeGenPrepare.cpp CountingFunctionInserter.cpp diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp index c0e7511..f4ccb48 100644 --- a/llvm/lib/CodeGen/CodeGen.cpp +++ b/llvm/lib/CodeGen/CodeGen.cpp @@ -23,7 +23,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeAtomicExpandPass(Registry); initializeBranchFolderPassPass(Registry); initializeBranchRelaxationPass(Registry); - initializeCFIInstrInserterPass(Registry); initializeCodeGenPreparePass(Registry); initializeCountingFunctionInserterPass(Registry); initializeDeadMachineInstructionElimPass(Registry); diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index 9137c83..bb2dda9 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -320,45 +320,8 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const { } case MachineOperand::MO_MCSymbol: return getMCSymbol() == Other.getMCSymbol(); - case MachineOperand::MO_CFIIndex: { - const MachineFunction *MF = getParent()->getParent()->getParent(); - const MachineFunction *OtherMF = - Other.getParent()->getParent()->getParent(); - MCCFIInstruction Inst = MF->getFrameInstructions()[getCFIIndex()]; - MCCFIInstruction OtherInst = - OtherMF->getFrameInstructions()[Other.getCFIIndex()]; - MCCFIInstruction::OpType op = Inst.getOperation(); - if (op != OtherInst.getOperation()) return false; - switch (op) { - case MCCFIInstruction::OpDefCfa: - case MCCFIInstruction::OpOffset: - case MCCFIInstruction::OpRelOffset: - if (Inst.getRegister() != OtherInst.getRegister()) return false; - if (Inst.getOffset() != OtherInst.getOffset()) return false; - break; - case MCCFIInstruction::OpRestore: - case MCCFIInstruction::OpUndefined: - case MCCFIInstruction::OpSameValue: - case MCCFIInstruction::OpDefCfaRegister: - if (Inst.getRegister() != OtherInst.getRegister()) return false; - break; - case MCCFIInstruction::OpRegister: - if (Inst.getRegister() != OtherInst.getRegister()) return false; - if (Inst.getRegister2() != OtherInst.getRegister2()) return false; - break; - case MCCFIInstruction::OpDefCfaOffset: - case MCCFIInstruction::OpAdjustCfaOffset: - case MCCFIInstruction::OpGnuArgsSize: - if (Inst.getOffset() != OtherInst.getOffset()) return false; - break; - case MCCFIInstruction::OpRememberState: - case MCCFIInstruction::OpRestoreState: - case MCCFIInstruction::OpEscape: - case MCCFIInstruction::OpWindowSave: - break; - } - return true; - } + case MachineOperand::MO_CFIIndex: + return getCFIIndex() == Other.getCFIIndex(); case MachineOperand::MO_Metadata: return getMetadata() == Other.getMetadata(); case MachineOperand::MO_IntrinsicID: @@ -407,13 +370,8 @@ hash_code llvm::hash_value(const MachineOperand &MO) { return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMetadata()); case MachineOperand::MO_MCSymbol: return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getMCSymbol()); - case MachineOperand::MO_CFIIndex: { - const MachineFunction *MF = MO.getParent()->getParent()->getParent(); - MCCFIInstruction Inst = MF->getFrameInstructions()[MO.getCFIIndex()]; - return hash_combine(MO.getType(), MO.getTargetFlags(), Inst.getOperation(), - Inst.getRegister(), Inst.getRegister2(), - Inst.getOffset()); - } + case MachineOperand::MO_CFIIndex: + return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getCFIIndex()); case MachineOperand::MO_IntrinsicID: return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getIntrinsicID()); case 
MachineOperand::MO_Predicate: diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp index 93b53c3..bd3a20f 100644 --- a/llvm/lib/CodeGen/TailDuplicator.cpp +++ b/llvm/lib/CodeGen/TailDuplicator.cpp @@ -603,8 +603,8 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple, if (PreRegAlloc && MI.isCall()) return false; - if (!MI.isPHI() && !MI.isMetaInstruction()) - InstrCount += 1; + if (!MI.isPHI() && !MI.isDebugValue()) + InstrCount += 1; if (InstrCount > MaxDuplicateCount) return false; diff --git a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp index 22e73dc..9dd98b4 100644 --- a/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp +++ b/llvm/lib/CodeGen/TargetFrameLoweringImpl.cpp @@ -104,12 +104,3 @@ unsigned TargetFrameLowering::getStackAlignmentSkew( return 0; } - -int TargetFrameLowering::getInitialCFAOffset(const MachineFunction &MF) const { - llvm_unreachable("getInitialCFAOffset() not implemented!"); -} - -unsigned TargetFrameLowering::getInitialCFARegister(const MachineFunction &MF) - const { - llvm_unreachable("getInitialCFARegister() not implemented!"); -} \ No newline at end of file diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp index b477c80..988f296 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.cpp +++ b/llvm/lib/Target/X86/X86FrameLowering.cpp @@ -1562,11 +1562,6 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, bool HasFP = hasFP(MF); uint64_t NumBytes = 0; - bool NeedsDwarfCFI = - (!MF.getTarget().getTargetTriple().isOSDarwin() && - !MF.getTarget().getTargetTriple().isOSWindows()) && - (MF.getMMI().hasDebugInfo() || MF.getFunction()->needsUnwindTableEntry()); - if (IsFunclet) { assert(HasFP && "EH funclets without FP not yet implemented"); NumBytes = getWinEHFuncletFrameSize(MF); @@ -1589,13 +1584,6 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr) .setMIFlag(MachineInstr::FrameDestroy); - if (NeedsDwarfCFI) { - unsigned DwarfStackPtr = - TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true); - BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfa( - nullptr, DwarfStackPtr, -SlotSize)); - --MBBI; - } } MachineBasicBlock::iterator FirstCSPop = MBBI; @@ -1659,11 +1647,6 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, } else if (NumBytes) { // Adjust stack pointer back: ESP += numbytes. emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true); - if (!hasFP(MF) && NeedsDwarfCFI) { - // Define the current CFA rule to use the provided offset. - BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset( - nullptr, -CSSize - SlotSize)); - } --MBBI; } @@ -1676,23 +1659,6 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, if (NeedsWin64CFI && MF.hasWinCFI()) BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue)); - if (!hasFP(MF) && NeedsDwarfCFI) { - MBBI = FirstCSPop; - int64_t Offset = -CSSize - SlotSize; - // Mark callee-saved pop instruction. - // Define the current CFA rule to use the provided offset. - while (MBBI != MBB.end()) { - MachineBasicBlock::iterator PI = MBBI; - unsigned Opc = PI->getOpcode(); - ++MBBI; - if (Opc == X86::POP32r || Opc == X86::POP64r) { - Offset += SlotSize; - BuildCFI(MBB, MBBI, DL, - MCCFIInstruction::createDefCfaOffset(nullptr, Offset)); - } - } - } - if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) { // Add the return addr area delta back since we are not tail calling. 
int Offset = -1 * X86FI->getTCReturnAddrDelta(); @@ -2869,15 +2835,6 @@ MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers( return MBBI; } -int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const { - return TRI->getSlotSize(); -} - -unsigned X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) - const { - return TRI->getDwarfRegNum(StackPtr, true); -} - namespace { // Struct used by orderFrameObjects to help sort the stack objects. struct X86FrameSortingObject { diff --git a/llvm/lib/Target/X86/X86FrameLowering.h b/llvm/lib/Target/X86/X86FrameLowering.h index 1ed88b5..38ac96e 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.h +++ b/llvm/lib/Target/X86/X86FrameLowering.h @@ -168,10 +168,6 @@ public: MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool RestoreSP = false) const; - int getInitialCFAOffset(const MachineFunction &MF) const override; - - unsigned getInitialCFARegister(const MachineFunction &MF) const override; - private: uint64_t calculateMaxStackAlign(const MachineFunction &MF) const; diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp index 11fe84f..6e6c724 100644 --- a/llvm/lib/Target/X86/X86TargetMachine.cpp +++ b/llvm/lib/Target/X86/X86TargetMachine.cpp @@ -436,11 +436,4 @@ void X86PassConfig::addPreEmitPass() { addPass(createX86FixupLEAs()); addPass(createX86EvexToVexInsts()); } - - // Verify basic block incoming and outgoing cfa offset and register values and - // correct CFA calculation rule where needed by inserting appropriate CFI - // instructions. - const Triple &TT = TM->getTargetTriple(); - if (!TT.isOSDarwin() && !TT.isOSWindows()) - addPass(createCFIInstrInserter()); } diff --git a/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll b/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll index 109962c..6814ed1 100644 --- a/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll +++ b/llvm/test/CodeGen/X86/2009-03-16-PHIElimInLPad.ll @@ -23,7 +23,6 @@ lpad: ; preds = %cont, %entry } ; CHECK: lpad -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: Ltmp declare i32 @__gxx_personality_v0(...) 
diff --git a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll index dd05910..416761f 100644 --- a/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll +++ b/llvm/test/CodeGen/X86/2011-10-19-widen_vselect.ll @@ -88,7 +88,6 @@ define void @full_test() { ; X32-NEXT: movss %xmm4, {{[0-9]+}}(%esp) ; X32-NEXT: movss %xmm0, {{[0-9]+}}(%esp) ; X32-NEXT: addl $60, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: full_test: diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll index 9d28f44..64a6313 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll @@ -20,7 +20,6 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) { ; X32-NEXT: addl 8(%ebp), %eax ; X32-NEXT: adcl 12(%ebp), %edx ; X32-NEXT: popl %ebp -; X32-NEXT: .cfi_def_cfa %esp, 4 ; X32-NEXT: retl %ret = add i64 %arg1, %arg2 ret i64 %ret diff --git a/llvm/test/CodeGen/X86/GlobalISel/brcond.ll b/llvm/test/CodeGen/X86/GlobalISel/brcond.ll index 2467344..917ee6f 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/brcond.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/brcond.ll @@ -36,7 +36,6 @@ define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) { ; X32-NEXT: movl %eax, (%esp) ; X32-NEXT: movl (%esp), %eax ; X32-NEXT: popl %ecx -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: %retval = alloca i32, align 4 diff --git a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll index 23987a3..4100a72 100644 --- a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll @@ -117,7 +117,6 @@ define <8 x i32> @test_v8i32_args(<8 x i32> %arg1, <8 x i32> %arg2) { ; X32-NEXT: movups 16(%esp), %xmm1 ; X32-NEXT: movaps %xmm2, %xmm0 ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_v8i32_args: @@ -136,7 +135,6 @@ define void @test_trivial_call() { ; X32-NEXT: .cfi_def_cfa_offset 16 ; X32-NEXT: calll trivial_callee ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_trivial_call: @@ -145,7 +143,6 @@ define void @test_trivial_call() { ; X64-NEXT: .cfi_def_cfa_offset 16 ; X64-NEXT: callq trivial_callee ; X64-NEXT: popq %rax -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void @trivial_callee() ret void @@ -163,7 +160,6 @@ define void @test_simple_arg_call(i32 %in0, i32 %in1) { ; X32-NEXT: movl %eax, 4(%esp) ; X32-NEXT: calll simple_arg_callee ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_simple_arg_call: @@ -175,7 +171,6 @@ define void @test_simple_arg_call(i32 %in0, i32 %in1) { ; X64-NEXT: movl %eax, %esi ; X64-NEXT: callq simple_arg_callee ; X64-NEXT: popq %rax -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void @simple_arg_callee(i32 %in1, i32 %in0) ret void @@ -198,7 +193,6 @@ define void @test_simple_arg8_call(i32 %in0) { ; X32-NEXT: movl %eax, 28(%esp) ; X32-NEXT: calll simple_arg8_callee ; X32-NEXT: addl $44, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_simple_arg8_call: @@ -214,7 +208,6 @@ define void @test_simple_arg8_call(i32 %in0) { ; X64-NEXT: movl %edi, %r9d ; X64-NEXT: callq simple_arg8_callee ; X64-NEXT: addq $24, %rsp -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0) 
ret void @@ -231,7 +224,6 @@ define i32 @test_simple_return_callee() { ; X32-NEXT: calll simple_return_callee ; X32-NEXT: addl %eax, %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_simple_return_callee: @@ -242,7 +234,6 @@ define i32 @test_simple_return_callee() { ; X64-NEXT: callq simple_return_callee ; X64-NEXT: addl %eax, %eax ; X64-NEXT: popq %rcx -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %call = call i32 @simple_return_callee(i32 5) %r = add i32 %call, %call @@ -263,7 +254,6 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) { ; X32-NEXT: paddd (%esp), %xmm0 # 16-byte Folded Reload ; X32-NEXT: paddd 16(%esp), %xmm1 # 16-byte Folded Reload ; X32-NEXT: addl $44, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_split_return_callee: @@ -278,7 +268,6 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) { ; X64-NEXT: paddd (%rsp), %xmm0 # 16-byte Folded Reload ; X64-NEXT: paddd 16(%rsp), %xmm1 # 16-byte Folded Reload ; X64-NEXT: addq $40, %rsp -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %call = call <8 x i32> @split_return_callee(<8 x i32> %arg2) %r = add <8 x i32> %arg1, %call @@ -292,7 +281,6 @@ define void @test_indirect_call(void()* %func) { ; X32-NEXT: .cfi_def_cfa_offset 16 ; X32-NEXT: calll *16(%esp) ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_indirect_call: @@ -301,7 +289,6 @@ define void @test_indirect_call(void()* %func) { ; X64-NEXT: .cfi_def_cfa_offset 16 ; X64-NEXT: callq *%rdi ; X64-NEXT: popq %rax -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq call void %func() ret void @@ -330,11 +317,8 @@ define void @test_abi_exts_call(i8* %addr) { ; X32-NEXT: movl %esi, (%esp) ; X32-NEXT: calll take_char ; X32-NEXT: addl $4, %esp -; X32-NEXT: .cfi_def_cfa_offset 12 ; X32-NEXT: popl %esi -; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_abi_exts_call: @@ -351,7 +335,6 @@ define void @test_abi_exts_call(i8* %addr) { ; X64-NEXT: movl %ebx, %edi ; X64-NEXT: callq take_char ; X64-NEXT: popq %rbx -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %val = load i8, i8* %addr call void @take_char(i8 %val) @@ -374,7 +357,6 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) { ; X32-NEXT: movl %ecx, 4(%esp) ; X32-NEXT: calll variadic_callee ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_variadic_call_1: @@ -386,7 +368,6 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) { ; X64-NEXT: movb $0, %al ; X64-NEXT: callq variadic_callee ; X64-NEXT: popq %rax -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %addr = load i8*, i8** %addr_ptr @@ -412,7 +393,6 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) { ; X32-NEXT: movl %ecx, 4(%eax) ; X32-NEXT: calll variadic_callee ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_variadic_call_2: @@ -425,7 +405,6 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) { ; X64-NEXT: movq %rcx, %xmm0 ; X64-NEXT: callq variadic_callee ; X64-NEXT: popq %rax -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq %addr = load i8*, i8** %addr_ptr diff --git a/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll b/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll index f260d0d..7b2a050 100644 --- 
a/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll +++ b/llvm/test/CodeGen/X86/GlobalISel/frameIndex.ll @@ -18,7 +18,6 @@ define i32* @allocai32() { ; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: movl %esp, %eax ; X32-NEXT: popl %ecx -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X32ABI-LABEL: allocai32: diff --git a/llvm/test/CodeGen/X86/O0-pipeline.ll b/llvm/test/CodeGen/X86/O0-pipeline.ll index 8ecafad..1f7415e 100644 --- a/llvm/test/CodeGen/X86/O0-pipeline.ll +++ b/llvm/test/CodeGen/X86/O0-pipeline.ll @@ -49,7 +49,6 @@ ; CHECK-NEXT: X86 pseudo instruction expansion pass ; CHECK-NEXT: Analyze Machine Code For Garbage Collection ; CHECK-NEXT: X86 vzeroupper inserter -; CHECK-NEXT: Check CFA info and insert CFI instructions if needed ; CHECK-NEXT: Contiguously Lay Out Funclets ; CHECK-NEXT: StackMap Liveness Analysis ; CHECK-NEXT: Live DEBUG_VALUE analysis diff --git a/llvm/test/CodeGen/X86/TruncAssertZext.ll b/llvm/test/CodeGen/X86/TruncAssertZext.ll index ed98fd5..b9ae57c 100644 --- a/llvm/test/CodeGen/X86/TruncAssertZext.ll +++ b/llvm/test/CodeGen/X86/TruncAssertZext.ll @@ -25,7 +25,6 @@ define i64 @main() { ; CHECK-NEXT: subq %rcx, %rax ; CHECK-NEXT: shrq $32, %rax ; CHECK-NEXT: popq %rcx -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %b = call i64 @foo() %or = and i64 %b, 18446744069414584575 ; this is 0xffffffff000000ff diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll index 909e839..b75bd8c 100644 --- a/llvm/test/CodeGen/X86/avx512-mask-op.ll +++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll @@ -699,13 +699,11 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) { ; AVX512BW-NEXT: jg LBB17_1 ; AVX512BW-NEXT: ## BB#2: ; AVX512BW-NEXT: vpcmpltud %zmm2, %zmm1, %k0 -; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 -; AVX512BW-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq +; AVX512BW-NEXT: jmp LBB17_3 ; AVX512BW-NEXT: LBB17_1: -; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0 -; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 +; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0 +; AVX512BW-NEXT: LBB17_3: +; AVX512BW-NEXT: vpmovm2b %k0, %zmm0 ; AVX512BW-NEXT: ## kill: %XMM0 %XMM0 %ZMM0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq diff --git a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll index fa6adec..bb541f4 100644 --- a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll +++ b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll @@ -209,18 +209,12 @@ define i64 @caller_argv64i1() #0 { ; LINUXOSX64-NEXT: pushq %rax ; LINUXOSX64-NEXT: .cfi_adjust_cfa_offset 8 ; LINUXOSX64-NEXT: callq test_argv64i1 -; LINUXOSX64-NEXT: addq $16, %rsp +; LINUXOSX64-NEXT: addq $24, %rsp ; LINUXOSX64-NEXT: .cfi_adjust_cfa_offset -16 -; LINUXOSX64-NEXT: addq $8, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 40 ; LINUXOSX64-NEXT: popq %r12 -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 32 ; LINUXOSX64-NEXT: popq %r13 -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 24 ; LINUXOSX64-NEXT: popq %r14 -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %r15 -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i64 4294967298 to <64 x i1> @@ -293,7 +287,6 @@ define <64 x i1> @caller_retv64i1() #0 { ; LINUXOSX64-NEXT: kmovq %rax, %k0 ; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm0 ; LINUXOSX64-NEXT: popq %rax -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <64 x i1> @test_retv64i1() @@ -404,9 +397,7 @@ define x86_regcallcc i32 
@test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1> ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm15 # 16-byte Reload ; LINUXOSX64-NEXT: addq $128, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: vzeroupper ; LINUXOSX64-NEXT: retq entry: @@ -460,7 +451,6 @@ define i32 @caller_argv32i1() #0 { ; LINUXOSX64-NEXT: movl $1, %edx ; LINUXOSX64-NEXT: callq test_argv32i1 ; LINUXOSX64-NEXT: popq %rcx -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i32 1 to <32 x i1> @@ -523,7 +513,6 @@ define i32 @caller_retv32i1() #0 { ; LINUXOSX64-NEXT: callq test_retv32i1 ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: popq %rcx -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <32 x i1> @test_retv32i1() @@ -637,9 +626,7 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm15 # 16-byte Reload ; LINUXOSX64-NEXT: addq $128, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %res = call i16 @test_argv16i1helper(<16 x i1> %x0, <16 x i1> %x1, <16 x i1> %x2) ret i16 %res @@ -691,7 +678,6 @@ define i16 @caller_argv16i1() #0 { ; LINUXOSX64-NEXT: movl $1, %edx ; LINUXOSX64-NEXT: callq test_argv16i1 ; LINUXOSX64-NEXT: popq %rcx -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i16 1 to <16 x i1> @@ -760,7 +746,6 @@ define i16 @caller_retv16i1() #0 { ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: # kill: %AX %AX %EAX ; LINUXOSX64-NEXT: popq %rcx -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <16 x i1> @test_retv16i1() @@ -874,9 +859,7 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2) ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm14 # 16-byte Reload ; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm15 # 16-byte Reload ; LINUXOSX64-NEXT: addq $128, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %res = call i8 @test_argv8i1helper(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2) ret i8 %res @@ -928,7 +911,6 @@ define i8 @caller_argv8i1() #0 { ; LINUXOSX64-NEXT: movl $1, %edx ; LINUXOSX64-NEXT: callq test_argv8i1 ; LINUXOSX64-NEXT: popq %rcx -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq entry: %v0 = bitcast i8 1 to <8 x i1> @@ -1002,11 +984,9 @@ define <8 x i1> @caller_retv8i1() #0 { ; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm0 ; LINUXOSX64-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; LINUXOSX64-NEXT: popq %rax -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: vzeroupper ; LINUXOSX64-NEXT: retq entry: %call = call x86_regcallcc <8 x i1> @test_retv8i1() ret <8 x i1> %call } - diff --git a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll index b4f1d2c..43a1871 100644 --- a/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll +++ b/llvm/test/CodeGen/X86/avx512-regcall-NoMask.ll @@ -63,7 +63,6 @@ define x86_regcallcc i1 @test_CallargReti1(i1 %a) { ; LINUXOSX64-NEXT: callq test_argReti1 ; LINUXOSX64-NEXT: incb %al ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 
; LINUXOSX64-NEXT: retq %b = add i1 %a, 1 %c = call x86_regcallcc i1 @test_argReti1(i1 %b) @@ -131,7 +130,6 @@ define x86_regcallcc i8 @test_CallargReti8(i8 %a) { ; LINUXOSX64-NEXT: callq test_argReti8 ; LINUXOSX64-NEXT: incb %al ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i8 %a, 1 %c = call x86_regcallcc i8 @test_argReti8(i8 %b) @@ -202,7 +200,6 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) { ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: # kill: %AX %AX %EAX ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i16 %a, 1 %c = call x86_regcallcc i16 @test_argReti16(i16 %b) @@ -264,7 +261,6 @@ define x86_regcallcc i32 @test_CallargReti32(i32 %a) { ; LINUXOSX64-NEXT: callq test_argReti32 ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i32 %a, 1 %c = call x86_regcallcc i32 @test_argReti32(i32 %b) @@ -331,7 +327,6 @@ define x86_regcallcc i64 @test_CallargReti64(i64 %a) { ; LINUXOSX64-NEXT: callq test_argReti64 ; LINUXOSX64-NEXT: incq %rax ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = add i64 %a, 1 %c = call x86_regcallcc i64 @test_argReti64(i64 %b) @@ -411,9 +406,7 @@ define x86_regcallcc float @test_CallargRetFloat(float %a) { ; LINUXOSX64-NEXT: vaddss %xmm8, %xmm0, %xmm0 ; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload ; LINUXOSX64-NEXT: addq $16, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = fadd float 1.0, %a %c = call x86_regcallcc float @test_argRetFloat(float %b) @@ -493,9 +486,7 @@ define x86_regcallcc double @test_CallargRetDouble(double %a) { ; LINUXOSX64-NEXT: vaddsd %xmm8, %xmm0, %xmm0 ; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload ; LINUXOSX64-NEXT: addq $16, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = fadd double 1.0, %a %c = call x86_regcallcc double @test_argRetDouble(double %b) @@ -557,7 +548,6 @@ define x86_regcallcc x86_fp80 @test_CallargRetf80(x86_fp80 %a) { ; LINUXOSX64-NEXT: callq test_argRetf80 ; LINUXOSX64-NEXT: fadd %st(0), %st(0) ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = fadd x86_fp80 %a, %a %c = call x86_regcallcc x86_fp80 @test_argRetf80(x86_fp80 %b) @@ -621,7 +611,6 @@ define x86_regcallcc [4 x i32]* @test_CallargRetPointer([4 x i32]* %a) { ; LINUXOSX64-NEXT: callq test_argRetPointer ; LINUXOSX64-NEXT: incl %eax ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = ptrtoint [4 x i32]* %a to i32 %c = add i32 %b, 1 @@ -705,9 +694,7 @@ define x86_regcallcc <4 x i32> @test_CallargRet128Vector(<4 x i32> %a) { ; LINUXOSX64-NEXT: vmovdqa32 %xmm8, %xmm0 {%k1} ; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload ; LINUXOSX64-NEXT: addq $16, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = call x86_regcallcc <4 x i32> @test_argRet128Vector(<4 x i32> %a, <4 x i32> %a) %c = select <4 x i1> undef , <4 x i32> %a, <4 x i32> %b @@ -781,9 +768,7 @@ define x86_regcallcc <8 x i32> @test_CallargRet256Vector(<8 x i32> %a) { ; LINUXOSX64-NEXT: vmovdqu (%rsp), %ymm1 # 32-byte Reload ; LINUXOSX64-NEXT: 
vmovdqa32 %ymm1, %ymm0 {%k1} ; LINUXOSX64-NEXT: addq $48, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = call x86_regcallcc <8 x i32> @test_argRet256Vector(<8 x i32> %a, <8 x i32> %a) %c = select <8 x i1> undef , <8 x i32> %a, <8 x i32> %b @@ -857,9 +842,7 @@ define x86_regcallcc <16 x i32> @test_CallargRet512Vector(<16 x i32> %a) { ; LINUXOSX64-NEXT: vmovdqu64 (%rsp), %zmm1 # 64-byte Reload ; LINUXOSX64-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} ; LINUXOSX64-NEXT: addq $112, %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16 ; LINUXOSX64-NEXT: popq %rsp -; LINUXOSX64-NEXT: .cfi_def_cfa_offset 8 ; LINUXOSX64-NEXT: retq %b = call x86_regcallcc <16 x i32> @test_argRet512Vector(<16 x i32> %a, <16 x i32> %a) %c = select <16 x i1> undef , <16 x i32> %a, <16 x i32> %b diff --git a/llvm/test/CodeGen/X86/avx512-schedule.ll b/llvm/test/CodeGen/X86/avx512-schedule.ll index abc8c1a..8372fbd 100755 --- a/llvm/test/CodeGen/X86/avx512-schedule.ll +++ b/llvm/test/CodeGen/X86/avx512-schedule.ll @@ -8839,7 +8839,6 @@ define <16 x float> @broadcast_ss_spill(float %x) { ; GENERIC-NEXT: callq func_f32 ; GENERIC-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload ; GENERIC-NEXT: addq $24, %rsp # sched: [1:0.33] -; GENERIC-NEXT: .cfi_def_cfa_offset 8 ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; SKX-LABEL: broadcast_ss_spill: @@ -8853,7 +8852,6 @@ define <16 x float> @broadcast_ss_spill(float %x) { ; SKX-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload sched: [8:0.50] ; SKX-NEXT: # sched: [8:0.50] ; SKX-NEXT: addq $24, %rsp # sched: [1:0.25] -; SKX-NEXT: .cfi_def_cfa_offset 8 ; SKX-NEXT: retq # sched: [7:1.00] %a = fadd float %x, %x call void @func_f32(float %a) @@ -8874,7 +8872,6 @@ define <8 x double> @broadcast_sd_spill(double %x) { ; GENERIC-NEXT: callq func_f64 ; GENERIC-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload ; GENERIC-NEXT: addq $24, %rsp # sched: [1:0.33] -; GENERIC-NEXT: .cfi_def_cfa_offset 8 ; GENERIC-NEXT: retq # sched: [1:1.00] ; ; SKX-LABEL: broadcast_sd_spill: @@ -8888,7 +8885,6 @@ define <8 x double> @broadcast_sd_spill(double %x) { ; SKX-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload sched: [8:0.50] ; SKX-NEXT: # sched: [8:0.50] ; SKX-NEXT: addq $24, %rsp # sched: [1:0.25] -; SKX-NEXT: .cfi_def_cfa_offset 8 ; SKX-NEXT: retq # sched: [7:1.00] %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/llvm/test/CodeGen/X86/avx512-select.ll b/llvm/test/CodeGen/X86/avx512-select.ll index 51a7c68..43cf9ee 100644 --- a/llvm/test/CodeGen/X86/avx512-select.ll +++ b/llvm/test/CodeGen/X86/avx512-select.ll @@ -115,7 +115,6 @@ define <16 x double> @select04(<16 x double> %a, <16 x double> %b) { ; X86-NEXT: vmovaps 8(%ebp), %zmm1 ; X86-NEXT: movl %ebp, %esp ; X86-NEXT: popl %ebp -; X86-NEXT: .cfi_def_cfa %esp, 4 ; X86-NEXT: retl ; ; X64-LABEL: select04: diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll index 9aacb23..584968f 100644 --- a/llvm/test/CodeGen/X86/avx512-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx512-vbroadcast.ll @@ -413,7 +413,6 @@ define <16 x float> @broadcast_ss_spill(float %x) { ; ALL-NEXT: callq func_f32 ; ALL-NEXT: vbroadcastss (%rsp), %zmm0 # 16-byte Folded Reload ; ALL-NEXT: addq $24, %rsp -; ALL-NEXT: .cfi_def_cfa_offset 8 ; ALL-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -433,7 +432,6 @@ define <8 x double> @broadcast_sd_spill(double %x) { ; ALL-NEXT: callq func_f64 ; 
ALL-NEXT: vbroadcastsd (%rsp), %zmm0 # 16-byte Folded Reload ; ALL-NEXT: addq $24, %rsp -; ALL-NEXT: .cfi_def_cfa_offset 8 ; ALL-NEXT: retq %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll index 7f170cd..d1bf8fd 100644 --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics-fast-isel.ll @@ -717,7 +717,6 @@ define <8 x i64> @test_mm512_mask_set1_epi8(<8 x i64> %__O, i64 %__M, i8 signext ; X32-NEXT: vpbroadcastb %eax, %zmm3 {%k1} ; X32-NEXT: vmovdqa64 %zmm3, %zmm0 ; X32-NEXT: popl %ebx -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm512_mask_set1_epi8: @@ -1445,7 +1444,6 @@ define <8 x i64> @test_mm512_maskz_set1_epi8(i64 %__M, i8 signext %__A) { ; X32-NEXT: korq %k0, %k1, %k1 ; X32-NEXT: vpbroadcastb %eax, %zmm0 {%k1} {z} ; X32-NEXT: popl %ebx -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm512_maskz_set1_epi8: diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll index 87565ac..a5ef180 100644 --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll @@ -355,7 +355,6 @@ define i64 @test_pcmpeq_b(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1) @@ -381,7 +380,6 @@ define i64 @test_mask_pcmpeq_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpeq.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask) @@ -447,7 +445,6 @@ define i64 @test_pcmpgt_b(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 -1) @@ -473,7 +470,6 @@ define i64 @test_mask_pcmpgt_b(<64 x i8> %a, <64 x i8> %b, i64 %mask) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.mask.pcmpgt.b.512(<64 x i8> %a, <64 x i8> %b, i64 %mask) @@ -1706,7 +1702,6 @@ define i64 @test_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1) @@ -2508,11 +2503,8 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) { ; AVX512F-32-NEXT: addl %esi, %eax ; AVX512F-32-NEXT: adcl %ecx, %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: popl %esi -; 
AVX512F-32-NEXT: .cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: popl %ebx -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.cmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask) @@ -2594,7 +2586,6 @@ define i64 @test_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1) { ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 -1) @@ -3396,11 +3387,8 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m ; AVX512F-32-NEXT: addl %esi, %eax ; AVX512F-32-NEXT: adcl %ecx, %edx ; AVX512F-32-NEXT: addl $60, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 12 ; AVX512F-32-NEXT: popl %esi -; AVX512F-32-NEXT: .cfi_def_cfa_offset 8 ; AVX512F-32-NEXT: popl %ebx -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: vzeroupper ; AVX512F-32-NEXT: retl %res0 = call i64 @llvm.x86.avx512.mask.ucmp.b.512(<64 x i8> %a0, <64 x i8> %a1, i32 0, i64 %mask) diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll index c262064..e23deeb 100644 --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll @@ -1499,7 +1499,6 @@ define i64@test_int_x86_avx512_kunpck_qd(i64 %x0, i64 %x1) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.kunpck.dq(i64 %x0, i64 %x1) ret i64 %res @@ -1523,7 +1522,6 @@ define i64@test_int_x86_avx512_cvtb2mask_512(<64 x i8> %x0) { ; AVX512F-32-NEXT: movl (%esp), %eax ; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $12, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.cvtb2mask.512(<64 x i8> %x0) ret i64 %res @@ -1714,7 +1712,6 @@ define i64@test_int_x86_avx512_ptestm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 %x ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $20, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) %res1 = call i64 @llvm.x86.avx512.ptestm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64-1) @@ -1779,7 +1776,6 @@ define i64@test_int_x86_avx512_ptestnm_b_512(<64 x i8> %x0, <64 x i8> %x1, i64 % ; AVX512F-32-NEXT: addl {{[0-9]+}}(%esp), %eax ; AVX512F-32-NEXT: adcxl {{[0-9]+}}(%esp), %edx ; AVX512F-32-NEXT: addl $20, %esp -; AVX512F-32-NEXT: .cfi_def_cfa_offset 4 ; AVX512F-32-NEXT: retl %res = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64 %x2) %res1 = call i64 @llvm.x86.avx512.ptestnm.b.512(<64 x i8> %x0, <64 x i8> %x1, i64-1) diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll index 3f4a696..f5578d6 100644 --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll @@ -233,7 +233,6 @@ define <2 x i64> @test_mm_mask_broadcastd_epi32(<2 x i64> %a0, i8 %a1, <2 x i64> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastd %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: 
.cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastd_epi32: @@ -266,7 +265,6 @@ define <2 x i64> @test_mm_maskz_broadcastd_epi32(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastd %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastd_epi32: @@ -371,7 +369,6 @@ define <2 x i64> @test_mm_mask_broadcastq_epi64(<2 x i64> %a0, i8 %a1, <2 x i64> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastq_epi64: @@ -401,7 +398,6 @@ define <2 x i64> @test_mm_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastq_epi64: @@ -445,7 +441,6 @@ define <4 x i64> @test_mm256_mask_broadcastq_epi64(<4 x i64> %a0, i8 %a1, <2 x i ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm1, %ymm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_broadcastq_epi64: @@ -475,7 +470,6 @@ define <4 x i64> @test_mm256_maskz_broadcastq_epi64(i8 %a0, <2 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpbroadcastq %xmm0, %ymm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_broadcastq_epi64: @@ -519,7 +513,6 @@ define <2 x double> @test_mm_mask_broadcastsd_pd(<2 x double> %a0, i8 %a1, <2 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastsd_pd: @@ -549,7 +542,6 @@ define <2 x double> @test_mm_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastsd_pd: @@ -593,7 +585,6 @@ define <4 x double> @test_mm256_mask_broadcastsd_pd(<4 x double> %a0, i8 %a1, <2 ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_broadcastsd_pd: @@ -623,7 +614,6 @@ define <4 x double> @test_mm256_maskz_broadcastsd_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_broadcastsd_pd: @@ -667,7 +657,6 @@ define <4 x float> @test_mm_mask_broadcastss_ps(<4 x float> %a0, i8 %a1, <4 x fl ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastss %xmm1, %xmm0 {%k1} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_broadcastss_ps: @@ -697,7 +686,6 @@ define <4 x float> @test_mm_maskz_broadcastss_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_broadcastss_ps: @@ -793,7 +781,6 @@ define <2 x double> @test_mm_mask_movddup_pd(<2 x double> %a0, i8 %a1, <2 x doub ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: 
.cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_movddup_pd: @@ -823,7 +810,6 @@ define <2 x double> @test_mm_maskz_movddup_pd(i8 %a0, <2 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_movddup_pd: @@ -867,7 +853,6 @@ define <4 x double> @test_mm256_mask_movddup_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = ymm1[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_movddup_pd: @@ -897,7 +882,6 @@ define <4 x double> @test_mm256_maskz_movddup_pd(i8 %a0, <4 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_movddup_pd: @@ -941,7 +925,6 @@ define <4 x float> @test_mm_mask_movehdup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = xmm1[1,1,3,3] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_movehdup_ps: @@ -971,7 +954,6 @@ define <4 x float> @test_mm_maskz_movehdup_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_movehdup_ps: @@ -1067,7 +1049,6 @@ define <4 x float> @test_mm_mask_moveldup_ps(<4 x float> %a0, i8 %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = xmm1[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_moveldup_ps: @@ -1097,7 +1078,6 @@ define <4 x float> @test_mm_maskz_moveldup_ps(i8 %a0, <4 x float> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_moveldup_ps: @@ -1193,7 +1173,6 @@ define <4 x i64> @test_mm256_mask_permutex_epi64(<4 x i64> %a0, i8 %a1, <4 x i64 ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_permutex_epi64: @@ -1223,7 +1202,6 @@ define <4 x i64> @test_mm256_maskz_permutex_epi64(i8 %a0, <4 x i64> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_permutex_epi64: @@ -1267,7 +1245,6 @@ define <4 x double> @test_mm256_mask_permutex_pd(<4 x double> %a0, i8 %a1, <4 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_permutex_pd: @@ -1297,7 +1274,6 @@ define <4 x double> @test_mm256_maskz_permutex_pd(i8 %a0, <4 x double> %a1) { ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_permutex_pd: @@ -1341,7 +1317,6 @@ define <2 x double> @test_mm_mask_shuffle_pd(<2 x double> %a0, i8 %a1, <2 x doub ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: 
vunpckhpd {{.*#+}} xmm0 {%k1} = xmm1[1],xmm2[1] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_shuffle_pd: @@ -1371,7 +1346,6 @@ define <2 x double> @test_mm_maskz_shuffle_pd(i8 %a0, <2 x double> %a1, <2 x dou ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shuffle_pd: @@ -1415,7 +1389,6 @@ define <4 x double> @test_mm256_mask_shuffle_pd(<4 x double> %a0, i8 %a1, <4 x d ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} = ymm1[1],ymm2[1],ymm1[2],ymm2[2] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_mask_shuffle_pd: @@ -1445,7 +1418,6 @@ define <4 x double> @test_mm256_maskz_shuffle_pd(i8 %a0, <4 x double> %a1, <4 x ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[2],ymm1[2] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_shuffle_pd: @@ -1489,7 +1461,6 @@ define <4 x float> @test_mm_mask_shuffle_ps(<4 x float> %a0, i8 %a1, <4 x float> ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} = xmm1[0,1],xmm2[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_mask_shuffle_ps: @@ -1519,7 +1490,6 @@ define <4 x float> @test_mm_maskz_shuffle_ps(i8 %a0, <4 x float> %a1, <4 x float ; X32-NEXT: kmovw %eax, %k1 ; X32-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1],xmm1[0,0] ; X32-NEXT: popl %eax -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test_mm_maskz_shuffle_ps: diff --git a/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll b/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll index 1098e7b..9fc9572 100644 --- a/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx512vl-vbroadcast.ll @@ -12,7 +12,6 @@ define <8 x float> @_256_broadcast_ss_spill(float %x) { ; CHECK-NEXT: callq func_f32 ; CHECK-NEXT: vbroadcastss (%rsp), %ymm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -31,7 +30,6 @@ define <4 x float> @_128_broadcast_ss_spill(float %x) { ; CHECK-NEXT: callq func_f32 ; CHECK-NEXT: vbroadcastss (%rsp), %xmm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd float %x, %x call void @func_f32(float %a) @@ -51,7 +49,6 @@ define <4 x double> @_256_broadcast_sd_spill(double %x) { ; CHECK-NEXT: callq func_f64 ; CHECK-NEXT: vbroadcastsd (%rsp), %ymm0 # 16-byte Folded Reload ; CHECK-NEXT: addq $24, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a = fadd double %x, %x call void @func_f64(double %a) diff --git a/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll index a084f0e..5ee06fd 100644 --- a/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll +++ b/llvm/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll @@ -109,7 +109,6 @@ define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -228,7 +227,6 @@ define zeroext i32 @test_vpcmpeqb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq 
%r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -350,7 +348,6 @@ define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask(i16 zeroext %__u, <2 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -473,7 +470,6 @@ define zeroext i32 @test_masked_vpcmpeqb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -601,7 +597,6 @@ define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -725,7 +720,6 @@ define zeroext i64 @test_vpcmpeqb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -852,7 +846,6 @@ define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask(i16 zeroext %__u, <2 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -980,7 +973,6 @@ define zeroext i64 @test_masked_vpcmpeqb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1032,7 +1024,6 @@ define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1080,7 +1071,6 @@ define zeroext i64 @test_vpcmpeqb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1139,7 +1129,6 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask(i32 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1199,7 +1188,6 @@ define zeroext i64 @test_masked_vpcmpeqb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1404,7 +1392,6 @@ define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1478,7 +1465,6 @@ define zeroext i32 @test_vpcmpeqw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1555,7 +1541,6 @@ define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; 
NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1633,7 +1618,6 @@ define zeroext i32 @test_masked_vpcmpeqw_v8i1_v32i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1716,7 +1700,6 @@ define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1795,7 +1778,6 @@ define zeroext i64 @test_vpcmpeqw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1877,7 +1859,6 @@ define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -1960,7 +1941,6 @@ define zeroext i64 @test_masked_vpcmpeqw_v8i1_v64i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2084,7 +2064,6 @@ define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2204,7 +2183,6 @@ define zeroext i32 @test_vpcmpeqw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2327,7 +2305,6 @@ define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask(i16 zeroext %__u, <4 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2451,7 +2428,6 @@ define zeroext i32 @test_masked_vpcmpeqw_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2580,7 +2556,6 @@ define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2705,7 +2680,6 @@ define zeroext i64 @test_vpcmpeqw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2833,7 +2807,6 @@ define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask(i16 zeroext %__u, <4 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -2962,7 +2935,6 @@ define zeroext i64 @test_masked_vpcmpeqw_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3316,7 +3288,6 @@ define zeroext i64 
@test_vpcmpeqw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3581,7 +3552,6 @@ define zeroext i64 @test_vpcmpeqw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -3942,7 +3912,6 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask(i32 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -4219,7 +4188,6 @@ define zeroext i64 @test_masked_vpcmpeqw_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5083,7 +5051,6 @@ define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5125,7 +5092,6 @@ define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5187,7 +5153,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5251,7 +5216,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5299,7 +5263,6 @@ define zeroext i32 @test_vpcmpeqd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5363,7 +5326,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5417,7 +5379,6 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5465,7 +5426,6 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5533,7 +5493,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5603,7 +5562,6 @@ define 
zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5657,7 +5615,6 @@ define zeroext i64 @test_vpcmpeqd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -5727,7 +5684,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6001,7 +5957,6 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6075,7 +6030,6 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6152,7 +6106,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6230,7 +6183,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6308,7 +6260,6 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6386,7 +6337,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6470,7 +6420,6 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6549,7 +6498,6 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6631,7 +6579,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6714,7 +6661,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6797,7 +6743,6 
@@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -6880,7 +6825,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7002,7 +6946,6 @@ define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7119,7 +7062,6 @@ define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7239,7 +7181,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask(i16 zeroext %__u, <8 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7360,7 +7301,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7481,7 +7421,6 @@ define zeroext i32 @test_vpcmpeqd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7602,7 +7541,6 @@ define zeroext i32 @test_masked_vpcmpeqd_v16i1_v32i1_mask_mem_b(i16 zeroext %__u ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7729,7 +7667,6 @@ define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7851,7 +7788,6 @@ define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -7976,7 +7912,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask(i16 zeroext %__u, <8 x ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -8102,7 +8037,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -8228,7 +8162,6 @@ define zeroext i64 @test_vpcmpeqd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -8354,7 +8287,6 @@ define zeroext i64 @test_masked_vpcmpeqd_v16i1_v64i1_mask_mem_b(i16 zeroext %__u ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq 
%r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9199,7 +9131,6 @@ define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9241,7 +9172,6 @@ define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9295,7 +9225,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9351,7 +9280,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9399,7 +9327,6 @@ define zeroext i32 @test_vpcmpeqq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9455,7 +9382,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9509,7 +9435,6 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9557,7 +9482,6 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9617,7 +9541,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9679,7 +9602,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem(i8 zeroext %__u, <2 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9733,7 +9655,6 @@ define zeroext i64 @test_vpcmpeqq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -9795,7 +9716,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10687,7 +10607,6 @@ define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; 
NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10731,7 +10650,6 @@ define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10795,7 +10713,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10861,7 +10778,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem(i8 zeroext %__u, <4 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10911,7 +10827,6 @@ define zeroext i32 @test_vpcmpeqq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -10977,7 +10892,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11033,7 +10947,6 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11083,7 +10996,6 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11153,7 +11065,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11225,7 +11136,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem(i8 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11281,7 +11191,6 @@ define zeroext i64 @test_vpcmpeqq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11353,7 +11262,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11601,7 +11509,6 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__b ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11673,7 +11580,6 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x 
i64>* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11748,7 +11654,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x i ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11824,7 +11729,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem(i8 zeroext %__u, <8 ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11900,7 +11804,6 @@ define zeroext i32 @test_vpcmpeqq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -11976,7 +11879,6 @@ define zeroext i32 @test_masked_vpcmpeqq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12058,7 +11960,6 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__b ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12135,7 +12036,6 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64>* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12215,7 +12115,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x i ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12296,7 +12195,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem(i8 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12377,7 +12275,6 @@ define zeroext i64 @test_vpcmpeqq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12458,7 +12355,6 @@ define zeroext i64 @test_masked_vpcmpeqq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12582,7 +12478,6 @@ define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12701,7 +12596,6 @@ define zeroext i32 @test_vpcmpsgtb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12823,7 +12717,6 @@ define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask(i16 
zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -12946,7 +12839,6 @@ define zeroext i32 @test_masked_vpcmpsgtb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13074,7 +12966,6 @@ define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13198,7 +13089,6 @@ define zeroext i64 @test_vpcmpsgtb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13325,7 +13215,6 @@ define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13453,7 +13342,6 @@ define zeroext i64 @test_masked_vpcmpsgtb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13505,7 +13393,6 @@ define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13553,7 +13440,6 @@ define zeroext i64 @test_vpcmpsgtb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13612,7 +13498,6 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask(i32 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13672,7 +13557,6 @@ define zeroext i64 @test_masked_vpcmpsgtb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13877,7 +13761,6 @@ define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -13951,7 +13834,6 @@ define zeroext i32 @test_vpcmpsgtw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14028,7 +13910,6 @@ define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14106,7 +13987,6 @@ define zeroext i32 @test_masked_vpcmpsgtw_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl 
(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14189,7 +14069,6 @@ define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14268,7 +14147,6 @@ define zeroext i64 @test_vpcmpsgtw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14350,7 +14228,6 @@ define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14433,7 +14310,6 @@ define zeroext i64 @test_masked_vpcmpsgtw_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14557,7 +14433,6 @@ define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14677,7 +14552,6 @@ define zeroext i32 @test_vpcmpsgtw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14800,7 +14674,6 @@ define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -14924,7 +14797,6 @@ define zeroext i32 @test_masked_vpcmpsgtw_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15053,7 +14925,6 @@ define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15178,7 +15049,6 @@ define zeroext i64 @test_vpcmpsgtw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15306,7 +15176,6 @@ define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15435,7 +15304,6 @@ define zeroext i64 @test_masked_vpcmpsgtw_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -15789,7 +15657,6 @@ define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; 
NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16054,7 +15921,6 @@ define zeroext i64 @test_vpcmpsgtw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16415,7 +16281,6 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask(i32 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -16692,7 +16557,6 @@ define zeroext i64 @test_masked_vpcmpsgtw_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17556,7 +17420,6 @@ define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17598,7 +17461,6 @@ define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17660,7 +17522,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17724,7 +17585,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17772,7 +17632,6 @@ define zeroext i32 @test_vpcmpsgtd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17836,7 +17695,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17890,7 +17748,6 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -17938,7 +17795,6 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18006,7 +17862,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18076,7 +17931,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq 
%rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18130,7 +17984,6 @@ define zeroext i64 @test_vpcmpsgtd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18200,7 +18053,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18474,7 +18326,6 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18548,7 +18399,6 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18625,7 +18475,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18703,7 +18552,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18781,7 +18629,6 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18859,7 +18706,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -18943,7 +18789,6 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19022,7 +18867,6 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19104,7 +18948,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19187,7 +19030,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19270,7 +19112,6 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; 
NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19353,7 +19194,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19475,7 +19315,6 @@ define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19592,7 +19431,6 @@ define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19712,7 +19550,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19833,7 +19670,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -19954,7 +19790,6 @@ define zeroext i32 @test_vpcmpsgtd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20075,7 +19910,6 @@ define zeroext i32 @test_masked_vpcmpsgtd_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20202,7 +20036,6 @@ define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20324,7 +20157,6 @@ define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20449,7 +20281,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20575,7 +20406,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20701,7 +20531,6 @@ define zeroext i64 @test_vpcmpsgtd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -20827,7 +20656,6 @@ define zeroext i64 @test_masked_vpcmpsgtd_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa 
%rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21672,7 +21500,6 @@ define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21714,7 +21541,6 @@ define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21768,7 +21594,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21824,7 +21649,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21872,7 +21696,6 @@ define zeroext i32 @test_vpcmpsgtq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21928,7 +21751,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -21982,7 +21804,6 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22030,7 +21851,6 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22090,7 +21910,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22152,7 +21971,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22206,7 +22024,6 @@ define zeroext i64 @test_vpcmpsgtq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -22268,7 +22085,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23160,7 +22976,6 @@ define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; 
NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23204,7 +23019,6 @@ define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23268,7 +23082,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23334,7 +23147,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23384,7 +23196,6 @@ define zeroext i32 @test_vpcmpsgtq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23450,7 +23261,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23506,7 +23316,6 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23556,7 +23365,6 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23626,7 +23434,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23698,7 +23505,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23754,7 +23560,6 @@ define zeroext i64 @test_vpcmpsgtq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -23826,7 +23631,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24074,7 +23878,6 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24146,7 +23949,6 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl 
(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24221,7 +24023,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24297,7 +24098,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24373,7 +24173,6 @@ define zeroext i32 @test_vpcmpsgtq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24449,7 +24248,6 @@ define zeroext i32 @test_masked_vpcmpsgtq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24531,7 +24329,6 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24608,7 +24405,6 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24688,7 +24484,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24769,7 +24564,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24850,7 +24644,6 @@ define zeroext i64 @test_vpcmpsgtq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -24931,7 +24724,6 @@ define zeroext i64 @test_masked_vpcmpsgtq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25057,7 +24849,6 @@ define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25179,7 +24970,6 @@ define zeroext i32 @test_vpcmpsgeb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25303,7 +25093,6 @@ define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask(i16 zeroext %__u, <2 ; 
NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25429,7 +25218,6 @@ define zeroext i32 @test_masked_vpcmpsgeb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25559,7 +25347,6 @@ define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25686,7 +25473,6 @@ define zeroext i64 @test_vpcmpsgeb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25815,7 +25601,6 @@ define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -25946,7 +25731,6 @@ define zeroext i64 @test_masked_vpcmpsgeb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26000,7 +25784,6 @@ define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26051,7 +25834,6 @@ define zeroext i64 @test_vpcmpsgeb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26112,7 +25894,6 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask(i32 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26175,7 +25956,6 @@ define zeroext i64 @test_masked_vpcmpsgeb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26392,7 +26172,6 @@ define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26469,7 +26248,6 @@ define zeroext i32 @test_vpcmpsgew_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26548,7 +26326,6 @@ define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26629,7 +26406,6 @@ define zeroext i32 @test_masked_vpcmpsgew_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; 
NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26714,7 +26490,6 @@ define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26796,7 +26571,6 @@ define zeroext i64 @test_vpcmpsgew_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26880,7 +26654,6 @@ define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -26966,7 +26739,6 @@ define zeroext i64 @test_masked_vpcmpsgew_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27092,7 +26864,6 @@ define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27215,7 +26986,6 @@ define zeroext i32 @test_vpcmpsgew_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27340,7 +27110,6 @@ define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27467,7 +27236,6 @@ define zeroext i32 @test_masked_vpcmpsgew_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27598,7 +27366,6 @@ define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27726,7 +27493,6 @@ define zeroext i64 @test_vpcmpsgew_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27856,7 +27622,6 @@ define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -27988,7 +27753,6 @@ define zeroext i64 @test_masked_vpcmpsgew_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -28345,7 +28109,6 @@ define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: 
.cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -28615,7 +28378,6 @@ define zeroext i64 @test_vpcmpsgew_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -28979,7 +28741,6 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask(i32 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -29261,7 +29022,6 @@ define zeroext i64 @test_masked_vpcmpsgew_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30143,7 +29903,6 @@ define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30188,7 +29947,6 @@ define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30250,7 +30008,6 @@ define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30315,7 +30072,6 @@ define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30365,7 +30121,6 @@ define zeroext i32 @test_vpcmpsged_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30429,7 +30184,6 @@ define zeroext i32 @test_masked_vpcmpsged_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30485,7 +30239,6 @@ define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30536,7 +30289,6 @@ define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30604,7 +30356,6 @@ define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30675,7 +30426,6 @@ define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; 
NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30731,7 +30481,6 @@ define zeroext i64 @test_vpcmpsged_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -30801,7 +30550,6 @@ define zeroext i64 @test_masked_vpcmpsged_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31075,7 +30823,6 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31149,7 +30896,6 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31226,7 +30972,6 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31304,7 +31049,6 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31382,7 +31126,6 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31460,7 +31203,6 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31544,7 +31286,6 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31623,7 +31364,6 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31705,7 +31445,6 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31788,7 +31527,6 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31871,7 +31609,6 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, 
%rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -31954,7 +31691,6 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32076,7 +31812,6 @@ define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32193,7 +31928,6 @@ define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32313,7 +32047,6 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32434,7 +32167,6 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32555,7 +32287,6 @@ define zeroext i32 @test_vpcmpsged_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32676,7 +32407,6 @@ define zeroext i32 @test_masked_vpcmpsged_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32803,7 +32533,6 @@ define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -32925,7 +32654,6 @@ define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -33050,7 +32778,6 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -33176,7 +32903,6 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -33302,7 +33028,6 @@ define zeroext i64 @test_vpcmpsged_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -33428,7 +33153,6 @@ define zeroext i64 @test_masked_vpcmpsged_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: 
vzeroupper ; NoVLX-NEXT: retq entry: @@ -34299,7 +34023,6 @@ define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34344,7 +34067,6 @@ define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34398,7 +34120,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34455,7 +34176,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34505,7 +34225,6 @@ define zeroext i32 @test_vpcmpsgeq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34561,7 +34280,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34617,7 +34335,6 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34668,7 +34385,6 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34728,7 +34444,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34791,7 +34506,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34847,7 +34561,6 @@ define zeroext i64 @test_vpcmpsgeq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -34909,7 +34622,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35831,7 +35543,6 @@ define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; 
NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35878,7 +35589,6 @@ define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -35944,7 +35654,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36013,7 +35722,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36065,7 +35773,6 @@ define zeroext i32 @test_vpcmpsgeq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36133,7 +35840,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36191,7 +35897,6 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36244,7 +35949,6 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36316,7 +36020,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36391,7 +36094,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36449,7 +36151,6 @@ define zeroext i64 @test_vpcmpsgeq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36523,7 +36224,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36771,7 +36471,6 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36843,7 +36542,6 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: 
movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36918,7 +36616,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -36994,7 +36691,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37070,7 +36766,6 @@ define zeroext i32 @test_vpcmpsgeq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37146,7 +36841,6 @@ define zeroext i32 @test_masked_vpcmpsgeq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37228,7 +36922,6 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37305,7 +36998,6 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37385,7 +37077,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37466,7 +37157,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37547,7 +37237,6 @@ define zeroext i64 @test_vpcmpsgeq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37628,7 +37317,6 @@ define zeroext i64 @test_masked_vpcmpsgeq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37755,7 +37443,6 @@ define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -37877,7 +37564,6 @@ define zeroext i32 @test_vpcmpultb_v16i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38002,7 +37688,6 @@ define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; 
NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38128,7 +37813,6 @@ define zeroext i32 @test_masked_vpcmpultb_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38259,7 +37943,6 @@ define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38386,7 +38069,6 @@ define zeroext i64 @test_vpcmpultb_v16i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38516,7 +38198,6 @@ define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask(i16 zeroext %__u, <2 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38647,7 +38328,6 @@ define zeroext i64 @test_masked_vpcmpultb_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38702,7 +38382,6 @@ define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38753,7 +38432,6 @@ define zeroext i64 @test_vpcmpultb_v32i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38815,7 +38493,6 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask(i32 zeroext %__u, <4 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -38878,7 +38555,6 @@ define zeroext i64 @test_masked_vpcmpultb_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39098,7 +38774,6 @@ define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39175,7 +38850,6 @@ define zeroext i32 @test_vpcmpultw_v8i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39255,7 +38929,6 @@ define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39336,7 +39009,6 @@ define zeroext i32 @test_masked_vpcmpultw_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; 
NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39422,7 +39094,6 @@ define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39504,7 +39175,6 @@ define zeroext i64 @test_vpcmpultw_v8i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39589,7 +39259,6 @@ define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39675,7 +39344,6 @@ define zeroext i64 @test_masked_vpcmpultw_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39802,7 +39470,6 @@ define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -39925,7 +39592,6 @@ define zeroext i32 @test_vpcmpultw_v16i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40051,7 +39717,6 @@ define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40178,7 +39843,6 @@ define zeroext i32 @test_masked_vpcmpultw_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40310,7 +39974,6 @@ define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40438,7 +40101,6 @@ define zeroext i64 @test_vpcmpultw_v16i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40569,7 +40231,6 @@ define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask(i16 zeroext %__u, <4 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -40701,7 +40362,6 @@ define zeroext i64 @test_masked_vpcmpultw_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -41060,7 +40720,6 @@ define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; 
NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -41330,7 +40989,6 @@ define zeroext i64 @test_vpcmpultw_v32i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -41696,7 +41354,6 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask(i32 zeroext %__u, <8 ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -41978,7 +41635,6 @@ define zeroext i64 @test_masked_vpcmpultw_v32i1_v64i1_mask_mem(i32 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42881,7 +42537,6 @@ define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42926,7 +42581,6 @@ define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -42991,7 +42645,6 @@ define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43058,7 +42711,6 @@ define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43109,7 +42761,6 @@ define zeroext i32 @test_vpcmpultd_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43176,7 +42827,6 @@ define zeroext i32 @test_masked_vpcmpultd_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43233,7 +42883,6 @@ define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43284,7 +42933,6 @@ define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43355,7 +43003,6 @@ define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43428,7 +43075,6 @@ define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; 
NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43485,7 +43131,6 @@ define zeroext i64 @test_vpcmpultd_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43558,7 +43203,6 @@ define zeroext i64 @test_masked_vpcmpultd_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43832,7 +43476,6 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43906,7 +43549,6 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -43983,7 +43625,6 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44061,7 +43702,6 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44139,7 +43779,6 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44217,7 +43856,6 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44301,7 +43939,6 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44380,7 +44017,6 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44462,7 +44098,6 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44545,7 +44180,6 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44628,7 +44262,6 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq 
%rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44711,7 +44344,6 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44833,7 +44465,6 @@ define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -44950,7 +44581,6 @@ define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45070,7 +44700,6 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45191,7 +44820,6 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45312,7 +44940,6 @@ define zeroext i32 @test_vpcmpultd_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45433,7 +45060,6 @@ define zeroext i32 @test_masked_vpcmpultd_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45560,7 +45186,6 @@ define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45682,7 +45307,6 @@ define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45807,7 +45431,6 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -45933,7 +45556,6 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46059,7 +45681,6 @@ define zeroext i64 @test_vpcmpultd_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, i32* % ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -46185,7 +45806,6 @@ define zeroext i64 @test_masked_vpcmpultd_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: 
retq entry: @@ -47087,7 +46707,6 @@ define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47132,7 +46751,6 @@ define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47189,7 +46807,6 @@ define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47248,7 +46865,6 @@ define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47299,7 +46915,6 @@ define zeroext i32 @test_vpcmpultq_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47358,7 +46973,6 @@ define zeroext i32 @test_masked_vpcmpultq_v2i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47415,7 +47029,6 @@ define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47466,7 +47079,6 @@ define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47529,7 +47141,6 @@ define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask(i8 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47594,7 +47205,6 @@ define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47651,7 +47261,6 @@ define zeroext i64 @test_vpcmpultq_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -47716,7 +47325,6 @@ define zeroext i64 @test_masked_vpcmpultq_v2i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48647,7 +48255,6 @@ define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 
; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48694,7 +48301,6 @@ define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48761,7 +48367,6 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48830,7 +48435,6 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48883,7 +48487,6 @@ define zeroext i32 @test_vpcmpultq_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -48952,7 +48555,6 @@ define zeroext i32 @test_masked_vpcmpultq_v4i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49011,7 +48613,6 @@ define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49064,7 +48665,6 @@ define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49137,7 +48737,6 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49212,7 +48811,6 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49271,7 +48869,6 @@ define zeroext i64 @test_vpcmpultq_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49346,7 +48943,6 @@ define zeroext i64 @test_masked_vpcmpultq_v4i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49594,7 +49190,6 @@ define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49666,7 +49261,6 @@ define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq 
%rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49741,7 +49335,6 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49817,7 +49410,6 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49893,7 +49485,6 @@ define zeroext i32 @test_vpcmpultq_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -49969,7 +49560,6 @@ define zeroext i32 @test_masked_vpcmpultq_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50051,7 +49641,6 @@ define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50128,7 +49717,6 @@ define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50208,7 +49796,6 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50289,7 +49876,6 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50370,7 +49956,6 @@ define zeroext i64 @test_vpcmpultq_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, i64* %_ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -50451,7 +50036,6 @@ define zeroext i64 @test_masked_vpcmpultq_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51245,7 +50829,6 @@ define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51287,7 +50870,6 @@ define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51331,7 +50913,6 @@ define zeroext i32 @test_vcmpoeqps_v4i1_v32i1_mask_mem_b(<2 x i64> %__a, float* ; NoVLX-NEXT: movl (%rsp), %eax ; 
NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51383,7 +50964,6 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask(i4 zeroext %__u, <2 x ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51435,7 +51015,6 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51489,7 +51068,6 @@ define zeroext i32 @test_masked_vcmpoeqps_v4i1_v32i1_mask_mem_b(i4 zeroext %__u, ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51543,7 +51121,6 @@ define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51591,7 +51168,6 @@ define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51641,7 +51217,6 @@ define zeroext i64 @test_vcmpoeqps_v4i1_v64i1_mask_mem_b(<2 x i64> %__a, float* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51699,7 +51274,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask(i4 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51757,7 +51331,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -51817,7 +51390,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v4i1_v64i1_mask_mem_b(i4 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52091,7 +51663,6 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52165,7 +51736,6 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52240,7 +51810,6 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, float* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52318,7 +51887,6 @@ define zeroext i32 
@test_masked_vcmpoeqps_v8i1_v32i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52396,7 +51964,6 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52475,7 +52042,6 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52560,7 +52126,6 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52639,7 +52204,6 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52719,7 +52283,6 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, float* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52802,7 +52365,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask(i8 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52885,7 +52447,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -52969,7 +52530,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53092,7 +52652,6 @@ define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53209,7 +52768,6 @@ define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53327,7 +52885,6 @@ define zeroext i32 @test_vcmpoeqps_v16i1_v32i1_mask_mem_b(<8 x i64> %__a, float* ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53448,7 +53005,6 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53569,7 +53125,6 @@ define zeroext i32 
@test_masked_vcmpoeqps_v16i1_v32i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53691,7 +53246,6 @@ define zeroext i32 @test_masked_vcmpoeqps_v16i1_v32i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53860,7 +53414,6 @@ define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %_ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -53982,7 +53535,6 @@ define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54105,7 +53657,6 @@ define zeroext i64 @test_vcmpoeqps_v16i1_v64i1_mask_mem_b(<8 x i64> %__a, float* ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54231,7 +53782,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask(i16 zeroext %__u, <8 ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54357,7 +53907,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem(i16 zeroext %__u, ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -54484,7 +54033,6 @@ define zeroext i64 @test_masked_vcmpoeqps_v16i1_v64i1_mask_mem_b(i16 zeroext %__ ; NoVLX-NEXT: popq %r14 ; NoVLX-NEXT: popq %r15 ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55338,7 +54886,6 @@ define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55380,7 +54927,6 @@ define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55424,7 +54970,6 @@ define zeroext i32 @test_vcmpoeqpd_v2i1_v32i1_mask_mem_b(<2 x i64> %__a, double* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55475,7 +55020,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask(i2 zeroext %__u, <2 x ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55526,7 +55070,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem(i2 zeroext %__u, < ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55579,7 +55122,6 @@ define zeroext i32 
@test_masked_vcmpoeqpd_v2i1_v32i1_mask_mem_b(i2 zeroext %__u, ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55633,7 +55175,6 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask(<2 x i64> %__a, <2 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55681,7 +55222,6 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem(<2 x i64> %__a, <2 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55731,7 +55271,6 @@ define zeroext i64 @test_vcmpoeqpd_v2i1_v64i1_mask_mem_b(<2 x i64> %__a, double* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55788,7 +55327,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask(i2 zeroext %__u, <2 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55845,7 +55383,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem(i2 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -55904,7 +55441,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v2i1_v64i1_mask_mem_b(i2 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56724,7 +56260,6 @@ define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56768,7 +56303,6 @@ define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56814,7 +56348,6 @@ define zeroext i32 @test_vcmpoeqpd_v4i1_v32i1_mask_mem_b(<4 x i64> %__a, double* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56868,7 +56401,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask(i4 zeroext %__u, <4 x ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56922,7 +56454,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -56978,7 +56509,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v4i1_v32i1_mask_mem_b(i4 zeroext %__u, ; NoVLX-NEXT: movl {{[0-9]+}}(%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: 
vzeroupper ; NoVLX-NEXT: retq entry: @@ -57034,7 +56564,6 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57084,7 +56613,6 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57136,7 +56664,6 @@ define zeroext i64 @test_vcmpoeqpd_v4i1_v64i1_mask_mem_b(<4 x i64> %__a, double* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57196,7 +56723,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask(i4 zeroext %__u, <4 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57256,7 +56782,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem(i4 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57318,7 +56843,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v4i1_v64i1_mask_mem_b(i4 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57622,7 +57146,6 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57694,7 +57217,6 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57767,7 +57289,6 @@ define zeroext i32 @test_vcmpoeqpd_v8i1_v32i1_mask_mem_b(<8 x i64> %__a, double* ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57843,7 +57364,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57919,7 +57439,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -57996,7 +57515,6 @@ define zeroext i32 @test_masked_vcmpoeqpd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: movl (%rsp), %eax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -58129,7 +57647,6 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask(<8 x i64> %__a, <8 x i64> %__ ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; 
NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -58206,7 +57723,6 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem(<8 x i64> %__a, <8 x i64> ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -58284,7 +57800,6 @@ define zeroext i64 @test_vcmpoeqpd_v8i1_v64i1_mask_mem_b(<8 x i64> %__a, double* ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -58365,7 +57880,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask(i8 zeroext %__u, <8 x ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -58446,7 +57960,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem(i8 zeroext %__u, < ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: @@ -58528,7 +58041,6 @@ define zeroext i64 @test_masked_vcmpoeqpd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u, ; NoVLX-NEXT: orq %rcx, %rax ; NoVLX-NEXT: movq %rbp, %rsp ; NoVLX-NEXT: popq %rbp -; NoVLX-NEXT: .cfi_def_cfa %rsp, 8 ; NoVLX-NEXT: vzeroupper ; NoVLX-NEXT: retq entry: diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll index c482220..e197713 100644 --- a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll +++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll @@ -439,7 +439,6 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll index f5fe395..f6cfbbb 100644 --- a/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll +++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-512.ll @@ -594,7 +594,6 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -1240,7 +1239,6 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { ; AVX1-NEXT: orq %rcx, %rax ; AVX1-NEXT: movq %rbp, %rsp ; AVX1-NEXT: popq %rbp -; AVX1-NEXT: .cfi_def_cfa %rsp, 8 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -1459,7 +1457,6 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { ; AVX2-NEXT: orq %rcx, %rax ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp -; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1502,7 +1499,6 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) { ; AVX512F-NEXT: orq %rcx, %rax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll index 1959000..4ed55ac 100644 --- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll +++ 
b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll @@ -321,17 +321,11 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) { ; AVX512-NEXT: vpinsrb $15, %r9d, %xmm0, %xmm0 ; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; AVX512-NEXT: popq %rbx -; AVX512-NEXT: .cfi_def_cfa_offset 48 ; AVX512-NEXT: popq %r12 -; AVX512-NEXT: .cfi_def_cfa_offset 40 ; AVX512-NEXT: popq %r13 -; AVX512-NEXT: .cfi_def_cfa_offset 32 ; AVX512-NEXT: popq %r14 -; AVX512-NEXT: .cfi_def_cfa_offset 24 ; AVX512-NEXT: popq %r15 -; AVX512-NEXT: .cfi_def_cfa_offset 16 ; AVX512-NEXT: popq %rbp -; AVX512-NEXT: .cfi_def_cfa_offset 8 ; AVX512-NEXT: retq %1 = bitcast i16 %a0 to <16 x i1> %2 = zext <16 x i1> %1 to <16 x i8> diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll index 7616051..ee2dac1 100644 --- a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll +++ b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll @@ -204,7 +204,6 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll index ef98108..2b73c6e 100644 --- a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll +++ b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll @@ -203,7 +203,6 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b) { ; AVX512F-NEXT: movl (%rsp), %eax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -770,7 +769,6 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) { ; AVX1-NEXT: orq %rcx, %rax ; AVX1-NEXT: movq %rbp, %rsp ; AVX1-NEXT: popq %rbp -; AVX1-NEXT: .cfi_def_cfa %rsp, 8 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -985,7 +983,6 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) { ; AVX2-NEXT: orq %rcx, %rax ; AVX2-NEXT: movq %rbp, %rsp ; AVX2-NEXT: popq %rbp -; AVX2-NEXT: .cfi_def_cfa %rsp, 8 ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; @@ -1024,7 +1021,6 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) { ; AVX512F-NEXT: orq %rcx, %rax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/bool-vector.ll b/llvm/test/CodeGen/X86/bool-vector.ll index 692d992..eb40744 100644 --- a/llvm/test/CodeGen/X86/bool-vector.ll +++ b/llvm/test/CodeGen/X86/bool-vector.ll @@ -93,7 +93,6 @@ define i32 @PR15215_good(<4 x i32> %input) { ; X32-NEXT: leal (%eax,%edx,4), %eax ; X32-NEXT: leal (%eax,%esi,8), %eax ; X32-NEXT: popl %esi -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X32-SSE2-LABEL: PR15215_good: @@ -116,7 +115,6 @@ define i32 @PR15215_good(<4 x i32> %input) { ; X32-SSE2-NEXT: leal (%eax,%edx,4), %eax ; X32-SSE2-NEXT: leal (%eax,%esi,8), %eax ; X32-SSE2-NEXT: popl %esi -; X32-SSE2-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE2-NEXT: retl ; ; X32-AVX2-LABEL: PR15215_good: @@ -136,7 +134,6 @@ define i32 @PR15215_good(<4 x i32> %input) { ; X32-AVX2-NEXT: leal (%eax,%edx,4), %eax ; X32-AVX2-NEXT: leal (%eax,%esi,8), %eax ; X32-AVX2-NEXT: popl %esi -; X32-AVX2-NEXT: .cfi_def_cfa_offset 4 ; X32-AVX2-NEXT: retl ; ; X64-LABEL: PR15215_good: diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll index 6f9abae..82e133d 100644 --- a/llvm/test/CodeGen/X86/cmp.ll +++ b/llvm/test/CodeGen/X86/cmp.ll @@ -247,13 +247,10 @@ define i32 @test12() 
ssp uwtable { ; CHECK-NEXT: # BB#1: # %T ; CHECK-NEXT: movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00] ; CHECK-NEXT: popq %rcx # encoding: [0x59] -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq # encoding: [0xc3] ; CHECK-NEXT: .LBB12_2: # %F -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: movl $2, %eax # encoding: [0xb8,0x02,0x00,0x00,0x00] ; CHECK-NEXT: popq %rcx # encoding: [0x59] -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq # encoding: [0xc3] entry: %tmp1 = call zeroext i1 @test12b() diff --git a/llvm/test/CodeGen/X86/emutls-pie.ll b/llvm/test/CodeGen/X86/emutls-pie.ll index f4561fc..3c312a9 100644 --- a/llvm/test/CodeGen/X86/emutls-pie.ll +++ b/llvm/test/CodeGen/X86/emutls-pie.ll @@ -18,16 +18,13 @@ define i32 @my_get_xyz() { ; X32-NEXT: calll my_emutls_get_address@PLT ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $8, %esp -; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: my_get_xyz: ; X64: movq my_emutls_v_xyz@GOTPCREL(%rip), %rdi ; X64-NEXT: callq my_emutls_get_address@PLT ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -47,16 +44,13 @@ define i32 @f1() { ; X32-NEXT: calll __emutls_get_address@PLT ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $8, %esp -; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebx -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f1: ; X64: leaq __emutls_v.i(%rip), %rdi ; X64-NEXT: callq __emutls_get_address@PLT ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: diff --git a/llvm/test/CodeGen/X86/emutls.ll b/llvm/test/CodeGen/X86/emutls.ll index 2321cd2..8c0ba90 100644 --- a/llvm/test/CodeGen/X86/emutls.ll +++ b/llvm/test/CodeGen/X86/emutls.ll @@ -16,14 +16,12 @@ define i32 @my_get_xyz() { ; X32-NEXT: calll my_emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: my_get_xyz: ; X64: movl $my_emutls_v_xyz, %edi ; X64-NEXT: callq my_emutls_get_address ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -47,14 +45,12 @@ define i32 @f1() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f1: ; X64: movl $__emutls_v.i1, %edi ; X64-NEXT: callq __emutls_get_address ; X64-NEXT: movl (%rax), %eax ; X64-NEXT: popq %rcx -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -67,13 +63,11 @@ define i32* @f2() { ; X32: movl $__emutls_v.i1, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; X64-LABEL: f2: ; X64: movl $__emutls_v.i1, %edi ; X64-NEXT: callq __emutls_get_address ; X64-NEXT: popq %rcx -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: @@ -98,7 +92,6 @@ define i32* @f4() { ; X32: movl $__emutls_v.i2, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -123,7 +116,6 @@ define i32* @f6() { ; X32: movl $__emutls_v.i3, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -136,7 +128,6 @@ define i32 @f7() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: 
.cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -149,7 +140,6 @@ define i32* @f8() { ; X32: movl $__emutls_v.i4, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -162,7 +152,6 @@ define i32 @f9() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -175,7 +164,6 @@ define i32* @f10() { ; X32: movl $__emutls_v.i5, (%esp) ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -188,7 +176,6 @@ define i16 @f11() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movzwl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -202,7 +189,6 @@ define i32 @f12() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movswl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -217,7 +203,6 @@ define i8 @f13() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movb (%eax), %al ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: @@ -231,7 +216,6 @@ define i32 @f14() { ; X32-NEXT: calll __emutls_get_address ; X32-NEXT: movsbl (%eax), %eax ; X32-NEXT: addl $12, %esp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl entry: diff --git a/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll b/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll deleted file mode 100644 index c2fe1c7..0000000 --- a/llvm/test/CodeGen/X86/epilogue-cfi-fp.ll +++ /dev/null @@ -1,43 +0,0 @@ -; RUN: llc -O0 %s -o - | FileCheck %s - -; ModuleID = 'epilogue-cfi-fp.c' -source_filename = "epilogue-cfi-fp.c" -target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" -target triple = "i686-pc-linux" - -; Function Attrs: noinline nounwind -define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) #0 { - -; CHECK-LABEL: foo: -; CHECK: popl %ebp -; CHECK-NEXT: .cfi_def_cfa %esp, 4 -; CHECK-NEXT: retl - -entry: - %i.addr = alloca i32, align 4 - %j.addr = alloca i32, align 4 - %k.addr = alloca i32, align 4 - %l.addr = alloca i32, align 4 - %m.addr = alloca i32, align 4 - store i32 %i, i32* %i.addr, align 4 - store i32 %j, i32* %j.addr, align 4 - store i32 %k, i32* %k.addr, align 4 - store i32 %l, i32* %l.addr, align 4 - store i32 %m, i32* %m.addr, align 4 - ret i32 0 -} - -attributes #0 = { "no-frame-pointer-elim"="true" } - -!llvm.dbg.cu = !{!0} -!llvm.module.flags = !{!3, !4, !5, !6, !7} - -!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) -!1 = !DIFile(filename: "epilogue-cfi-fp.c", directory: "epilogue-dwarf/test") -!2 = !{} -!3 = !{i32 1, !"NumRegisterParameters", i32 0} -!4 = !{i32 2, !"Dwarf Version", i32 4} -!5 = !{i32 2, !"Debug Info Version", i32 3} -!6 = !{i32 1, !"wchar_size", i32 4} -!7 = !{i32 7, !"PIC Level", i32 2} - diff --git a/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll b/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll deleted file mode 100644 index 79d6f47..0000000 --- a/llvm/test/CodeGen/X86/epilogue-cfi-no-fp.ll +++ /dev/null @@ -1,46 +0,0 @@ -; RUN: llc -O0 < %s | FileCheck %s - -; ModuleID = 'epilogue-cfi-no-fp.c' -source_filename = "epilogue-cfi-no-fp.c" -target datalayout = 
"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128" -target triple = "i686-pc-linux" - -; Function Attrs: noinline nounwind -define i32 @foo(i32 %i, i32 %j, i32 %k, i32 %l, i32 %m) { -; CHECK-LABEL: foo: -; CHECK: addl $20, %esp -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 12 -; CHECK-NEXT: popl %edi -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: popl %ebx -; CHECK-NEXT: .cfi_def_cfa_offset 4 -; CHECK-NEXT: retl -entry: - %i.addr = alloca i32, align 4 - %j.addr = alloca i32, align 4 - %k.addr = alloca i32, align 4 - %l.addr = alloca i32, align 4 - %m.addr = alloca i32, align 4 - store i32 %i, i32* %i.addr, align 4 - store i32 %j, i32* %j.addr, align 4 - store i32 %k, i32* %k.addr, align 4 - store i32 %l, i32* %l.addr, align 4 - store i32 %m, i32* %m.addr, align 4 - ret i32 0 -} - -!llvm.dbg.cu = !{!0} -!llvm.module.flags = !{!3, !4, !5, !6, !7} - -!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 5.0.0 (http://llvm.org/git/clang.git 3f8116e6a2815b1d5f3491493938d0c63c9f42c9) (http://llvm.org/git/llvm.git 4fde77f8f1a8e4482e69b6a7484bc7d1b99b3c0a)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) -!1 = !DIFile(filename: "epilogue-cfi-no-fp.c", directory: "epilogue-dwarf/test") -!2 = !{} -!3 = !{i32 1, !"NumRegisterParameters", i32 0} -!4 = !{i32 2, !"Dwarf Version", i32 4} -!5 = !{i32 2, !"Debug Info Version", i32 3} -!6 = !{i32 1, !"wchar_size", i32 4} -!7 = !{i32 7, !"PIC Level", i32 2} - - diff --git a/llvm/test/CodeGen/X86/fast-isel-store.ll b/llvm/test/CodeGen/X86/fast-isel-store.ll index e2412e9..e359e62 100644 --- a/llvm/test/CodeGen/X86/fast-isel-store.ll +++ b/llvm/test/CodeGen/X86/fast-isel-store.ll @@ -375,7 +375,6 @@ define <4 x double> @test_store_4xf64(<4 x double>* nocapture %addr, <4 x double ; SSE64-NEXT: movupd %xmm0, (%eax) ; SSE64-NEXT: movupd %xmm1, 16(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVX32-LABEL: test_store_4xf64: @@ -414,7 +413,6 @@ define <4 x double> @test_store_4xf64_aligned(<4 x double>* nocapture %addr, <4 ; SSE64-NEXT: movapd %xmm0, (%eax) ; SSE64-NEXT: movapd %xmm1, 16(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVX32-LABEL: test_store_4xf64_aligned: @@ -454,7 +452,6 @@ define <16 x i32> @test_store_16xi32(<16 x i32>* nocapture %addr, <16 x i32> %va ; SSE64-NEXT: movups %xmm2, 32(%eax) ; SSE64-NEXT: movups %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xi32: @@ -504,7 +501,6 @@ define <16 x i32> @test_store_16xi32_aligned(<16 x i32>* nocapture %addr, <16 x ; SSE64-NEXT: movaps %xmm2, 32(%eax) ; SSE64-NEXT: movaps %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xi32_aligned: @@ -554,7 +550,6 @@ define <16 x float> @test_store_16xf32(<16 x float>* nocapture %addr, <16 x floa ; SSE64-NEXT: movups %xmm2, 32(%eax) ; SSE64-NEXT: movups %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_16xf32: @@ -604,7 +599,6 @@ define <16 x float> @test_store_16xf32_aligned(<16 x float>* nocapture %addr, <1 ; SSE64-NEXT: movaps %xmm2, 32(%eax) ; SSE64-NEXT: movaps %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: 
test_store_16xf32_aligned: @@ -662,7 +656,6 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double ; SSE64-NEXT: movupd %xmm2, 32(%eax) ; SSE64-NEXT: movupd %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_8xf64: @@ -689,7 +682,6 @@ define <8 x double> @test_store_8xf64(<8 x double>* nocapture %addr, <8 x double ; AVXONLY64-NEXT: vmovupd %ymm1, 32(%eax) ; AVXONLY64-NEXT: movl %ebp, %esp ; AVXONLY64-NEXT: popl %ebp -; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4 ; AVXONLY64-NEXT: retl ; ; AVX51232-LABEL: test_store_8xf64: @@ -737,7 +729,6 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; SSE64-NEXT: movapd %xmm2, 32(%eax) ; SSE64-NEXT: movapd %xmm3, 48(%eax) ; SSE64-NEXT: addl $12, %esp -; SSE64-NEXT: .cfi_def_cfa_offset 4 ; SSE64-NEXT: retl ; ; AVXONLY32-LABEL: test_store_8xf64_aligned: @@ -764,7 +755,6 @@ define <8 x double> @test_store_8xf64_aligned(<8 x double>* nocapture %addr, <8 ; AVXONLY64-NEXT: vmovapd %ymm1, 32(%eax) ; AVXONLY64-NEXT: movl %ebp, %esp ; AVXONLY64-NEXT: popl %ebp -; AVXONLY64-NEXT: .cfi_def_cfa %esp, 4 ; AVXONLY64-NEXT: retl ; ; AVX51232-LABEL: test_store_8xf64_aligned: diff --git a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll index ee64790..ba80c83 100644 --- a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll +++ b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic-2.ll @@ -18,15 +18,11 @@ entry: } ; CHECK-LABEL: noDebug -; CHECK: addq $16, %rsp -; CHECK-NEXT: .cfi_adjust_cfa_offset -16 -; CHECK-NEXT: addq $8, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 24 -; CHECK-NEXT: popq %rbx -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: popq %r14 -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: retq +; CHECK: addq $24, %rsp +; CHECK: popq %rbx +; CHECK-NEXT: popq %r14 +; CHECK-NEXT: retq + define void @withDebug() !dbg !18 { entry: @@ -46,11 +42,9 @@ entry: ; CHECK-LABEL: withDebug ; CHECK: callq printf ; CHECK: callq printf -; CHECK-NEXT: addq $16, %rsp +; CHECK-NEXT: addq $24, %rsp ; CHECK: popq %rbx -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popq %r14 -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64) diff --git a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll index de9d6bf..f9ecf70 100644 --- a/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll +++ b/llvm/test/CodeGen/X86/frame-lowering-debug-intrinsic.ll @@ -9,7 +9,6 @@ define i64 @fn1NoDebug(i64 %a) { ; CHECK-LABEL: fn1NoDebug ; CHECK: popq %rcx -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: ret define i64 @fn1WithDebug(i64 %a) !dbg !4 { @@ -20,7 +19,6 @@ define i64 @fn1WithDebug(i64 %a) !dbg !4 { ; CHECK-LABEL: fn1WithDebug ; CHECK: popq %rcx -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: ret %struct.Buffer = type { i8, [63 x i8] } @@ -35,7 +33,6 @@ define void @fn2NoDebug(%struct.Buffer* byval align 64 %p1) { ; CHECK-NOT: sub ; CHECK: mov ; CHECK-NEXT: pop -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 { @@ -49,7 +46,6 @@ define void @fn2WithDebug(%struct.Buffer* byval align 64 %p1) !dbg !8 { ; CHECK-NOT: sub ; CHECK: mov ; CHECK-NEXT: pop -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret declare i64 @fn(i64, i64) diff --git a/llvm/test/CodeGen/X86/haddsub-2.ll 
b/llvm/test/CodeGen/X86/haddsub-2.ll index 7126fb2..e32c745 100644 --- a/llvm/test/CodeGen/X86/haddsub-2.ll +++ b/llvm/test/CodeGen/X86/haddsub-2.ll @@ -724,17 +724,11 @@ define <16 x i16> @avx2_vphadd_w_test(<16 x i16> %a, <16 x i16> %b) { ; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0] ; SSE3-NEXT: popq %rbx -; SSE3-NEXT: .cfi_def_cfa_offset 48 ; SSE3-NEXT: popq %r12 -; SSE3-NEXT: .cfi_def_cfa_offset 40 ; SSE3-NEXT: popq %r13 -; SSE3-NEXT: .cfi_def_cfa_offset 32 ; SSE3-NEXT: popq %r14 -; SSE3-NEXT: .cfi_def_cfa_offset 24 ; SSE3-NEXT: popq %r15 -; SSE3-NEXT: .cfi_def_cfa_offset 16 ; SSE3-NEXT: popq %rbp -; SSE3-NEXT: .cfi_def_cfa_offset 8 ; SSE3-NEXT: retq ; ; SSSE3-LABEL: avx2_vphadd_w_test: @@ -1357,17 +1351,11 @@ define <16 x i16> @avx2_hadd_w(<16 x i16> %a, <16 x i16> %b) { ; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0] ; SSE3-NEXT: popq %rbx -; SSE3-NEXT: .cfi_def_cfa_offset 48 ; SSE3-NEXT: popq %r12 -; SSE3-NEXT: .cfi_def_cfa_offset 40 ; SSE3-NEXT: popq %r13 -; SSE3-NEXT: .cfi_def_cfa_offset 32 ; SSE3-NEXT: popq %r14 -; SSE3-NEXT: .cfi_def_cfa_offset 24 ; SSE3-NEXT: popq %r15 -; SSE3-NEXT: .cfi_def_cfa_offset 16 ; SSE3-NEXT: popq %rbp -; SSE3-NEXT: .cfi_def_cfa_offset 8 ; SSE3-NEXT: retq ; ; SSSE3-LABEL: avx2_hadd_w: diff --git a/llvm/test/CodeGen/X86/hipe-cc64.ll b/llvm/test/CodeGen/X86/hipe-cc64.ll index ce2d0e9..efe07cf 100644 --- a/llvm/test/CodeGen/X86/hipe-cc64.ll +++ b/llvm/test/CodeGen/X86/hipe-cc64.ll @@ -87,7 +87,6 @@ define cc 11 { i64, i64, i64 } @tailcaller(i64 %hp, i64 %p) #0 { ; CHECK-NEXT: movl $47, %ecx ; CHECK-NEXT: movl $63, %r8d ; CHECK-NEXT: popq %rax - ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: jmp tailcallee %ret = tail call cc11 { i64, i64, i64 } @tailcallee(i64 %hp, i64 %p, i64 15, i64 31, i64 47, i64 63, i64 79) #1 diff --git a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll index e3b25a53..fd503aa 100644 --- a/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll +++ b/llvm/test/CodeGen/X86/illegal-bitfield-loadstore.ll @@ -81,7 +81,6 @@ define void @i24_insert_bit(i24* %a, i1 zeroext %bit) { ; X86-NEXT: orl %edx, %eax ; X86-NEXT: movw %ax, (%ecx) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: i24_insert_bit: diff --git a/llvm/test/CodeGen/X86/imul.ll b/llvm/test/CodeGen/X86/imul.ll index 02782f7..e364b00 100644 --- a/llvm/test/CodeGen/X86/imul.ll +++ b/llvm/test/CodeGen/X86/imul.ll @@ -307,7 +307,6 @@ define i64 @test5(i64 %a) { ; X86-NEXT: subl %ecx, %edx ; X86-NEXT: subl %esi, %edx ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, -31 @@ -363,7 +362,6 @@ define i64 @test7(i64 %a) { ; X86-NEXT: subl %ecx, %edx ; X86-NEXT: subl %esi, %edx ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, -33 @@ -392,7 +390,6 @@ define i64 @testOverflow(i64 %a) { ; X86-NEXT: addl %esi, %edx ; X86-NEXT: subl {{[0-9]+}}(%esp), %edx ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %tmp3 = mul i64 %a, 9223372036854775807 diff --git a/llvm/test/CodeGen/X86/lea-opt-cse1.ll b/llvm/test/CodeGen/X86/lea-opt-cse1.ll index 4c9ec3e..05b4769 100644 --- a/llvm/test/CodeGen/X86/lea-opt-cse1.ll +++ b/llvm/test/CodeGen/X86/lea-opt-cse1.ll @@ -30,7 +30,6 @@ define void @test_func(%struct.SA* 
nocapture %ctx, i32 %n) local_unnamed_addr { ; X86-NEXT: leal 1(%edx,%ecx), %ecx ; X86-NEXT: movl %ecx, 16(%eax) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0 diff --git a/llvm/test/CodeGen/X86/lea-opt-cse2.ll b/llvm/test/CodeGen/X86/lea-opt-cse2.ll index cee6f67..865dd49 100644 --- a/llvm/test/CodeGen/X86/lea-opt-cse2.ll +++ b/llvm/test/CodeGen/X86/lea-opt-cse2.ll @@ -46,9 +46,7 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 { ; X86-NEXT: leal 1(%esi,%edx), %ecx ; X86-NEXT: movl %ecx, 16(%eax) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %edi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: br label %loop diff --git a/llvm/test/CodeGen/X86/lea-opt-cse3.ll b/llvm/test/CodeGen/X86/lea-opt-cse3.ll index ed3aff9..87949b4 100644 --- a/llvm/test/CodeGen/X86/lea-opt-cse3.ll +++ b/llvm/test/CodeGen/X86/lea-opt-cse3.ll @@ -91,7 +91,6 @@ define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 { ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB2_2: # %exit ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %mul = shl i32 %b, 2 @@ -144,7 +143,6 @@ define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_a ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: .LBB3_2: # %exit ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %mul = shl i32 %b, 1 diff --git a/llvm/test/CodeGen/X86/lea-opt-cse4.ll b/llvm/test/CodeGen/X86/lea-opt-cse4.ll index d068180..31f31a7 100644 --- a/llvm/test/CodeGen/X86/lea-opt-cse4.ll +++ b/llvm/test/CodeGen/X86/lea-opt-cse4.ll @@ -36,7 +36,6 @@ define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 { ; X86-NEXT: leal 1(%ecx,%edx), %ecx ; X86-NEXT: movl %ecx, 16(%eax) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0 @@ -111,9 +110,7 @@ define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 ; X86-NEXT: addl %ecx, %edx ; X86-NEXT: movl %edx, 16(%eax) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %edi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl entry: br label %loop diff --git a/llvm/test/CodeGen/X86/legalize-shift-64.ll b/llvm/test/CodeGen/X86/legalize-shift-64.ll index 7dff2c2..ca4cfa5 100644 --- a/llvm/test/CodeGen/X86/legalize-shift-64.ll +++ b/llvm/test/CodeGen/X86/legalize-shift-64.ll @@ -117,13 +117,9 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) { ; CHECK-NEXT: movl %esi, 4(%eax) ; CHECK-NEXT: movl %edi, (%eax) ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: popl %edi -; CHECK-NEXT: .cfi_def_cfa_offset 12 ; CHECK-NEXT: popl %ebx -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %ebp -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl $4 %shl = shl <2 x i64> %A, %B ret <2 x i64> %shl @@ -164,7 +160,6 @@ define i32 @test6() { ; CHECK-NEXT: .LBB5_4: # %if.then ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp -; CHECK-NEXT: .cfi_def_cfa %esp, 4 ; CHECK-NEXT: retl %x = alloca i32, align 4 %t = alloca i64, align 8 diff --git a/llvm/test/CodeGen/X86/live-out-reg-info.ll b/llvm/test/CodeGen/X86/live-out-reg-info.ll index 170f735..b838065 100644 --- a/llvm/test/CodeGen/X86/live-out-reg-info.ll +++ b/llvm/test/CodeGen/X86/live-out-reg-info.ll @@ -18,7 +18,6 @@ define void @foo(i32 %a) { ; 
CHECK-NEXT: callq qux ; CHECK-NEXT: .LBB0_2: # %false ; CHECK-NEXT: popq %rax -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %t0 = lshr i32 %a, 23 br label %next diff --git a/llvm/test/CodeGen/X86/load-combine.ll b/llvm/test/CodeGen/X86/load-combine.ll index d46efc4..d1f5f41 100644 --- a/llvm/test/CodeGen/X86/load-combine.ll +++ b/llvm/test/CodeGen/X86/load-combine.ll @@ -376,7 +376,6 @@ define i32 @load_i32_by_i8_bswap_uses(i32* %arg) { ; CHECK-NEXT: orl %ecx, %eax ; CHECK-NEXT: orl %edx, %eax ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_bswap_uses: @@ -497,7 +496,6 @@ define i32 @load_i32_by_i8_bswap_store_in_between(i32* %arg, i32* %arg1) { ; CHECK-NEXT: movzbl 3(%ecx), %eax ; CHECK-NEXT: orl %edx, %eax ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; ; CHECK64-LABEL: load_i32_by_i8_bswap_store_in_between: diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 69d3a69..8983c3a 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -1702,7 +1702,6 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; KNL_32-NEXT: vmovdqa64 %zmm2, %zmm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_gather_16i64: @@ -1737,7 +1736,6 @@ define <16 x i64> @test_gather_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i ; SKX_32-NEXT: vmovdqa64 %zmm2, %zmm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %res = call <16 x i64> @llvm.masked.gather.v16i64.v16p0i64(<16 x i64*> %ptrs, i32 4, <16 x i1> %mask, <16 x i64> %src0) ret <16 x i64> %res @@ -1821,7 +1819,6 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; KNL_32-NEXT: vmovapd %zmm2, %zmm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_gather_16f64: @@ -1856,7 +1853,6 @@ define <16 x double> @test_gather_16f64(<16 x double*> %ptrs, <16 x i1> %mask, < ; SKX_32-NEXT: vmovapd %zmm2, %zmm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %res = call <16 x double> @llvm.masked.gather.v16f64.v16p0f64(<16 x double*> %ptrs, i32 4, <16 x i1> %mask, <16 x double> %src0) ret <16 x double> %res @@ -1938,7 +1934,6 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; KNL_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2} ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl ; @@ -1972,7 +1967,6 @@ define void @test_scatter_16i64(<16 x i64*> %ptrs, <16 x i1> %mask, <16 x i64> % ; SKX_32-NEXT: vpscatterdq %zmm1, (,%ymm0) {%k2} ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: vzeroupper ; SKX_32-NEXT: retl call void @llvm.masked.scatter.v16i64.v16p0i64(<16 x i64> %src0, <16 x i64*> %ptrs, i32 4, <16 x i1> %mask) @@ -2056,7 +2050,6 @@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; KNL_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2} ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: vzeroupper ; KNL_32-NEXT: retl ; @@ -2090,7 +2083,6 
@@ define void @test_scatter_16f64(<16 x double*> %ptrs, <16 x i1> %mask, <16 x dou ; SKX_32-NEXT: vscatterdpd %zmm1, (,%ymm0) {%k2} ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: vzeroupper ; SKX_32-NEXT: retl call void @llvm.masked.scatter.v16f64.v16p0f64(<16 x double> %src0, <16 x double*> %ptrs, i32 4, <16 x i1> %mask) @@ -2135,7 +2127,6 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; KNL_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; KNL_32-NEXT: movl %ebp, %esp ; KNL_32-NEXT: popl %ebp -; KNL_32-NEXT: .cfi_def_cfa %esp, 4 ; KNL_32-NEXT: retl ; ; SKX-LABEL: test_pr28312: @@ -2163,7 +2154,6 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6 ; SKX_32-NEXT: vpaddq %ymm0, %ymm1, %ymm0 ; SKX_32-NEXT: movl %ebp, %esp ; SKX_32-NEXT: popl %ebp -; SKX_32-NEXT: .cfi_def_cfa %esp, 4 ; SKX_32-NEXT: retl %g1 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef) %g2 = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %p1, i32 8, <4 x i1> %k, <4 x i64> undef) diff --git a/llvm/test/CodeGen/X86/memset-nonzero.ll b/llvm/test/CodeGen/X86/memset-nonzero.ll index 98e0937..f0a957c 100644 --- a/llvm/test/CodeGen/X86/memset-nonzero.ll +++ b/llvm/test/CodeGen/X86/memset-nonzero.ll @@ -148,7 +148,6 @@ define void @memset_256_nonzero_bytes(i8* %x) { ; SSE-NEXT: movl $256, %edx # imm = 0x100 ; SSE-NEXT: callq memset ; SSE-NEXT: popq %rax -; SSE-NEXT: .cfi_def_cfa_offset 8 ; SSE-NEXT: retq ; ; SSE2FAST-LABEL: memset_256_nonzero_bytes: diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll index b909b7c..e414f55 100644 --- a/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll +++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-128.ll @@ -72,9 +72,7 @@ define <2 x i64> @merge_2i64_i64_12(i64* %ptr) nounwind uwtable noinline ssp { ; X32-SSE1-NEXT: movl %esi, 4(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_2i64_i64_12: @@ -386,7 +384,6 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp { ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: movl %ecx, 12(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_23u5: @@ -438,9 +435,7 @@ define <4 x i32> @merge_4i32_i32_23u5_inc2(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: movl %ecx, 12(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc2: @@ -495,9 +490,7 @@ define <4 x i32> @merge_4i32_i32_23u5_inc3(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: movl %ecx, 12(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_23u5_inc3: @@ -656,9 +649,7 @@ define <4 x i32> @merge_4i32_i32_45zz_inc4(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl $0, 12(%eax) ; X32-SSE1-NEXT: movl $0, 8(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 
; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc4: @@ -710,9 +701,7 @@ define <4 x i32> @merge_4i32_i32_45zz_inc5(i32* %ptr) nounwind uwtable noinline ; X32-SSE1-NEXT: movl $0, 12(%eax) ; X32-SSE1-NEXT: movl $0, 8(%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_4i32_i32_45zz_inc5: @@ -762,9 +751,7 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s ; X32-SSE1-NEXT: movl %esi, 6(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_8i16_i16_23u567u9: @@ -910,13 +897,9 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin ; X32-SSE1-NEXT: movl %esi, 3(%eax) ; X32-SSE1-NEXT: movw %bp, (%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 16 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 12 ; X32-SSE1-NEXT: popl %ebx -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %ebp -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_16i8_i8_01u3456789ABCDuF: @@ -1146,9 +1129,7 @@ define <2 x i64> @merge_2i64_i64_12_volatile(i64* %ptr) nounwind uwtable noinlin ; X32-SSE1-NEXT: movl %esi, 4(%eax) ; X32-SSE1-NEXT: movl %edx, (%eax) ; X32-SSE1-NEXT: popl %esi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 8 ; X32-SSE1-NEXT: popl %edi -; X32-SSE1-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE1-NEXT: retl $4 ; ; X32-SSE41-LABEL: merge_2i64_i64_12_volatile: diff --git a/llvm/test/CodeGen/X86/movtopush.ll b/llvm/test/CodeGen/X86/movtopush.ll index ddcc383..051c8a7 100644 --- a/llvm/test/CodeGen/X86/movtopush.ll +++ b/llvm/test/CodeGen/X86/movtopush.ll @@ -382,10 +382,8 @@ entry: ; LINUX: pushl $1 ; LINUX: .cfi_adjust_cfa_offset 4 ; LINUX: calll good -; LINUX: addl $16, %esp +; LINUX: addl $28, %esp ; LINUX: .cfi_adjust_cfa_offset -16 -; LINUX: addl $12, %esp -; LINUX: .cfi_def_cfa_offset 4 ; LINUX-NOT: add ; LINUX: retl define void @pr27140() optsize { diff --git a/llvm/test/CodeGen/X86/mul-constant-result.ll b/llvm/test/CodeGen/X86/mul-constant-result.ll index f778397..011b63c 100644 --- a/llvm/test/CodeGen/X86/mul-constant-result.ll +++ b/llvm/test/CodeGen/X86/mul-constant-result.ll @@ -34,116 +34,84 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: .LBB0_6: ; X86-NEXT: addl %eax, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_39: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: .LBB0_40: ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_7: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_8: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $2, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_9: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_10: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; 
X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_11: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (,%eax,8), %ecx ; X86-NEXT: jmp .LBB0_12 ; X86-NEXT: .LBB0_13: ; X86-NEXT: shll $3, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_14: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_15: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_16: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_17: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_18: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_19: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: jmp .LBB0_20 ; X86-NEXT: .LBB0_21: ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_22: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $4, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_23: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: shll $4, %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_24: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: addl %eax, %eax ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_25: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: shll $2, %ecx ; X86-NEXT: jmp .LBB0_12 @@ -151,26 +119,20 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: shll $2, %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_27: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: leal (%eax,%ecx,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_28: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %ecx ; X86-NEXT: .LBB0_20: ; X86-NEXT: leal (%eax,%ecx,4), %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_29: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,2), %ecx ; X86-NEXT: shll $3, %ecx ; X86-NEXT: jmp .LBB0_12 @@ -178,17 +140,13 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: shll $3, %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_31: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: leal (%eax,%eax,4), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_32: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: 
leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: jmp .LBB0_12 @@ -196,27 +154,21 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: leal (%eax,%eax,8), %eax ; X86-NEXT: leal (%eax,%eax,2), %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_34: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_35: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: leal (%eax,%eax,8), %ecx ; X86-NEXT: leal (%ecx,%ecx,2), %ecx ; X86-NEXT: addl %eax, %ecx ; X86-NEXT: addl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_36: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: movl %eax, %ecx ; X86-NEXT: shll $5, %ecx ; X86-NEXT: subl %eax, %ecx @@ -228,13 +180,10 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 { ; X86-NEXT: subl %eax, %ecx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; X86-NEXT: .LBB0_38: -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: shll $5, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-HSW-LABEL: mult: @@ -908,11 +857,8 @@ define i32 @foo() local_unnamed_addr #0 { ; X86-NEXT: negl %ecx ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %edi -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebx -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-HSW-LABEL: foo: @@ -1126,15 +1072,10 @@ define i32 @foo() local_unnamed_addr #0 { ; X64-HSW-NEXT: negl %ecx ; X64-HSW-NEXT: movl %ecx, %eax ; X64-HSW-NEXT: addq $8, %rsp -; X64-HSW-NEXT: .cfi_def_cfa_offset 40 ; X64-HSW-NEXT: popq %rbx -; X64-HSW-NEXT: .cfi_def_cfa_offset 32 ; X64-HSW-NEXT: popq %r14 -; X64-HSW-NEXT: .cfi_def_cfa_offset 24 ; X64-HSW-NEXT: popq %r15 -; X64-HSW-NEXT: .cfi_def_cfa_offset 16 ; X64-HSW-NEXT: popq %rbp -; X64-HSW-NEXT: .cfi_def_cfa_offset 8 ; X64-HSW-NEXT: retq %1 = tail call i32 @mult(i32 1, i32 0) %2 = icmp ne i32 %1, 1 diff --git a/llvm/test/CodeGen/X86/mul-i256.ll b/llvm/test/CodeGen/X86/mul-i256.ll index 1e05b95..0a48ae7 100644 --- a/llvm/test/CodeGen/X86/mul-i256.ll +++ b/llvm/test/CodeGen/X86/mul-i256.ll @@ -349,15 +349,10 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 { ; X32-NEXT: movl %eax, 24(%ecx) ; X32-NEXT: movl %edx, 28(%ecx) ; X32-NEXT: addl $88, %esp -; X32-NEXT: .cfi_def_cfa_offset 20 ; X32-NEXT: popl %esi -; X32-NEXT: .cfi_def_cfa_offset 16 ; X32-NEXT: popl %edi -; X32-NEXT: .cfi_def_cfa_offset 12 ; X32-NEXT: popl %ebx -; X32-NEXT: .cfi_def_cfa_offset 8 ; X32-NEXT: popl %ebp -; X32-NEXT: .cfi_def_cfa_offset 4 ; X32-NEXT: retl ; ; X64-LABEL: test: @@ -426,11 +421,8 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 { ; X64-NEXT: movq %rax, 16(%r9) ; X64-NEXT: movq %rdx, 24(%r9) ; X64-NEXT: popq %rbx -; X64-NEXT: .cfi_def_cfa_offset 24 ; X64-NEXT: popq %r14 -; X64-NEXT: .cfi_def_cfa_offset 16 ; X64-NEXT: popq %r15 -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: %av = load i256, i256* %a diff --git a/llvm/test/CodeGen/X86/mul128.ll b/llvm/test/CodeGen/X86/mul128.ll index 0c11f17..70a6173 100644 --- a/llvm/test/CodeGen/X86/mul128.ll +++ b/llvm/test/CodeGen/X86/mul128.ll @@ -86,15 +86,10 @@ define i128 @foo(i128 %t, i128 %u) { ; X86-NEXT: movl %edx, 12(%ecx) ; X86-NEXT: movl %ecx, %eax ; X86-NEXT: addl $8, %esp -; 
X86-NEXT: .cfi_def_cfa_offset 20 ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 16 ; X86-NEXT: popl %edi -; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %ebx -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebp -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl $4 %k = mul i128 %t, %u ret i128 %k diff --git a/llvm/test/CodeGen/X86/pr21792.ll b/llvm/test/CodeGen/X86/pr21792.ll index 54eb1fc..74f6c5a 100644 --- a/llvm/test/CodeGen/X86/pr21792.ll +++ b/llvm/test/CodeGen/X86/pr21792.ll @@ -28,7 +28,6 @@ define void @func(<4 x float> %vx) { ; CHECK-NEXT: leaq stuff+8(%r9), %r9 ; CHECK-NEXT: callq toto ; CHECK-NEXT: popq %rax -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %tmp2 = bitcast <4 x float> %vx to <2 x i64> diff --git a/llvm/test/CodeGen/X86/pr29061.ll b/llvm/test/CodeGen/X86/pr29061.ll index b62d082..0cbe75f 100644 --- a/llvm/test/CodeGen/X86/pr29061.ll +++ b/llvm/test/CodeGen/X86/pr29061.ll @@ -15,7 +15,6 @@ define void @t1(i8 signext %c) { ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: popl %edi -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: tail call void asm sideeffect "", "{di},~{dirflag},~{fpsr},~{flags}"(i8 %c) @@ -33,7 +32,6 @@ define void @t2(i8 signext %c) { ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: tail call void asm sideeffect "", "{si},~{dirflag},~{fpsr},~{flags}"(i8 %c) diff --git a/llvm/test/CodeGen/X86/pr29112.ll b/llvm/test/CodeGen/X86/pr29112.ll index d791936..cc670ee 100644 --- a/llvm/test/CodeGen/X86/pr29112.ll +++ b/llvm/test/CodeGen/X86/pr29112.ll @@ -65,7 +65,6 @@ define <4 x float> @bar(<4 x float>* %a1p, <4 x float>* %a2p, <4 x float> %a3, < ; CHECK-NEXT: vaddps {{[0-9]+}}(%rsp), %xmm1, %xmm1 # 16-byte Folded Reload ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: addq $88, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %a1 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> diff --git a/llvm/test/CodeGen/X86/pr30430.ll b/llvm/test/CodeGen/X86/pr30430.ll index 06007a3..0254c09 100644 --- a/llvm/test/CodeGen/X86/pr30430.ll +++ b/llvm/test/CodeGen/X86/pr30430.ll @@ -108,7 +108,6 @@ define <16 x float> @makefloat(float %f1, float %f2, float %f3, float %f4, float ; CHECK-NEXT: vmovss %xmm14, (%rsp) # 4-byte Spill ; CHECK-NEXT: movq %rbp, %rsp ; CHECK-NEXT: popq %rbp -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: retq entry: %__A.addr.i = alloca float, align 4 diff --git a/llvm/test/CodeGen/X86/pr32241.ll b/llvm/test/CodeGen/X86/pr32241.ll index 02f3bb1..f48fef5 100644 --- a/llvm/test/CodeGen/X86/pr32241.ll +++ b/llvm/test/CodeGen/X86/pr32241.ll @@ -50,9 +50,7 @@ define i32 @_Z3foov() { ; CHECK-NEXT: movw %dx, {{[0-9]+}}(%esp) ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: addl $16, %esp -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %aa = alloca i16, align 2 diff --git a/llvm/test/CodeGen/X86/pr32256.ll b/llvm/test/CodeGen/X86/pr32256.ll index 5b6126f..f6e254a 100644 --- a/llvm/test/CodeGen/X86/pr32256.ll +++ b/llvm/test/CodeGen/X86/pr32256.ll @@ -27,7 +27,6 @@ define void @_Z1av() { ; CHECK-NEXT: andb $1, %al ; CHECK-NEXT: movb %al, {{[0-9]+}}(%esp) ; CHECK-NEXT: addl $2, %esp -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %b = alloca i8, align 1 diff --git a/llvm/test/CodeGen/X86/pr32282.ll b/llvm/test/CodeGen/X86/pr32282.ll index 67a0332..d6e6f6e 100644 --- 
a/llvm/test/CodeGen/X86/pr32282.ll +++ b/llvm/test/CodeGen/X86/pr32282.ll @@ -43,7 +43,6 @@ define void @foo() { ; X86-NEXT: orl %eax, %edx ; X86-NEXT: setne {{[0-9]+}}(%esp) ; X86-NEXT: popl %eax -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: foo: diff --git a/llvm/test/CodeGen/X86/pr32284.ll b/llvm/test/CodeGen/X86/pr32284.ll index 59be67f..11eb696 100644 --- a/llvm/test/CodeGen/X86/pr32284.ll +++ b/llvm/test/CodeGen/X86/pr32284.ll @@ -71,7 +71,6 @@ define void @foo() { ; 686-O0-NEXT: movzbl %al, %ecx ; 686-O0-NEXT: movl %ecx, (%esp) ; 686-O0-NEXT: addl $8, %esp -; 686-O0-NEXT: .cfi_def_cfa_offset 4 ; 686-O0-NEXT: retl ; ; 686-LABEL: foo: @@ -89,7 +88,6 @@ define void @foo() { ; 686-NEXT: setle %dl ; 686-NEXT: movl %edx, {{[0-9]+}}(%esp) ; 686-NEXT: addl $8, %esp -; 686-NEXT: .cfi_def_cfa_offset 4 ; 686-NEXT: retl entry: %a = alloca i8, align 1 @@ -234,15 +232,10 @@ define void @f1() { ; 686-O0-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill ; 686-O0-NEXT: movl %esi, (%esp) # 4-byte Spill ; 686-O0-NEXT: addl $36, %esp -; 686-O0-NEXT: .cfi_def_cfa_offset 20 ; 686-O0-NEXT: popl %esi -; 686-O0-NEXT: .cfi_def_cfa_offset 16 ; 686-O0-NEXT: popl %edi -; 686-O0-NEXT: .cfi_def_cfa_offset 12 ; 686-O0-NEXT: popl %ebx -; 686-O0-NEXT: .cfi_def_cfa_offset 8 ; 686-O0-NEXT: popl %ebp -; 686-O0-NEXT: .cfi_def_cfa_offset 4 ; 686-O0-NEXT: retl ; ; 686-LABEL: f1: @@ -284,11 +277,8 @@ define void @f1() { ; 686-NEXT: movl %eax, _ZN8struct_210member_2_0E ; 686-NEXT: movl $0, _ZN8struct_210member_2_0E+4 ; 686-NEXT: addl $1, %esp -; 686-NEXT: .cfi_def_cfa_offset 12 ; 686-NEXT: popl %esi -; 686-NEXT: .cfi_def_cfa_offset 8 ; 686-NEXT: popl %edi -; 686-NEXT: .cfi_def_cfa_offset 4 ; 686-NEXT: retl entry: %a = alloca i8, align 1 @@ -402,11 +392,8 @@ define void @f2() { ; 686-O0-NEXT: movw %cx, %di ; 686-O0-NEXT: movw %di, (%eax) ; 686-O0-NEXT: addl $2, %esp -; 686-O0-NEXT: .cfi_def_cfa_offset 12 ; 686-O0-NEXT: popl %esi -; 686-O0-NEXT: .cfi_def_cfa_offset 8 ; 686-O0-NEXT: popl %edi -; 686-O0-NEXT: .cfi_def_cfa_offset 4 ; 686-O0-NEXT: retl ; ; 686-LABEL: f2: @@ -427,7 +414,6 @@ define void @f2() { ; 686-NEXT: sete %dl ; 686-NEXT: movw %dx, (%eax) ; 686-NEXT: addl $2, %esp -; 686-NEXT: .cfi_def_cfa_offset 4 ; 686-NEXT: retl entry: %a = alloca i16, align 2 @@ -546,7 +532,6 @@ define void @f3() #0 { ; 686-O0-NEXT: popl %esi ; 686-O0-NEXT: popl %edi ; 686-O0-NEXT: popl %ebp -; 686-O0-NEXT: .cfi_def_cfa %esp, 4 ; 686-O0-NEXT: retl ; ; 686-LABEL: f3: @@ -573,7 +558,6 @@ define void @f3() #0 { ; 686-NEXT: movl %ecx, var_46 ; 686-NEXT: movl %ebp, %esp ; 686-NEXT: popl %ebp -; 686-NEXT: .cfi_def_cfa %esp, 4 ; 686-NEXT: retl entry: %a = alloca i64, align 8 diff --git a/llvm/test/CodeGen/X86/pr32329.ll b/llvm/test/CodeGen/X86/pr32329.ll index 9d1bb90..f6bdade 100644 --- a/llvm/test/CodeGen/X86/pr32329.ll +++ b/llvm/test/CodeGen/X86/pr32329.ll @@ -57,13 +57,9 @@ define void @foo() local_unnamed_addr { ; X86-NEXT: imull %eax, %ebx ; X86-NEXT: movb %bl, var_218 ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 16 ; X86-NEXT: popl %edi -; X86-NEXT: .cfi_def_cfa_offset 12 ; X86-NEXT: popl %ebx -; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: popl %ebp -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: foo: diff --git a/llvm/test/CodeGen/X86/pr32345.ll b/llvm/test/CodeGen/X86/pr32345.ll index 2bdeca2..f680288 100644 --- a/llvm/test/CodeGen/X86/pr32345.ll +++ b/llvm/test/CodeGen/X86/pr32345.ll @@ -84,7 +84,6 @@ define void @foo() { ; 6860-NEXT: popl %edi ; 6860-NEXT: popl %ebx ; 6860-NEXT: popl 
%ebp -; 6860-NEXT: .cfi_def_cfa %esp, 4 ; 6860-NEXT: retl ; ; X64-LABEL: foo: @@ -128,7 +127,6 @@ define void @foo() { ; 686-NEXT: movb %dl, (%eax) ; 686-NEXT: movl %ebp, %esp ; 686-NEXT: popl %ebp -; 686-NEXT: .cfi_def_cfa %esp, 4 ; 686-NEXT: retl bb: %tmp = alloca i64, align 8 diff --git a/llvm/test/CodeGen/X86/pr32451.ll b/llvm/test/CodeGen/X86/pr32451.ll index 5b7d137..67c0cb39 100644 --- a/llvm/test/CodeGen/X86/pr32451.ll +++ b/llvm/test/CodeGen/X86/pr32451.ll @@ -30,9 +30,7 @@ define i8** @japi1_convert_690(i8**, i8***, i32) { ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload ; CHECK-NEXT: movl %eax, (%ecx) ; CHECK-NEXT: addl $16, %esp -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: popl %ebx -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl top: %3 = alloca i8*** diff --git a/llvm/test/CodeGen/X86/pr34088.ll b/llvm/test/CodeGen/X86/pr34088.ll index 4d85722..2049c55 100644 --- a/llvm/test/CodeGen/X86/pr34088.ll +++ b/llvm/test/CodeGen/X86/pr34088.ll @@ -27,7 +27,6 @@ define i32 @pr34088() local_unnamed_addr { ; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%esp) ; CHECK-NEXT: movl %ebp, %esp ; CHECK-NEXT: popl %ebp -; CHECK-NEXT: .cfi_def_cfa %esp, 4 ; CHECK-NEXT: retl entry: %foo = alloca %struct.Foo, align 4 diff --git a/llvm/test/CodeGen/X86/pr9743.ll b/llvm/test/CodeGen/X86/pr9743.ll index ac3d457..73b3c7f 100644 --- a/llvm/test/CodeGen/X86/pr9743.ll +++ b/llvm/test/CodeGen/X86/pr9743.ll @@ -11,5 +11,4 @@ define void @f() { ; CHECK-NEXT: movq %rsp, %rbp ; CHECK-NEXT: .cfi_def_cfa_register %rbp ; CHECK-NEXT: popq %rbp -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/X86/push-cfi-debug.ll b/llvm/test/CodeGen/X86/push-cfi-debug.ll index 01fa12e..7f438e3 100644 --- a/llvm/test/CodeGen/X86/push-cfi-debug.ll +++ b/llvm/test/CodeGen/X86/push-cfi-debug.ll @@ -23,10 +23,8 @@ declare x86_stdcallcc void @stdfoo(i32, i32) #0 ; CHECK: .cfi_adjust_cfa_offset 4 ; CHECK: calll stdfoo ; CHECK: .cfi_adjust_cfa_offset -8 -; CHECK: addl $8, %esp +; CHECK: addl $20, %esp ; CHECK: .cfi_adjust_cfa_offset -8 -; CHECK: addl $12, %esp -; CHECK: .cfi_def_cfa_offset 4 define void @test1() #0 !dbg !4 { entry: tail call void @foo(i32 1, i32 2) #1, !dbg !10 diff --git a/llvm/test/CodeGen/X86/push-cfi-obj.ll b/llvm/test/CodeGen/X86/push-cfi-obj.ll index 2c9ec33..33291ec 100644 --- a/llvm/test/CodeGen/X86/push-cfi-obj.ll +++ b/llvm/test/CodeGen/X86/push-cfi-obj.ll @@ -12,7 +12,7 @@ ; LINUX-NEXT: ] ; LINUX-NEXT: Address: 0x0 ; LINUX-NEXT: Offset: 0x68 -; LINUX-NEXT: Size: 72 +; LINUX-NEXT: Size: 64 ; LINUX-NEXT: Link: 0 ; LINUX-NEXT: Info: 0 ; LINUX-NEXT: AddressAlignment: 4 @@ -22,9 +22,8 @@ ; LINUX-NEXT: SectionData ( ; LINUX-NEXT: 0000: 1C000000 00000000 017A504C 5200017C |.........zPLR..|| ; LINUX-NEXT: 0010: 08070000 00000000 1B0C0404 88010000 |................| -; LINUX-NEXT: 0020: 24000000 24000000 00000000 1D000000 |$...$...........| +; LINUX-NEXT: 0020: 1C000000 24000000 00000000 1D000000 |....$...........| ; LINUX-NEXT: 0030: 04000000 00410E08 8502420D 05432E10 |.....A....B..C..| -; LINUX-NEXT: 0040: 540C0404 410C0508 |T...A...| ; LINUX-NEXT: ) declare i32 @__gxx_personality_v0(...) 
@@ -36,7 +35,7 @@ entry: to label %continue unwind label %cleanup continue: ret void -cleanup: +cleanup: landingpad { i8*, i32 } cleanup ret void diff --git a/llvm/test/CodeGen/X86/push-cfi.ll b/llvm/test/CodeGen/X86/push-cfi.ll index 44f8bf8..91e579a 100644 --- a/llvm/test/CodeGen/X86/push-cfi.ll +++ b/llvm/test/CodeGen/X86/push-cfi.ll @@ -74,9 +74,8 @@ cleanup: ; LINUX-NEXT: pushl $1 ; LINUX-NEXT: .cfi_adjust_cfa_offset 4 ; LINUX-NEXT: call -; LINUX-NEXT: addl $16, %esp +; LINUX-NEXT: addl $28, %esp ; LINUX: .cfi_adjust_cfa_offset -16 -; LINUX: addl $12, %esp ; DARWIN-NOT: .cfi_escape ; DARWIN-NOT: pushl define void @test2_nofp() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { diff --git a/llvm/test/CodeGen/X86/return-ext.ll b/llvm/test/CodeGen/X86/return-ext.ll index c66e518..ef160f4 100644 --- a/llvm/test/CodeGen/X86/return-ext.ll +++ b/llvm/test/CodeGen/X86/return-ext.ll @@ -106,7 +106,6 @@ entry: ; CHECK: call ; CHECK-NEXT: movzbl ; CHECK-NEXT: {{pop|add}} -; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } @@ -121,7 +120,6 @@ entry: ; CHECK: call ; CHECK-NEXT: movzbl ; CHECK-NEXT: {{pop|add}} -; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } @@ -136,6 +134,5 @@ entry: ; CHECK: call ; CHECK-NEXT: movzwl ; CHECK-NEXT: {{pop|add}} -; CHECK-NEXT: .cfi_def_cfa_offset {{4|8}} ; CHECK-NEXT: ret } diff --git a/llvm/test/CodeGen/X86/rtm.ll b/llvm/test/CodeGen/X86/rtm.ll index a1feeb5..bd2d3e5 100644 --- a/llvm/test/CodeGen/X86/rtm.ll +++ b/llvm/test/CodeGen/X86/rtm.ll @@ -75,7 +75,6 @@ define void @f2(i32 %x) nounwind uwtable { ; X64-NEXT: xabort $1 ; X64-NEXT: callq f1 ; X64-NEXT: popq %rax -; X64-NEXT: .cfi_def_cfa_offset 8 ; X64-NEXT: retq entry: %x.addr = alloca i32, align 4 diff --git a/llvm/test/CodeGen/X86/select-mmx.ll b/llvm/test/CodeGen/X86/select-mmx.ll index 7ad8b6f..795990e 100644 --- a/llvm/test/CodeGen/X86/select-mmx.ll +++ b/llvm/test/CodeGen/X86/select-mmx.ll @@ -48,7 +48,6 @@ define i64 @test47(i64 %arg) { ; I32-NEXT: movl {{[0-9]+}}(%esp), %edx ; I32-NEXT: movl %ebp, %esp ; I32-NEXT: popl %ebp -; I32-NEXT: .cfi_def_cfa %esp, 4 ; I32-NEXT: retl %cond = icmp eq i64 %arg, 0 %slct = select i1 %cond, x86_mmx bitcast (i64 7 to x86_mmx), x86_mmx bitcast (i64 0 to x86_mmx) @@ -101,7 +100,6 @@ define i64 @test49(i64 %arg, i64 %x, i64 %y) { ; I32-NEXT: movl {{[0-9]+}}(%esp), %edx ; I32-NEXT: movl %ebp, %esp ; I32-NEXT: popl %ebp -; I32-NEXT: .cfi_def_cfa %esp, 4 ; I32-NEXT: retl %cond = icmp eq i64 %arg, 0 %xmmx = bitcast i64 %x to x86_mmx diff --git a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll index d2d23ea..20c77a4 100644 --- a/llvm/test/CodeGen/X86/setcc-lowering.ll +++ b/llvm/test/CodeGen/X86/setcc-lowering.ll @@ -90,7 +90,6 @@ define void @pr26232(i64 %a, <16 x i1> %b) { ; KNL-32-NEXT: jne .LBB1_1 ; KNL-32-NEXT: # BB#2: # %for_exit600 ; KNL-32-NEXT: popl %esi -; KNL-32-NEXT: .cfi_def_cfa_offset 4 ; KNL-32-NEXT: retl allocas: br label %for_test11.preheader diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll index a276720..79cf0f2 100644 --- a/llvm/test/CodeGen/X86/shrink_vmul.ll +++ b/llvm/test/CodeGen/X86/shrink_vmul.ll @@ -31,7 +31,6 @@ define void @mul_2xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movq %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi8: @@ 
-90,7 +89,6 @@ define void @mul_4xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movdqu %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_4xi8: @@ -150,7 +148,6 @@ define void @mul_8xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: movdqu %xmm1, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_8xi8: @@ -223,7 +220,6 @@ define void @mul_16xi8(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: movdqu %xmm4, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm3, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_16xi8: @@ -292,7 +288,6 @@ define void @mul_2xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movq %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi16: @@ -347,7 +342,6 @@ define void @mul_4xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movdqu %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_4xi16: @@ -405,7 +399,6 @@ define void @mul_8xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i64 ; X86-NEXT: movdqu %xmm1, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_8xi16: @@ -476,7 +469,6 @@ define void @mul_16xi16(i8* nocapture readonly %a, i8* nocapture readonly %b, i6 ; X86-NEXT: movdqu %xmm2, 16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_16xi16: @@ -549,7 +541,6 @@ define void @mul_2xi8_sext(i8* nocapture readonly %a, i8* nocapture readonly %b, ; X86-NEXT: psrad $16, %xmm0 ; X86-NEXT: movq %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi8_sext: @@ -615,7 +606,6 @@ define void @mul_2xi8_sext_zext(i8* nocapture readonly %a, i8* nocapture readonl ; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] ; X86-NEXT: movq %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi8_sext_zext: @@ -676,7 +666,6 @@ define void @mul_2xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly %b ; X86-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] ; X86-NEXT: movq %xmm1, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi16_sext: @@ -744,7 +733,6 @@ define void @mul_2xi16_sext_zext(i8* nocapture readonly %a, i8* nocapture readon ; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] ; X86-NEXT: movq %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_2xi16_sext_zext: @@ -825,7 +813,6 @@ define void @mul_16xi16_sext(i8* nocapture readonly %a, i8* nocapture readonly % ; X86-NEXT: movdqu %xmm2, 
16(%esi,%ecx,4) ; X86-NEXT: movdqu %xmm0, (%esi,%ecx,4) ; X86-NEXT: popl %esi -; X86-NEXT: .cfi_def_cfa_offset 4 ; X86-NEXT: retl ; ; X64-LABEL: mul_16xi16_sext: diff --git a/llvm/test/CodeGen/X86/statepoint-call-lowering.ll b/llvm/test/CodeGen/X86/statepoint-call-lowering.ll index d80c87b..bd2dd53 100644 --- a/llvm/test/CodeGen/X86/statepoint-call-lowering.ll +++ b/llvm/test/CodeGen/X86/statepoint-call-lowering.ll @@ -83,7 +83,6 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" { ; CHECK: callq return_i1 ; CHECK-NEXT: .Ltmp5: ; CHECK-NEXT: popq %rcx -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* %a) diff --git a/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll b/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll index 90f2002..b88ca03 100644 --- a/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll +++ b/llvm/test/CodeGen/X86/statepoint-gctransition-call-lowering.ll @@ -69,7 +69,6 @@ define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" { ; CHECK: callq return_i1 ; CHECK-NEXT: .Ltmp4: ; CHECK-NEXT: popq %rcx -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq entry: %safepoint_token = tail call token (i64, i32, i1 ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 1, i32 0, i32 0, i32 addrspace(1)* %a) diff --git a/llvm/test/CodeGen/X86/statepoint-invoke.ll b/llvm/test/CodeGen/X86/statepoint-invoke.ll index 5aa9025..784b932 100644 --- a/llvm/test/CodeGen/X86/statepoint-invoke.ll +++ b/llvm/test/CodeGen/X86/statepoint-invoke.ll @@ -142,7 +142,6 @@ normal_return: ; CHECK-LABEL: %normal_return ; CHECK: xorl %eax, %eax ; CHECK-NEXT: popq - ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %null.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 13, i32 13) %undef.relocated = call coldcc i64 addrspace(1)* @llvm.experimental.gc.relocate.p1i64(token %sp1, i32 14, i32 14) @@ -170,7 +169,6 @@ entry: normal_return: ; CHECK: leaq ; CHECK-NEXT: popq - ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %aa.rel = call coldcc i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %sp, i32 13, i32 13) %aa.converted = bitcast i32 addrspace(1)* %aa.rel to i64 addrspace(1)* @@ -179,7 +177,6 @@ normal_return: exceptional_return: ; CHECK: movl $15 ; CHECK-NEXT: popq - ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq %landing_pad = landingpad token cleanup diff --git a/llvm/test/CodeGen/X86/throws-cfi-fp.ll b/llvm/test/CodeGen/X86/throws-cfi-fp.ll deleted file mode 100644 index bacd965..0000000 --- a/llvm/test/CodeGen/X86/throws-cfi-fp.ll +++ /dev/null @@ -1,98 +0,0 @@ -; RUN: llc %s -o - | FileCheck %s - -; ModuleID = 'throws-cfi-fp.cpp' -source_filename = "throws-cfi-fp.cpp" -target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-unknown-linux-gnu" - -$__clang_call_terminate = comdat any - -@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1 -@_ZTIi = external constant i8* -@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00" - -; Function Attrs: uwtable -define void @_Z6throwsv() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { - -; CHECK-LABEL: _Z6throwsv: -; CHECK: popq %rbp -; CHECK-NEXT: .cfi_def_cfa %rsp, 8 -; 
CHECK-NEXT: retq -; CHECK-NEXT: .LBB0_1: -; CHECK-NEXT: .cfi_def_cfa %rbp, 16 - -entry: - %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1 - br i1 %.b5, label %if.then, label %try.cont - -if.then: ; preds = %entry - %exception = tail call i8* @__cxa_allocate_exception(i64 4) - %0 = bitcast i8* %exception to i32* - store i32 1, i32* %0, align 16 - invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) - to label %unreachable unwind label %lpad - -lpad: ; preds = %if.then - %1 = landingpad { i8*, i32 } - catch i8* null - %2 = extractvalue { i8*, i32 } %1, 0 - %3 = tail call i8* @__cxa_begin_catch(i8* %2) - %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0)) - invoke void @__cxa_rethrow() - to label %unreachable unwind label %lpad1 - -lpad1: ; preds = %lpad - %4 = landingpad { i8*, i32 } - cleanup - invoke void @__cxa_end_catch() - to label %eh.resume unwind label %terminate.lpad - -try.cont: ; preds = %entry - ret void - -eh.resume: ; preds = %lpad1 - resume { i8*, i32 } %4 - -terminate.lpad: ; preds = %lpad1 - %5 = landingpad { i8*, i32 } - catch i8* null - %6 = extractvalue { i8*, i32 } %5, 0 - tail call void @__clang_call_terminate(i8* %6) - unreachable - -unreachable: ; preds = %lpad, %if.then - unreachable -} - -declare i8* @__cxa_allocate_exception(i64) - -declare void @__cxa_throw(i8*, i8*, i8*) - -declare i32 @__gxx_personality_v0(...) - -declare i8* @__cxa_begin_catch(i8*) - -declare void @__cxa_rethrow() - -declare void @__cxa_end_catch() - -; Function Attrs: noinline noreturn nounwind -declare void @__clang_call_terminate(i8*) - -declare void @_ZSt9terminatev() - -; Function Attrs: nounwind -declare i32 @puts(i8* nocapture readonly) - -attributes #0 = { "no-frame-pointer-elim"="true" } - -!llvm.dbg.cu = !{!2} -!llvm.module.flags = !{!8, !9, !10} - -!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 6.0.0 (https://github.com/llvm-mirror/clang.git 316ebefb7fff8ad324a08a694347500b6cd7c95f) (https://github.com/llvm-mirror/llvm.git dcae9be81fc17cdfbe989402354d3c8ecd0a2c79)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5) -!3 = !DIFile(filename: "throws-cfi-fp.cpp", directory: "epilogue-dwarf/test") -!4 = !{} -!5 = !{} -!8 = !{i32 2, !"Dwarf Version", i32 4} -!9 = !{i32 2, !"Debug Info Version", i32 3} -!10 = !{i32 1, !"wchar_size", i32 4} diff --git a/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll b/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll deleted file mode 100644 index 1483e6b..0000000 --- a/llvm/test/CodeGen/X86/throws-cfi-no-fp.ll +++ /dev/null @@ -1,97 +0,0 @@ -; RUN: llc %s -o - | FileCheck %s - -; ModuleID = 'throws-cfi-no-fp.cpp' -source_filename = "throws-cfi-no-fp.cpp" -target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" -target triple = "x86_64-unknown-linux-gnu" - -$__clang_call_terminate = comdat any - -@_ZL11ShouldThrow = internal unnamed_addr global i1 false, align 1 -@_ZTIi = external constant i8* -@str = private unnamed_addr constant [20 x i8] c"Threw an exception!\00" - -; Function Attrs: uwtable -define void @_Z6throwsv() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { - -; CHECK-LABEL: _Z6throwsv: -; CHECK: popq %rbx -; CHECK-NEXT: .cfi_def_cfa_offset 8 -; CHECK-NEXT: retq -; CHECK-NEXT: .LBB0_1: -; CHECK-NEXT: .cfi_def_cfa_offset 16 - -entry: - %.b5 = load i1, i1* @_ZL11ShouldThrow, align 1 - br i1 %.b5, label %if.then, label %try.cont - -if.then: ; preds = %entry - %exception = tail 
call i8* @__cxa_allocate_exception(i64 4) - %0 = bitcast i8* %exception to i32* - store i32 1, i32* %0, align 16 - invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) - to label %unreachable unwind label %lpad - -lpad: ; preds = %if.then - %1 = landingpad { i8*, i32 } - catch i8* null - %2 = extractvalue { i8*, i32 } %1, 0 - %3 = tail call i8* @__cxa_begin_catch(i8* %2) - %puts = tail call i32 @puts(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @str, i64 0, i64 0)) - invoke void @__cxa_rethrow() #4 - to label %unreachable unwind label %lpad1 - -lpad1: ; preds = %lpad - %4 = landingpad { i8*, i32 } - cleanup - invoke void @__cxa_end_catch() - to label %eh.resume unwind label %terminate.lpad - -try.cont: ; preds = %entry - ret void - -eh.resume: ; preds = %lpad1 - resume { i8*, i32 } %4 - -terminate.lpad: ; preds = %lpad1 - %5 = landingpad { i8*, i32 } - catch i8* null - %6 = extractvalue { i8*, i32 } %5, 0 - tail call void @__clang_call_terminate(i8* %6) - unreachable - -unreachable: ; preds = %lpad, %if.then - unreachable -} - -declare i8* @__cxa_allocate_exception(i64) - -declare void @__cxa_throw(i8*, i8*, i8*) - -declare i32 @__gxx_personality_v0(...) - -declare i8* @__cxa_begin_catch(i8*) - -declare void @__cxa_rethrow() - -declare void @__cxa_end_catch() - -; Function Attrs: noinline noreturn nounwind -declare void @__clang_call_terminate(i8*) - -declare void @_ZSt9terminatev() - - -; Function Attrs: nounwind -declare i32 @puts(i8* nocapture readonly) - -!llvm.dbg.cu = !{!2} -!llvm.module.flags = !{!8, !9, !10} - -!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !3, producer: "clang version 6.0.0 (https://github.com/llvm-mirror/clang.git 316ebefb7fff8ad324a08a694347500b6cd7c95f) (https://github.com/llvm-mirror/llvm.git dcae9be81fc17cdfbe989402354d3c8ecd0a2c79)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, globals: !5) -!3 = !DIFile(filename: "throws-cfi-no-fp.cpp", directory: "epilogue-dwarf/test") -!4 = !{} -!5 = !{} -!8 = !{i32 2, !"Dwarf Version", i32 4} -!9 = !{i32 2, !"Debug Info Version", i32 3} -!10 = !{i32 1, !"wchar_size", i32 4} diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll index 25377f2..cd4b237 100644 --- a/llvm/test/CodeGen/X86/vector-sext.ll +++ b/llvm/test/CodeGen/X86/vector-sext.ll @@ -3333,17 +3333,11 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) { ; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: popq %rbx -; AVX1-NEXT: .cfi_def_cfa_offset 48 ; AVX1-NEXT: popq %r12 -; AVX1-NEXT: .cfi_def_cfa_offset 40 ; AVX1-NEXT: popq %r13 -; AVX1-NEXT: .cfi_def_cfa_offset 32 ; AVX1-NEXT: popq %r14 -; AVX1-NEXT: .cfi_def_cfa_offset 24 ; AVX1-NEXT: popq %r15 -; AVX1-NEXT: .cfi_def_cfa_offset 16 ; AVX1-NEXT: popq %rbp -; AVX1-NEXT: .cfi_def_cfa_offset 8 ; AVX1-NEXT: retq ; ; AVX2-LABEL: load_sext_16i1_to_16i16: @@ -3430,17 +3424,11 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) { ; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: popq %rbx -; AVX2-NEXT: .cfi_def_cfa_offset 48 ; AVX2-NEXT: popq %r12 -; AVX2-NEXT: .cfi_def_cfa_offset 40 ; AVX2-NEXT: popq %r13 -; AVX2-NEXT: .cfi_def_cfa_offset 32 ; AVX2-NEXT: popq %r14 -; AVX2-NEXT: .cfi_def_cfa_offset 24 ; AVX2-NEXT: popq %r15 -; AVX2-NEXT: .cfi_def_cfa_offset 16 ; AVX2-NEXT: popq %rbp -; AVX2-NEXT: .cfi_def_cfa_offset 8 ; AVX2-NEXT: retq ; ; AVX512F-LABEL: 
load_sext_16i1_to_16i16: @@ -4836,7 +4824,6 @@ define i32 @sext_2i8_to_i32(<16 x i8> %A) nounwind uwtable readnone ssp { ; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0 ; X32-SSE41-NEXT: movd %xmm0, %eax ; X32-SSE41-NEXT: popl %ecx -; X32-SSE41-NEXT: .cfi_def_cfa_offset 4 ; X32-SSE41-NEXT: retl entry: %Shuf = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll index b107b60..efbe558 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll @@ -619,7 +619,6 @@ define <64 x i8> @test_mm512_mask_blend_epi8(<64 x i8> %A, <64 x i8> %W){ ; KNL32-NEXT: vpblendvb %ymm3, 8(%ebp), %ymm1, %ymm1 ; KNL32-NEXT: movl %ebp, %esp ; KNL32-NEXT: popl %ebp -; KNL32-NEXT: .cfi_def_cfa %esp, 4 ; KNL32-NEXT: retl entry: %0 = shufflevector <64 x i8> %A, <64 x i8> %W, <64 x i32> @@ -660,7 +659,6 @@ define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){ ; KNL32-NEXT: vpblendw {{.*#+}} ymm1 = mem[0],ymm1[1],mem[2],ymm1[3],mem[4],ymm1[5],mem[6],ymm1[7],mem[8],ymm1[9],mem[10],ymm1[11],mem[12],ymm1[13],mem[14],ymm1[15] ; KNL32-NEXT: movl %ebp, %esp ; KNL32-NEXT: popl %ebp -; KNL32-NEXT: .cfi_def_cfa %esp, 4 ; KNL32-NEXT: retl entry: %0 = shufflevector <32 x i16> %A, <32 x i16> %W, <32 x i32> diff --git a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll index 0e69034..8d05729 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-v1.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-v1.ll @@ -630,7 +630,6 @@ define i64 @shuf64i1_zero(i64 %a) { ; AVX512F-NEXT: orq %rcx, %rax ; AVX512F-NEXT: movq %rbp, %rsp ; AVX512F-NEXT: popq %rbp -; AVX512F-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -663,7 +662,6 @@ define i64 @shuf64i1_zero(i64 %a) { ; AVX512VL-NEXT: orq %rcx, %rax ; AVX512VL-NEXT: movq %rbp, %rsp ; AVX512VL-NEXT: popq %rbp -; AVX512VL-NEXT: .cfi_def_cfa %rsp, 8 ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/wide-integer-cmp.ll b/llvm/test/CodeGen/X86/wide-integer-cmp.ll index 9bd53c6..97460b3 100644 --- a/llvm/test/CodeGen/X86/wide-integer-cmp.ll +++ b/llvm/test/CodeGen/X86/wide-integer-cmp.ll @@ -105,13 +105,10 @@ define i32 @test_wide(i128 %a, i128 %b) { ; CHECK-NEXT: # BB#1: # %bb1 ; CHECK-NEXT: movl $1, %eax ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl ; CHECK-NEXT: .LBB4_2: # %bb2 -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: movl $2, %eax ; CHECK-NEXT: popl %esi -; CHECK-NEXT: .cfi_def_cfa_offset 4 ; CHECK-NEXT: retl entry: %cmp = icmp slt i128 %a, %b diff --git a/llvm/test/CodeGen/X86/x86-framelowering-trap.ll b/llvm/test/CodeGen/X86/x86-framelowering-trap.ll index 89f4528..f1590ab 100644 --- a/llvm/test/CodeGen/X86/x86-framelowering-trap.ll +++ b/llvm/test/CodeGen/X86/x86-framelowering-trap.ll @@ -6,7 +6,6 @@ target triple = "x86_64-unknown-linux-gnu" ; CHECK: pushq ; CHECK: ud2 ; CHECK-NEXT: popq -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq define void @bar() { entry: diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll index bc6a6ea..acad9f7 100644 --- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll +++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll @@ -1816,7 +1816,6 @@ define void @interleaved_store_vf64_i8_stride4(<64 x i8> %a, <64 x i8> %b, <64 x ; AVX1-NEXT: vmovaps %ymm9, 64(%rdi) ; AVX1-NEXT: vmovaps 
%ymm8, (%rdi) ; AVX1-NEXT: addq $24, %rsp -; AVX1-NEXT: .cfi_def_cfa_offset 8 ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll index 929dafb..763d764 100644 --- a/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll +++ b/llvm/test/CodeGen/X86/x86-no_caller_saved_registers-preserve.ll @@ -20,7 +20,6 @@ define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 { ; CHECK-NEXT: movl $4, %eax ; CHECK-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload ; CHECK-NEXT: popq %rdx -; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq call void asm sideeffect "", "~{rax},~{rdx},~{xmm1},~{rdi},~{rsi},~{xmm0}"() ret i32 4