/// register.
///
/// X86 Example:
-/// %YMM0<def> = ...
-/// %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0)
+/// %ymm0<def> = ...
+/// %xmm0<def> = ... (Kills %xmm0, all %xmm0s sub-registers, and %ymm0)
///
-/// %YMM0<def> = ...
-/// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
+/// %ymm0<def> = ...
+/// %xmm0<def> = ..., %ymm0<imp-use> (%ymm0 and all its sub-registers are alive)
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
/// substPhysReg - Substitute the current register with the physical register
/// Reg, taking any existing SubReg into account. For instance,
- /// substPhysReg(%EAX) will change %reg1024:sub_8bit to %AL.
+ /// substPhysReg(%eax) will change %reg1024:sub_8bit to %al.
///
void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
/// %noreg - NoRegister
/// %vreg5 - a virtual register.
/// %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
-/// %EAX - a physical register
+/// %eax - a physical register
/// %physreg17 - a physical register when no TRI instance given.
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
///
/// Register units are named after their root registers:
///
-/// AL - Single root.
-/// FP0~ST7 - Dual roots.
+/// al - Single root.
+/// fp0~st7 - Dual roots.
///
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
// FIXME: The issue with predicated instruction is more complex. We are being
// conservative here because the kill markers cannot be trusted after
// if-conversion:
- // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // %r6<def> = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
// ...
- // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
- // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
- // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ // STR %r0, %r6<kill>, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
+ // %r6<def> = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
+ // STR %r0, %r6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
//
// The first R6 kill is not really a kill since it's killed by a predicated
// instruction which may not be executed. The second R6 def may or may not
// FIXME: The issue with predicated instruction is more complex. We are being
// conservative here because the kill markers cannot be trusted after
// if-conversion:
- // %R6<def> = LDR %SP, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // %r6<def> = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
// ...
- // STR %R0, %R6<kill>, %reg0, 0, pred:0, pred:%CPSR; mem:ST4[%395]
- // %R6<def> = LDR %SP, %reg0, 100, pred:0, pred:%CPSR; mem:LD4[FixedStack12]
- // STR %R0, %R6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ // STR %r0, %r6<kill>, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
+ // %r6<def> = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
+ // STR %r0, %r6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
//
// The first R6 kill is not really a kill since it's killed by a predicated
// instruction which may not be executed. The second R6 def may or may not
if (DstSubReg == InsReg) {
// No need to insert an identity copy instruction.
// Watch out for case like this:
- // %RAX<def> = SUBREG_TO_REG 0, %EAX<kill>, 3
- // We must leave %RAX live.
+ // %rax<def> = SUBREG_TO_REG 0, %eax<kill>, 3
+ // We must leave %rax live.
if (DstReg != InsReg) {
MI->setDesc(TII->get(TargetOpcode::KILL));
MI->RemoveOperand(3); // SubIdx
// Starting with a code fragment like:
//
- // test %RAX, %RAX
+ // test %rax, %rax
// jne LblNotNull
//
// LblNull:
// Inst0
// Inst1
// ...
- // Def = Load (%RAX + <offset>)
+ // Def = Load (%rax + <offset>)
// ...
//
//
// we want to end up with
//
- // Def = FaultingLoad (%RAX + <offset>), LblNull
+ // Def = FaultingLoad (%rax + <offset>), LblNull
// jmp LblNotNull ;; explicit or fallthrough
//
// LblNotNull:
//
// To see why this is legal, consider the two possibilities:
//
- // 1. %RAX is null: since we constrain <offset> to be less than PageSize, the
+ // 1. %rax is null: since we constrain <offset> to be less than PageSize, the
// load instruction dereferences the null page, causing a segmentation
// fault.
//
- // 2. %RAX is not null: in this case we know that the load cannot fault, as
+ // 2. %rax is not null: in this case we know that the load cannot fault, as
// otherwise the load would've faulted in the original program too and the
// original program would've been undefined.
//
// Check if any of the regunits are live beyond the end of RI. That could
// happen when a physreg is defined as a copy of a virtreg:
//
- // %EAX = COPY %vreg5
- // FOO %vreg5 <--- MI, cancel kill because %EAX is live.
- // BAR %EAX<kill>
+ // %eax = COPY %vreg5
+ // FOO %vreg5 <--- MI, cancel kill because %eax is live.
+ // BAR %eax<kill>
//
- // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
+ // There should be no kill flag on FOO when %vreg5 is rewritten as %eax.
for (auto &RUP : RU) {
const LiveRange &RURange = *RUP.first;
LiveRange::const_iterator &I = RUP.second;
// Go through implicit defs of CSMI and MI, and clear the kill flags on
// their uses in all the instructions between CSMI and MI.
// We might have made some of the kill flags redundant, consider:
- // subs ... %NZCV<imp-def> <- CSMI
- // csinc ... %NZCV<imp-use,kill> <- this kill flag isn't valid anymore
- // subs ... %NZCV<imp-def> <- MI, to be eliminated
- // csinc ... %NZCV<imp-use,kill>
+ // subs ... %nzcv<imp-def> <- CSMI
+ // csinc ... %nzcv<imp-use,kill> <- this kill flag isn't valid anymore
+ // subs ... %nzcv<imp-def> <- MI, to be eliminated
+ // csinc ... %nzcv<imp-use,kill>
// Since we eliminated MI, and reused a register imp-def'd by CSMI
- // (here %NZCV), that register, if it was killed before MI, should have
+ // (here %nzcv), that register, if it was killed before MI, should have
// that kill flag removed, because its lifetime was extended.
if (CSMI->getParent() == MI->getParent()) {
for (MachineBasicBlock::iterator II = CSMI, IE = MI; II != IE; ++II)
// The two copies cancel out and the source of the first copy
// hasn't been overridden, eliminate the second one. e.g.
- // %ECX<def> = COPY %EAX
- // ... nothing clobbered EAX.
- // %EAX<def> = COPY %ECX
+ // %ecx<def> = COPY %eax
+ // ... nothing clobbered eax.
+ // %eax<def> = COPY %ecx
// =>
- // %ECX<def> = COPY %EAX
+ // %ecx<def> = COPY %eax
//
// or
//
- // %ECX<def> = COPY %EAX
- // ... nothing clobbered EAX.
- // %ECX<def> = COPY %EAX
+ // %ecx<def> = COPY %eax
+ // ... nothing clobbered eax.
+ // %ecx<def> = COPY %eax
// =>
- // %ECX<def> = COPY %EAX
+ // %ecx<def> = COPY %eax
if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
continue;
// BB#1: derived from LLVM BB %bb4.preheader
// Predecessors according to CFG: BB#0
// ...
- // %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
+ // %reg16385<def> = DEC64_32r %reg16437, %eflags<imp-def,dead>
// ...
- // JE_4 <BB#37>, %EFLAGS<imp-use>
+ // JE_4 <BB#37>, %eflags<imp-use>
// Successors according to CFG: BB#37 BB#2
//
// BB#2: derived from LLVM BB %bb.nph
unsigned DstReg = MI->getOperand(0).getReg();
unsigned SrcReg = MI->getOperand(1).getReg();
if (isNAPhysCopy(SrcReg) && TargetRegisterInfo::isVirtualRegister(DstReg)) {
- // %vreg = COPY %PHYSREG
+ // %vreg = COPY %physreg
// Avoid using a datastructure which can track multiple live non-allocatable
// phys->virt copies since LLVM doesn't seem to do this.
NAPhysToVirtMIs.insert({SrcReg, MI});
if (!(TargetRegisterInfo::isVirtualRegister(SrcReg) && isNAPhysCopy(DstReg)))
return false;
- // %PHYSREG = COPY %vreg
+ // %physreg = COPY %vreg
auto PrevCopy = NAPhysToVirtMIs.find(DstReg);
if (PrevCopy == NAPhysToVirtMIs.end()) {
// We can't remove the copy: there was an intervening clobber of the
// Track when a non-allocatable physical register is copied to a virtual
// register so that useless moves can be removed.
//
- // %PHYSREG is the map index; MI is the last valid `%vreg = COPY %PHYSREG`
- // without any intervening re-definition of %PHYSREG.
+ // %physreg is the map index; MI is the last valid `%vreg = COPY %physreg`
+ // without any intervening re-definition of %physreg.
DenseMap<unsigned, MachineInstr *> NAPhysToVirtMIs;
// Set of virtual registers that are copied from.
bb27 ...
...
%reg1037 = ADDri %reg1039, 1
- %reg1038 = ADDrs %reg1032, %reg1039, %NOREG, 10
+ %reg1038 = ADDrs %reg1032, %reg1039, %noreg, 10
Successors according to CFG: 0x8b03bf0 (#5)
bb76 (0x8b03bf0, LLVM BB @0x8b032d0, ID#5):
MachineInstr *CopyMI;
if (CP.isFlipped()) {
// Physreg is copied into vreg
- // %vregY = COPY %X
- // ... //< no other def of %X here
+ // %vregY = COPY %x
+ // ... //< no other def of %x here
// use %vregY
// =>
// ...
- // use %X
+ // use %x
CopyMI = MRI->getVRegDef(SrcReg);
} else {
// VReg is copied into physreg:
// %vregX = def
- // ... //< no other def or use of %Y here
- // %Y = COPY %vregX
+ // ... //< no other def or use of %y here
+ // %y = COPY %vregX
// =>
- // %Y = def
+ // %y = def
// ...
if (!MRI->hasOneNonDBGUse(SrcReg)) {
DEBUG(dbgs() << "\t\tMultiple vreg uses!\n");
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg);
else if (TargetRegisterInfo::isVirtualRegister(Reg))
OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Reg);
- else if (TRI && Reg < TRI->getNumRegs())
- OS << '%' << TRI->getName(Reg);
- else
+ else if (TRI && Reg < TRI->getNumRegs()) {
+ OS << '%';
+ printLowerCase(TRI->getName(Reg), OS);
+ } else
OS << "%physreg" << Reg;
if (SubIdx) {
if (TRI)
// e.g.
// %reg1028<def> = EXTRACT_SUBREG %reg1027<kill>, 1
// %reg1029<def> = MOV8rr %reg1028
- // %reg1029<def> = SHR8ri %reg1029, 7, %EFLAGS<imp-def,dead>
+ // %reg1029<def> = SHR8ri %reg1029, 7, %eflags<imp-def,dead>
// insert => %reg1030<def> = MOV8rr %reg1028
- // %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %EFLAGS<imp-def,dead>
+ // %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %eflags<imp-def,dead>
// In this case, it might not be possible to coalesce the second MOV8rr
// instruction if the first one is coalesced. So it would be profitable to
// commute it:
// %reg1028<def> = EXTRACT_SUBREG %reg1027<kill>, 1
// %reg1029<def> = MOV8rr %reg1028
- // %reg1029<def> = SHR8ri %reg1029, 7, %EFLAGS<imp-def,dead>
+ // %reg1029<def> = SHR8ri %reg1029, 7, %eflags<imp-def,dead>
// insert => %reg1030<def> = MOV8rr %reg1029
- // %reg1030<def> = ADD8rr %reg1029<kill>, %reg1028<kill>, %EFLAGS<imp-def,dead>
+ // %reg1030<def> = ADD8rr %reg1029<kill>, %reg1028<kill>, %eflags<imp-def,dead>
if (!isPlainlyKilled(MI, regC, LIS))
return false;
// Ok, we have something like:
- // %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %EFLAGS<imp-def,dead>
+ // %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %eflags<imp-def,dead>
// let's see if it's worth commuting it.
// Look for situations like this:
++NumIdCopies;
// Copies like:
- // %R0 = COPY %R0<undef>
- // %AL = COPY %AL, %EAX<imp-def>
+ // %r0 = COPY %r0<undef>
+ // %al = COPY %al, %eax<imp-def>
// give us additional liveness information: The target (super-)register
// must not be valid before this point. Replace the COPY with a KILL
// instruction to maintain this information.
LiveIntervals *LIS) const {
// This is a bit of a hack. Consider this instruction:
//
- // %vreg0<def> = COPY %SP; GPR64all:%vreg0
+ // %vreg0<def> = COPY %sp; GPR64all:%vreg0
//
// We explicitly chose GPR64all for the virtual register so such a copy might
// be eliminated by RegisterCoalescer. However, that may not be possible, and
- // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
+ // %vreg0 may even spill. We can't spill %sp, and since it is in the GPR64all
// register class, TargetInstrInfo::foldMemoryOperand() is going to try.
//
// To prevent that, we are going to constrain the %vreg0 register class here.
// Handle the case where a copy is being spilled or filled but the source
// and destination register class don't match. For example:
//
- // %vreg0<def> = COPY %XZR; GPR64common:%vreg0
+ // %vreg0<def> = COPY %xzr; GPR64common:%vreg0
//
// In this case we can still safely fold away the COPY and generate the
// following spill code:
//
- // STRXui %XZR, <fi#0>
+ // STRXui %xzr, <fi#0>
//
// This also eliminates spilled cross register class COPYs (e.g. between x and
// d regs) of the same size. For example:
// Handle cases like spilling def of:
//
- // %vreg0:sub_32<def,read-undef> = COPY %WZR; GPR64common:%vreg0
+ // %vreg0:sub_32<def,read-undef> = COPY %wzr; GPR64common:%vreg0
//
// where the physical register source can be widened and stored to the full
// virtual reg destination stack slot, in this case producing:
//
- // STRXui %XZR, <fi#0>
+ // STRXui %xzr, <fi#0>
//
if (IsSpill && DstMO.isUndef() &&
TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
if (SExtIdx != -1) {
// Generate the sign extension for the proper result of the ldp.
// I.e., with X1, that would be:
- // %W1<def> = KILL %W1, %X1<imp-def>
- // %X1<def> = SBFMXri %X1<kill>, 0, 31
+ // %w1<def> = KILL %w1, %x1<imp-def>
+ // %x1<def> = SBFMXri %x1<kill>, 0, 31
MachineOperand &DstMO = MIB->getOperand(SExtIdx);
// Right now, DstMO has the extended register, since it comes from an
// extended opcode.
// to be caused by ALU instructions in the next instruction group that wrote
// to the $src_gpr registers of the VTX_READ.
// e.g.
- // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
- // %T2_X<def> = MOV %ZERO
+ // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
+ // %t2_x<def> = MOV %zero
// Adding this constraint prevents this from happening.
let Constraints = "$src_gpr.ptr = $dst_gpr";
}
// to be caused by ALU instructions in the next instruction group that wrote
// to the $src_gpr registers of the VTX_READ.
// e.g.
- // %T3_X<def> = VTX_READ_PARAM_32_eg %T2_X<kill>, 24
- // %T2_X<def> = MOV %ZERO
+ // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
+ // %t2_x<def> = MOV %zero
// Adding this constraint prevents this from happening.
let Constraints = "$src_gpr.ptr = $dst_gpr";
}
// Prevent folding operands backwards in the function. For example,
// the COPY opcode must not be replaced by 1 in this example:
//
- // %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
+ // %vreg3<def> = COPY %vgpr0; VGPR_32:%vreg3
// ...
- // %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
+ // %vgpr0<def> = V_MOV_B32_e32 1, %exec<imp-use>
MachineOperand &Dst = MI.getOperand(0);
if (Dst.isReg() &&
!TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
return;
- // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
+ // Lane means which subreg of %vgpra_vgprb_vgprc_vgprd is used.
// Note that subregs are packed, i.e. Lane==0 is the first bit set
// in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
// set, etc.
/// EXEC to update the predicates.
///
/// For example:
-/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
-/// %SGPR0 = SI_IF %VCC
-/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
-/// %SGPR0 = SI_ELSE %SGPR0
-/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
-/// SI_END_CF %SGPR0
+/// %vcc = V_CMP_GT_F32 %vgpr1, %vgpr2
+/// %sgpr0 = SI_IF %vcc
+/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0
+/// %sgpr0 = SI_ELSE %sgpr0
+/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0
+/// SI_END_CF %sgpr0
///
/// becomes:
///
-/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC // Save and update the exec mask
-/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC // Clear live bits from saved exec mask
+/// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc // Save and update the exec mask
+/// %sgpr0 = S_XOR_B64 %sgpr0, %exec // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0 // This instruction is an optional
/// // optimization which allows us to
/// // branch if all the bits of
/// // EXEC are zero.
-/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
+/// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0 // Do the IF block of the branch
///
/// label0:
-/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC // Restore the exec mask for the Then block
-/// %EXEC = S_XOR_B64 %SGPR0, %EXEC // Clear live bits from saved exec mask
+/// %sgpr0 = S_OR_SAVEEXEC_B64 %exec // Restore the exec mask for the Then block
+/// %exec = S_XOR_B64 %sgpr0, %exec // Clear live bits from saved exec mask
/// S_BRANCH_EXECZ label1 // Use our branch optimization
/// // instruction again.
-/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR // Do the THEN block
+/// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0 // Do the THEN block
/// label1:
-/// %EXEC = S_OR_B64 %EXEC, %SGPR0 // Re-enable saved exec mask bits
+/// %exec = S_OR_B64 %exec, %sgpr0 // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
if (!HasFP) {
if (SavedRegs.test(ARM::R7)) {
--RegDeficit;
- DEBUG(dbgs() << "%R7 is saved low register, RegDeficit = "
+ DEBUG(dbgs() << "%r7 is saved low register, RegDeficit = "
<< RegDeficit << "\n");
} else {
AvailableRegs.push_back(ARM::R7);
DEBUG(dbgs()
- << "%R7 is non-saved low register, adding to AvailableRegs\n");
+ << "%r7 is non-saved low register, adding to AvailableRegs\n");
}
}
MF.getFrameInfo().isReturnAddressTaken())) {
if (SavedRegs.test(ARM::LR)) {
--RegDeficit;
- DEBUG(dbgs() << "%LR is saved register, RegDeficit = " << RegDeficit
+ DEBUG(dbgs() << "%lr is saved register, RegDeficit = " << RegDeficit
<< "\n");
} else {
AvailableRegs.push_back(ARM::LR);
- DEBUG(dbgs() << "%LR is not saved, adding to AvailableRegs\n");
+ DEBUG(dbgs() << "%lr is not saved, adding to AvailableRegs\n");
}
}
if (OddReg == EvenReg && EvenDeadKill) {
// If the two source operands are the same, the kill marker is
// probably on the first one. e.g.
- // t2STRDi8 %R5<kill>, %R5, %R9<kill>, 0, 14, %reg0
+ // t2STRDi8 %r5<kill>, %r5, %r9<kill>, 0, 14, %reg0
EvenDeadKill = false;
OddDeadKill = true;
}
}
}
// Defs and clobbers can overlap, e.g.
- // %D0<def,dead> = COPY %vreg5, %R0<imp-def>, %R1<imp-def>
+ // %d0<def,dead> = COPY %vreg5, %r0<imp-def>, %r1<imp-def>
for (RegisterRef R : Defs)
Clobbers.erase(R);
{
const MachineOperand &VO = MI.getOperand(1);
// The operand of CONST32 can be a blockaddress, e.g.
- // %vreg0<def> = CONST32 <blockaddress(@eat, %L)>
+ // %vreg0<def> = CONST32 <blockaddress(@eat, %l)>
// Do this check for all instructions for safety.
if (!VO.isImm())
return false;
BrI.setDesc(JD);
while (BrI.getNumOperands() > 0)
BrI.RemoveOperand(0);
- // This ensures that all implicit operands (e.g. %R31<imp-def>, etc)
+ // This ensures that all implicit operands (e.g. %r31<imp-def>, etc)
// are present in the rewritten branch.
for (auto &Op : NI->operands())
BrI.addOperand(Op);
// kill flag for a register (a removeRegisterKilled() analogous to
// addRegisterKilled) that handles aliased register correctly.
// * or has a killed aliased register use of I1's use reg
- // %D4<def> = A2_tfrpi 16
- // %R6<def> = A2_tfr %R9
- // %R8<def> = KILL %R8, %D4<imp-use,kill>
+ // %d4<def> = A2_tfrpi 16
+ // %r6<def> = A2_tfr %r9
+ // %r8<def> = KILL %r8, %d4<imp-use,kill>
// If we want to move R6 = across the KILL instruction we would have
- // to remove the %D4<imp-use,kill> operand. For now, we are
+ // to remove the %d4<imp-use,kill> operand. For now, we are
// conservative and disallow the move.
// we can't move I1 across it.
if (MI.isDebugValue()) {
//
// %vreg40<def> = L2_loadrub_io %vreg39<kill>, 1
// %vreg41<def> = S2_tstbit_i %vreg40<kill>, 0
-// J2_jumpt %vreg41<kill>, <BB#5>, %PC<imp-def,dead>
-// J2_jump <BB#4>, %PC<imp-def,dead>
+// J2_jumpt %vreg41<kill>, <BB#5>, %pc<imp-def,dead>
+// J2_jump <BB#4>, %pc<imp-def,dead>
// Successors according to CFG: BB#4(62) BB#5(62)
//
// BB#4: derived from LLVM BB %if.then
// %vreg12<def> = PHI %vreg6, <BB#3>, %vreg11, <BB#4>
// %vreg13<def> = A2_addp %vreg7, %vreg12
// %vreg42<def> = C2_cmpeqi %vreg9, 10
-// J2_jumpf %vreg42<kill>, <BB#3>, %PC<imp-def,dead>
-// J2_jump <BB#6>, %PC<imp-def,dead>
+// J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+// J2_jump <BB#6>, %pc<imp-def,dead>
// Successors according to CFG: BB#6(4) BB#3(124)
//
// would become:
// %vreg46<def> = PS_pselect %vreg41, %vreg6, %vreg11
// %vreg13<def> = A2_addp %vreg7, %vreg46
// %vreg42<def> = C2_cmpeqi %vreg9, 10
-// J2_jumpf %vreg42<kill>, <BB#3>, %PC<imp-def,dead>
-// J2_jump <BB#6>, %PC<imp-def,dead>
+// J2_jumpf %vreg42<kill>, <BB#3>, %pc<imp-def,dead>
+// J2_jump <BB#6>, %pc<imp-def,dead>
// Successors according to CFG: BB#6 BB#3
#include "Hexagon.h"
MachineOperand &MO = PredDef->getOperand(i);
if (MO.isReg()) {
// Skip all implicit references. In one case there was:
- // %vreg140<def> = FCMPUGT32_rr %vreg138, %vreg139, %USR<imp-use>
+ // %vreg140<def> = FCMPUGT32_rr %vreg138, %vreg139, %usr<imp-use>
if (MO.isImplicit())
continue;
if (MO.isUse()) {
}
// Inspired by this pair:
-// %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0]
-// S2_storeri_io %R29, 132, %R1<kill>; flags: mem:ST4[FixedStack1]
+// %r13<def> = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
+// S2_storeri_io %r29, 132, %r1<kill>; flags: mem:ST4[FixedStack1]
// Currently AA considers the addresses in these instructions to be aliasing.
bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
case Hexagon::EH_RETURN_JMPR:
case Hexagon::PS_jmpret:
// jumpr r31
- // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
+ // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
DstReg = MI.getOperand(0).getReg();
if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
return HexagonII::HSIG_L2;
case Hexagon::C2_cmovenewif:
// if ([!]P0[.new]) Rd = #0
// Actual form:
- // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
+ // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
DstReg = MI.getOperand(0).getReg();
SrcReg = MI.getOperand(1).getReg();
if (isIntRegForSubInst(DstReg) &&
// using -- if (QRI->isSubRegister(feederReg, cmpReg1) logic
// before the callsite of this function
// But we can not as it comes in the following fashion.
- // %D0<def> = Hexagon_S2_lsr_r_p %D0<kill>, %R2<kill>
- // %R0<def> = KILL %R0, %D0<imp-use,kill>
- // %P0<def> = CMPEQri %R0<kill>, 0
+ // %d0<def> = Hexagon_S2_lsr_r_p %d0<kill>, %r2<kill>
+ // %r0<def> = KILL %r0, %d0<imp-use,kill>
+ // %p0<def> = CMPEQri %r0<kill>, 0
// Hence, we need to check if it's a KILL instruction.
if (II->getOpcode() == TargetOpcode::KILL)
return false;
// to new value jump. If they are in the path, bail out.
// KILL sets kill flag on the opcode. It also sets up a
// single register, out of pair.
- // %D0<def> = S2_lsr_r_p %D0<kill>, %R2<kill>
- // %R0<def> = KILL %R0, %D0<imp-use,kill>
- // %P0<def> = C2_cmpeqi %R0<kill>, 0
+ // %d0<def> = S2_lsr_r_p %d0<kill>, %r2<kill>
+ // %r0<def> = KILL %r0, %d0<imp-use,kill>
+ // %p0<def> = C2_cmpeqi %r0<kill>, 0
// PHI can be anything after RA.
// COPY can rematerialize things in between feeder, compare and nvj.
if (MII->getOpcode() == TargetOpcode::KILL ||
// ...
// %vreg16<def> = NOT_p %vreg15<kill>
// ...
-// JMP_c %vreg16<kill>, <BB#1>, %PC<imp-def,dead>
+// JMP_c %vreg16<kill>, <BB#1>, %pc<imp-def,dead>
//
// Into
// %vreg15<def> = CMPGTrr %vreg6, %vreg2;
// ...
-// JMP_cNot %vreg15<kill>, <BB#1>, %PC<imp-def,dead>;
+// JMP_cNot %vreg15<kill>, <BB#1>, %pc<imp-def,dead>;
//
// Note: The peephole pass makes the instructions like
// %vreg170<def> = SXTW %vreg166 or %vreg16<def> = NOT_p %vreg15<kill>
shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))
DAG->SUnits[su].addPred(SDep(&DAG->SUnits[su-1], SDep::Barrier));
// Prevent redundant register copies between two calls, which are caused by
- // both the return value and the argument for the next call being in %R0.
+ // both the return value and the argument for the next call being in %r0.
// Example:
// 1: <call1>
- // 2: %VregX = COPY %R0
- // 3: <use of %VregX>
- // 4: %R0 = ...
+ // 2: %vregX = COPY %r0
+ // 3: <use of %vregX>
+ // 4: %r0 = ...
// 5: <call2>
// The scheduler would often swap 3 and 4, so an additional register is
// needed. This code inserts a Barrier dependence between 3 & 4 to prevent
- // this. The same applies for %D0 and %V0/%W0, which are also handled.
+ // this. The same applies for %d0 and %v0/%w0, which are also handled.
else if (SchedRetvalOptimization) {
const MachineInstr *MI = DAG->SUnits[su].getInstr();
if (MI->isCopy() && (MI->readsRegister(Hexagon::R0, &TRI) ||
MI->readsRegister(Hexagon::V0, &TRI))) {
- // %vregX = COPY %R0
+ // %vregX = COPY %r0
VRegHoldingRet = MI->getOperand(0).getReg();
RetRegister = MI->getOperand(1).getReg();
LastUseOfRet = nullptr;
// <use of %vregX>
LastUseOfRet = &DAG->SUnits[su];
else if (LastUseOfRet && MI->definesRegister(RetRegister, &TRI))
- // %R0 = ...
+ // %r0 = ...
DAG->SUnits[su].addPred(SDep(LastUseOfRet, SDep::Barrier));
}
}
// If data definition is because of implicit definition of the register,
// do not newify the store. Eg.
- // %R9<def> = ZXTH %R12, %D6<imp-use>, %R12<imp-def>
- // S2_storerh_io %R8, 2, %R12<kill>; mem:ST2[%scevgep343]
+ // %r9<def> = ZXTH %r12, %d6<imp-use>, %r12<imp-def>
+ // S2_storerh_io %r8, 2, %r12<kill>; mem:ST2[%scevgep343]
for (auto &MO : PacketMI.operands()) {
if (MO.isRegMask() && MO.clobbersPhysReg(DepReg))
return false;
// Handle imp-use of super reg case. There is a target independent side
// change that should prevent this situation but I am handling it for
// just-in-case. For example, we cannot newify R2 in the following case:
- // %R3<def> = A2_tfrsi 0;
- // S2_storeri_io %R0<kill>, 0, %R2<kill>, %D1<imp-use,kill>;
+ // %r3<def> = A2_tfrsi 0;
+ // S2_storeri_io %r0<kill>, 0, %r2<kill>, %d1<imp-use,kill>;
for (auto &MO : MI.operands()) {
if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == DepReg)
return false;
// Go through the packet instructions and search for an anti dependency between
// them and DepReg from MI. Consider this case:
// Trying to add
-// a) %R1<def> = TFRI_cdNotPt %P3, 2
+// a) %r1<def> = TFRI_cdNotPt %p3, 2
// to this packet:
// {
-// b) %P0<def> = C2_or %P3<kill>, %P0<kill>
-// c) %P3<def> = C2_tfrrp %R23
-// d) %R1<def> = C2_cmovenewit %P3, 4
+// b) %p0<def> = C2_or %p3<kill>, %p0<kill>
+// c) %p3<def> = C2_tfrrp %r23
+// d) %r1<def> = C2_cmovenewit %p3, 4
// }
// The P3 from a) and d) will be complements after
// a)'s P3 is converted to .new form
// One corner case deals with the following scenario:
// Trying to add
- // a) %R24<def> = A2_tfrt %P0, %R25
+ // a) %r24<def> = A2_tfrt %p0, %r25
// to this packet:
// {
- // b) %R25<def> = A2_tfrf %P0, %R24
- // c) %P0<def> = C2_cmpeqi %R26, 1
+ // b) %r25<def> = A2_tfrf %p0, %r24
+ // c) %p0<def> = C2_cmpeqi %r26, 1
// }
//
// On general check a) and b) are complements, but presence of c) will
// There are certain anti-dependencies that cannot be ignored.
// Specifically:
- // J2_call ... %R0<imp-def> ; SUJ
+ // J2_call ... %r0<imp-def> ; SUJ
// R0 = ... ; SUI
// Those cannot be packetized together, since the call will observe
// the effect of the assignment to R0.
case Hexagon::J2_jumpr:
case Hexagon::PS_jmpret:
// jumpr r31
- // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
+ // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
DstReg = MCI.getOperand(0).getReg();
if (Hexagon::R31 == DstReg)
return HexagonII::HSIG_L2;
case Hexagon::C2_cmovenewif:
// if ([!]P0[.new]) Rd = #0
// Actual form:
- // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
+ // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
DstReg = MCI.getOperand(0).getReg(); // Rd
PredReg = MCI.getOperand(1).getReg(); // P0
if (HexagonMCInstrInfo::isIntRegForSubInst(DstReg) &&
if (!HexagonMCInstrInfo::bundleSize(MCB)) {
// There once was a bundle:
- // BUNDLE %D2<imp-def>, %R4<imp-def>, %R5<imp-def>, %D7<imp-def>, ...
- // * %D2<def> = IMPLICIT_DEF; flags:
- // * %D7<def> = IMPLICIT_DEF; flags:
+ // BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
+ // * %d2<def> = IMPLICIT_DEF; flags:
+ // * %d7<def> = IMPLICIT_DEF; flags:
// After the IMPLICIT_DEFs were removed by the asm printer, the bundle
// became empty.
DEBUG(dbgs() << "Skipping empty bundle");
if (!HexagonMCInstrInfo::bundleSize(MCB)) {
// There once was a bundle:
- // BUNDLE %D2<imp-def>, %R4<imp-def>, %R5<imp-def>, %D7<imp-def>, ...
- // * %D2<def> = IMPLICIT_DEF; flags:
- // * %D7<def> = IMPLICIT_DEF; flags:
+ // BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
+ // * %d2<def> = IMPLICIT_DEF; flags:
+ // * %d7<def> = IMPLICIT_DEF; flags:
// After the IMPLICIT_DEFs were removed by the asm printer, the bundle
// became empty.
DEBUG(dbgs() << "Skipping empty bundle");
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));
// For MIPSR6 JI*C requires an immediate 0 as an operand, JIALC(64) an
- // immediate 0 as an operand and requires the removal of it's %RA<imp-def>
+ // immediate 0 as an operand and requires the removal of its %ra<imp-def>
// implicit operand as copying the implicit operations of the instruction we're
// looking at will give us the correct flags.
if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
return LowerPATCHPOINT(SM, *MI);
case PPC::MoveGOTtoLR: {
- // Transform %LR = MoveGOTtoLR
+ // Transform %lr = MoveGOTtoLR
// Into this: bl _GLOBAL_OFFSET_TABLE_@local-4
// _GLOBAL_OFFSET_TABLE_@local-4 (instruction preceding
// _GLOBAL_OFFSET_TABLE_) has exactly one instruction:
}
case PPC::MovePCtoLR:
case PPC::MovePCtoLR8: {
- // Transform %LR = MovePCtoLR
+ // Transform %lr = MovePCtoLR
// Into this, where the label is the PIC base:
// bl L1$pb
// L1$pb:
return;
}
case PPC::UpdateGBR: {
- // Transform %Rd = UpdateGBR(%Rt, %Ri)
- // Into: lwz %Rt, .L0$poff - .L0$pb(%Ri)
- // add %Rd, %Rt, %Ri
+ // Transform %rd = UpdateGBR(%rt, %ri)
+ // Into: lwz %rt, .L0$poff - .L0$pb(%ri)
+ // add %rd, %rt, %ri
// Get the offset from the GOT Base Register to the GOT
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
MCSymbol *PICOffset =
const MCOperand TR = TmpInst.getOperand(1);
const MCOperand PICR = TmpInst.getOperand(0);
- // Step 1: lwz %Rt, .L$poff - .L$pb(%Ri)
+ // Step 1: lwz %rt, .L$poff - .L$pb(%ri)
TmpInst.getOperand(1) =
MCOperand::createExpr(MCBinaryExpr::createSub(Exp, PB, OutContext));
TmpInst.getOperand(0) = TR;
return;
}
case PPC::LWZtoc: {
- // Transform %R3 = LWZtoc <ga:@min1>, %R2
+ // Transform %r3 = LWZtoc <ga:@min1>, %r2
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
// Change the opcode to LWZ, and the global address operand to be a
case PPC::LDtocCPT:
case PPC::LDtocBA:
case PPC::LDtoc: {
- // Transform %X3 = LDtoc <ga:@min1>, %X2
+ // Transform %x3 = LDtoc <ga:@min1>, %x2
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
// Change the opcode to LD, and the global address operand to be a
}
case PPC::ADDIStocHA: {
- // Transform %Xd = ADDIStocHA %X2, <ga:@sym>
+ // Transform %xd = ADDIStocHA %x2, <ga:@sym>
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
// Change the opcode to ADDIS8. If the global address is external, has
return;
}
case PPC::LDtocL: {
- // Transform %Xd = LDtocL <ga:@sym>, %Xs
+ // Transform %xd = LDtocL <ga:@sym>, %xs
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
// Change the opcode to LD. If the global address is external, has
return;
}
case PPC::ADDItocL: {
- // Transform %Xd = ADDItocL %Xs, <ga:@sym>
+ // Transform %xd = ADDItocL %xs, <ga:@sym>
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
// Change the opcode to ADDI8. If the global address is external, then
return;
}
case PPC::ADDISgotTprelHA: {
- // Transform: %Xd = ADDISgotTprelHA %X2, <ga:@sym>
- // Into: %Xd = ADDIS8 %X2, sym@got@tlsgd@ha
+ // Transform: %xd = ADDISgotTprelHA %x2, <ga:@sym>
+ // Into: %xd = ADDIS8 %x2, sym@got@tlsgd@ha
assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
}
case PPC::LDgotTprelL:
case PPC::LDgotTprelL32: {
- // Transform %Xd = LDgotTprelL <ga:@sym>, %Xs
+ // Transform %xd = LDgotTprelL <ga:@sym>, %xs
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
// Change the opcode to LD.
return;
}
case PPC::ADDIStlsgdHA: {
- // Transform: %Xd = ADDIStlsgdHA %X2, <ga:@sym>
- // Into: %Xd = ADDIS8 %X2, sym@got@tlsgd@ha
+ // Transform: %xd = ADDIStlsgdHA %x2, <ga:@sym>
+ // Into: %xd = ADDIS8 %x2, sym@got@tlsgd@ha
assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
return;
}
case PPC::ADDItlsgdL:
- // Transform: %Xd = ADDItlsgdL %Xs, <ga:@sym>
- // Into: %Xd = ADDI8 %Xs, sym@got@tlsgd@l
+ // Transform: %xd = ADDItlsgdL %xs, <ga:@sym>
+ // Into: %xd = ADDI8 %xs, sym@got@tlsgd@l
case PPC::ADDItlsgdL32: {
- // Transform: %Rd = ADDItlsgdL32 %Rs, <ga:@sym>
- // Into: %Rd = ADDI %Rs, sym@got@tlsgd
+ // Transform: %rd = ADDItlsgdL32 %rs, <ga:@sym>
+ // Into: %rd = ADDI %rs, sym@got@tlsgd
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
return;
}
case PPC::GETtlsADDR:
- // Transform: %X3 = GETtlsADDR %X3, <ga:@sym>
+ // Transform: %x3 = GETtlsADDR %x3, <ga:@sym>
// Into: BL8_NOP_TLS __tls_get_addr(sym at tlsgd)
case PPC::GETtlsADDR32: {
- // Transform: %R3 = GETtlsADDR32 %R3, <ga:@sym>
+ // Transform: %r3 = GETtlsADDR32 %r3, <ga:@sym>
// Into: BL_TLS __tls_get_addr(sym at tlsgd)@PLT
EmitTlsCall(MI, MCSymbolRefExpr::VK_PPC_TLSGD);
return;
}
case PPC::ADDIStlsldHA: {
- // Transform: %Xd = ADDIStlsldHA %X2, <ga:@sym>
- // Into: %Xd = ADDIS8 %X2, sym@got@tlsld@ha
+ // Transform: %xd = ADDIStlsldHA %x2, <ga:@sym>
+ // Into: %xd = ADDIS8 %x2, sym@got@tlsld@ha
assert(Subtarget->isPPC64() && "Not supported for 32-bit PowerPC");
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
return;
}
case PPC::ADDItlsldL:
- // Transform: %Xd = ADDItlsldL %Xs, <ga:@sym>
- // Into: %Xd = ADDI8 %Xs, sym@got@tlsld@l
+ // Transform: %xd = ADDItlsldL %xs, <ga:@sym>
+ // Into: %xd = ADDI8 %xs, sym@got@tlsld@l
case PPC::ADDItlsldL32: {
- // Transform: %Rd = ADDItlsldL32 %Rs, <ga:@sym>
- // Into: %Rd = ADDI %Rs, sym@got@tlsld
+ // Transform: %rd = ADDItlsldL32 %rs, <ga:@sym>
+ // Into: %rd = ADDI %rs, sym@got@tlsld
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
return;
}
case PPC::GETtlsldADDR:
- // Transform: %X3 = GETtlsldADDR %X3, <ga:@sym>
+ // Transform: %x3 = GETtlsldADDR %x3, <ga:@sym>
// Into: BL8_NOP_TLS __tls_get_addr(sym at tlsld)
case PPC::GETtlsldADDR32: {
- // Transform: %R3 = GETtlsldADDR32 %R3, <ga:@sym>
+ // Transform: %r3 = GETtlsldADDR32 %r3, <ga:@sym>
// Into: BL_TLS __tls_get_addr(sym at tlsld)@PLT
EmitTlsCall(MI, MCSymbolRefExpr::VK_PPC_TLSLD);
return;
}
case PPC::ADDISdtprelHA:
- // Transform: %Xd = ADDISdtprelHA %Xs, <ga:@sym>
- // Into: %Xd = ADDIS8 %Xs, sym@dtprel@ha
+ // Transform: %xd = ADDISdtprelHA %xs, <ga:@sym>
+ // Into: %xd = ADDIS8 %xs, sym@dtprel@ha
case PPC::ADDISdtprelHA32: {
- // Transform: %Rd = ADDISdtprelHA32 %Rs, <ga:@sym>
- // Into: %Rd = ADDIS %Rs, sym@dtprel@ha
+ // Transform: %rd = ADDISdtprelHA32 %rs, <ga:@sym>
+ // Into: %rd = ADDIS %rs, sym@dtprel@ha
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
return;
}
case PPC::ADDIdtprelL:
- // Transform: %Xd = ADDIdtprelL %Xs, <ga:@sym>
- // Into: %Xd = ADDI8 %Xs, sym@dtprel@l
+ // Transform: %xd = ADDIdtprelL %xs, <ga:@sym>
+ // Into: %xd = ADDI8 %xs, sym@dtprel@l
case PPC::ADDIdtprelL32: {
- // Transform: %Rd = ADDIdtprelL32 %Rs, <ga:@sym>
- // Into: %Rd = ADDI %Rs, sym@dtprel@l
+ // Transform: %rd = ADDIdtprelL32 %rs, <ga:@sym>
+ // Into: %rd = ADDI %rs, sym@dtprel@l
const MachineOperand &MO = MI->getOperand(2);
const GlobalValue *GValue = MO.getGlobal();
MCSymbol *MOSymbol = getSymbol(GValue);
case PPC::MFOCRF:
case PPC::MFOCRF8:
if (!Subtarget->hasMFOCRF()) {
- // Transform: %R3 = MFOCRF %CR7
- // Into: %R3 = MFCR ;; cr7
+ // Transform: %r3 = MFOCRF %cr7
+ // Into: %r3 = MFCR ;; cr7
unsigned NewOpcode =
MI->getOpcode() == PPC::MFOCRF ? PPC::MFCR : PPC::MFCR8;
OutStreamer->AddComment(PPCInstPrinter::
case PPC::MTOCRF:
case PPC::MTOCRF8:
if (!Subtarget->hasMFOCRF()) {
- // Transform: %CR7 = MTOCRF %R3
- // Into: MTCRF mask, %R3 ;; cr7
+ // Transform: %cr7 = MTOCRF %r3
+ // Into: MTCRF mask, %r3 ;; cr7
unsigned NewOpcode =
MI->getOpcode() == PPC::MTOCRF ? PPC::MTCRF : PPC::MTCRF8;
unsigned Mask = 0x80 >> OutContext.getRegisterInfo()
/// expands to the following machine code:
///
/// BB#0: derived from LLVM BB %entry
-/// Live Ins: %F1 %F3 %X6
+/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %vreg0<def> = COPY %F1; F8RC:%vreg0
+/// %vreg0<def> = COPY %f1; F8RC:%vreg0
/// %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-/// %vreg8<def> = LXSDX %ZERO8, %vreg7<kill>, %RM<imp-use>;
+/// %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
/// BCC 76, %vreg5, <BB#2>; CRRC:%vreg5
/// Successors according to CFG: BB#1(?%) BB#2(?%)
/// %vreg13<def> = PHI %vreg12, <BB#3>, %vreg2, <BB#2>;
/// F8RC:%vreg13,%vreg12,%vreg2
/// <SNIP3>
-/// BLR8 %LR8<imp-use>, %RM<imp-use>, %F1<imp-use>
+/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
///
/// When this pattern is detected, branch coalescing will try to collapse
/// it by moving code in BB#2 to BB#0 and/or BB#4 and removing BB#3.
/// If all conditions are meet, IR should collapse to:
///
/// BB#0: derived from LLVM BB %entry
-/// Live Ins: %F1 %F3 %X6
+/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %vreg0<def> = COPY %F1; F8RC:%vreg0
+/// %vreg0<def> = COPY %f1; F8RC:%vreg0
/// %vreg5<def> = CMPLWI %vreg4<kill>, 0; CRRC:%vreg5 GPRC:%vreg4
-/// %vreg8<def> = LXSDX %ZERO8, %vreg7<kill>, %RM<imp-use>;
+/// %vreg8<def> = LXSDX %zero8, %vreg7<kill>, %rm<imp-use>;
/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7
/// <SNIP2>
/// BCC 76, %vreg5, <BB#4>; CRRC:%vreg5
/// %vreg13<def> = PHI %vreg12, <BB#1>, %vreg2, <BB#0>;
/// F8RC:%vreg13,%vreg12,%vreg2
/// <SNIP3>
-/// BLR8 %LR8<imp-use>, %RM<imp-use>, %F1<imp-use>
+/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
///
/// Branch Coalescing does not split blocks, it moves everything in the same
/// direction ensuring it does not break use/definition semantics.
// or externally available linkage, a non-local function address, or a
// jump table address (not yet needed), or if we are generating code
// for large code model, we generate:
- // LDtocL(GV, ADDIStocHA(%X2, GV))
+ // LDtocL(GV, ADDIStocHA(%x2, GV))
// Otherwise we generate:
- // ADDItocL(ADDIStocHA(%X2, GV), GV)
+ // ADDItocL(ADDIStocHA(%x2, GV), GV)
// Either way, start with the ADDIStocHA:
unsigned HighPartReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
// The first source operand is a TargetGlobalAddress or a TargetJumpTable.
// If it must be toc-referenced according to PPCSubTarget, we generate:
- // LDtocL(<ga:@sym>, ADDIStocHA(%X2, <ga:@sym>))
+ // LDtocL(<ga:@sym>, ADDIStocHA(%x2, <ga:@sym>))
// Otherwise we generate:
- // ADDItocL(ADDIStocHA(%X2, <ga:@sym>), <ga:@sym>)
+ // ADDItocL(ADDIStocHA(%x2, <ga:@sym>), <ga:@sym>)
SDValue GA = N->getOperand(0);
SDValue TOCbase = N->getOperand(1);
SDNode *Tmp = CurDAG->getMachineNode(PPC::ADDIStocHA, dl, MVT::i64,
/// local dynamic TLS on PPC32.
PPC32_PICGOT,
- /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
+ /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
/// TLS model, produces an ADDIS8 instruction that adds the GOT
/// base to sym\@got\@tprel\@ha.
ADDIS_GOT_TPREL_HA,
/// TLS sequence.
ADD_TLS,
- /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
+ /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
/// model, produces an ADDIS8 instruction that adds the GOT base
/// register to sym\@got\@tlsgd\@ha.
ADDIS_TLSGD_HA,
- /// %X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
+ /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
/// model, produces an ADDI8 instruction that adds G8RReg to
/// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
/// ADDIS_TLSGD_L_ADDR until after register assignment.
ADDI_TLSGD_L,
- /// %X3 = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
+ /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
/// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
/// ADDIS_TLSGD_L_ADDR until after register assignment.
GET_TLS_ADDR,
/// register assignment.
ADDI_TLSGD_L_ADDR,
- /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
+ /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
/// model, produces an ADDIS8 instruction that adds the GOT base
/// register to sym\@got\@tlsld\@ha.
ADDIS_TLSLD_HA,
- /// %X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
+ /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
/// model, produces an ADDI8 instruction that adds G8RReg to
/// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
/// ADDIS_TLSLD_L_ADDR until after register assignment.
ADDI_TLSLD_L,
- /// %X3 = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
+ /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
/// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
/// ADDIS_TLSLD_L_ADDR until after register assignment.
GET_TLSLD_ADDR,
/// following register assignment.
ADDI_TLSLD_L_ADDR,
- /// G8RC = ADDIS_DTPREL_HA %X3, Symbol - For the local-dynamic TLS
+ /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
/// model, produces an ADDIS8 instruction that adds X3 to
/// sym\@dtprel\@ha.
ADDIS_DTPREL_HA,
// For a method return value, we check the ZExt/SExt flags in attribute.
// We assume the following code sequence for method call.
- // ADJCALLSTACKDOWN 32, %R1<imp-def,dead>, %R1<imp-use>
+ // ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
// BL8_NOP <ga:@func>,...
- // ADJCALLSTACKUP 32, 0, %R1<imp-def,dead>, %R1<imp-use>
- // %vreg5<def> = COPY %X3; G8RC:%vreg5
+ // ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
+ // %vreg5<def> = COPY %x3; G8RC:%vreg5
if (SrcReg == PPC::X3) {
const MachineBasicBlock *MBB = MI.getParent();
MachineBasicBlock::const_instr_iterator II =
}
// We're looking for a sequence like this:
- // %F0<def> = LFD 0, %X3<kill>, %QF0<imp-def>; mem:LD8[%a](tbaa=!2)
- // %QF1<def> = QVESPLATI %QF0<kill>, 0, %RM<imp-use>
+ // %f0<def> = LFD 0, %x3<kill>, %qf0<imp-def>; mem:LD8[%a](tbaa=!2)
+ // %qf1<def> = QVESPLATI %qf0<kill>, 0, %rm<imp-use>
for (auto SI = Splats.begin(); SI != Splats.end();) {
MachineInstr *SMI = *SI;
// ...
// %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
// %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
// ...
// %vreg9<def,tied1> = XSMADDADP %vreg9<tied0>, %vreg17, %vreg19,
- // %RM<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
+ // %rm<imp-use>; VSLRC:%vreg9,%vreg17,%vreg19
// ...
// Where we can eliminate the copy by changing from the A-type to the
// M-type instruction. Specifically, for this example, this means:
// %vreg5<def,tied1> = XSMADDADP %vreg5<tied0>, %vreg17, %vreg16,
- // %RM<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
+ // %rm<imp-use>; VSLRC:%vreg5,%vreg17,%vreg16
// is replaced by:
// %vreg16<def,tied1> = XSMADDMDP %vreg16<tied0>, %vreg18, %vreg9,
- // %RM<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
+ // %rm<imp-use>; VSLRC:%vreg16,%vreg18,%vreg9
// and we remove: %vreg5<def> = COPY %vreg9; VSLRC:%vreg5,%vreg9
SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
// walking the MIs we may as well test liveness here.
//
// FIXME: There is a case that occurs in practice, like this:
- // %vreg9<def> = COPY %F1; VSSRC:%vreg9
+ // %vreg9<def> = COPY %f1; VSSRC:%vreg9
// ...
// %vreg6<def> = COPY %vreg9; VSSRC:%vreg6,%vreg9
// %vreg7<def> = COPY %vreg9; VSSRC:%vreg7,%vreg9
return !(MFI.hasCalls() // has calls
|| MRI.isPhysRegUsed(SP::L0) // Too many registers needed
- || MRI.isPhysRegUsed(SP::O6) // %SP is used
- || hasFP(MF)); // need %FP
+ || MRI.isPhysRegUsed(SP::O6) // %sp is used
+ || hasFP(MF)); // need %fp
}
void SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const {
// Also do a forward search to handle cases where an instruction after the
// compare can be converted like
//
- // LTEBRCompare %F0S, %F0S, %CC<imp-def> LTEBRCompare %F0S, %F0S, %CC<imp-def>
- // %F2S<def> = LER %F0S
+ // LTEBRCompare %f0s, %f0s, %cc<imp-def> LTEBRCompare %f0s, %f0s, %cc<imp-def>
+ // %f2s<def> = LER %f0s
//
MBBI = Compare, MBBE = MBB.end();
while (++MBBI != MBBE) {
cond_next140 (0xa910740, LLVM BB @0xa90beb0):
%reg1078 = MOV32ri -3
- %reg1079 = ADD32rm %reg1078, %reg1068, 1, %NOREG, 0
- %reg1037 = MOV32rm %reg1024, 1, %NOREG, 40
+ %reg1079 = ADD32rm %reg1078, %reg1068, 1, %noreg, 0
+ %reg1037 = MOV32rm %reg1024, 1, %noreg, 40
%reg1080 = IMUL32rr %reg1079, %reg1037
- %reg1081 = MOV32rm %reg1058, 1, %NOREG, 0
+ %reg1081 = MOV32rm %reg1058, 1, %noreg, 0
%reg1038 = LEA32r %reg1081, 1, %reg1080, -3
- %reg1036 = MOV32rm %reg1024, 1, %NOREG, 32
+ %reg1036 = MOV32rm %reg1024, 1, %noreg, 32
%reg1082 = SHL32ri %reg1038, 4
%reg1039 = ADD32rr %reg1036, %reg1082
- %reg1083 = MOVAPSrm %reg1059, 1, %NOREG, 0
+ %reg1083 = MOVAPSrm %reg1059, 1, %noreg, 0
%reg1034 = SHUFPSrr %reg1083, %reg1083, 170
%reg1032 = SHUFPSrr %reg1083, %reg1083, 0
%reg1035 = SHUFPSrr %reg1083, %reg1083, 255
Still ok. After register allocation:
cond_next140 (0xa910740, LLVM BB @0xa90beb0):
- %EAX = MOV32ri -3
- %EDX = MOV32rm <fi#3>, 1, %NOREG, 0
- ADD32rm %EAX<def&use>, %EDX, 1, %NOREG, 0
- %EDX = MOV32rm <fi#7>, 1, %NOREG, 0
- %EDX = MOV32rm %EDX, 1, %NOREG, 40
- IMUL32rr %EAX<def&use>, %EDX
- %ESI = MOV32rm <fi#5>, 1, %NOREG, 0
- %ESI = MOV32rm %ESI, 1, %NOREG, 0
- MOV32mr <fi#4>, 1, %NOREG, 0, %ESI
- %EAX = LEA32r %ESI, 1, %EAX, -3
- %ESI = MOV32rm <fi#7>, 1, %NOREG, 0
- %ESI = MOV32rm %ESI, 1, %NOREG, 32
- %EDI = MOV32rr %EAX
- SHL32ri %EDI<def&use>, 4
- ADD32rr %EDI<def&use>, %ESI
- %XMM0 = MOVAPSrm %ECX, 1, %NOREG, 0
- %XMM1 = MOVAPSrr %XMM0
- SHUFPSrr %XMM1<def&use>, %XMM1, 170
- %XMM2 = MOVAPSrr %XMM0
- SHUFPSrr %XMM2<def&use>, %XMM2, 0
- %XMM3 = MOVAPSrr %XMM0
- SHUFPSrr %XMM3<def&use>, %XMM3, 255
- SHUFPSrr %XMM0<def&use>, %XMM0, 85
- %EBX = MOV32rr %EDI
- AND32ri8 %EBX<def&use>, 15
- CMP32ri8 %EBX, 0
+ %eax = MOV32ri -3
+ %edx = MOV32rm <fi#3>, 1, %noreg, 0
+ ADD32rm %eax<def&use>, %edx, 1, %noreg, 0
+ %edx = MOV32rm <fi#7>, 1, %noreg, 0
+ %edx = MOV32rm %edx, 1, %noreg, 40
+ IMUL32rr %eax<def&use>, %edx
+ %esi = MOV32rm <fi#5>, 1, %noreg, 0
+ %esi = MOV32rm %esi, 1, %noreg, 0
+ MOV32mr <fi#4>, 1, %noreg, 0, %esi
+ %eax = LEA32r %esi, 1, %eax, -3
+ %esi = MOV32rm <fi#7>, 1, %noreg, 0
+ %esi = MOV32rm %esi, 1, %noreg, 32
+ %edi = MOV32rr %eax
+ SHL32ri %edi<def&use>, 4
+ ADD32rr %edi<def&use>, %esi
+ %xmm0 = MOVAPSrm %ecx, 1, %noreg, 0
+ %xmm1 = MOVAPSrr %xmm0
+ SHUFPSrr %xmm1<def&use>, %xmm1, 170
+ %xmm2 = MOVAPSrr %xmm0
+ SHUFPSrr %xmm2<def&use>, %xmm2, 0
+ %xmm3 = MOVAPSrr %xmm0
+ SHUFPSrr %xmm3<def&use>, %xmm3, 255
+ SHUFPSrr %xmm0<def&use>, %xmm0, 85
+ %ebx = MOV32rr %edi
+ AND32ri8 %ebx<def&use>, 15
+ CMP32ri8 %ebx, 0
JE mbb<cond_next204,0xa914d30>
This looks really bad. The problem is shufps is a destructive opcode. Since it
Before regalloc, we have:
- %reg1025<def> = IMUL32rri8 %reg1024, 45, %EFLAGS<imp-def>
+ %reg1025<def> = IMUL32rri8 %reg1024, 45, %eflags<imp-def>
JMP mbb<bb2,0x203afb0>
Successors according to CFG: 0x203afb0 (#3)
bb1: 0x203af60, LLVM BB @0x1e02310, ID#2:
Predecessors according to CFG: 0x203aec0 (#0)
- %reg1026<def> = IMUL32rri8 %reg1024, 78, %EFLAGS<imp-def>
+ %reg1026<def> = IMUL32rri8 %reg1024, 78, %eflags<imp-def>
Successors according to CFG: 0x203afb0 (#3)
bb2: 0x203afb0, LLVM BB @0x1e02340, ID#3:
// A SwiftError is passed in R12.
CCIfSwiftError<CCIfType<[i64], CCAssignToReg<[R12]>>>,
- // For Swift Calling Convention, pass sret in %RAX.
+ // For Swift Calling Convention, pass sret in %rax.
CCIfCC<"CallingConv::Swift",
CCIfSRet<CCIfType<[i64], CCAssignToReg<[RAX]>>>>,
// Generate the DIV/IDIV instruction.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
- // For i8 remainder, we can't reference AH directly, as we'll end
- // up with bogus copies like %R9B = COPY %AH. Reference AX
- // instead to prevent AH references in a REX instruction.
+ // For i8 remainder, we can't reference AH directly, as we'll end
+ // up with bogus copies like %r9b = COPY %ah. Reference AX
+ // instead to prevent AH references in a REX instruction.
//
// The current assumption of the fast register allocator is that isel
// won't generate explicit references to the GR8_NOREX registers. If
/// So, it handles pattern like this:
///
/// BB#2: derived from LLVM BB %if.then
-/// Live Ins: %RDI
+/// Live Ins: %rdi
/// Predecessors according to CFG: BB#0
-/// %AX<def> = MOV16rm %RDI<kill>, 1, %noreg, 0, %noreg, %EAX<imp-def>; mem:LD2[%p]
-/// No %EAX<imp-use>
+/// %ax<def> = MOV16rm %rdi<kill>, 1, %noreg, 0, %noreg, %eax<imp-def>; mem:LD2[%p]
+/// No %eax<imp-use>
/// Successors according to CFG: BB#3(?%)
///
/// BB#3: derived from LLVM BB %if.end
-/// Live Ins: %EAX Only %AX is actually live
+/// Live Ins: %eax Only %ax is actually live
/// Predecessors according to CFG: BB#2 BB#1
-/// %AX<def> = KILL %AX, %EAX<imp-use,kill>
-/// RET 0, %AX
+/// %ax<def> = KILL %ax, %eax<imp-use,kill>
+/// RET 0, %ax
static bool isLive(const MachineInstr &MI,
const LivePhysRegs &LiveRegs,
const TargetRegisterInfo *TRI,
// Push the fixed live-in registers.
for (unsigned i = Bundle.FixCount; i > 0; --i) {
- DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %FP"
+ DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %fp"
<< unsigned(Bundle.FixStack[i-1]) << '\n');
pushReg(Bundle.FixStack[i-1]);
}
while (Kills && Defs) {
unsigned KReg = countTrailingZeros(Kills);
unsigned DReg = countTrailingZeros(Defs);
- DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n");
+ DEBUG(dbgs() << "Renaming %fp" << KReg << " as imp %fp" << DReg << "\n");
std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]);
std::swap(RegMap[KReg], RegMap[DReg]);
Kills &= ~(1 << KReg);
unsigned KReg = getStackEntry(0);
if (!(Kills & (1 << KReg)))
break;
- DEBUG(dbgs() << "Popping %FP" << KReg << "\n");
+ DEBUG(dbgs() << "Popping %fp" << KReg << "\n");
popStackAfter(I2);
Kills &= ~(1 << KReg);
}
// Manually kill the rest.
while (Kills) {
unsigned KReg = countTrailingZeros(Kills);
- DEBUG(dbgs() << "Killing %FP" << KReg << "\n");
+ DEBUG(dbgs() << "Killing %fp" << KReg << "\n");
freeStackSlotBefore(I, KReg);
Kills &= ~(1 << KReg);
}
// Load zeros for all the imp-defs.
while(Defs) {
unsigned DReg = countTrailingZeros(Defs);
- DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n");
+ DEBUG(dbgs() << "Defining %fp" << DReg << " as 0\n");
BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0));
pushReg(DReg);
Defs &= ~(1 << DReg);
// FIXME: There are instructions which are being manually built without
// explicit uses/defs so we also have to check the MCInstrDesc. We should be
// able to remove the extra checks once those are fixed up. For example,
- // sometimes we might get something like %RAX<def> = POP64r 1. This won't be
+ // sometimes we might get something like %rax<def> = POP64r 1. This won't be
// caught by modifiesRegister or readsRegister even though the instruction
// really ought to be formed so that modifiesRegister/readsRegister would
// catch it.
// This is an optimization that lets us get away without emitting a nop in
// many cases.
//
- // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %R9) takes two
+ // NB! In some cases the encoding for PUSH64r (e.g. PUSH64r %r9) takes two
// bytes too, so the check on MinSize is important.
MCI.setOpcode(X86::PUSH64rmr);
} else {
; Check that we correctly deal with repeated operands.
; The following testcase creates:
-; %D1<def> = FADDDrr %D0<kill>, %D0
+; %d1<def> = FADDDrr %d0<kill>, %d0
; We'll get a crash if we naively look at the first operand, remove it
; from the substitution list then look at the second operand.
; RUN: llc < %s -mtriple=arm64-apple-ios -verify-machineinstrs | FileCheck %s
; LdStOpt bug created illegal instruction:
-; %D1<def>, %D2<def> = LDPSi %X0, 1
+; %d1<def>, %d2<def> = LDPSi %x0, 1
; rdar://11512047
%0 = type opaque
;
; CHECK: Before post-MI-sched:
; CHECK-LABEL: # Machine code for function test1:
-; CHECK: SU(2): STRWui %WZR
-; CHECK: SU(3): %X21<def>, %X20<def> = LDPXi %SP
+; CHECK: SU(2): STRWui %wzr
+; CHECK: SU(3): %x21<def>, %x20<def> = LDPXi %sp
; CHECK: Predecessors:
; CHECK-NEXT: SU(0): Out
; CHECK-NEXT: SU(0): Out
; Check that the dead register definition pass is considering implicit defs.
; When rematerializing through truncates, the coalescer may produce instructions
; with dead defs, but live implicit-defs of subregs:
-; E.g. %X1<def, dead> = MOVi64imm 2, %W1<imp-def>; %X1:GPR64, %W1:GPR32
+; E.g. %x1<def, dead> = MOVi64imm 2, %w1<imp-def>; %x1:GPR64, %w1:GPR32
; These instructions are live, and their definitions should not be rewritten.
;
; <rdar://problem/16492408>
; CHECK: Successors:
; CHECK-NEXT: SU(5): Data Latency=4 Reg=%vreg2
; CHECK-NEXT: SU(4): Ord Latency=0
-; CHECK: SU(3): STRWui %WZR, %vreg0, 0; mem:ST4[%ptr1] GPR64common:%vreg0
+; CHECK: SU(3): STRWui %wzr, %vreg0, 0; mem:ST4[%ptr1] GPR64common:%vreg0
; CHECK: Successors:
; CHECK: SU(4): Ord Latency=0
-; CHECK: SU(4): STRWui %WZR, %vreg1, 0; mem:ST4[%ptr2] GPR64common:%vreg1
-; CHECK: SU(5): %W0<def> = COPY %vreg2; GPR32:%vreg2
+; CHECK: SU(4): STRWui %wzr, %vreg1, 0; mem:ST4[%ptr2] GPR64common:%vreg1
+; CHECK: SU(5): %w0<def> = COPY %vreg2; GPR32:%vreg2
; CHECK: ** ScheduleDAGMI::schedule picking next node
define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
entry:
; Check that no scheduling dependencies are created between the paired loads and the store during post-RA MI scheduling.
;
; CHECK-LABEL: # Machine code for function foo:
-; CHECK: SU(2): %W{{[0-9]+}}<def>, %W{{[0-9]+}}<def> = LDPWi
+; CHECK: SU(2): %w{{[0-9]+}}<def>, %w{{[0-9]+}}<def> = LDPWi
; CHECK: Successors:
; CHECK-NOT: ch SU(4)
; CHECK: SU(3)
-; CHECK: SU(4): STRWui %WZR, %X{{[0-9]+}}
+; CHECK: SU(4): STRWui %wzr, %x{{[0-9]+}}
define i32 @foo() {
entry:
%0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @G2, i64 0, i64 0), align 4
body: |
bb.0:
; CHECK: Adding MCLOH_AdrpAdrp:
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g4>
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g4>
; CHECK-NEXT: Adding MCLOH_AdrpAdrp:
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpAdrp:
- ; CHECK-NEXT: %X0<def> = ADRP <ga:@g0>
- ; CHECK-NEXT: %X0<def> = ADRP <ga:@g1>
+ ; CHECK-NEXT: %x0<def> = ADRP <ga:@g0>
+ ; CHECK-NEXT: %x0<def> = ADRP <ga:@g1>
%x0 = ADRP target-flags(aarch64-page) @g0
%x0 = ADRP target-flags(aarch64-page) @g1
%x1 = ADRP target-flags(aarch64-page) @g2
bb.1:
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %X20<def> = ADRP <ga:@g0>
- ; CHECK-NEXT: %X3<def> = ADDXri %X20, <ga:@g0>
+ ; CHECK-NEXT: %x20<def> = ADRP <ga:@g0>
+ ; CHECK-NEXT: %x3<def> = ADDXri %x20, <ga:@g0>
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g0>
- ; CHECK-NEXT: %X1<def> = ADDXri %X1, <ga:@g0>
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g0>
+ ; CHECK-NEXT: %x1<def> = ADDXri %x1, <ga:@g0>
%x1 = ADRP target-flags(aarch64-page) @g0
%x9 = SUBXri undef %x11, 5, 0 ; should not affect MCLOH formation
%x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g0, 0
bb.5:
; CHECK-NEXT: Adding MCLOH_AdrpLdr:
- ; CHECK-NEXT: %X5<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %S6<def> = LDRSui %X5, <ga:@g2>
+ ; CHECK-NEXT: %x5<def> = ADRP <ga:@g2>
+ ; CHECK-NEXT: %s6<def> = LDRSui %x5, <ga:@g2>
; CHECK-NEXT: Adding MCLOH_AdrpLdr:
- ; CHECK-NEXT: %X4<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %X4<def> = LDRXui %X4, <ga:@g2>
+ ; CHECK-NEXT: %x4<def> = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x4<def> = LDRXui %x4, <ga:@g2>
%x4 = ADRP target-flags(aarch64-page) @g2
%x4 = LDRXui %x4, target-flags(aarch64-pageoff) @g2
%x5 = ADRP target-flags(aarch64-page) @g2
bb.6:
; CHECK-NEXT: Adding MCLOH_AdrpLdrGot:
- ; CHECK-NEXT: %X5<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %X6<def> = LDRXui %X5, <ga:@g2>
+ ; CHECK-NEXT: %x5<def> = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x6<def> = LDRXui %x5, <ga:@g2>
; CHECK-NEXT: Adding MCLOH_AdrpLdrGot:
- ; CHECK-NEXT: %X4<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %X4<def> = LDRXui %X4, <ga:@g2>
+ ; CHECK-NEXT: %x4<def> = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x4<def> = LDRXui %x4, <ga:@g2>
%x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2
%x4 = LDRXui %x4, target-flags(aarch64-pageoff, aarch64-got) @g2
%x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2
bb.8:
; CHECK-NEXT: Adding MCLOH_AdrpAddLdr:
- ; CHECK-NEXT: %X7<def> = ADRP <ga:@g3>[TF=1]
- ; CHECK-NEXT: %X8<def> = ADDXri %X7, <ga:@g3>
- ; CHECK-NEXT: %D1<def> = LDRDui %X8, 8
+ ; CHECK-NEXT: %x7<def> = ADRP <ga:@g3>[TF=1]
+ ; CHECK-NEXT: %x8<def> = ADDXri %x7, <ga:@g3>
+ ; CHECK-NEXT: %d1<def> = LDRDui %x8, 8
%x7 = ADRP target-flags(aarch64-page) @g3
%x8 = ADDXri %x7, target-flags(aarch64-pageoff) @g3, 0
%d1 = LDRDui %x8, 8
bb.9:
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %X3<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %X3<def> = ADDXri %X3, <ga:@g3>
+ ; CHECK-NEXT: %x3<def> = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x3<def> = ADDXri %x3, <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %X5<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %X2<def> = ADDXri %X5, <ga:@g3>
+ ; CHECK-NEXT: %x5<def> = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x2<def> = ADDXri %x5, <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpAddStr:
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %X1<def> = ADDXri %X1, <ga:@g3>
- ; CHECK-NEXT: STRXui %XZR, %X1, 16
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x1<def> = ADDXri %x1, <ga:@g3>
+ ; CHECK-NEXT: STRXui %xzr, %x1, 16
%x1 = ADRP target-flags(aarch64-page) @g3
%x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g3, 0
STRXui %xzr, %x1, 16
bb.10:
; CHECK-NEXT: Adding MCLOH_AdrpLdr:
- ; CHECK-NEXT: %X2<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %X2<def> = LDRXui %X2, <ga:@g3>
+ ; CHECK-NEXT: %x2<def> = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x2<def> = LDRXui %x2, <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpLdrGotLdr:
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g4>
- ; CHECK-NEXT: %X1<def> = LDRXui %X1, <ga:@g4>
- ; CHECK-NEXT: %X1<def> = LDRXui %X1, 24
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g4>
+ ; CHECK-NEXT: %x1<def> = LDRXui %x1, <ga:@g4>
+ ; CHECK-NEXT: %x1<def> = LDRXui %x1, 24
%x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
%x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
%x1 = LDRXui %x1, 24
bb.11:
; CHECK-NEXT: Adding MCLOH_AdrpLdr
- ; CHECK-NEXT: %X5<def> = ADRP <ga:@g1>
- ; CHECK-NEXT: %X5<def> = LDRXui %X5, <ga:@g1>
+ ; CHECK-NEXT: %x5<def> = ADRP <ga:@g1>
+ ; CHECK-NEXT: %x5<def> = LDRXui %x5, <ga:@g1>
; CHECK-NEXT: Adding MCLOH_AdrpLdrGotStr:
- ; CHECK-NEXT: %X1<def> = ADRP <ga:@g4>
- ; CHECK-NEXT: %X1<def> = LDRXui %X1, <ga:@g4>
- ; CHECK-NEXT: STRXui %XZR, %X1, 32
+ ; CHECK-NEXT: %x1<def> = ADRP <ga:@g4>
+ ; CHECK-NEXT: %x1<def> = LDRXui %x1, <ga:@g4>
+ ; CHECK-NEXT: STRXui %xzr, %x1, 32
%x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
%x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
STRXui %xzr, %x1, 32
bb.12:
; CHECK-NOT: MCLOH_AdrpAdrp
; CHECK: Adding MCLOH_AdrpAddLdr
- ; %X9<def> = ADRP <ga:@g4>
- ; %X9<def> = ADDXri %X9, <ga:@g4>
- ; %X5<def> = LDRXui %X9, 0
+ ; %x9<def> = ADRP <ga:@g4>
+ ; %x9<def> = ADDXri %x9, <ga:@g4>
+ ; %x5<def> = LDRXui %x9, 0
%x9 = ADRP target-flags(aarch64-page, aarch64-got) @g4
%x9 = ADDXri %x9, target-flags(aarch64-pageoff, aarch64-got) @g4, 0
%x5 = LDRXui %x9, 0
; This file check a bug in MachineCopyPropagation pass. The last COPY will be
; incorrectly removed if the machine instructions are as follows:
-; %Q5_Q6<def> = COPY %Q2_Q3
-; %D5<def> =
-; %D3<def> =
-; %D3<def> = COPY %D6
+; %q5_q6<def> = COPY %q2_q3
+; %d5<def> =
+; %d3<def> =
+; %d3<def> = COPY %d6
; This is caused by a bug in function SourceNoLongerAvailable(), which fails to
-; remove the relationship of D6 and "%Q5_Q6<def> = COPY %Q2_Q3".
+; remove the relationship of D6 and "%q5_q6<def> = COPY %q2_q3".
@failed = internal unnamed_addr global i1 false
; CHECK: ldr w[[REG:[0-9]+]], [sp, #8]
; CHECK-NEXT: .Ltmp
call void @llvm.dbg.value(metadata i32 %.0, i64 0, metadata !15, metadata !13), !dbg !16
-; CHECK-NEXT: //DEBUG_VALUE: func:c <- %W[[REG]]
+; CHECK-NEXT: //DEBUG_VALUE: func:c <- %w[[REG]]
%5 = add nsw i32 %.0, %0, !dbg !22
call void @llvm.dbg.value(metadata i32 %5, i64 0, metadata !15, metadata !13), !dbg !16
ret i32 %5, !dbg !23
# Check that the instructions are not dependent on each other, even though
# they all read/write to the zero register.
# CHECK-LABEL: MI Scheduling
-# CHECK: SU(0): %WZR<def,dead> = SUBSWri %W1, 0, 0, %NZCV<imp-def,dead>
+# CHECK: SU(0): %wzr<def,dead> = SUBSWri %w1, 0, 0, %nzcv<imp-def,dead>
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
-# CHECK: SU(1): %W2<def> = COPY %WZR
+# CHECK: SU(1): %w2<def> = COPY %wzr
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
-# CHECK: SU(2): %WZR<def,dead> = SUBSWri %W3, 0, 0, %NZCV<imp-def,dead>
+# CHECK: SU(2): %wzr<def,dead> = SUBSWri %w3, 0, 0, %nzcv<imp-def,dead>
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
-# CHECK: SU(3): %W4<def> = COPY %WZR
+# CHECK: SU(3): %w4<def> = COPY %wzr
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
name: func
; %2 = load i32, i32 addrspace(1)* %in
;
; The instruction selection phase will generate ISA that looks like this:
-; %OQAP = LDS_READ_RET
-; %vreg0 = MOV %OQAP
+; %oqap = LDS_READ_RET
+; %vreg0 = MOV %oqap
; %vreg1 = VTX_READ_32
; %vreg2 = ADD_INT %vreg1, %vreg0
;
; The bottom scheduler will schedule the two ALU instructions first:
;
; UNSCHEDULED:
-; %OQAP = LDS_READ_RET
+; %oqap = LDS_READ_RET
; %vreg1 = VTX_READ_32
;
; SCHEDULED:
;
-; vreg0 = MOV %OQAP
+; vreg0 = MOV %oqap
; vreg2 = ADD_INT %vreg1, %vreg2
;
; The lack of proper aliasing results in the local memory read (LDS_READ_RET)
; final program which looks like this:
;
; Alu clause:
-; %OQAP = LDS_READ_RET
+; %oqap = LDS_READ_RET
; VTX clause:
; %vreg1 = VTX_READ_32
; Alu clause:
-; vreg0 = MOV %OQAP
+; vreg0 = MOV %oqap
; vreg2 = ADD_INT %vreg1, %vreg2
;
-; This is an illegal program because the OQAP def and use know occur in
+; This is an illegal program because the oqap def and use now occur in
; different ALU clauses.
;
; This test checks this scenario and makes sure it doesn't result in an
; CHECK: s_load_dwordx2 s[4:5]
; FIXME: Why is the SGPR4_SGPR5 reference being removed from DBG_VALUE?
-; CHECK: ; kill: %SGPR4_SGPR5<def> %SGPR4_SGPR5<kill>
+; CHECK: ; kill: %sgpr4_sgpr5<def> %sgpr4_sgpr5<kill>
; CHECK-NEXT: ;DEBUG_VALUE: test_debug_value:globalptr_arg <- undef
; CHECK: buffer_store_dword
# Check there is no SReg_32 pressure created by DS_* instructions because of M0 use
# CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} %M0<imp-use>, %EXEC<imp-use>
+# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} %m0<imp-use>, %exec<imp-use>
# CHECK: Pressure Diff : {{$}}
# CHECK: SU({{.*}} DS_WRITE_B32
define void @vst(i8* %m, [4 x i64] %v) {
entry:
; CHECK: vst:
-; CHECK: VST1d64Q %R{{[0-9]+}}<kill>, 8, %D{{[0-9]+}}, pred:14, pred:%noreg, %Q{{[0-9]+}}_Q{{[0-9]+}}<imp-use,kill>
+; CHECK: VST1d64Q %r{{[0-9]+}}<kill>, 8, %d{{[0-9]+}}, pred:14, pred:%noreg, %q{{[0-9]+}}_q{{[0-9]+}}<imp-use,kill>
%v0 = extractvalue [4 x i64] %v, 0
%v1 = extractvalue [4 x i64] %v, 1
%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
; CHECK: vtbx4:
-; CHECK: VTBX4 {{.*}}, pred:14, pred:%noreg, %Q{{[0-9]+}}_Q{{[0-9]+}}<imp-use>
+; CHECK: VTBX4 {{.*}}, pred:14, pred:%noreg, %q{{[0-9]+}}_q{{[0-9]+}}<imp-use>
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load %struct.__neon_int8x8x4_t, %struct.__neon_int8x8x4_t* %B
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
ret void
}
-; CHECK: tBL pred:14, pred:%noreg, <es:__chkstk>, %LR<imp-def>, %SP<imp-use>, %R4<imp-use,kill>, %R4<imp-def>, %R12<imp-def,dead>, %CPSR<imp-def,dead>
+; CHECK: tBL pred:14, pred:%noreg, <es:__chkstk>, %lr<imp-def>, %sp<imp-use>, %r4<imp-use,kill>, %r4<imp-def>, %r12<imp-def,dead>, %cpsr<imp-def,dead>
tail call void @llvm.dbg.value(metadata %struct.tag_s* %c, metadata !13, metadata !DIExpression()), !dbg !21
tail call void @llvm.dbg.value(metadata i64 %x, metadata !14, metadata !DIExpression()), !dbg !22
tail call void @llvm.dbg.value(metadata i64 %y, metadata !17, metadata !DIExpression()), !dbg !23
-;CHECK: @DEBUG_VALUE: foo:y <- [DW_OP_plus_uconst 8] [%R7+0]
+;CHECK: @DEBUG_VALUE: foo:y <- [DW_OP_plus_uconst 8] [%r7+0]
tail call void @llvm.dbg.value(metadata %struct.tag_s* %ptr1, metadata !18, metadata !DIExpression()), !dbg !24
tail call void @llvm.dbg.value(metadata %struct.tag_s* %ptr2, metadata !19, metadata !DIExpression()), !dbg !25
%1 = icmp eq %struct.tag_s* %c, null, !dbg !26
;CHECK: vadd.f32 q4, q8, q8
;CHECK-NEXT: LBB0_1
-;CHECK: @DEBUG_VALUE: x <- %Q4{{$}}
-;CHECK-NEXT: @DEBUG_VALUE: y <- %Q4{{$}}
+;CHECK: @DEBUG_VALUE: x <- %q4{{$}}
+;CHECK-NEXT: @DEBUG_VALUE: y <- %q4{{$}}
;CHECK: beq LBB0_1
; debug value as KILL'ed, resulting in a DEBUG_VALUE node changing codegen! (or
; hopefully, triggering an assert).
- ; CHECK: BUNDLE %ITSTATE<imp-def,dead>
- ; CHECK: * DBG_VALUE %R1, %noreg, !"u"
- ; CHECK-NOT: * DBG_VALUE %R1<kill>, %noreg, !"u"
+ ; CHECK: BUNDLE %itstate<imp-def,dead>
+ ; CHECK: * DBG_VALUE %r1, %noreg, !"u"
+ ; CHECK-NOT: * DBG_VALUE %r1<kill>, %noreg, !"u"
declare arm_aapcscc void @g(%struct.s*, i8*, i32) #1
; CHECK-LABEL: bpf_prog2:
; CHECK: r0 = *(u16 *)skb[12] # encoding: [0x28,0x00,0x00,0x00,0x0c,0x00,0x00,0x00]
; CHECK: r0 = *(u16 *)skb[16] # encoding: [0x28,0x00,0x00,0x00,0x10,0x00,0x00,0x00]
-; CHECK: implicit-def: %R1
+; CHECK: implicit-def: %r1
; CHECK: r1 =
; CHECK: call 1 # encoding: [0x85,0x00,0x00,0x00,0x01,0x00,0x00,0x00]
; CHECK: call 2 # encoding: [0x85,0x00,0x00,0x00,0x02,0x00,0x00,0x00]
define hidden void @thunk_undef_double(i32 %this, double %volume) unnamed_addr align 2 {
; ALL-LABEL: thunk_undef_double:
-; O32: # implicit-def: %A2
-; O32: # implicit-def: %A3
+; O32: # implicit-def: %a2
+; O32: # implicit-def: %a3
; NOT-R6C: jr $[[TGT]]
; R6C: jrc $[[TGT]]
; CHECK-NEXT: cmpld 7, 4, 5
; CHECK-NEXT: mfocrf 10, 1
; CHECK-NEXT: rlwinm 10, 10, 29, 31, 31
-; CHECK-NEXT: # implicit-def: %X4
+; CHECK-NEXT: # implicit-def: %x4
; CHECK-NEXT: mr 4, 10
; CHECK-NEXT: clrldi 4, 4, 32
; CHECK-NEXT: std 4, 0(3)
lnext:
%elementArray = load i32*, i32** %elementArrayPtr, align 8
; CHECK: lwz [[LDREG:[0-9]+]], 124(1) # 4-byte Folded Reload
-; CHECK: # implicit-def: %X[[TEMPREG:[0-9]+]]
+; CHECK: # implicit-def: %x[[TEMPREG:[0-9]+]]
%element = load i32, i32* %elementArray, align 4
; CHECK: mr [[TEMPREG]], [[LDREG]]
; CHECK: clrldi 4, [[TEMPREG]], 32
; Make sure that the MMO on the store has no offset from the byval
; variable itself (we used to have mem:ST8[%v+64]).
-; CHECK: STD %X5<kill>, 176, %X1; mem:ST8[%v](align=16)
+; CHECK: STD %x5<kill>, 176, %x1; mem:ST8[%v](align=16)
; CHECK-NEXT: xori 3, 3, 65534
; CHECK-NEXT: cntlzw 3, 3
; CHECK-NEXT: srwi 3, 3, 5
-; CHECK-NEXT: # implicit-def: %X4
+; CHECK-NEXT: # implicit-def: %x4
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: mr 3, 4
; CHECK-NEXT: blr
%2 = zext i32 %1 to i64
%3 = shl i64 %2, 48
%4 = ashr exact i64 %3, 48
-; CHECK: ANDIo8 {{[^,]+}}, 65520, %CR0<imp-def,dead>;
+; CHECK: ANDIo8 {{[^,]+}}, 65520, %cr0<imp-def,dead>;
; CHECK: CMPLDI
; CHECK: BCC
-; CHECK: ANDIo8 {{[^,]+}}, 65520, %CR0<imp-def>;
-; CHECK: COPY %CR0
+; CHECK: ANDIo8 {{[^,]+}}, 65520, %cr0<imp-def>;
+; CHECK: COPY %cr0
; CHECK: BCC
%5 = icmp eq i64 %4, 0
br i1 %5, label %foo, label %bar
; CHECK-LABEL: fn2
define signext i32 @fn2(i64 %a, i64 %b) {
-; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, %CR0<imp-def>;
-; CHECK: [[CREG:[^, ]+]]<def> = COPY %CR0
+; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, %cr0<imp-def>;
+; CHECK: [[CREG:[^, ]+]]<def> = COPY %cr0
; CHECK: BCC 12, [[CREG]]<kill>
%1 = or i64 %b, %a
%2 = icmp sgt i64 %1, -1
; CHECK-LABEL: fn3
define signext i32 @fn3(i32 %a) {
-; CHECK: ANDIo {{[^, ]+}}, 10, %CR0<imp-def>;
-; CHECK: [[CREG:[^, ]+]]<def> = COPY %CR0
+; CHECK: ANDIo {{[^, ]+}}, 10, %cr0<imp-def>;
+; CHECK: [[CREG:[^, ]+]]<def> = COPY %cr0
; CHECK: BCC 76, [[CREG]]<kill>
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
; CHECK: ********** Function: foo
; CHECK: ********** FAST REGISTER ALLOCATION **********
-; CHECK: %X3<def> = COPY %vreg
-; CHECK-NEXT: %X4<def> = COPY %vreg
+; CHECK: %x3<def> = COPY %vreg
+; CHECK-NEXT: %x4<def> = COPY %vreg
; CHECK-NEXT: BLR
; Test 32-bit signed division and remainder.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s
declare i32 @foo()
; Test 32-bit unsigned division and remainder.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s
declare i32 @foo()
; Test 64-bit signed division and remainder when the divisor is
; a signed-extended i32.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s
declare i64 @foo()
; Testg 64-bit signed division and remainder.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s
declare i64 @foo()
; Testg 64-bit unsigned division and remainder.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s
declare i64 @foo()
; Test that divisions by constants are implemented as multiplications.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s
; Check signed 32-bit division.
define i32 @f1(i32 %a) {
; Test high-part i64->i128 multiplications.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -asm-verbose=0 | FileCheck %s
declare i64 @foo()
; Test signed high-part i64->i128 multiplications on z14.
;
-; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 | FileCheck %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z14 -asm-verbose=0 | FileCheck %s
declare i64 @foo()
; CHECK-NEXT: lbh %r1, 0(%r2)
; CHECK-NEXT: ldgr %f0, %r1
; CHECK-NEXT: ldgr %f2, %r0
-; CHECK-NEXT: # kill: %F0S<def> %F0S<kill> %F0D<kill>
-; CHECK-NEXT: # kill: %F2S<def> %F2S<kill> %F2D<kill>
+; CHECK-NEXT: # kill: %f0s<def> %f0s<kill> %f0d<kill>
+; CHECK-NEXT: # kill: %f2s<def> %f2s<kill> %f2d<kill>
; CHECK-NEXT: br %r14
%L17 = load <2 x i8>, <2 x i8>* %a
%Se21 = sext <2 x i8> %L17 to <2 x i32>
; RUN: llc < %s -verify-machineinstrs
;
; This test case is transformed into a single basic block by the machine
-; branch folding pass. That makes a complete mess of the %EFLAGS liveness, but
+; branch folding pass. That makes a complete mess of the %eflags liveness, but
; we don't care about liveness this late anyway.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
; rdar://7842028
; Do not delete partially dead copy instructions.
-; %RDI<def,dead> = MOV64rr %RAX<kill>, %EDI<imp-def>
-; REP_MOVSD %ECX<imp-def,dead>, %EDI<imp-def,dead>, %ESI<imp-def,dead>, %ECX<imp-use,kill>, %EDI<imp-use,kill>, %ESI<imp-use,kill>
+; %rdi<def,dead> = MOV64rr %rax<kill>, %edi<imp-def>
+; REP_MOVSD %ecx<imp-def,dead>, %edi<imp-def,dead>, %esi<imp-def,dead>, %ecx<imp-use,kill>, %edi<imp-use,kill>, %esi<imp-use,kill>
%struct.F = type { %struct.FC*, i32, i32, i8, i32, i32, i32 }
;BB#5: derived from LLVM BB %bb10
; Predecessors according to CFG: BB#4 BB#5
; %reg1024<def> = MOV_Fp8080 %reg1034
-; %reg1025<def> = MUL_Fp80m32 %reg1024, %RIP, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
+; %reg1025<def> = MUL_Fp80m32 %reg1024, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
; %reg1034<def> = MOV_Fp8080 %reg1025
-; FP_REG_KILL %FP0<imp-def>, %FP1<imp-def>, %FP2<imp-def>, %FP3<imp-def>, %FP4<imp-def>, %FP5<imp-def>, %FP6<imp-def>
+; FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
; JMP_4 <BB#5>
; Successors according to CFG: BB#5
;
-; The X86FP pass needs good kill flags, like on %FP0 representing %reg1034:
+; The X86FP pass needs good kill flags, like on %fp0 representing %reg1034:
;BB#5: derived from LLVM BB %bb10
; Predecessors according to CFG: BB#4 BB#5
-; %FP0<def> = LD_Fp80m <fi#3>, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
-; %FP1<def> = MOV_Fp8080 %FP0<kill>
-; %FP2<def> = MUL_Fp80m32 %FP1, %RIP, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
-; %FP0<def> = MOV_Fp8080 %FP2
-; ST_FpP80m <fi#3>, 1, %reg0, 0, %reg0, %FP0<kill>; mem:ST10[FixedStack3](align=4)
-; ST_FpP80m <fi#4>, 1, %reg0, 0, %reg0, %FP1<kill>; mem:ST10[FixedStack4](align=4)
-; ST_FpP80m <fi#5>, 1, %reg0, 0, %reg0, %FP2<kill>; mem:ST10[FixedStack5](align=4)
-; FP_REG_KILL %FP0<imp-def>, %FP1<imp-def>, %FP2<imp-def>, %FP3<imp-def>, %FP4<imp-def>, %FP5<imp-def>, %FP6<imp-def>
+; %fp0<def> = LD_Fp80m <fi#3>, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
+; %fp1<def> = MOV_Fp8080 %fp0<kill>
+; %fp2<def> = MUL_Fp80m32 %fp1, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
+; %fp0<def> = MOV_Fp8080 %fp2
+; ST_FpP80m <fi#3>, 1, %reg0, 0, %reg0, %fp0<kill>; mem:ST10[FixedStack3](align=4)
+; ST_FpP80m <fi#4>, 1, %reg0, 0, %reg0, %fp1<kill>; mem:ST10[FixedStack4](align=4)
+; ST_FpP80m <fi#5>, 1, %reg0, 0, %reg0, %fp2<kill>; mem:ST10[FixedStack5](align=4)
+; FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
; JMP_4 <BB#5>
; Successors according to CFG: BB#5
!18 = !DIFile(filename: "f.c", directory: "/tmp")
!19 = !{}
-;CHECK: DEBUG_VALUE: bar:x <- %E
+;CHECK: DEBUG_VALUE: bar:x <- %e
;CHECK: Ltmp
;CHECK: DEBUG_VALUE: foo:y <- 1{{$}}
!20 = !{i32 1, !"Debug Info Version", i32 3}
; Function Attrs: noinline nounwind optsize readnone ssp
define i32 @_ZN3foo3bazEi(%struct.foo* nocapture %this, i32 %x) #0 align 2 !dbg !4 {
entry:
- ; CHECK: DEBUG_VALUE: baz:this <- %RDI{{$}}
+ ; CHECK: DEBUG_VALUE: baz:this <- %rdi{{$}}
tail call void @llvm.dbg.value(metadata %struct.foo* %this, i64 0, metadata !13, metadata !16), !dbg !17
tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !18, metadata !16), !dbg !17
%0 = mul nsw i32 %x, 7, !dbg !19
define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_add_i32:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: leal (%rsi,%rdi), %eax
; X64-NEXT: retq
;
define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
; X64-LABEL: test_add_i16:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: leal (%rsi,%rdi), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-LABEL: test_add_i16:
define i64 @test_zext_i1(i8 %a) {
; X64-LABEL: test_zext_i1:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: andq $1, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i8
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andw $1, %ax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i16
;
; X64-LABEL: test_gep_i8:
; X64: # BB#0:
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: movsbq %sil, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
;
; X64-LABEL: test_gep_i16:
; X64: # BB#0:
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: movswq %si, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: negl %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retl
entry:
%or = or i64 %argc, -4294967296
;
; X64-LINUX-LABEL: test6:
; X64-LINUX: # BB#0: # %entry
-; X64-LINUX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-LINUX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-LINUX-NEXT: shlq $32, %rsi
; X64-LINUX-NEXT: leaq (%rsi,%rdi), %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test6:
; X64-WIN32: # BB#0: # %entry
-; X64-WIN32-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; X64-WIN32-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; X64-WIN32-NEXT: shlq $32, %rdx
; X64-WIN32-NEXT: leaq (%rdx,%rcx), %rax
; X64-WIN32-NEXT: retq
define i8 @e(i32* nocapture %a, i32 %b) nounwind {
; CHECK-LABEL: e:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: movl (%rdi), %ecx
; CHECK-NEXT: leal (%rsi,%rcx), %edx
; CHECK-NEXT: addl %esi, %edx
; X32-LABEL: foo:
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: andl $1, %eax
; X64-LABEL: foo:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: andl $1, %eax
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: divw {{[0-9]+}}(%esp)
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; X32-NEXT: andl $1, %eax
; X32-NEXT: retl
;
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: divw %si
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; X64-NEXT: andl $1, %eax
; X64-NEXT: retq
%q = trunc i32 %p to i16
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: shrq $63, %rax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %RAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %rax<kill>
; CHECK-NEXT: retq
entry:
%tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castA:
; AVX: ## BB#0:
-; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castB:
; AVX: ## BB#0:
-; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT: retq
define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castC:
; AVX1: ## BB#0:
-; AVX1-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX1-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: castC:
; AVX2: ## BB#0:
-; AVX2-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castD:
; AVX: ## BB#0:
-; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castE:
; AVX: ## BB#0:
-; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> <i32 0, i32 1>
define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castF:
; AVX: ## BB#0:
-; AVX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: vcmpeqsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; CHECK-NEXT: retq
%cmp29 = fcmp oeq double undef, 0.000000e+00
%res = zext i1 %cmp29 to i32
define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd128_pd256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd128_pd256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x double> %res
define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd256_pd128:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd256_pd128:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a0, <2 x i32> <i32 0, i32 1>
define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps128_ps256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps128_ps256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x float> %res
define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps256_ps128:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps256_ps128:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a0, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi128_si256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi128_si256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x i64> %res
define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi256_si128:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi256_si128:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> %a0, <2 x i32> <i32 0, i32 1>
define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_pd:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_pd:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X64-NEXT: retq
%ext = shufflevector <2 x double> %a1, <2 x double> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_si256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_si256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X64-NEXT: retq
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a1, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128d:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128d:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128i:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128i:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128d:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128d:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128i:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128i:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2)
; CHECK_O0-LABEL: mov00:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK_O0-NEXT: # implicit-def: %YMM1
+; CHECK_O0-NEXT: # implicit-def: %ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7]
; CHECK_O0-LABEL: mov01:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK_O0-NEXT: # implicit-def: %YMM1
+; CHECK_O0-NEXT: # implicit-def: %ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3]
;
; CHECK_O0-LABEL: storev16i16:
; CHECK_O0: # BB#0:
-; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 32
unreachable
;
; CHECK_O0-LABEL: storev16i16_01:
; CHECK_O0: # BB#0:
-; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 4
unreachable
;
; CHECK_O0-LABEL: storev32i8:
; CHECK_O0: # BB#0:
-; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 32
unreachable
;
; CHECK_O0-LABEL: storev32i8_01:
; CHECK_O0: # BB#0:
-; CHECK_O0-NEXT: # implicit-def: %RAX
+; CHECK_O0-NEXT: # implicit-def: %rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 4
unreachable
}
; It is faster to make two saves, if the data is already in XMM registers. For
; example, after making an integer operation.
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
; CHECK-LABEL: double_save:
;
; CHECK_O0-LABEL: double_save:
; CHECK_O0: # BB#0:
-; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
;
; CHECK_O0-LABEL: f_f:
; CHECK_O0: # BB#0: # %allocas
-; CHECK_O0-NEXT: # implicit-def: %AL
+; CHECK_O0-NEXT: # implicit-def: %al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_1
; CHECK_O0-NEXT: jmp .LBB8_2
; CHECK_O0-NEXT: .LBB8_1: # %cif_mask_all
; CHECK_O0-NEXT: .LBB8_2: # %cif_mask_mixed
-; CHECK_O0-NEXT: # implicit-def: %AL
+; CHECK_O0-NEXT: # implicit-def: %al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_3
; CHECK_O0-NEXT: jmp .LBB8_4
; CHECK_O0-NEXT: movl $-1, %eax
; CHECK_O0-NEXT: vmovd %eax, %xmm0
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
-; CHECK_O0-NEXT: # implicit-def: %RCX
-; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: # implicit-def: %rcx
+; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx)
; CHECK_O0-NEXT: .LBB8_4: # %cif_mixed_test_any_check
allocas:
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
-; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK_O0: # BB#0:
; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
-; CHECK_O0-NEXT: # implicit-def: %YMM2
+; CHECK_O0-NEXT: # implicit-def: %ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
; CHECK: # BB#0: # %for_exit499
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: # implicit-def: %YMM0
+; CHECK-NEXT: # implicit-def: %ymm0
; CHECK-NEXT: jne .LBB4_2
; CHECK-NEXT: # BB#1: # %load.i1247
; CHECK-NEXT: pushq %rbp
define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: insert_undef_pd:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> undef, <2 x double> %a1, i8 0)
define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: insert_undef_ps:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %a1, i8 0)
define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: insert_undef_si:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> undef, <4 x i32> %a1, i8 0)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=VZ --check-prefix=AVX
; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=VZ --check-prefix=AVX512
; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-unknown-unknown -mattr=+avx,+fast-partial-ymm-or-zmm-write | FileCheck %s --check-prefix=ALL --check-prefix=NO-VZ --check-prefix=FAST-YMM-ZMM
; RUN: llc < %s -x86-use-vzeroupper -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck %s --check-prefix=ALL --check-prefix=NO-VZ --check-prefix=BTVER2
declare i32 @foo()
; VZ-LABEL: test02:
; VZ: # BB#0:
; VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; VZ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; VZ-NEXT: vzeroupper
; VZ-NEXT: jmp do_sse # TAILCALL
;
; NO-VZ-LABEL: test02:
; NO-VZ: # BB#0:
; NO-VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; NO-VZ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; NO-VZ-NEXT: jmp do_sse # TAILCALL
%add.i = fadd <8 x float> %a, %b
%add.low = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %add.i, i8 0)
; VZ-LABEL: test04:
; VZ: # BB#0:
; VZ-NEXT: pushq %rax
-; VZ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; VZ-NEXT: callq do_avx
-; VZ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; VZ-NEXT: popq %rax
; VZ-NEXT: vzeroupper
; VZ-NEXT: retq
; NO-VZ-LABEL: test04:
; NO-VZ: # BB#0:
; NO-VZ-NEXT: pushq %rax
-; NO-VZ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; NO-VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; NO-VZ-NEXT: callq do_avx
-; NO-VZ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; NO-VZ-NEXT: popq %rax
; NO-VZ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; X32: # BB#0:
; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64: # BB#0:
; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <4 x i64> %A to <4 x i32>
; X32: # BB#0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64: # BB#0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <8 x i32> %A to <8 x i16>
define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastsi128_si256:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test0_mm256_inserti128_si256:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM2
+; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB0_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM2
+; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB1_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM2
+; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB2_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM2
+; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB3_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER-LABEL: masked_gather_v4i32:
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM3
+; NOGATHER-NEXT: # implicit-def: %xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER-LABEL: masked_gather_v4float:
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM3
+; NOGATHER-NEXT: # implicit-def: %xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %YMM2
+; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %YMM2
+; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %YMM2
+; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %YMM2
+; NOGATHER-NEXT: # implicit-def: %ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM2
+; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB10_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; NOGATHER: # BB#0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %XMM2
+; NOGATHER-NEXT: # implicit-def: %xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB11_2
; NOGATHER-NEXT: # BB#1: # %cond.load
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shl <8 x i16> %lhs, %rhs
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = lshr <8 x i16> %lhs, %rhs
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%shl = shl <8 x i16> %r, %a
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%lshr = lshr <8 x i16> %r, %a
;
; AVX512DQ-LABEL: imulq256:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
;
; AVX512DQ-LABEL: imulq128:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
;
; AVX512BW-LABEL: test_mask_vminpd:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
;
; AVX512DQ-LABEL: test_mask_vminpd:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
;
; AVX512BW-LABEL: test_mask_vmaxpd:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
+; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
define <16 x float> @test3(<4 x float> %a) {
; CHECK-LABEL: test3:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15]
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_X32-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL_X32-NEXT: vpslld $31, %ymm0, %ymm0
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_X32-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL_X32-NEXT: vpsllq $63, %zmm0, %zmm0
; CHECK-NEXT: korw %k2, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %x, i32 13, i16 -1, i32 4)
;
; AVX512DQ-LABEL: slto4f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x double>
ret <4 x double> %b
;
; AVX512DQ-LABEL: slto2f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x double>
;
; AVX512DQ-LABEL: sltof2f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x float>
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovups (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%a1 = load <4 x i64>, <4 x i64>* %a, align 8
;
; AVX512DQ-LABEL: f64to4sl:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
%b = fptosi <4 x double> %a to <4 x i64>
ret <4 x i64> %b
;
; AVX512DQ-LABEL: f32to4sl:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
%b = fptosi <4 x float> %a to <4 x i64>
ret <4 x i64> %b
;
; AVX512DQ-LABEL: slto4f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x float>
;
; AVX512DQ-LABEL: ulto4f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = uitofp <4 x i64> %a to <4 x float>
define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
; NOVL-LABEL: f32to8ui:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: f32to8ui:
define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; NOVL-LABEL: f32to4ui:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
; NOVL: # BB#0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
; NOVL: # BB#0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; NOVL-LABEL: f64to4ui:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f64:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f64:
define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
; NOVL-LABEL: uito8f32:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: uito8f32:
define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f32:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
define <8 x float> @sbto8f32(<8 x float> %a) {
; NOVLDQ-LABEL: sbto8f32:
; NOVLDQ: # BB#0:
-; NOVLDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
;
; AVX512DQ-LABEL: sbto8f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
define <8 x float> @ubto8f32(<8 x i32> %a) {
; NOVL-LABEL: ubto8f32:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; NOVL-NEXT: vpmovqd %zmm0, %ymm0
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NOVL-NEXT: retq
;
; VL-LABEL: ubto8f32:
define <8 x double> @ubto8f64(<8 x i32> %a) {
; NOVL-LABEL: ubto8f64:
; NOVL: # BB#0:
-; NOVL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVL-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; KNL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x32:
; KNL-NEXT: vpmovsxbd (%rdi), %ymm1
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x32:
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x32:
; KNL-NEXT: vpmovsxwd (%rdi), %ymm1
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x32mask:
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x32mask:
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i8_to_16i1:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i32_to_16i1:
; SKX-NEXT: vpslld $31, %zmm0, %zmm0
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%mask_b = trunc <16 x i32>%a to <16 x i1>
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_8i16_to_8i1:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
; SKX-NEXT: kmovw %edi, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
; KNL: # BB#0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i16:
define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16_first_element:
; SKX: ## BB#0:
-; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8_first_element:
; SKX: ## BB#0:
-; SKX-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16:
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: fhsub_16:
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_low:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; SKX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
%x228 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5 ,i32 13, i32 7, i32 15>
; KNL-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; KNL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3_sv:
; SKX-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; SKX-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; SKX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; SKX-NEXT: retq
%x226 = shufflevector <16 x i32> %x225, <16 x i32> %x227, <16 x i32> <i32 0, i32 2, i32 16, i32 18
, i32 4, i32 6, i32 20, i32 22, i32 8, i32 10, i32 24, i32 26, i32 12, i32 14, i32 28, i32 30>
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_eel:
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; SKX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
%cmp_res = icmp ult i32 %a, %b
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0
; KNL-NEXT: vpslld $31, %zmm2, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovd2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i16> %x, i32 1
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <16 x i16> %x, i32 1
; CHECK: ## BB#0:
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vpextrw $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%r1 = extractelement <8 x i16> %x, i32 1
%r2 = extractelement <8 x i16> %x, i32 3
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <64 x i8> %x, i32 1
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i8> %x, i32 1
; CHECK: ## BB#0:
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vpextrb $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%r1 = extractelement <16 x i8> %x, i32 1
%r2 = extractelement <16 x i8> %x, i32 3
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SKX-NEXT: vpmovd2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <4 x i32> %x, %y
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-NEXT: kshiftrw $1, %k0, %k0
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <2 x i64> %x, %y
define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2i64:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movq -24(%rsp,%rdi,8), %rax
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2f64:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4i32:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movl -24(%rsp,%rdi,4), %eax
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4f32:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8i16:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movzwl -24(%rsp,%rdi,2), %eax
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movzwl (%rsp,%rdi,2), %eax
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i8:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $31, %edi
; CHECK-NEXT: movq %rsp, %rax
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $63, %edi
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movq %rsp, %rax
define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v2i1:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
;
; SKX-LABEL: test_extractelement_varible_v2i1:
; SKX: ## BB#0:
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v4i1:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
;
; SKX-LABEL: test_extractelement_varible_v4i1:
; SKX: ## BB#0:
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
-; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 %zmm0, (%rsp)
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa32 %zmm0, (%rsp)
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 %m)
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
%res1 = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 %m)
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16-1)
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8-1)
; CHECK-NEXT: kandw %k0, %k1, %k0
; CHECK-NEXT: kandw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
; CHECK-NEXT: kandnw %k2, %k1, %k1
; CHECK-NEXT: kandnw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kandn.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kandn.w(i16 %t1, i16 %a1)
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
ret i16 %res
; CHECK-NEXT: korw %k0, %k1, %k0
; CHECK-NEXT: korw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kor.w(i16 %t1, i16 %a1)
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: kunpckbw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
ret i16 %res
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxnor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxnor.w(i16 %t1, i16 %a1)
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxor.w(i16 %t1, i16 %a1)
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
ret i16 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
; CHECK-NEXT: orb %cl, %dl
; CHECK-NEXT: orb %sil, %al
; CHECK-NEXT: orb %dl, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
; CHECK-NEXT: andb %cl, %dl
; CHECK-NEXT: andb %sil, %al
; CHECK-NEXT: andb %dl, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: mask16:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask16:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask16:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotw %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: mask8:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: korw %k0, %k2, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: mand16_mem:
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: korw %k0, %k2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mand16_mem:
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k2, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mand16_mem:
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: korw %k0, %k2, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kshiftrw $8, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: shuf_test1:
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kshiftrw $8, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: shuf_test1:
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kshiftrw $8, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuf_test1:
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andb $1, %al
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andb $1, %al
-; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andb $1, %al
-; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: LBB17_3:
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-NEXT: LBB18_3:
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-NEXT: cmovgw %ax, %cx
; AVX512BW-NEXT: kmovd %ecx, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
; KNL-LABEL: test22:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
;
; AVX512BW-LABEL: test22:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
;
; AVX512DQ-LABEL: test22:
; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rdi)
define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
; KNL-LABEL: test23:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
;
; AVX512BW-LABEL: test23:
; AVX512BW: ## BB#0:
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
;
; AVX512DQ-LABEL: test23:
; AVX512DQ: ## BB#0:
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rdi)
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <2 x i1>, <2 x i1>* %a
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512BW-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <4 x i1>, <4 x i1>* %a
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_add:
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_add:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_add:
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_sub:
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_sub:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_sub:
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_mul:
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_mul:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_mul:
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_add:
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_add:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_add:
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_sub:
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_sub:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_sub:
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_mul:
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_mul:
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_mul:
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%b.val = load float, float* %b
%bv0 = insertelement <4 x float> undef, float %b.val, i32 0
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; X32-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<kill>
-; X32-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<kill>
+; X32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; X32-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<kill>
+; X32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<kill>
; X32-NEXT: calll _test_argv32i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm5 # 16-byte Reload
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; WIN64-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<kill>
-; WIN64-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<kill>
+; WIN64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; WIN64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<kill>
+; WIN64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<kill>
; WIN64-NEXT: callq test_argv32i1helper
; WIN64-NEXT: nop
; WIN64-NEXT: addq $32, %rsp
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
-; LINUXOSX64-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<kill>
-; LINUXOSX64-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<kill>
+; LINUXOSX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; LINUXOSX64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<kill>
+; LINUXOSX64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<kill>
; LINUXOSX64-NEXT: callq test_argv32i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<kill>
-; X32-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
+; X32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv16i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; WIN64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<kill>
-; WIN64-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<kill>
+; WIN64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; WIN64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
+; WIN64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv16i1helper
; WIN64-NEXT: nop
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; LINUXOSX64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<kill>
-; LINUXOSX64-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<kill>
+; LINUXOSX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; LINUXOSX64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
+; LINUXOSX64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv16i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; X32-LABEL: caller_retv16i1:
; X32: # BB#0: # %entry
; X32-NEXT: calll _test_retv16i1
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv16i1:
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv16i1
-; WIN64-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv16i1
-; LINUXOSX64-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; LINUXOSX64-NEXT: popq %rcx
; LINUXOSX64-NEXT: retq
entry:
; X32-NEXT: vpmovm2w %k2, %zmm0
; X32-NEXT: vpmovm2w %k1, %zmm1
; X32-NEXT: vpmovm2w %k0, %zmm2
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; X32-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<kill>
-; X32-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
+; X32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv8i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; WIN64-NEXT: vpmovm2w %k2, %zmm0
; WIN64-NEXT: vpmovm2w %k1, %zmm1
; WIN64-NEXT: vpmovm2w %k0, %zmm2
-; WIN64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; WIN64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<kill>
-; WIN64-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<kill>
+; WIN64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; WIN64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
+; WIN64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv8i1helper
; WIN64-NEXT: nop
; LINUXOSX64-NEXT: vpmovm2w %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2w %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
-; LINUXOSX64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<kill>
-; LINUXOSX64-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<kill>
+; LINUXOSX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; LINUXOSX64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
+; LINUXOSX64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv8i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; X32-LABEL: caller_retv8i1:
; X32: # BB#0: # %entry
; X32-NEXT: calll _test_retv8i1
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<def>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<def>
; X32-NEXT: kmovd %eax, %k0
; X32-NEXT: vpmovm2w %k0, %zmm0
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv8i1
-; WIN64-NEXT: # kill: %AL<def> %AL<kill> %EAX<def>
+; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<def>
; WIN64-NEXT: kmovd %eax, %k0
; WIN64-NEXT: vpmovm2w %k0, %zmm0
-; WIN64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; WIN64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv8i1
-; LINUXOSX64-NEXT: # kill: %AL<def> %AL<kill> %EAX<def>
+; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<def>
; LINUXOSX64-NEXT: kmovd %eax, %k0
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm0
-; LINUXOSX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; LINUXOSX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; LINUXOSX64-NEXT: popq %rax
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: retq
; X32-LABEL: test_argReti1:
; X32: # BB#0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti1:
; WIN64: # BB#0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti1:
; LINUXOSX64: # BB#0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; LINUXOSX64-NEXT: retq
%add = add i1 %a, 1
ret i1 %add
; X32-LABEL: test_argReti8:
; X32: # BB#0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti8:
; WIN64: # BB#0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti8:
; LINUXOSX64: # BB#0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; LINUXOSX64-NEXT: retq
%add = add i8 %a, 1
ret i8 %add
; X32-LABEL: test_argReti16:
; X32: # BB#0:
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti16:
; WIN64: # BB#0:
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti16:
; LINUXOSX64: # BB#0:
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; LINUXOSX64-NEXT: retq
%add = add i16 %a, 1
ret i16 %add
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti16
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argReti16
-; WIN64-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argReti16
-; LINUXOSX64-NEXT: # kill: %AX<def> %AX<kill> %EAX<def>
+; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: retq
%b = add i16 %a, 1
ret <16 x i32> %c
}
-; Test regcall when running multiple input parameters - callee saved XMMs
+; Test regcall when running multiple input parameters - callee saved xmms
define x86_regcallcc <32 x float> @testf32_inp(<32 x float> %a, <32 x float> %b, <32 x float> %c) nounwind {
; X32-LABEL: testf32_inp:
; X32: # BB#0:
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_16i8_to_16i1:
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
; GENERIC-NEXT: vpslld $31, %zmm0, %zmm0
; GENERIC-NEXT: vptestmd %zmm0, %zmm0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i32>%a to <16 x i1>
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_8i16_to_8i1:
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
; GENERIC-NEXT: kmovw %edi, %k1
; GENERIC-NEXT: korw %k1, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_i32_to_i1:
; SKX-NEXT: kmovw %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: knotw %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: knotb %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
; GENERIC-NEXT: kxorw %k1, %k0, %k0
; GENERIC-NEXT: korw %k0, %k2, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mand16_mem:
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
; GENERIC-NEXT: kmovd %edi, %k0
; GENERIC-NEXT: kshiftrw $8, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuf_test1:
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kshiftrw $8, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; GENERIC-NEXT: kshiftrw $15, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
; GENERIC-NEXT: andl $1, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-NEXT: kshiftrw $15, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andl $1, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
; GENERIC-NEXT: kshiftrw $15, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
; GENERIC-NEXT: andb $1, %al # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-NEXT: kshiftrw $15, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andb $1, %al # sched: [1:0.25]
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorw %k1, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_add:
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorw %k1, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_sub:
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kandw %k1, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_mul:
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorb %k1, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_add:
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kxorb %k1, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_sub:
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
; GENERIC-NEXT: kmovd %esi, %k1
; GENERIC-NEXT: kandb %k1, %k0, %k0
; GENERIC-NEXT: kmovd %k0, %eax
-; GENERIC-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_mul:
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: korw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: select05_mem:
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: korw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: kandw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: select06_mem:
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: kandw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
; X86-NEXT: kandw %k0, %k1, %k0
; X86-NEXT: korw %k2, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: select07:
; X64-NEXT: kandw %k0, %k1, %k0
; X64-NEXT: korw %k2, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%mask = bitcast i8 %m to <8 x i1>
%a = bitcast i8 %a.0 to <8 x i1>
; KNL-NEXT: vpsrlq $1, %ymm0, %ymm0
; KNL-NEXT: vpsllq $12, %ymm0, %ymm0
; KNL-NEXT: vpsraq $12, %zmm0, %zmm0
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: shift_4_i64:
define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
; KNL-LABEL: variable_sra3:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra3:
; KNL-NEXT: vpmovsxwd %xmm0, %ymm0
; KNL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra4:
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm0
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7]
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6],ymm1[7]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 12>
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <13,0,0,6,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = <3,3,15,9,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> <i32 0, i32 6>
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[3],ymm1[2]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; CHECK-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qb_256:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-LABEL: trunc_qb_256_mem:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovd %xmm0, (%rdi)
define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qw_256:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
; KNL-LABEL: trunc_qw_256_mem:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; KNL-NEXT: vmovq %xmm0, (%rdi)
define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qd_256:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
; KNL-LABEL: trunc_qd_256_mem:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_db_256:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
; KNL-LABEL: trunc_db_256_mem:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovq %xmm0, (%rdi)
define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_dw_256:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
; KNL-LABEL: trunc_dw_256_mem:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask:
; ALL: # BB#0:
-; ALL-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
+; ALL-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
; ALL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; ALL-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz:
; ALL: # BB#0:
-; ALL-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; ALL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask_load:
; ALL: # BB#0:
-; ALL-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; ALL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz_load:
; ALL: # BB#0:
-; ALL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; ALL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; ALL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-LABEL: test9:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-LABEL: test10:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test10:
; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckbw %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <16 x i64> %a, %b
define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind {
; KNL-LABEL: test35:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vmovups (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test35:
define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) nounwind {
; KNL-LABEL: test41:
; KNL: ## BB#0:
-; KNL-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vbroadcastss (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; KNL-NEXT: retq
;
; SKX-LABEL: test41:
; CHECK-NEXT: vpextrb $0, %xmm0, %eax
; CHECK-NEXT: vpextrb $4, %xmm0, %edx
; CHECK-NEXT: vpextrb $8, %xmm0, %ecx
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
-; CHECK-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
-; CHECK-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
+; CHECK-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; CHECK-NEXT: retq
%cmp.i = icmp slt <3 x i8> %x, %a
%res = sext <3 x i1> %cmp.i to <3 x i8>
; AVX512F-32-NEXT: vpblendvb %ymm0, %ymm3, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
-; AVX512F-32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill> %EAX<def>
+; AVX512F-32-NEXT: # kill: %al<def> %al<kill> %eax<kill> %eax<def>
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm4, %ymm0, %ymm0
; AVX512F-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: # kill: %BL<def> %BL<kill> %EBX<kill> %EBX<def>
+; AVX512F-32-NEXT: # kill: %bl<def> %bl<kill> %ebx<kill> %ebx<def>
; AVX512F-32-NEXT: shrb $7, %bl
; AVX512F-32-NEXT: kmovd %ebx, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; AVX512F-32-NEXT: vpblendvb %ymm0, %ymm3, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
-; AVX512F-32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill> %EAX<def>
+; AVX512F-32-NEXT: # kill: %al<def> %al<kill> %eax<kill> %eax<def>
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm4, %ymm0, %ymm0
; AVX512F-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: # kill: %BL<def> %BL<kill> %EBX<kill> %EBX<def>
+; AVX512F-32-NEXT: # kill: %bl<def> %bl<kill> %ebx<kill> %ebx<def>
; AVX512F-32-NEXT: shrb $7, %bl
; AVX512F-32-NEXT: kmovd %ebx, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
; CHECK-NEXT: kshiftlq $48, %k0, %k0
; CHECK-NEXT: kshiftrq $48, %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
ret <16 x i8> %res
; CHECK-NEXT: kshiftlq $32, %k0, %k0
; CHECK-NEXT: kshiftrq $32, %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> zeroinitializer)
ret <32 x i8> %res
; CHECK-NEXT: kshiftld $24, %k0, %k0
; CHECK-NEXT: kshiftrd $24, %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
ret <8 x i16> %res
; CHECK-NEXT: kshiftld $16, %k0, %k0
; CHECK-NEXT: kshiftrd $16, %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
ret <16 x i16> %res
define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftlq $48, %k0, %k0
define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; CHECK-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftlq $32, %k0, %k0
define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %zmm0, %k0
; CHECK-NEXT: kshiftld $24, %k0, %k0
define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; CHECK-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftld $16, %k0, %k0
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovb2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8> %x0)
ret i16 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovw2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %x0)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovw2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtw2mask.256(<16 x i16> %x0)
ret i16 %res
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_512:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vfpclasspd $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 4, i8 -1)
; CHECK-NEXT: vfpclassps $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 %x1)
%res1 = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 -1)
; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
; CHECK: ## BB#0:
; CHECK-NEXT: vfpclasssd $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%x0 = load <2 x double>, <2 x double>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
; CHECK: ## BB#0:
; CHECK-NEXT: vfpclassss $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%x0 = load <4 x float>, <4 x float>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovd2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.cvtd2mask.512(<16 x i32> %x0)
ret i16 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovq2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.cvtq2mask.512(<8 x i64> %x0)
ret i8 %res
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
; CHECK-NEXT: kxorb %k1, %k0, %k0
; CHECK-NEXT: korb %k0, %k2, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%ma = load <8 x i1>, <8 x i1>* %x
%mb = load <8 x i1>, <8 x i1>* %y
define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xc8,0x01]
define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xc8,0x01]
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vmovq (%rsi), %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x16]
; CHECK-NEXT: ## xmm2 = mem[0],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vfpclassps $4, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 4, i8 -1)
; CHECK-NEXT: vfpclassps $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 4, i8 -1)
; CHECK-NEXT: vfpclasspd $2, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x66,0xc0,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 2, i8 -1)
; CHECK-NEXT: vfpclasspd $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 4, i8 -1)
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovd2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %x0)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovd2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.256(<8 x i32> %x0)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovq2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.128(<2 x i64> %x0)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpmovq2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.256(<4 x i64> %x0)
ret i8 %res
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0, <8 x float> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xc8,0x01]
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %a, <8 x float> %b, i32 2, i8 -1)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpleps %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %a, <4 x float> %b, i32 2, i8 -1)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %a, <4 x double> %b, i32 2, i8 -1)
ret i8 %res
; CHECK: ## BB#0:
; CHECK-NEXT: vcmplepd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %a, <2 x double> %b, i32 2, i8 -1)
ret i8 %res
;
; NoVLX-LABEL: test256_3:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k1
; NoVLX-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%mask = icmp sge <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x1, <8 x i32> %y
;
; NoVLX-LABEL: test256_5:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
;
; NoVLX-LABEL: test256_5b:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %y, %x
;
; NoVLX-LABEL: test256_6:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
;
; NoVLX-LABEL: test256_6b:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp slt <8 x i32> %y, %x
;
; NoVLX-LABEL: test256_7:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
;
; NoVLX-LABEL: test256_7b:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sge <8 x i32> %y, %x
;
; NoVLX-LABEL: test256_8:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
;
; NoVLX-LABEL: test256_8b:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp uge <8 x i32> %y, %x
;
; NoVLX-LABEL: test256_9:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM3<def> %YMM3<kill> %ZMM3<def>
-; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
+; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%mask1 = icmp eq <8 x i32> %x1, %y1
%mask0 = icmp eq <8 x i32> %x, %y
;
; NoVLX-LABEL: test256_12:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm3
; NoVLX-NEXT: vpcmpleud %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
;
; NoVLX-LABEL: test256_14:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
;
; NoVLX-LABEL: test256_15:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm3
; NoVLX-NEXT: vpcmpgtd %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
;
; NoVLX-LABEL: test256_17:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %x, %y
;
; NoVLX-LABEL: test256_18:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpneqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %y, %x
;
; NoVLX-LABEL: test256_19:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %x, %y
;
; NoVLX-LABEL: test256_20:
; NoVLX: # BB#0:
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %y, %x
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b:
; NoVLX: # BB#0: # %entry
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX: # BB#0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX: # BB#0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
-; VLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
-; NoVLX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi64_mask:
; I386: # BB#0: # %entry
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi32_mask:
; I386: # BB#0: # %entry
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386: # BB#0: # %entry
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386: # BB#0: # %entry
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi64_mask:
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi32_mask:
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi64_mask:
; I386: # BB#0: # %entry
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi32_mask:
; I386: # BB#0: # %entry
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386: # BB#0: # %entry
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; X86_64: # BB#0: # %entry
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386: # BB#0: # %entry
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi64_mask:
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi32_mask:
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
; USE_BASE_64: movq %rsp, %rbx
; USE_BASE_32: movl %esp, %ebx
;
; Make sure the base pointer is saved before the RBX argument for
; cmpxchg16b is set.
;
; Because of how the test is written, we spill SAVE_RBX.
; However, it would have been perfectly fine to just keep it in register.
; USE_BASE: movq %rbx, [[SAVE_RBX_SLOT:[0-9]*\(%[er]bx\)]]
;
-; SAVE_RBX must be in register before we clobber rbx.
+; SAVE_rbx must be in register before we clobber rbx.
; It is fine to use any register but rbx and the ones defined and use
; by cmpxchg. Since such regex would be complicated to write, just stick
; to the numbered registers. The bottom line is: if this test case fails
; because of that regex, this is likely just the regex being too conservative.
; USE_BASE: movq [[SAVE_RBX_SLOT]], [[SAVE_RBX:%r[0-9]+]]
;
; USE_BASE: movq {{[^ ]+}}, %rbx
; USE_BASE-NEXT: cmpxchg16b
-; USE_BASE-NEXT: movq [[SAVE_RBX]], %rbx
+; USE_BASE-NEXT: movq [[SAVE_rbx]], %rbx
;
; DONT_USE_BASE-NOT: movq %rsp, %rbx
; DONT_USE_BASE-NOT: movl %esp, %ebx
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i16> %a, %b
%x1 = icmp sgt <8 x i16> %c, %d
; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
; AVX12-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
; SSE2-SSSE3-NEXT: cmpltps %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andps %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskps %xmm3, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
; AVX12-NEXT: vcmpltps %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
; AVX12-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtb %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i8> %a, %b
%x1 = icmp sgt <16 x i8> %c, %d
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
; AVX12-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
; SSE2-SSSE3-NEXT: cmpltpd %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskpd %xmm3, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
; AVX12-NEXT: vcmpltpd %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandpd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i8> %a, %b
%x1 = icmp sgt <8 x i8> %c, %d
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE2-SSSE3-NEXT: andps %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskps %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE2-SSSE3-NEXT: andps %xmm2, %xmm6
; SSE2-SSSE3-NEXT: movmskps %xmm6, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
; AVX12-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; SSE2-SSSE3-NEXT: packsswb %xmm5, %xmm4
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4
; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSE2-SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i16> %a, %b
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm4
; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512F-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i32> %a, %b
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm6
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm6
; SSE2-SSSE3-NEXT: pmovmskb %xmm6, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512F-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512BW-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <8 x float> %a, %b
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: packsswb %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i64> %a, %b
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: packsswb %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX12-LABEL: v8f64:
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <8 x double> %a, %b
; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i32> %a, %b
; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX12-LABEL: v16f32:
; AVX12-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <16 x float> %a, %b
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
;
; AVX1-LABEL: ext_i2_2i64:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
;
; AVX2-LABEL: ext_i2_2i64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i2 %a0 to <2 x i1>
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
%2 = sext <4 x i1> %1 to <4 x i64>
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
;
; AVX1-LABEL: ext_i2_2i64:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
;
; AVX2-LABEL: ext_i2_2i64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512VLBW-NEXT: kmovd %eax, %k1
; AVX512VLBW-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512VLBW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VLBW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%1 = bitcast i2 %a0 to <2 x i1>
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512VLBW-NEXT: kmovd %eax, %k1
; AVX512VLBW-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; AVX512VLBW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VLBW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i4_4i64:
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512VLBW-NEXT: kmovd %eax, %k1
; AVX512VLBW-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512VLBW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VLBW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VLBW-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
%2 = zext <4 x i1> %1 to <4 x i64>
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
; SSE2-SSSE3: # BB#0:
-; SSE2-SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
;
; AVX1-LABEL: bitcast_i2_2i1:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
;
; AVX2-LABEL: bitcast_i2_2i1:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i2 %a0 to <2 x i1>
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fucompp
; X86-NEXT: fnstsw %ax
-; X86-NEXT: # kill: %AH<def> %AH<kill> %AX<kill>
+; X86-NEXT: # kill: %ah<def> %ah<kill> %ax<kill>
; X86-NEXT: sahf
; X86-NEXT: setp %al
; X86-NEXT: retl
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i16> %a, %b
%res = bitcast <8 x i1> %x to i8
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
; AVX12: # BB#0:
; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i8> %a, %b
%res = bitcast <16 x i1> %x to i16
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
; AVX12: # BB#0:
; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
; SSE2-SSSE3: # BB#0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
; AVX12: # BB#0:
; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
; SSE2-SSSE3-NEXT: psrad $24, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
; AVX12-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
; SSE2-SSSE3-NEXT: psrad $16, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
; AVX12-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i8> %a, %b
%res = bitcast <8 x i1> %x to i8
; SSE2-SSSE3-NEXT: pcmpgtw %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i16> %a, %b
; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i32> %a, %b
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
; AVX12: # BB#0:
; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskps %ymm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <8 x float> %a, %b
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskpd %ymm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
; AVX12: # BB#0:
; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskpd %ymm0, %eax
-; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i32> %a, %b
; SSE-NEXT: packssdw %xmm5, %xmm4
; SSE-NEXT: packsswb %xmm6, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: v16f32:
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <16 x float> %a, %b
; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i64> %a, %b
; SSE-NEXT: packssdw %xmm6, %xmm4
; SSE-NEXT: packsswb %xmm0, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: v8f64:
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <8 x double> %a, %b
; X86-NEXT: andl $43690, %ecx # imm = 0xAAAA
; X86-NEXT: shrl %ecx
; X86-NEXT: leal (%ecx,%edx,2), %edx
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; X86-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_v2i16:
;
; X64-LABEL: test_bitreverse_i32:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
;
; X64-LABEL: test_bitreverse_i24:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; X86-NEXT: andl $43690, %eax # imm = 0xAAAA
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%ecx,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i16:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: rolw $8, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $3855, %eax # imm = 0xF0F
; X64-NEXT: andl $43690, %eax # imm = 0xAAAA
; X64-NEXT: shrl %eax
; X64-NEXT: leal (%rax,%rcx,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
; GENERIC-NEXT: notl %edi # sched: [1:0.33]
; GENERIC-NEXT: andw (%rdx), %di # sched: [6:0.50]
; GENERIC-NEXT: addl %edi, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andn_i16:
; HASWELL-NEXT: notl %edi # sched: [1:0.25]
; HASWELL-NEXT: andw (%rdx), %di # sched: [1:0.50]
; HASWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andn_i16:
; BROADWELL-NEXT: notl %edi # sched: [1:0.25]
; BROADWELL-NEXT: andw (%rdx), %di # sched: [6:0.50]
; BROADWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andn_i16:
; SKYLAKE-NEXT: notl %edi # sched: [1:0.25]
; SKYLAKE-NEXT: andw (%rdx), %di # sched: [6:0.50]
; SKYLAKE-NEXT: addl %edi, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andn_i16:
; BTVER2-NEXT: notl %edi # sched: [1:0.50]
; BTVER2-NEXT: andw (%rdx), %di # sched: [4:1.00]
; BTVER2-NEXT: addl %edi, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andn_i16:
; ZNVER1-NEXT: notl %edi # sched: [1:0.25]
; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a2
%2 = xor i16 %a0, -1
; GENERIC-NEXT: tzcntw (%rsi), %cx
; GENERIC-NEXT: tzcntw %di, %ax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cttz_i16:
; HASWELL-NEXT: tzcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cttz_i16:
; BROADWELL-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cttz_i16:
; SKYLAKE-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cttz_i16:
; BTVER2-NEXT: tzcntw (%rsi), %cx
; BTVER2-NEXT: tzcntw %di, %ax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cttz_i16:
; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $256, %eax # imm = 0x100
; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 false )
ret i8 %tmp
; CHECK: # BB#0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
%tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 true )
ret i8 %tmp
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movl $-1, %eax
-; BMI1-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; BMI1-NEXT: shrl %cl, %eax
; BMI1-NEXT: andl %edi, %eax
; BMI1-NEXT: retq
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shll %cl, %edi
-; BMI1-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; BMI1-NEXT: shrl %cl, %edi
; BMI1-NEXT: movl %edi, %eax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64b:
; BMI2: # BB#0: # %entry
-; BMI2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; BMI1-NEXT: shrq %cl, %rax
; BMI1-NEXT: andq %rdi, %rax
; BMI1-NEXT: retq
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; BMI1-NEXT: shrq %cl, %rax
; BMI1-NEXT: andq %rdi, %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64d:
; BMI2: # BB#0: # %entry
-; BMI2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; BMI1-NEXT: shrq %cl, %rdi
; BMI1-NEXT: movq %rdi, %rax
; BMI1-NEXT: retq
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; BMI1-NEXT: shrq %cl, %rdi
; BMI1-NEXT: movq %rdi, %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64f:
; BMI2: # BB#0: # %entry
-; BMI2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
; CHECK-NEXT: rdrandw %cx
; CHECK-NEXT: cmovbw %di, %ax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdrand.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
; CHECK-NEXT: rdseedw %cx
; CHECK-NEXT: cmovbw %di, %ax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdseed.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
;
; X64-LABEL: PR15215_good:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; X64-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; X64-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: andl $1, %edi
; X64-NEXT: andl $1, %esi
; X64-NEXT: andl $1, %edx
define <8 x i64> @test_mm512_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm512_epi64:
; AVX512CD: # BB#0: # %entry
-; AVX512CD-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512CD-NEXT: vpbroadcastmb2q %k0, %zmm0
; AVX512CD-NEXT: retq
define <4 x i64> @test_mm256_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm256_epi64:
; AVX512CD: # BB#0: # %entry
-; AVX512CD-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512CD-NEXT: kmovw %k0, %eax
; AVX512CD-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retl
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB2_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %ah, %edx # NOREX
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: jmp .LBB3_6
; CHECK-NEXT: .LBB3_1:
; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; CHECK-NEXT: divb %bl
; CHECK-NEXT: movzbl %al, %esi
; CHECK-NEXT: testl $-256, %edi
; CHECK-NEXT: jne .LBB3_5
; CHECK-NEXT: .LBB3_4:
; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; CHECK-NEXT: divb %bl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: .LBB3_6:
; CHECK-NEXT: .LBB8_1:
; CHECK-NEXT: movb $4, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB9_1:
; CHECK-NEXT: movb $4, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<def>
; CHECK-NEXT: retq
%result = sdiv i64 %a, %b
ret i64 %result
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
+; CHECK-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
%result = srem i64 %a, %b
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<def>
+; CHECK-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<def>
; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%resultdiv = sdiv i64 %a, %b
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8:
; X32-CLZ: # BB#0:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8:
; X64-CLZ: # BB#0:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-CLZ-NEXT: retq
%tmp = call i8 @llvm.cttz.i8( i8 %x, i1 true )
ret i8 %tmp
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-CLZ-NEXT: retq
%tmp2 = call i8 @llvm.ctlz.i8( i8 %x, i1 true )
ret i8 %tmp2
; X32: # BB#0:
; X32-NEXT: bsrw {{[0-9]+}}(%esp), %ax
; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16:
; X64: # BB#0:
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16:
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: .LBB8_1:
; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_zero_test:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
; X64-NEXT: .LBB8_1:
; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_zero_test:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_zero_test:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-CLZ-NEXT: retq
%tmp1 = call i8 @llvm.ctlz.i8(i8 %n, i1 false)
ret i8 %tmp1
; X32-NEXT: # BB#2: # %cond.false
; X32-NEXT: bsrw %ax, %ax
; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: .LBB9_1:
; X32-NEXT: movw $16, %ax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16_zero_test:
; X64-NEXT: # BB#2: # %cond.false
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
; X64-NEXT: .LBB9_1:
; X64-NEXT: movw $16, %ax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16_zero_test:
; X32-NEXT: # BB#2: # %cond.false
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: .LBB12_1
; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_zero_test:
; X64-NEXT: # BB#2: # %cond.false
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
; X64-NEXT: .LBB12_1:
; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_zero_test:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_zero_test:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-CLZ-NEXT: retq
%tmp1 = call i8 @llvm.cttz.i8(i8 %n, i1 false)
ret i8 %tmp1
; X32-NEXT: orb $2, %al
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_knownbits:
; X64-NEXT: orb $2, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_knownbits:
; X32-CLZ-NEXT: orb $2, %al
; X32-CLZ-NEXT: movzbl %al, %eax
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_knownbits:
; X64-CLZ-NEXT: orb $2, %dil
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-CLZ-NEXT: retq
%x2 = or i8 %x, 2
%tmp = call i8 @llvm.cttz.i8(i8 %x2, i1 true )
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_knownbits:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_knownbits:
; X32-CLZ-NEXT: movzbl %al, %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_knownbits:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-CLZ-NEXT: retq
%x2 = or i8 %x, 64
define void @test6(i32 %a, i32 %x, i32* %y.ptr, i64* %z.ptr) {
; CHECK-LABEL: test6:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovnsl (%rdx), %esi
; CHECK-NEXT: movq %rsi, (%rcx)
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB0_2:
; CMOV-NEXT: movzbl %al, %eax
-; CMOV-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_8_to_16:
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB0_2:
; NO_CMOV-NEXT: movzbl %al, %eax
-; NO_CMOV-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NO_CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NO_CMOV-NEXT: retl
%t0 = select i1 %c, i8 117, i8 -19
%ret = zext i8 %t0 to i16
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB6_2:
; CMOV-NEXT: movsbl %al, %eax
-; CMOV-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_8_to_16:
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB6_2:
; NO_CMOV-NEXT: movsbl %al, %eax
-; NO_CMOV-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; NO_CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; NO_CMOV-NEXT: retl
%t0 = select i1 %c, i8 117, i8 -19
%ret = sext i8 %t0 to i16
; CHECK-NEXT: shrb $7, %al
; CHECK-NEXT: movzbl %al, %ecx
; CHECK-NEXT: xorl $1, %ecx
-; CHECK-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; CHECK-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; CHECK-NEXT: sarl %cl, %edx
; CHECK-NEXT: movb {{.*}}(%rip), %al
; CHECK-NEXT: testb %al, %al
; Considering merging %vreg7 with %vreg10
; RHS = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
; LHS = %vreg10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
-; Remat: %vreg10<def> = MOV64r0 %vreg10<imp-def>, %EFLAGS<imp-def,dead>, %vreg10<imp-def>; GR64:%vreg10
+; Remat: %vreg10<def> = MOV64r0 %vreg10<imp-def>, %eflags<imp-def,dead>, %vreg10<imp-def>; GR64:%vreg10
; Shrink: %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d
; live-in at 240L
; live-in at 416L
;
; AVX512F-LABEL: combine_v4i64_abs_abs:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpabsq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: combine_v4i64_abs_abs:
;
; KNL-LABEL: test4:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: movw $7, %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; KNL-NEXT: retq
%res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
ret <4 x float>%res
;
; KNL-LABEL: test5:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: movb $2, %al
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; KNL-NEXT: retq
%res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
ret <2 x i64>%res
;
; KNL-LABEL: test7:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
;
; KNL-LABEL: test10:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL-NEXT: vpmovsxdq %xmm1, %ymm1
;
; KNL-LABEL: test11:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL-NEXT: vpsraq $63, %zmm1, %zmm1
; KNL-NEXT: vmovdqa %xmm1, %xmm1
;
; KNL-LABEL: test12:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL-NEXT: vmovdqa %xmm1, %xmm1
;
; KNL-LABEL: test13:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; KNL-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
%res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
;
; KNL-LABEL: test14:
; KNL: # BB#0:
-; KNL-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
; CHECK-NEXT: divl %esi
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: .LBB0_2: # %cond.end.i
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
br i1 %C, label %cond.end.i, label %cond.false.i
; CHECK: # BB#0:
; CHECK-NEXT: andl $127, %edi
; CHECK-NEXT: popcntw %di, %ax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %AX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %ax<kill>
; CHECK-NEXT: retq
%x2 = and i8 %x, 127
%count = tail call i8 @llvm.ctpop.i8(i8 %x2)
;
; X64-LABEL: t:
; X64: ## BB#0: ## %entry
-; X64-NEXT: ## kill: %EDX<def> %EDX<kill> %RDX<def>
-; X64-NEXT: ## kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: ## kill: %edx<def> %edx<kill> %rdx<def>
+; X64-NEXT: ## kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: imull %ecx, %esi
; X64-NEXT: leal (%rsi,%rdx), %eax
; X64-NEXT: cltq
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $63551, %eax, %eax # imm = 0xF83F
; X32-NEXT: shrl $21, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # BB#0: # %entry
; X64-NEXT: imull $63551, %edi, %eax # imm = 0xF83F
; X64-NEXT: shrl $21, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
entry:
%div = udiv i16 %x, 33
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $43691, %eax, %eax # imm = 0xAAAB
; X32-NEXT: shrl $17, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64: # BB#0: # %entry
; X64-NEXT: imull $43691, %esi, %eax # imm = 0xAAAB
; X64-NEXT: shrl $17, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
entry:
%div = udiv i16 %c, 3
; X32-NEXT: imull $171, %eax, %eax
; X32-NEXT: shrl $9, %eax
; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test3:
; X64-NEXT: imull $171, %esi, %eax
; X64-NEXT: shrl $9, %eax
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
entry:
%div = udiv i8 %c, 3
; X32-NEXT: shrl $31, %ecx
; X32-NEXT: shrl $16, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test4:
; X64-NEXT: shrl $31, %ecx
; X64-NEXT: shrl $16, %eax
; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
entry:
%div = sdiv i16 %x, 33 ; <i32> [#uses=1]
; X64-NEXT: movl %edi, %eax
; X64-NEXT: imulq $365384439, %rax, %rax # imm = 0x15C752F7
; X64-NEXT: shrq $59, %rax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-NEXT: retq
%tmp1 = udiv i32 %A, 1577682821 ; <i32> [#uses=1]
ret i32 %tmp1
; X32-NEXT: shrl $31, %ecx
; X32-NEXT: sarl $18, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test6:
; X64-NEXT: shrl $31, %ecx
; X64-NEXT: sarl $18, %eax
; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
entry:
%div = sdiv i16 %x, 10
;
; X64-LABEL: test7:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shrl $2, %edi
; X64-NEXT: imulq $613566757, %rdi, %rax # imm = 0x24924925
; X64-NEXT: shrq $32, %rax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-NEXT: retq
%div = udiv i32 %x, 28
ret i32 %div
; X32-NEXT: imull $211, %eax, %eax
; X32-NEXT: shrl $13, %eax
; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test8:
; X64-NEXT: imull $211, %eax, %eax
; X64-NEXT: shrl $13, %eax
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%div = udiv i8 %x, 78
ret i8 %div
; X32-NEXT: imull $71, %eax, %eax
; X32-NEXT: shrl $11, %eax
; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test9:
; X64-NEXT: imull $71, %eax, %eax
; X64-NEXT: shrl $11, %eax
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%div = udiv i8 %x, 116
ret i8 %div
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ebx # NOREX
; X32-NEXT: movb %al, (%edx)
; X64-LABEL: ui8:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %esi # NOREX
; X64-NEXT: movb %al, (%rdx)
; X32-LABEL: test_udivrem_zext_ah:
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ecx # NOREX
; X32-NEXT: movb %al, z
; X64-LABEL: test_udivrem_zext_ah:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %ecx # NOREX
; X64-NEXT: movb %al, {{.*}}(%rip)
; X32-LABEL: test_urem_zext_ah:
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %eax # NOREX
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test_urem_zext_ah:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%1 = urem i8 %x, %y
ret i8 %1
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb %cl
; X32-NEXT: movzbl %ah, %eax # NOREX
; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test_urem_noext_ah:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%1 = urem i8 %x, %y
%2 = add i8 %1, %y
; X32-LABEL: test_urem_zext64_ah:
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %eax # NOREX
; X32-NEXT: xorl %edx, %edx
; X64-LABEL: test_urem_zext64_ah:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
; X64-NEXT: retq
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
; X32-NEXT: movsbl %ah, %eax # NOREX
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test_srem_sext_ah:
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
; X64-NEXT: movsbl %ah, %eax # NOREX
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%1 = srem i8 %x, %y
ret i8 %1
; X32-NEXT: idivb %cl
; X32-NEXT: movsbl %ah, %eax # NOREX
; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test_srem_noext_ah:
; X64-NEXT: idivb %sil
; X64-NEXT: movsbl %ah, %eax # NOREX
; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%1 = srem i8 %x, %y
%2 = add i8 %1, %y
; X32-LABEL: pr25754:
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ecx # NOREX
; X32-NEXT: movzbl %al, %eax
; X64-LABEL: pr25754:
; X64: # BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %ecx # NOREX
; X64-NEXT: movzbl %al, %eax
; Save AL.
; CHECK: PUSH32r killed %eax
- ; Copy EDI into EFLAGS
+ ; Copy %edi into %eflags
; CHECK-NEXT: %eax = MOV32rr %edi
; CHECK-NEXT: %al = ADD8ri %al, 127, implicit-def %eflags
; CHECK-NEXT: SAHF implicit-def %eflags, implicit %ah
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_1:
; SSE41: # BB#0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_1:
; AVX: # BB#0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 1
ret i8 %b
; SSE2: # BB#0:
; SSE2-NEXT: pextrw $5, %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_11:
; SSE41: # BB#0:
; SSE41-NEXT: pextrb $11, %xmm0, %eax
-; SSE41-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_11:
; AVX: # BB#0:
; AVX-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 11
ret i8 %b
; SSE2-LABEL: extractelement_v16i8_14:
; SSE2: # BB#0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_14:
; SSE41: # BB#0:
; SSE41-NEXT: pextrb $14, %xmm0, %eax
-; SSE41-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_14:
; AVX: # BB#0:
; AVX-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 14
ret i8 %b
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_1:
; SSE41: # BB#0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v32i8_1:
; AVX: # BB#0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%b = extractelement <32 x i8> %a, i256 1
; SSE2: # BB#0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_17:
; SSE41: # BB#0:
; SSE41-NEXT: pextrb $1, %xmm1, %eax
-; SSE41-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE41-NEXT: retq
;
; AVX1-LABEL: extractelement_v32i8_17:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrb $1, %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%b = extractelement <32 x i8> %a, i256 17
; SSE-LABEL: extractelement_v8i16_0:
; SSE: # BB#0:
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_0:
; AVX: # BB#0:
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: retq
%b = extractelement <8 x i16> %a, i256 0
ret i16 %b
; SSE-LABEL: extractelement_v8i16_3:
; SSE: # BB#0:
; SSE-NEXT: pextrw $3, %xmm0, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_3:
; AVX: # BB#0:
; AVX-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: retq
%b = extractelement <8 x i16> %a, i256 3
ret i16 %b
; SSE-LABEL: extractelement_v16i16_0:
; SSE: # BB#0:
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v16i16_0:
; AVX: # BB#0:
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%b = extractelement <16 x i16> %a, i256 0
; SSE-LABEL: extractelement_v16i16_13:
; SSE: # BB#0:
; SSE-NEXT: pextrw $5, %xmm1, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: extractelement_v16i16_13:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%b = extractelement <16 x i16> %a, i256 13
; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X32-NEXT: vmovd %xmm0, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test_cvtss_sh:
; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X64-NEXT: vmovd %xmm0, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%ins0 = insertelement <4 x float> undef, float %a0, i32 0
%ins1 = insertelement <4 x float> %ins0, float 0.000000e+00, i32 1
; SDAG-NEXT: cmpeqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq:
; SDAG-NEXT: cmpneqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une:
; SDAG-NEXT: cmpeqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq3:
; SDAG-NEXT: cmpneqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une3:
; AVX1-LABEL: test_load_nt8xfloat:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-LABEL: test_load_nt4xdouble:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-LABEL: test_load_nt32xi8:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-LABEL: test_load_nt16xi16:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-LABEL: test_load_nt8xi32:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-LABEL: test_load_nt4xi64:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-LABEL: test_load_nt16xfloat:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-LABEL: test_load_nt8xdouble:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-LABEL: test_load_nt64xi8:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-LABEL: test_load_nt32xi16:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-LABEL: test_load_nt16xi32:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-LABEL: test_load_nt8xi64:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vmovntdqa (%rdi), %xmm0
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: vmovntdqa 16(%rdi), %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovntdqa 32(%rdi), %xmm2
-; AVX1-NEXT: # implicit-def: %YMM1
+; AVX1-NEXT: # implicit-def: %ymm1
; AVX1-NEXT: vmovaps %xmm2, %xmm1
; AVX1-NEXT: vmovntdqa 48(%rdi), %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
; X32-NEXT: movsbl %al, %eax
-; X32-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%z = trunc i16 %x to i1
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64: ## BB#0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%z = trunc i16 %x to i1
; X32-LABEL: test9:
; X32: ## BB#0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test9:
; X64: ## BB#0:
; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%u = sext i8 %x to i16
; X32-LABEL: test12:
; X32: ## BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test12:
; X64: ## BB#0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%u = zext i8 %x to i16
; CHECK-LABEL: shl_i16:
; CHECK: ## BB#0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %CL<def> %CX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
; CHECK-NEXT: shlw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK-LABEL: shl_i32:
; CHECK: ## BB#0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %CL<def> %ECX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
; CHECK-NEXT: shll %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK-LABEL: shl_i64:
; CHECK: ## BB#0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: %CL<def> %RCX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
; CHECK-NEXT: shlq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
; CHECK-LABEL: lshr_i16:
; CHECK: ## BB#0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %CL<def> %CX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
; CHECK-NEXT: shrw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK-LABEL: lshr_i32:
; CHECK: ## BB#0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %CL<def> %ECX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
; CHECK-NEXT: shrl %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK-LABEL: lshr_i64:
; CHECK: ## BB#0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: %CL<def> %RCX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
; CHECK-NEXT: shrq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
; CHECK-LABEL: ashr_i16:
; CHECK: ## BB#0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %CL<def> %CX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
; CHECK-NEXT: sarw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK-LABEL: ashr_i32:
; CHECK: ## BB#0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %CL<def> %ECX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
; CHECK-NEXT: sarl %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
; CHECK-LABEL: ashr_i64:
; CHECK: ## BB#0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: %CL<def> %RCX<kill>
+; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
; CHECK-NEXT: sarq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
define i16 @shl_imm1_i16(i16 %a) {
; CHECK-LABEL: shl_imm1_i16:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal (,%rdi,2), %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%c = shl i16 %a, 1
ret i16 %c
define i32 @shl_imm1_i32(i32 %a) {
; CHECK-LABEL: shl_imm1_i32:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal (,%rdi,2), %eax
; CHECK-NEXT: retq
%c = shl i32 %a, 1
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl $8, %eax
; X64-NEXT: addb %dil, %al
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-LABEL: test_movb_hreg:
; CHECK: long
; CHECK: fild
-; CHECK-NOT: ESP
+; CHECK-NOT: esp
; CHECK-NOT: esp
; CHECK: {{$}}
; CHECK: ret
; Test the GHC call convention works (x86-32)
-@base = external global i32 ; assigned to register: EBX
-@sp = external global i32 ; assigned to register: EBP
-@hp = external global i32 ; assigned to register: EDI
-@r1 = external global i32 ; assigned to register: ESI
+@base = external global i32 ; assigned to register: ebx
+@sp = external global i32 ; assigned to register: ebp
+@hp = external global i32 ; assigned to register: edi
+@r1 = external global i32 ; assigned to register: esi
define void @zap(i32 %a, i32 %b) nounwind {
entry:
; Check the GHC call convention works (x86-64)
@base = external global i64 ; assigned to register: R13
-@sp = external global i64 ; assigned to register: RBP
+@sp = external global i64 ; assigned to register: rbp
@hp = external global i64 ; assigned to register: R12
-@r1 = external global i64 ; assigned to register: RBX
+@r1 = external global i64 ; assigned to register: rbx
@r2 = external global i64 ; assigned to register: R14
-@r3 = external global i64 ; assigned to register: RSI
-@r4 = external global i64 ; assigned to register: RDI
+@r3 = external global i64 ; assigned to register: rsi
+@r4 = external global i64 ; assigned to register: rdi
@r5 = external global i64 ; assigned to register: R8
@r6 = external global i64 ; assigned to register: R9
@splim = external global i64 ; assigned to register: R15
-@f1 = external global float ; assigned to register: XMM1
-@f2 = external global float ; assigned to register: XMM2
-@f3 = external global float ; assigned to register: XMM3
-@f4 = external global float ; assigned to register: XMM4
-@d1 = external global double ; assigned to register: XMM5
-@d2 = external global double ; assigned to register: XMM6
+@f1 = external global float ; assigned to register: xmm1
+@f2 = external global float ; assigned to register: xmm2
+@f3 = external global float ; assigned to register: xmm3
+@f4 = external global float ; assigned to register: xmm4
+@d1 = external global double ; assigned to register: xmm5
+@d2 = external global double ; assigned to register: xmm6
define void @zap(i64 %a, i64 %b) nounwind {
entry:
define void @test_shl1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shl1:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; CHECK-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB5_2
; CHECK-NEXT: # BB#1: # %if
define void @test_shr1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shr1:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; CHECK-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB6_2
; CHECK-NEXT: # BB#1: # %if
define void @test_shr2(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shr2:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; CHECK-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB7_2
; CHECK-NEXT: # BB#1: # %if
define void @test_shl(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_shl:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; CHECK-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: je .LBB8_2
; CHECK-NEXT: # BB#1: # %if
define void @test_add(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; CHECK-LABEL: test_add:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; CHECK-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; CHECK-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; CHECK-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; CHECK-NEXT: kmovb (%rsi), %k0
; CHECK-NEXT: kmovb (%rdx), %k1
; CHECK-NEXT: testb $1, %dil
; BWON-F16C-NEXT: callq __truncdfhf2
; BWON-F16C-NEXT: movl %eax, %r15d
; BWON-F16C-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; BWON-F16C-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; BWON-F16C-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; BWON-F16C-NEXT: vzeroupper
; BWON-F16C-NEXT: callq __truncdfhf2
; BWON-F16C-NEXT: movl %eax, %ebp
; Test the LiveIntervals::handleMove() function.
;
; Moving the DIV32r instruction exercises the regunit update code because
-; %EDX has a live range into the function and is used by the DIV32r.
+; %edx has a live range into the function and is used by the DIV32r.
;
; Here sinking a kill + dead def:
-; 144B -> 180B: DIV32r %vreg4, %EAX<imp-def>, %EDX<imp-def,dead>, %EFLAGS<imp-def,dead>, %EAX<imp-use,kill>, %EDX<imp-use>
+; 144B -> 180B: DIV32r %vreg4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
; %vreg4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,144d:1) 0@0B-phi 1@144r 2@128r
}
; Same as above, but moving a kill + live def:
-; 144B -> 180B: DIV32r %vreg4, %EAX<imp-def,dead>, %EDX<imp-def>, %EFLAGS<imp-def,dead>, %EAX<imp-use,kill>, %EDX<imp-use>
+; 144B -> 180B: DIV32r %vreg4, %eax<imp-def,dead>, %edx<imp-def>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
; %vreg4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,184r:1) 0@0B-phi 1@144r 2@128r
}
; Move EFLAGS dead def across another def:
-; handleMove 208B -> 36B: %EDX<def> = MOV32r0 %EFLAGS<imp-def,dead>
+; handleMove 208B -> 36B: %edx<def> = MOV32r0 %EFLAGS<imp-def,dead>
; EFLAGS: [20r,20d:4)[160r,160d:3)[208r,208d:0)[224r,224d:1)[272r,272d:2)[304r,304d:5) 0@208r 1@224r 2@272r 3@160r 4@20r 5@304r
; --> [20r,20d:4)[36r,36d:0)[160r,160d:3)[224r,224d:1)[272r,272d:2)[304r,304d:5) 0@36r 1@224r 2@272r 3@160r 4@20r 5@304r
;
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp sgt <8 x i16> %a0, %1
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp sgt <16 x i8> %a0, %1
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp slt <8 x i16> %a0, %1
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp slt <16 x i8> %a0, %1
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminsb %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm3
; X86-SSE2-NEXT: por %xmm2, %xmm3
; X86-SSE2-NEXT: movd %xmm3, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
; X64-SSE2-NEXT: pandn %xmm0, %xmm3
; X64-SSE2-NEXT: por %xmm2, %xmm3
; X64-SSE2-NEXT: movd %xmm3, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ugt <8 x i16> %a0, %1
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ugt <16 x i8> %a0, %1
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm3, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm3, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm3, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
; X86-SSE42: ## BB#0:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
; X86-AVX: ## BB#0:
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm3, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
; X64-SSE42: ## BB#0:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
; X64-AVX: ## BB#0:
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ult <8 x i16> %a0, %1
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ult <16 x i8> %a0, %1
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm4, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
; X86-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm4, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
; X64-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: pandn %xmm0, %xmm4
; X86-SSE2-NEXT: por %xmm2, %xmm4
; X86-SSE2-NEXT: movd %xmm4, %eax
-; X86-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
; X86-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: pandn %xmm0, %xmm4
; X64-SSE2-NEXT: por %xmm2, %xmm4
; X64-SSE2-NEXT: movd %xmm4, %eax
-; X64-SSE2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
; X64-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminub %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
; X86-NO-CMOV-NEXT: sarw $15, %cx
; X86-NO-CMOV-NEXT: addl %ecx, %eax
; X86-NO-CMOV-NEXT: xorl %ecx, %eax
-; X86-NO-CMOV-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NO-CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NO-CMOV-NEXT: retl
;
; X86-CMOV-LABEL: test_i16:
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
-; X64-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
+; X64-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<kill> %rcx<def>
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
-; X64-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<kill> %RCX<def>
+; X64-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<kill> %rcx<def>
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
; X64-NEXT: movzwl 4(%rdi), %ecx
; X64-NEXT: movzbl 6(%rdi), %edx
; X64-NEXT: movb %dl, 6(%rdi)
-; X64-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill> %RDX<def>
+; X64-NEXT: # kill: %edx<def> %edx<kill> %rdx<kill> %rdx<def>
; X64-NEXT: shll $16, %edx
; X64-NEXT: orl %ecx, %edx
; X64-NEXT: shlq $32, %rdx
define i32 @test2(i32 %a) {
; X64-LABEL: test2:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
define i32 @test3(i32 %a) {
; X64-LABEL: test3:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
; RUN: not llc < %s -mtriple i386-unknown-linux-gnu -mattr +avx -o /dev/null 2> %t
; RUN: FileCheck %s --input-file %t
-define <4 x float> @testXMM_1(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_1(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vmovhlps $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l, <4 x float> %_xmm0)
ret <4 x float> %0
}
-define <4 x float> @testXMM_2(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_2(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "movapd $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_3(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_3(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vmovapd $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_4(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_4(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vmpsadbw $$0, $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l, <4 x float> %_xmm0)
ret <4 x float> %0
}
-define <4 x float> @testXMM_5(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_5(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l, i32 %_l)
ret <4 x float> %0
}
-define i32 @testXMM_6(i32 returned %_l) {
+define i32 @testxmm_6(i32 returned %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
tail call void asm sideeffect "vmovd $0, %eax", "v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l)
ret i32 %_l
}
-define <4 x float> @testXMM_7(<4 x float> returned %_xmm0) {
+define <4 x float> @testxmm_7(<4 x float> returned %_xmm0) {
; CHECK: error: inline assembly requires more registers than available
entry:
tail call void asm sideeffect "vmovmskps $0, %eax", "v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0)
ret <4 x float> %_xmm0
}
-define i32 @testXMM_8(<4 x float> %_xmm0, i32 %_l) {
+define i32 @testxmm_8(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call i32 asm "vmulsd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l, <4 x float> %_xmm0)
ret i32 %0
}
-define <4 x float> @testXMM_9(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_9(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l, <4 x float> %_xmm0)
ret <4 x float> %0
}
-define <4 x float> @testXMM_10(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_10(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "pabsb $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_11(<4 x float> %_xmm0, i32 %_l) {
+define <4 x float> @testxmm_11(<4 x float> %_xmm0, i32 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vpabsd $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i32 %_l)
ret <4 x float> %0
}
-define <8 x float> @testYMM_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovsldup $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovapd $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmulps $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmulpd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovups $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovupd $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
; RUN: llc < %s -mtriple x86_64-unknown-linux-gnu -mattr +avx | FileCheck %s
; RUN: llc < %s -mtriple x86_64-unknown-linux-gnu -mattr +avx512f | FileCheck %s
-define <4 x float> @testXMM_1(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_1(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmovhlps %xmm1, %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "vmovhlps $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
ret <4 x float> %0
}
-define <4 x float> @testXMM_2(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_2(<4 x float> %_xmm0, i64 %_l) {
; CHECK: movapd %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "movapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_3(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_3(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmovapd %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "vmovapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_4(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_4(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmpsadbw $0, %xmm1, %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "vmpsadbw $$0, $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
ret <4 x float> %0
}
-define <4 x float> @testXMM_5(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_5(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vminpd %xmm0, %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, i64 %_l)
ret <4 x float> %0
}
-define i64 @testXMM_6(i64 returned %_l) {
+define i64 @testxmm_6(i64 returned %_l) {
; CHECK: vmovd %xmm0, %eax
entry:
tail call void asm sideeffect "vmovd $0, %eax", "v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
ret i64 %_l
}
-define <4 x float> @testXMM_7(<4 x float> returned %_xmm0) {
+define <4 x float> @testxmm_7(<4 x float> returned %_xmm0) {
; CHECK: vmovmskps %xmm0, %eax
entry:
tail call void asm sideeffect "vmovmskps $0, %rax", "v,~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0)
ret <4 x float> %_xmm0
}
-define i64 @testXMM_8(<4 x float> %_xmm0, i64 %_l) {
+define i64 @testxmm_8(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vmulsd %xmm1, %xmm0, %xmm0
entry:
%0 = tail call i64 asm "vmulsd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
ret i64 %0
}
-define <4 x float> @testXMM_9(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_9(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vorpd %xmm1, %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
ret <4 x float> %0
}
-define <4 x float> @testXMM_10(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_10(<4 x float> %_xmm0, i64 %_l) {
; CHECK: pabsb %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "pabsb $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_11(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_11(<4 x float> %_xmm0, i64 %_l) {
; CHECK: vpabsd %xmm0, %xmm0
entry:
%0 = tail call <4 x float> asm "vpabsd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(i64 %_l)
ret <4 x float> %0
}
-define <8 x float> @testYMM_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovsldup %ymm0, %ymm0
entry:
%0 = tail call <8 x float> asm "vmovsldup $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovapd %ymm1, %ymm0
entry:
%0 = tail call <8 x float> asm "vmovapd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vminpd %ymm1, %ymm0, %ymm0
entry:
%0 = tail call <8 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vorpd %ymm1, %ymm0, %ymm0
entry:
%0 = tail call <8 x float> asm "vorpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmulps %ymm1, %ymm0, %ymm0
entry:
%0 = tail call <8 x float> asm "vmulps $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmulpd %ymm1, %ymm0, %ymm0
entry:
%0 = tail call <8 x float> asm "vmulpd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovups %ymm1, %ymm0
entry:
%0 = tail call <8 x float> asm "vmovups $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: vmovupd %ymm1, %ymm0
entry:
%0 = tail call <8 x float> asm "vmovupd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
; RUN: llc < %s -mtriple x86_64-unknown-linux-gnu -mattr +avx512f | FileCheck %s
-define <16 x float> @testZMM_1(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_1(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpternlogd $0, %zmm1, %zmm0, %zmm0
%0 = tail call <16 x float> asm "vpternlogd $$0, $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm0)
ret <16 x float> %0
}
-define <16 x float> @testZMM_2(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_2(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpabsq %zmm1, %zmm0
%0 = tail call <16 x float> asm "vpabsq $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1)
}
-define <16 x float> @testZMM_3(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_3(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpaddd %zmm1, %zmm1, %zmm0
%0 = tail call <16 x float> asm "vpaddd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
}
-define <16 x float> @testZMM_4(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_4(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpaddq %zmm1, %zmm1, %zmm0
%0 = tail call <16 x float> asm "vpaddq $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
}
-define <16 x float> @testZMM_5(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_5(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpandd %zmm1, %zmm1, %zmm0
%0 = tail call <16 x float> asm "vpandd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
}
-define <16 x float> @testZMM_6(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_6(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpandnd %zmm1, %zmm1, %zmm0
%0 = tail call <16 x float> asm "vpandnd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
}
-define <16 x float> @testZMM_7(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_7(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vpmaxsd %zmm1, %zmm1, %zmm0
%0 = tail call <16 x float> asm "vpmaxsd $1, $2, $0", "=v,v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1, <16 x float> %_zmm1)
}
-define <16 x float> @testZMM_8(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_8(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vmovups %zmm1, %zmm0
%0 = tail call <16 x float> asm "vmovups $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1)
}
-define <16 x float> @testZMM_9(<16 x float> %_zmm0, <16 x float> %_zmm1) {
+define <16 x float> @testzmm_9(<16 x float> %_zmm0, <16 x float> %_zmm1) {
entry:
; CHECK: vmovupd %zmm1, %zmm0
%0 = tail call <16 x float> asm "vmovupd $1, $0", "=v,v,~{dirflag},~{fpsr},~{flags}"(<16 x float> %_zmm1)
; RUN: not llc < %s -mtriple i386-unknown-linux-gnu -mattr +avx512vl -o /dev/null 2> %t
; RUN: FileCheck %s --input-file %t
-define <4 x float> @testXMM_1(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_1(<4 x float> %_xmm0, i64 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vmovhlps $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
}
-define <4 x float> @testXMM_2(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_2(<4 x float> %_xmm0, i64 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vmovapd $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i64 %_l)
}
-define <4 x float> @testXMM_3(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_3(<4 x float> %_xmm0, i64 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i64 %_l, i64 %_l)
}
-define i64 @testXMM_4(<4 x float> %_xmm0, i64 %_l) {
+define i64 @testxmm_4(<4 x float> %_xmm0, i64 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call i64 asm "vmulsd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
}
-define <4 x float> @testXMM_5(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_5(<4 x float> %_xmm0, i64 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vpabsq $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(i64 %_l)
}
-define <4 x float> @testXMM_6(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_6(<4 x float> %_xmm0, i64 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vpandd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0, i64 %_l)
}
-define <4 x float> @testXMM_7(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_7(<4 x float> %_xmm0, i64 %_l) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <4 x float> asm "vpandnd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0, i64 %_l)
}
-define <8 x float> @testYMM_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovsldup $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
}
-define <8 x float> @testYMM_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovapd $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
}
-define <8 x float> @testYMM_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm1)
}
-define <8 x float> @testYMM_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vpabsq $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
}
-define <8 x float> @testYMM_5(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_5(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vpandd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
}
-define <8 x float> @testYMM_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vpandnd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
}
-define <8 x float> @testYMM_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vpminud $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
}
-define <8 x float> @testYMM_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vpmaxsd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
}
-define <8 x float> @testYMM_9(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_9(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovups $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
}
-define <8 x float> @testYMM_10(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_10(<8 x float> %_ymm0, <8 x float> %_ymm1) {
; CHECK: error: inline assembly requires more registers than available
entry:
%0 = tail call <8 x float> asm "vmovupd $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
; RUN: llc < %s -mtriple x86_64-unknown-linux-gnu -mattr +avx512vl | FileCheck %s
-define <4 x float> @testXMM_1(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_1(<4 x float> %_xmm0, i64 %_l) {
entry:
; CHECK: vmovhlps %xmm17, %xmm16, %xmm16
%0 = tail call <4 x float> asm "vmovhlps $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
ret <4 x float> %0
}
-define <4 x float> @testXMM_2(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_2(<4 x float> %_xmm0, i64 %_l) {
entry:
; CHECK: vmovapd %xmm16, %xmm16
%0 = tail call <4 x float> asm "vmovapd $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"(i64 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_3(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_3(<4 x float> %_xmm0, i64 %_l) {
entry:
; CHECK: vminpd %xmm16, %xmm16, %xmm16
%0 = tail call <4 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"(i64 %_l, i64 %_l)
ret <4 x float> %0
}
-define i64 @testXMM_4(<4 x float> %_xmm0, i64 %_l) {
+define i64 @testxmm_4(<4 x float> %_xmm0, i64 %_l) {
entry:
; CHECK: vmulsd %xmm17, %xmm16, %xmm16
%0 = tail call i64 asm "vmulsd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"(i64 %_l, <4 x float> %_xmm0)
ret i64 %0
}
-define <4 x float> @testXMM_5(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_5(<4 x float> %_xmm0, i64 %_l) {
entry:
; CHECK: vpabsq %xmm16, %xmm16
%0 = tail call <4 x float> asm "vpabsq $1, $0", "=v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"(i64 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_6(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_6(<4 x float> %_xmm0, i64 %_l) {
entry:
; CHECK: vpandd %xmm16, %xmm17, %xmm16
%0 = tail call <4 x float> asm "vpandd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0, i64 %_l)
ret <4 x float> %0
}
-define <4 x float> @testXMM_7(<4 x float> %_xmm0, i64 %_l) {
+define <4 x float> @testxmm_7(<4 x float> %_xmm0, i64 %_l) {
entry:
; CHECK: vpandnd %xmm16, %xmm17, %xmm16
%0 = tail call <4 x float> asm "vpandnd $1, $2, $0", "=v,v,v,~{xmm0},~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{dirflag},~{fpsr},~{flags}"(<4 x float> %_xmm0, i64 %_l)
ret <4 x float> %0
}
-define <8 x float> @testYMM_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_1(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vmovsldup %ymm16, %ymm16
%0 = tail call <8 x float> asm "vmovsldup $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_2(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vmovapd %ymm16, %ymm16
%0 = tail call <8 x float> asm "vmovapd $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_3(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vminpd %ymm16, %ymm16, %ymm16
%0 = tail call <8 x float> asm "vminpd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_4(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vpabsq %ymm16, %ymm16
%0 = tail call <8 x float> asm "vpabsq $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_5(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_5(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vpandd %ymm16, %ymm17, %ymm16
%0 = tail call <8 x float> asm "vpandd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_6(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vpandnd %ymm16, %ymm17, %ymm16
%0 = tail call <8 x float> asm "vpandnd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_7(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vpminud %ymm16, %ymm17, %ymm16
%0 = tail call <8 x float> asm "vpminud $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_8(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vpmaxsd %ymm16, %ymm17, %ymm16
%0 = tail call <8 x float> asm "vpmaxsd $1, $2, $0", "=v,v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1, <8 x float> %_ymm0)
ret <8 x float> %0
}
-define <8 x float> @testYMM_9(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_9(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vmovups %ymm16, %ymm16
%0 = tail call <8 x float> asm "vmovups $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
ret <8 x float> %0
}
-define <8 x float> @testYMM_10(<8 x float> %_ymm0, <8 x float> %_ymm1) {
+define <8 x float> @testymm_10(<8 x float> %_ymm0, <8 x float> %_ymm1) {
entry:
; CHECK: vmovupd %ymm16, %ymm16
%0 = tail call <8 x float> asm "vmovupd $1, $0", "=v,v,~{ymm0},~{ymm1},~{ymm2},~{ymm3},~{ymm4},~{ymm5},~{ymm6},~{ymm7},~{ymm8},~{ymm9},~{ymm10},~{ymm11},~{ymm12},~{ymm13},~{ymm14},~{ymm15},~{dirflag},~{fpsr},~{flags}"(<8 x float> %_ymm1)
; inline-asm instruction and the ST register was live across another
; inline-asm instruction.
;
-; INLINEASM <es:frndint> [sideeffect] [attdialect], $0:[regdef], %ST0<imp-def,tied5>, $1:[reguse tiedto:$0], %ST0<tied3>, $2:[clobber], %EFLAGS<earlyclobber,imp-def,dead>
-; INLINEASM <es:fldcw $0> [sideeffect] [mayload] [attdialect], $0:[mem], %EAX<undef>, 1, %noreg, 0, %noreg, $1:[clobber], %EFLAGS<earlyclobber,imp-def,dead>
-; %FP0<def> = COPY %ST0
+; INLINEASM <es:frndint> [sideeffect] [attdialect], $0:[regdef], %st0<imp-def,tied5>, $1:[reguse tiedto:$0], %st0<tied3>, $2:[clobber], %eflags<earlyclobber,imp-def,dead>
+; INLINEASM <es:fldcw $0> [sideeffect] [mayload] [attdialect], $0:[mem], %eax<undef>, 1, %noreg, 0, %noreg, $1:[clobber], %eflags<earlyclobber,imp-def,dead>
+; %fp0<def> = COPY %st0
%struct.fpu_t = type { [8 x x86_fp80], x86_fp80, %struct.anon1, %struct.anon2, i32, i8, [15 x i8] }
%struct.anon1 = type { i32, i32, i32 }
; RUN: not llc -mtriple=i686-pc-win32 < %s 2>&1 | FileCheck %s
-; FIXME: This is miscompiled due to our unconditional use of ESI as the base
+; FIXME: This is miscompiled due to our unconditional use of esi as the base
; pointer.
; XFAIL: *
; CHECK-DAG: movl 4(%esp), %eax
; CHECK: ## InlineAsm Start
; CHECK: ## InlineAsm End
-; Everything is set up in EAX:EDX, return immediately.
+; Everything is set up in eax:edx, return immediately.
; CHECK-NEXT: retl
; The tied operands are not necessarily in the same order as the defs.
define i32 @test(i32 %a) {
; LNX1-LABEL: test:
; LNX1: # BB#0:
-; LNX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; LNX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; LNX1-NEXT: leal (%rdi,%rdi,2), %eax
; LNX1-NEXT: retq
;
; LNX2-LABEL: test:
; LNX2: # BB#0:
-; LNX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; LNX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; LNX2-NEXT: leal (%rdi,%rdi,2), %eax
; LNX2-NEXT: retq
;
; NACL-LABEL: test:
; NACL: # BB#0:
-; NACL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; NACL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; NACL-NEXT: leal (%rdi,%rdi,2), %eax
; NACL-NEXT: retq
;
; WIN-LABEL: test:
; WIN: # BB#0:
-; WIN-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; WIN-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; WIN-NEXT: leal (%rcx,%rcx,2), %eax
; WIN-NEXT: retq
%tmp2 = mul i32 %a, 3
define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
; X64-NEXT: leal 4(%rdi,%rsi,4), %eax
; X64-NEXT: imull %ecx, %eax
define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
; X64-NEXT: imull %ecx, %eax
define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks_illegal_scale:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
define i32 @test_lea_offset(i32) {
; GENERIC-LABEL: test_lea_offset:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_offset:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, -24
define i32 @test_lea_offset_big(i32) {
; GENERIC-LABEL: test_lea_offset_big:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset_big:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_offset_big:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset_big:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset_big:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset_big:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset_big:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset_big:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset_big:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, 1024
define i32 @test_lea_add(i32, i32) {
; GENERIC-LABEL: test_lea_add:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_add:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i32 %1, %0
define i32 @test_lea_add_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $16, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_offset:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_add_offset:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $16, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_offset:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $16, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_offset:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $16, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_offset:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $16, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_offset:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, 16
define i32 @test_lea_add_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset_big:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-4096, %eax # imm = 0xF000
; GENERIC-NEXT: # sched: [1:0.33]
;
; ATOM-LABEL: test_lea_add_offset_big:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_add_offset_big:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset_big:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-4096, %eax # imm = 0xF000
; SANDY-NEXT: # sched: [1:0.33]
;
; HASWELL-LABEL: test_lea_add_offset_big:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-4096, %eax # imm = 0xF000
; HASWELL-NEXT: # sched: [1:0.25]
;
; BROADWELL-LABEL: test_lea_add_offset_big:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-4096, %eax # imm = 0xF000
; BROADWELL-NEXT: # sched: [1:0.25]
;
; SKYLAKE-LABEL: test_lea_add_offset_big:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-4096, %eax # imm = 0xF000
; SKYLAKE-NEXT: # sched: [1:0.25]
;
; BTVER2-LABEL: test_lea_add_offset_big:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset_big:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, -4096
define i32 @test_lea_mul(i32) {
; GENERIC-LABEL: test_lea_mul:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_mul:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
define i32 @test_lea_mul_offset(i32) {
; GENERIC-LABEL: test_lea_mul_offset:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-32, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul_offset:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_mul_offset:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-32, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul_offset:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul_offset:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul_offset:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-32, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul_offset:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
define i32 @test_lea_mul_offset_big(i32) {
; GENERIC-LABEL: test_lea_mul_offset_big:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $10000, %eax # imm = 0x2710
; GENERIC-NEXT: # sched: [1:0.33]
;
; ATOM-LABEL: test_lea_mul_offset_big:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_mul_offset_big:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset_big:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $10000, %eax # imm = 0x2710
; SANDY-NEXT: # sched: [1:0.33]
;
; HASWELL-LABEL: test_lea_mul_offset_big:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $10000, %eax # imm = 0x2710
; HASWELL-NEXT: # sched: [1:0.25]
;
; BROADWELL-LABEL: test_lea_mul_offset_big:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $10000, %eax # imm = 0x2710
; BROADWELL-NEXT: # sched: [1:0.25]
;
; SKYLAKE-LABEL: test_lea_mul_offset_big:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $10000, %eax # imm = 0x2710
; SKYLAKE-NEXT: # sched: [1:0.25]
;
; BTVER2-LABEL: test_lea_mul_offset_big:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset_big:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 9
define i32 @test_lea_add_scale(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_add_scale:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 1
define i32 @test_lea_add_scale_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $96, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale_offset:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_add_scale_offset:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $96, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale_offset:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $96, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale_offset:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $96, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $96, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale_offset:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 2
define i32 @test_lea_add_scale_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset_big:
; GENERIC: # BB#0:
-; GENERIC-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; GENERIC-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; GENERIC-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-1200, %eax # imm = 0xFB50
; GENERIC-NEXT: # sched: [1:0.33]
;
; ATOM-LABEL: test_lea_add_scale_offset_big:
; ATOM: # BB#0:
-; ATOM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ATOM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ATOM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
;
; SLM-LABEL: test_lea_add_scale_offset_big:
; SLM: # BB#0:
-; SLM-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset_big:
; SANDY: # BB#0:
-; SANDY-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SANDY-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SANDY-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-1200, %eax # imm = 0xFB50
; SANDY-NEXT: # sched: [1:0.33]
;
; HASWELL-LABEL: test_lea_add_scale_offset_big:
; HASWELL: # BB#0:
-; HASWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; HASWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HASWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-1200, %eax # imm = 0xFB50
; HASWELL-NEXT: # sched: [1:0.25]
;
; BROADWELL-LABEL: test_lea_add_scale_offset_big:
; BROADWELL: # BB#0:
-; BROADWELL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BROADWELL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BROADWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-1200, %eax # imm = 0xFB50
; BROADWELL-NEXT: # sched: [1:0.25]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset_big:
; SKYLAKE: # BB#0:
-; SKYLAKE-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SKYLAKE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SKYLAKE-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-1200, %eax # imm = 0xFB50
; SKYLAKE-NEXT: # sched: [1:0.25]
;
; BTVER2-LABEL: test_lea_add_scale_offset_big:
; BTVER2: # BB#0:
-; BTVER2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; BTVER2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; BTVER2-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset_big:
; ZNVER1: # BB#0:
-; ZNVER1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ZNVER1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 3
; ### FIXME: BB#3 and LBB0_1 should be merged
; CHECK-NEXT: ## BB#3:
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
; CHECK-NEXT: LBB0_1:
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
; CHECK-NEXT: LBB0_6:
; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: ## kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%cmp5 = icmp sgt i32 %count, 0
; GENERIC-NEXT: lzcntw (%rsi), %cx
; GENERIC-NEXT: lzcntw %di, %ax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctlz_i16:
; HASWELL-NEXT: lzcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctlz_i16:
; BROADWELL-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctlz_i16:
; SKYLAKE-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctlz_i16:
; BTVER2-NEXT: lzcntw (%rsi), %cx
; BTVER2-NEXT: lzcntw %di, %ax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctlz_i16:
; ZNVER1-NEXT: lzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false )
; ALL-NEXT: sete %cl
; ALL-NEXT: orb %al, %cl
; ALL-NEXT: movzbl %cl, %eax
-; ALL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ALL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ALL-NEXT: retq
%cmp = icmp eq i16 %a, 0
%cmp1 = icmp eq i16 %b, 0
; FASTLZCNT-NEXT: lzcntq %rsi, %rax
; FASTLZCNT-NEXT: orl %ecx, %eax
; FASTLZCNT-NEXT: shrl $6, %eax
-; FASTLZCNT-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; FASTLZCNT-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp5:
; FASTLZCNT-NEXT: shrl $5, %ecx
; FASTLZCNT-NEXT: shrl $6, %eax
; FASTLZCNT-NEXT: orl %ecx, %eax
-; FASTLZCNT-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; FASTLZCNT-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp9:
define void @commute(i32 %test_case, i32 %scale) nounwind ssp {
; CHECK-LABEL: commute:
; CHECK: # BB#0: # %entry
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: cmpl $2, %eax
; CHECK-NEXT: ja .LBB1_4
; CHECK-NEXT: imull %edi, %esi
; CHECK-NEXT: leal (%rsi,%rsi,2), %esi
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<kill>
; CHECK-NEXT: callq printf
; CHECK-NEXT: addq $8, %rsp
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-LABEL: l_OUTLINED_FUNCTION_0:
; CHECK: movl $0, (%rax)
; CHECK-NEXT: movl $1, %edi
-; CHECK-NEXT: jmp _ext
\ No newline at end of file
+; CHECK-NEXT: jmp _ext
define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_64-LABEL: test15:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %ymm0, %zmm2
; KNL_64-NEXT: vpslld $31, %ymm1, %ymm0
; KNL_64-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL_64-NEXT: vgatherqps (%rdi,%zmm2,4), %ymm0 {%k1}
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test15:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpmovsxdq %ymm0, %zmm2
; KNL_32-NEXT: vpslld $31, %ymm1, %ymm0
; KNL_32-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL_32-NEXT: vgatherqps (%eax,%zmm2,4), %ymm0 {%k1}
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x double> %src0) {
; KNL_64-LABEL: test16:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1
;
; KNL_32-LABEL: test16:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_32-NEXT: vpmovsxdq %xmm1, %ymm1
define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x double> %src0) {
; KNL_64-LABEL: test17:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
; KNL_64-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k1
;
; KNL_32-LABEL: test17:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpsllq $63, %zmm1, %zmm1
define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_64-LABEL: test18:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vmovdqa %xmm2, %xmm2
; KNL_64-NEXT: vpslld $31, %ymm2, %ymm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
;
; KNL_32-LABEL: test18:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vmovdqa %xmm2, %xmm2
; KNL_32-NEXT: vpmovsxdq %ymm1, %zmm1
; KNL_32-NEXT: vpslld $31, %ymm2, %ymm2
define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind) {
; KNL_64-LABEL: test19:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; KNL_64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; KNL_64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1
;
; KNL_32-LABEL: test19:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; KNL_32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL_32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; KNL_32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_32-NEXT: vpmovsxdq %xmm1, %ymm1
define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_64-LABEL: test20:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm2, %xmm2
; KNL_64-NEXT: vpslld $31, %ymm2, %ymm2
;
; KNL_32-LABEL: test20:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; KNL_32-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm2, %xmm2
;
; SKX-LABEL: test20:
; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k1
; SKX-NEXT: vscatterqps %xmm0, (,%ymm1) {%k1}
define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_64-LABEL: test21:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_64-NEXT: vmovdqa %xmm2, %xmm2
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vpsllq $63, %zmm2, %zmm2
;
; KNL_32-LABEL: test21:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_32-NEXT: vmovdqa %xmm2, %xmm2
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: vpsllq $63, %zmm2, %zmm2
;
; SKX-LABEL: test21:
; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
;
; SKX_32-LABEL: test21:
; SKX_32: # BB#0:
-; SKX_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; SKX_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX_32-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX_32-NEXT: vptestmq %xmm2, %xmm2, %k1
; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
+; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm1, %xmm1
;
; KNL_32-LABEL: test22:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
+; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm1, %xmm1
define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22a:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm1, %xmm1
; KNL_64-NEXT: vpslld $31, %ymm1, %ymm1
;
; KNL_32-LABEL: test22a:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<def>
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %src0) {
; KNL_64-LABEL: test23:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
; KNL_64-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k1
;
; KNL_32-LABEL: test23:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpsllq $63, %zmm1, %zmm1
define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
; KNL_64-LABEL: test24:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vpgatherqq (%rdi,%zmm0,8), %zmm1 {%k1}
;
; KNL_32-LABEL: test24:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vmovdqa {{.*#+}} xmm1 = [1,0,1,0]
; KNL_32-NEXT: vpsllq $63, %zmm1, %zmm1
define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %src0) {
; KNL_64-LABEL: test25:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
; KNL_64-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL_64-NEXT: vptestmq %zmm1, %zmm1, %k1
;
; KNL_32-LABEL: test25:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM2<def> %XMM2<kill> %ZMM2<def>
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpsllq $63, %zmm1, %zmm1
define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_64-LABEL: test26:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vpgatherqq (%rdi,%zmm0,8), %zmm1 {%k1}
;
; KNL_32-LABEL: test26:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,1,0]
; KNL_32-NEXT: vpsllq $63, %zmm2, %zmm2
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vgatherqps (%rdi,%zmm1,4), %ymm0 {%k1}
-; KNL_64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-NEXT: movb $3, %cl
; KNL_32-NEXT: kmovw %ecx, %k1
; KNL_32-NEXT: vgatherqps (%eax,%zmm1,4), %ymm0 {%k1}
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_64-LABEL: test28:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
;
; KNL_32-LABEL: test28:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
+; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,1,0]
; KNL_32-NEXT: vpsllq $63, %zmm2, %zmm2
;
; SKX-LABEL: test28:
; SKX: # BB#0:
-; SKX-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX-NEXT: movb $3, %al
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
;
; SKX_32-LABEL: test28:
; SKX_32: # BB#0:
-; SKX_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; SKX_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; SKX_32-NEXT: movb $3, %al
; SKX_32-NEXT: kmovw %eax, %k1
; SKX_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vpsllq $2, %ymm1, %ymm1
; KNL_64-NEXT: vpaddq %ymm1, %ymm0, %ymm1
; KNL_64-NEXT: testb $1, %dil
-; KNL_64-NEXT: # implicit-def: %XMM0
+; KNL_64-NEXT: # implicit-def: %xmm0
; KNL_64-NEXT: je .LBB30_2
; KNL_64-NEXT: # BB#1: # %cond.load
; KNL_64-NEXT: vmovq %xmm1, %rax
; KNL_32-NEXT: vpslld $2, %xmm1, %xmm1
; KNL_32-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; KNL_32-NEXT: testb $1, %al
-; KNL_32-NEXT: # implicit-def: %XMM0
+; KNL_32-NEXT: # implicit-def: %xmm0
; KNL_32-NEXT: je .LBB30_2
; KNL_32-NEXT: # BB#1: # %cond.load
; KNL_32-NEXT: vmovd %xmm1, %ecx
; SKX-NEXT: vpsllq $2, %ymm1, %ymm1
; SKX-NEXT: vpaddq %ymm1, %ymm0, %ymm1
; SKX-NEXT: testb $1, %al
-; SKX-NEXT: # implicit-def: %XMM0
+; SKX-NEXT: # implicit-def: %xmm0
; SKX-NEXT: je .LBB30_2
; SKX-NEXT: # BB#1: # %cond.load
; SKX-NEXT: vmovq %xmm1, %rax
; SKX_32-NEXT: vpslld $2, %xmm1, %xmm1
; SKX_32-NEXT: vpaddd %xmm1, %xmm0, %xmm2
; SKX_32-NEXT: testb $1, %al
-; SKX_32-NEXT: # implicit-def: %XMM1
+; SKX_32-NEXT: # implicit-def: %xmm1
; SKX_32-NEXT: je .LBB30_2
; SKX_32-NEXT: # BB#1: # %cond.load
; SKX_32-NEXT: vmovd %xmm2, %eax
define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i64> %d) {
; KNL_64-LABEL: test_pr28312:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL_64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-32, %esp
; KNL_32-NEXT: subl $32, %esp
-; KNL_32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_32-NEXT: vpmovsxdq %xmm1, %ymm1
define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: large_index:
; KNL_64: # BB#0:
-; KNL_64-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; KNL_64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm0, %xmm0
; KNL_64-NEXT: vmovq %rcx, %xmm2
;
; KNL_32-LABEL: large_index:
; KNL_32: # BB#0:
-; KNL_32-NEXT: # kill: %XMM1<def> %XMM1<kill> %YMM1<def>
+; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
; KNL_32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm0, %xmm0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; AVX-LABEL: loadv1:
; AVX: ## BB#0:
; AVX-NEXT: testq %rdi, %rdi
-; AVX-NEXT: ## implicit-def: %XMM1
+; AVX-NEXT: ## implicit-def: %xmm1
; AVX-NEXT: je LBB0_1
; AVX-NEXT: ## BB#2: ## %else
; AVX-NEXT: testq %rdi, %rdi
; AVX512F-LABEL: loadv1:
; AVX512F: ## BB#0:
; AVX512F-NEXT: testq %rdi, %rdi
-; AVX512F-NEXT: ## implicit-def: %XMM1
+; AVX512F-NEXT: ## implicit-def: %xmm1
; AVX512F-NEXT: jne LBB0_2
; AVX512F-NEXT: ## BB#1: ## %cond.load
; AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; SKX-LABEL: loadv1:
; SKX: ## BB#0:
; SKX-NEXT: testq %rdi, %rdi
-; SKX-NEXT: ## implicit-def: %XMM1
+; SKX-NEXT: ## implicit-def: %xmm1
; SKX-NEXT: jne LBB0_2
; SKX-NEXT: ## BB#1: ## %cond.load
; SKX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
;
; AVX512F-LABEL: test11a:
; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
; AVX512F-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11a:
;
; AVX512F-LABEL: test11b:
; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpblendmd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11b:
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11c:
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11d:
;
; AVX512F-LABEL: test12:
; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
;
; AVX512F-LABEL: mload_constmask_v8f32:
; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: movw $7, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8f32:
;
; AVX512F-LABEL: mload_constmask_v8i32:
; AVX512F: ## BB#0:
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: movw $135, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8i32:
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse2,-avx | grep -i EDI
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2,-avx | grep -i RDI
-; RUN: llc < %s -mtriple=i686-- -mattr=+avx | grep -i EDI
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | grep -i RDI
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse2,-avx | grep -i edi
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2,-avx | grep -i rdi
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx | grep -i edi
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | grep -i rdi
; rdar://6573467
define void @test(<16 x i8> %a, <16 x i8> %b, i32 %dummy, i8* %c) nounwind {
; MUL_HiLo PhysReg def copies should be just below the mul.
;
; CHECK: *** Final schedule for BB#1 ***
-; CHECK: %EAX<def> = COPY
-; CHECK-NEXT: MUL32r %vreg{{[0-9]+}}, %EAX<imp-def>, %EDX<imp-def>, %EFLAGS<imp-def,dead>, %EAX<imp-use>;
-; CHECK-NEXT: COPY %E{{[AD]}}X
-; CHECK-NEXT: COPY %E{{[AD]}}X
+; CHECK: %eax<def> = COPY
+; CHECK-NEXT: MUL32r %vreg{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
+; CHECK-NEXT: COPY %e{{[ad]}}x
+; CHECK-NEXT: COPY %e{{[ad]}}x
; CHECK: DIVSSrm
define i64 @mulhoist(i32 %a, i32 %b) #0 {
entry:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq %xmm0, %rdi
; CHECK-NEXT: shrq $63, %rdi
-; CHECK-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<kill>
+; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<kill>
; CHECK-NEXT: jmp _float_call_signbit_callee ## TAILCALL
entry:
%t0 = bitcast double %n to i64
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_2:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 2
ret i16 %mul
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_3:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 3
ret i16 %mul
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_4:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (,%rdi,4), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 4
ret i16 %mul
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_5:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 5
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_6:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 6
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (,%ecx,8), %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_7:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (,%rdi,8), %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 7
ret i16 %mul
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_8:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (,%rdi,8), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 8
ret i16 %mul
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_9:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 9
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_10:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 10
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_11:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 11
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_12:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 12
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_13:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 13
ret i16 %mul
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_14:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 14
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_15:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 15
ret i16 %mul
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $4, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_16:
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $4, %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_17:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $4, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 17
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_18:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 18
ret i16 %mul
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_19:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: shll $2, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 19
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_20:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 20
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_21:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 21
ret i16 %mul
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_22:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 22
ret i16 %mul
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_23:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: shll $3, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 23
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_24:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $3, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 24
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_25:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,4), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 25
ret i16 %mul
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_26:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 26
ret i16 %mul
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_27:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 27
ret i16 %mul
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_28:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 28
ret i16 %mul
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_29:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 29
ret i16 %mul
; X86-NEXT: shll $5, %eax
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_30:
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 30
ret i16 %mul
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_31:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 31
ret i16 %mul
; X86: # BB#0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $5, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_32:
; X86-NEXT: leal 42(%eax,%eax,8), %ecx
; X86-NEXT: leal 2(%eax,%eax,4), %eax
; X86-NEXT: imull %ecx, %eax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: test_mul_spec:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 42(%rdi,%rdi,8), %ecx
; X64-NEXT: leal 2(%rdi,%rdi,4), %eax
; X64-NEXT: imull %ecx, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%mul = mul nsw i16 %x, 9
%add = add nsw i16 %mul, 42
;
; X64-HSW-LABEL: test_mul_by_2:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_2:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
;
; HSW-NOOPT-LABEL: test_mul_by_2:
; HSW-NOOPT: # BB#0:
-; HSW-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_2:
; JAG-NOOPT: # BB#0:
-; JAG-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_2:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_2:
; SLM-NOOPT: # BB#0:
-; SLM-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 2
;
; X64-HSW-LABEL: test_mul_by_3:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_3:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
;
; HSW-NOOPT-LABEL: test_mul_by_3:
; HSW-NOOPT: # BB#0:
-; HSW-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_3:
; JAG-NOOPT: # BB#0:
-; JAG-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_3:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_3:
; SLM-NOOPT: # BB#0:
-; SLM-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 3
;
; X64-HSW-LABEL: test_mul_by_4:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_4:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
;
; HSW-NOOPT-LABEL: test_mul_by_4:
; HSW-NOOPT: # BB#0:
-; HSW-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_4:
; JAG-NOOPT: # BB#0:
-; JAG-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_4:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_4:
; SLM-NOOPT: # BB#0:
-; SLM-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 4
;
; X64-HSW-LABEL: test_mul_by_5:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_5:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
;
; HSW-NOOPT-LABEL: test_mul_by_5:
; HSW-NOOPT: # BB#0:
-; HSW-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_5:
; JAG-NOOPT: # BB#0:
-; JAG-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_5:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_5:
; SLM-NOOPT: # BB#0:
-; SLM-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 5
;
; X64-HSW-LABEL: test_mul_by_6:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_6:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_6:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_7:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_7:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_7:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_8:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_8:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
;
; HSW-NOOPT-LABEL: test_mul_by_8:
; HSW-NOOPT: # BB#0:
-; HSW-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_8:
; JAG-NOOPT: # BB#0:
-; JAG-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_8:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_8:
; SLM-NOOPT: # BB#0:
-; SLM-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 8
;
; X64-HSW-LABEL: test_mul_by_9:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_9:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
;
; HSW-NOOPT-LABEL: test_mul_by_9:
; HSW-NOOPT: # BB#0:
-; HSW-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_9:
; JAG-NOOPT: # BB#0:
-; JAG-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_9:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_9:
; SLM-NOOPT: # BB#0:
-; SLM-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 9
;
; X64-HSW-LABEL: test_mul_by_10:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_10:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_10:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_11:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_11:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_12:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_12:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_12:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_13:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_13:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_14:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
;
; X64-JAG-LABEL: test_mul_by_14:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
;
; X64-HSW-LABEL: test_mul_by_15:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_15:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_15:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_17:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: shll $4, %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50]
;
; X64-JAG-LABEL: test_mul_by_17:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: shll $4, %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50]
;
; X64-SLM-LABEL: test_mul_by_17:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: shll $4, %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rdi), %eax # sched: [1:1.00]
;
; X64-HSW-LABEL: test_mul_by_18:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_18:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_18:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_19:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $2, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
;
; X64-JAG-LABEL: test_mul_by_19:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $2, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
;
; X64-HSW-LABEL: test_mul_by_20:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_20:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_20:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_21:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_21:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_22:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
;
; X64-JAG-LABEL: test_mul_by_22:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
;
; X64-HSW-LABEL: test_mul_by_23:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $3, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
;
; X64-JAG-LABEL: test_mul_by_23:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $3, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
;
; X64-HSW-LABEL: test_mul_by_24:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_24:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_24:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: shll $3, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_25:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_25:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_25:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_26:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
;
; X64-JAG-LABEL: test_mul_by_26:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
;
; X64-HSW-LABEL: test_mul_by_27:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_27:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_27:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; X64-HSW-LABEL: test_mul_by_28:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
;
; X64-JAG-LABEL: test_mul_by_28:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
;
; X64-HSW-LABEL: test_mul_by_29:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
;
; X64-JAG-LABEL: test_mul_by_29:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
;
; X64-HSW-LABEL: test_mul_spec:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-HSW-NEXT: addl $42, %ecx # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
;
; X64-JAG-LABEL: test_mul_spec:
; X64-JAG: # BB#0:
-; X64-JAG-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-JAG-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-JAG-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: imull %ecx, %eax # sched: [3:1.00]
;
; HSW-NOOPT-LABEL: test_mul_spec:
; HSW-NOOPT: # BB#0:
-; HSW-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; HSW-NOOPT-NEXT: addl $42, %ecx # sched: [1:0.25]
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
;
; JAG-NOOPT-LABEL: test_mul_spec:
; JAG-NOOPT: # BB#0:
-; JAG-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; JAG-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; JAG-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00]
;
; X64-SLM-LABEL: test_mul_spec:
; X64-SLM: # BB#0:
-; X64-SLM-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SLM-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; X64-SLM-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: imull %ecx, %eax # sched: [3:1.00]
;
; SLM-NOOPT-LABEL: test_mul_spec:
; SLM-NOOPT: # BB#0:
-; SLM-NOOPT-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SLM-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; SLM-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00]
;
; X64-HSW-LABEL: mult:
; X64-HSW: # BB#0:
-; X64-HSW-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-HSW-NEXT: cmpl $1, %esi
; X64-HSW-NEXT: movl $1, %ecx
; X64-HSW-NEXT: movl %esi, %eax
; X64-HSW-NEXT: jmpq *.LJTI0_0(,%rdi,8)
; X64-HSW-NEXT: .LBB0_2:
; X64-HSW-NEXT: addl %eax, %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_36:
; X64-HSW-NEXT: xorl %eax, %eax
; X64-HSW-NEXT: .LBB0_37:
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_3:
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_4:
; X64-HSW-NEXT: shll $2, %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_5:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_6:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_7:
; X64-HSW-NEXT: leal (,%rax,8), %ecx
; X64-HSW-NEXT: jmp .LBB0_8
; X64-HSW-NEXT: .LBB0_9:
; X64-HSW-NEXT: shll $3, %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_10:
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_11:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_12:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,2), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_13:
; X64-HSW-NEXT: shll $2, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_14:
; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_15:
; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
; X64-HSW-NEXT: .LBB0_18:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_19:
; X64-HSW-NEXT: shll $4, %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_20:
; X64-HSW-NEXT: movl %eax, %ecx
; X64-HSW-NEXT: .LBB0_21:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_22:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: .LBB0_23:
; X64-HSW-NEXT: shll $2, %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_24:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_25:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: .LBB0_27:
; X64-HSW-NEXT: shll $3, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_28:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_29:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
; X64-HSW-NEXT: .LBB0_30:
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_31:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
; X64-HSW-NEXT: .LBB0_17:
; X64-HSW-NEXT: addl %eax, %ecx
; X64-HSW-NEXT: movl %ecx, %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_33:
; X64-HSW-NEXT: movl %eax, %ecx
; X64-HSW-NEXT: .LBB0_8:
; X64-HSW-NEXT: subl %eax, %ecx
; X64-HSW-NEXT: movl %ecx, %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_35:
; X64-HSW-NEXT: shll $5, %eax
-; X64-HSW-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; X64-HSW-NEXT: retq
%3 = icmp eq i32 %1, 0
%4 = icmp sgt i32 %1, 1
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
; X32: # BB#0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
define i64 @select_i64_neg1_or_0(i1 %a) {
; X64-LABEL: select_i64_neg1_or_0:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: andl $1, %edi
; X64-NEXT: negq %rdi
; X64-NEXT: movq %rdi, %rax
; This test case extracts a sub_8bit_hi sub-register:
;
-; %R8B<def> = COPY %BH, %EBX<imp-use,kill>
-; %ESI<def> = MOVZX32_NOREXrr8 %R8B<kill>
+; %r8b<def> = COPY %bh, %ebx<imp-use,kill>
+; %esi<def> = MOVZX32_NOREXrr8 %r8b<kill>
;
-; The register allocation above is invalid, %BH can only be encoded without an
+; The register allocation above is invalid, %bh can only be encoded without an
; REX prefix, so the destination register must be GR8_NOREX. The code above
; triggers an assertion in copyPhysReg.
;
; This test case extracts a sub_8bit_hi sub-register:
;
; %vreg2<def> = COPY %vreg1:sub_8bit_hi; GR8:%vreg2 GR64_ABCD:%vreg1
-; TEST8ri %vreg2, 1, %EFLAGS<imp-def>; GR8:%vreg2
+; TEST8ri %vreg2, 1, %eflags<imp-def>; GR8:%vreg2
;
; %vreg2 must be constrained to GR8_NOREX, or the COPY could become impossible.
;
;
; AVX2-LABEL: v3i64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-NEXT: vpextrq $1, %xmm0, 16(%rdi)
;
; AVX2-LABEL: v3f64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-NEXT: vmovhpd %xmm0, 16(%rdi)
;
; AVX2-LABEL: v5i32:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
;
; AVX2-LABEL: v5f32:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
;
; AVX2-LABEL: v7i32:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,6,3,6,1,7,4,u>
; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; XOP-NEXT: vmovaps %ymm1, 32(%rdi)
; XOP-NEXT: vmovaps %ymm1, (%rdi)
-; XOP-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; XOP-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%shuffle = shufflevector <4 x double> %A, <4 x double> %A, <8 x i32> zeroinitializer
define i32 @or_shift1_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1_swapped:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
define i32 @or_shift2_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift2_and1:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,4), %eax
; CHECK-NEXT: retq
define i32 @or_shift3_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and1:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
define i32 @or_shift3_and7(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and7:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $7, %esi
; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
define i32 @or_shift4_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift4_and1:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: shll $4, %edi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi), %eax
define i32 @or_shift3_and8(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and8:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal (,%rdi,8), %eax
; CHECK-NEXT: andl $8, %esi
; CHECK-NEXT: orl %esi, %eax
; rdar://5571034
; This requires physreg joining, %vreg13 is live everywhere:
-; 304L %CL<def> = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13
+; 304L %cl<def> = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13
; 320L %vreg15<def> = COPY %vreg19; GR32:%vreg15 GR32_NOSP:%vreg19
-; 336L %vreg15<def> = SAR32rCL %vreg15, %EFLAGS<imp-def,dead>, %CL<imp-use,kill>; GR32:%vreg15
+; 336L %vreg15<def> = SAR32rCL %vreg15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%vreg15
define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp {
; CHECK-LABEL: foo:
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
; GENERIC-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; GENERIC-NEXT: popcntw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_ctpop_i16:
; SLM-NEXT: popcntw (%rsi), %cx # sched: [6:1.00]
; SLM-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ctpop_i16:
; SANDY-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; SANDY-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctpop_i16:
; HASWELL-NEXT: popcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctpop_i16:
; BROADWELL-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctpop_i16:
; SKYLAKE-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctpop_i16:
; BTVER2-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BTVER2-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctpop_i16:
; ZNVER1-NEXT: popcntw (%rsi), %cx # sched: [10:1.00]
; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctpop.i16( i16 %1 )
; X32-POPCNT: # BB#0:
; X32-POPCNT-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-POPCNT-NEXT: popcntw %ax, %ax
-; X32-POPCNT-NEXT: # kill: %AL<def> %AL<kill> %AX<kill>
+; X32-POPCNT-NEXT: # kill: %al<def> %al<kill> %ax<kill>
; X32-POPCNT-NEXT: retl
;
; X64-POPCNT-LABEL: cnt8:
; X64-POPCNT: # BB#0:
; X64-POPCNT-NEXT: movzbl %dil, %eax
; X64-POPCNT-NEXT: popcntw %ax, %ax
-; X64-POPCNT-NEXT: # kill: %AL<def> %AL<kill> %AX<kill>
+; X64-POPCNT-NEXT: # kill: %al<def> %al<kill> %ax<kill>
; X64-POPCNT-NEXT: retq
%cnt = tail call i8 @llvm.ctpop.i8(i8 %x)
ret i8 %cnt
; X32-NEXT: shll $8, %eax
; X32-NEXT: addl %ecx, %eax
; X32-NEXT: movzbl %ah, %eax
-; X32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: cnt16:
; X64-NEXT: shll $8, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: movzbl %ch, %eax # NOREX
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
;
; X32-POPCNT-LABEL: cnt16:
;
; X64-LABEL: PR22970_i32:
; X64: # BB#0:
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: andl $4095, %esi # imm = 0xFFF
; X64-NEXT: movl 32(%rdi,%rsi,4), %eax
; X64-NEXT: retq
define x86_thiscallcc i32* @fn4(i32* %this, i8* dereferenceable(1) %p1) {
entry:
- %DL = getelementptr inbounds i32, i32* %this, i32 0
- %call.i = tail call x86_thiscallcc i64 @fn1(i32* %DL)
+ %dl = getelementptr inbounds i32, i32* %this, i32 0
+ %call.i = tail call x86_thiscallcc i64 @fn1(i32* %dl)
%getTypeAllocSize___trans_tmp_2.i = getelementptr inbounds i32, i32* %this, i32 0
%0 = load i32, i32* %getTypeAllocSize___trans_tmp_2.i, align 4
- %call.i8 = tail call x86_thiscallcc i64 @fn1(i32* %DL)
+ %call.i8 = tail call x86_thiscallcc i64 @fn1(i32* %dl)
%1 = insertelement <2 x i64> undef, i64 %call.i, i32 0
%2 = insertelement <2 x i64> %1, i64 %call.i8, i32 1
%3 = add nsw <2 x i64> %2, <i64 7, i64 7>
; CHECK: # BB#0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
br label %bb
; CHECK: # BB#0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $2, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
br label %bb
; RUN: llc -mtriple=i686-pc-linux -print-after=postrapseudos < %s 2>&1 | FileCheck %s
-; CHECK: MOV8rr %{{[A-D]}}L, %E[[R:[A-D]]]X<imp-use,kill>, %E[[R]]X<imp-def>
+; CHECK: MOV8rr %{{[a-d]}}l, %e[[R:[a-d]]]x<imp-use,kill>, %e[[R]]x<imp-def>
define i32 @foo(i32 %i, i32 %k, i8* %p) {
%f = icmp ne i32 %i, %k
%s = zext i1 %f to i8
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %edi, -8
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %edi
-; CHECK-NEXT: # kill: %DI<def> %DI<kill> %EDI<kill>
+; CHECK-NEXT: # kill: %di<def> %di<kill> %edi<kill>
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %edi
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: # kill: %SI<def> %SI<kill> %ESI<kill>
+; CHECK-NEXT: # kill: %si<def> %si<kill> %esi<kill>
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %esi
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[0]
-; CHECK-NEXT: # implicit-def: %YMM2
+; CHECK-NEXT: # implicit-def: %ymm2
; CHECK-NEXT: vmovaps %xmm1, %xmm2
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm2
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
; CHECK-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
-; CHECK-NEXT: # implicit-def: %YMM3
+; CHECK-NEXT: # implicit-def: %ymm3
; CHECK-NEXT: vmovaps %xmm1, %xmm3
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm3
-; CHECK-NEXT: # implicit-def: %ZMM24
+; CHECK-NEXT: # implicit-def: %zmm24
; CHECK-NEXT: vmovaps %zmm3, %zmm24
; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm24, %zmm24
; CHECK-NEXT: vmovaps %zmm24, {{[0-9]+}}(%rsp)
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divl %ecx
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %rax<def>
; X64-NEXT: .LBB0_3:
; X64-NEXT: testq %rax, %rax
; X64-NEXT: setne -{{[0-9]+}}(%rsp)
define void @f2() {
; X86-O0-LABEL: f2:
; X86-O0: # BB#0: # %entry
-; X86-O0-NEXT: # implicit-def: %RAX
+; X86-O0-NEXT: # implicit-def: %rax
; X86-O0-NEXT: movzbl var_7, %ecx
; X86-O0-NEXT: cmpb $0, var_7
; X86-O0-NEXT: setne %dl
; 686-O0-NEXT: .cfi_def_cfa_offset 14
; 686-O0-NEXT: .cfi_offset %esi, -12
; 686-O0-NEXT: .cfi_offset %edi, -8
-; 686-O0-NEXT: # implicit-def: %EAX
+; 686-O0-NEXT: # implicit-def: %eax
; 686-O0-NEXT: movzbl var_7, %ecx
; 686-O0-NEXT: cmpb $0, var_7
; 686-O0-NEXT: setne %dl
; X64-NEXT: imull %esi, %ecx
; X64-NEXT: addl $-1437483407, %ecx # imm = 0xAA51BE71
; X64-NEXT: movl $9, %edx
-; X64-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X64-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X64-NEXT: shlq %cl, %rdx
; X64-NEXT: movq %rdx, {{.*}}(%rip)
; X64-NEXT: cmpl %eax, %esi
define void @foo() {
; X640-LABEL: foo:
; X640: # BB#0: # %bb
-; X640-NEXT: # implicit-def: %RAX
+; X640-NEXT: # implicit-def: %rax
; X640-NEXT: movzwl var_22, %ecx
; X640-NEXT: movzwl var_27, %edx
; X640-NEXT: xorl %edx, %ecx
; X640-NEXT: movzwl var_27, %ecx
; X640-NEXT: subl $16610, %ecx # imm = 0x40E2
; X640-NEXT: movl %ecx, %ecx
-; X640-NEXT: # kill: %RCX<def> %ECX<kill>
-; X640-NEXT: # kill: %CL<def> %RCX<kill>
+; X640-NEXT: # kill: %rcx<def> %ecx<kill>
+; X640-NEXT: # kill: %cl<def> %rcx<kill>
; X640-NEXT: sarq %cl, %rsi
; X640-NEXT: movb %sil, %cl
; X640-NEXT: movb %cl, (%rax)
; 6860-NEXT: .cfi_offset %esi, -20
; 6860-NEXT: .cfi_offset %edi, -16
; 6860-NEXT: .cfi_offset %ebx, -12
-; 6860-NEXT: # implicit-def: %EAX
+; 6860-NEXT: # implicit-def: %eax
; 6860-NEXT: movw var_22, %cx
; 6860-NEXT: movzwl var_27, %edx
; 6860-NEXT: movw %dx, %si
; 6860-NEXT: xorw %si, %cx
-; 6860-NEXT: # implicit-def: %EDI
+; 6860-NEXT: # implicit-def: %edi
; 6860-NEXT: movw %cx, %di
; 6860-NEXT: xorl %edx, %edi
; 6860-NEXT: movw %di, %cx
; 6860-NEXT: movzwl var_27, %edx
; 6860-NEXT: movw %dx, %si
; 6860-NEXT: xorw %si, %cx
-; 6860-NEXT: # implicit-def: %EDI
+; 6860-NEXT: # implicit-def: %edi
; 6860-NEXT: movw %cx, %di
; 6860-NEXT: xorl %edx, %edi
; 6860-NEXT: movw %di, %cx
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: addl $-16610, %ecx # imm = 0xBF1E
-; X64-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X64-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: movb %al, (%rax)
; X64-NEXT: retq
define void @foo() {
; CHECK-LABEL: foo:
; CHECK: # BB#0:
-; CHECK-NEXT: # implicit-def: %RAX
+; CHECK-NEXT: # implicit-def: %rax
; CHECK-NEXT: jmpq *%rax
; CHECK-NEXT: .LBB0_1:
-; CHECK-NEXT: # implicit-def: %RAX
+; CHECK-NEXT: # implicit-def: %rax
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: pcmpeqd %xmm1, %xmm1
; CHECK-NEXT: movdqu %xmm1, (%rax)
; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm11 = xmm11[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm13 = xmm13[1,0]
-; CHECK-NEXT: # kill: %YMM10<def> %YMM10<kill> %ZMM10<kill>
+; CHECK-NEXT: # kill: %ymm10<def> %ymm10<kill> %zmm10<kill>
; CHECK-NEXT: vextractf128 $1, %ymm10, %xmm10
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm10, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: %YMM9<def> %YMM9<kill> %ZMM9<kill>
+; CHECK-NEXT: # kill: %ymm9<def> %ymm9<kill> %zmm9<kill>
; CHECK-NEXT: vextractf128 $1, %ymm9, %xmm9
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm9, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: %YMM8<def> %YMM8<kill> %ZMM8<kill>
+; CHECK-NEXT: # kill: %ymm8<def> %ymm8<kill> %zmm8<kill>
; CHECK-NEXT: vextractf128 $1, %ymm8, %xmm8
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm8, %xmm0
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: %YMM7<def> %YMM7<kill> %ZMM7<kill>
+; CHECK-NEXT: # kill: %ymm7<def> %ymm7<kill> %zmm7<kill>
; CHECK-NEXT: vextractf128 $1, %ymm7, %xmm7
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm7, %xmm0
ret void
}
-attributes #0 = { nounwind optsize "no-frame-pointer-elim-non-leaf"}
\ No newline at end of file
+attributes #0 = { nounwind optsize "no-frame-pointer-elim-non-leaf"}
; SSE3-NEXT: pextrw $2, %xmm0, %ecx
; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; SSE3-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; SSE3-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE3-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; SSE3-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; SSE3-NEXT: retl
;
; SSE41-LABEL: zext_i8:
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: pextrw $2, %xmm0, %edx
; SSE41-NEXT: pextrw $4, %xmm0, %ecx
-; SSE41-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; SSE41-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; SSE41-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE41-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE41-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; SSE41-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; SSE41-NEXT: retl
;
; AVX-32-LABEL: zext_i8:
; AVX-32-NEXT: vmovd %xmm0, %eax
; AVX-32-NEXT: vpextrw $2, %xmm0, %edx
; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; AVX-32-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; AVX-32-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX-32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-32-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; AVX-32-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: zext_i8:
; AVX-64-NEXT: vmovd %xmm0, %eax
; AVX-64-NEXT: vpextrw $2, %xmm0, %edx
; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; AVX-64-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; AVX-64-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX-64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-64-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; AVX-64-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; AVX-64-NEXT: retq
%2 = zext <3 x i8> %0 to <3 x i16>
ret <3 x i16> %2
; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: pextrw $2, %xmm0, %edx
; SSE3-NEXT: pextrw $4, %xmm0, %ecx
-; SSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; SSE3-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; SSE3-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE3-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; SSE3-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; SSE3-NEXT: retl
;
; SSE41-LABEL: sext_i8:
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: pextrw $2, %xmm0, %edx
; SSE41-NEXT: pextrw $4, %xmm0, %ecx
-; SSE41-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; SSE41-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; SSE41-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; SSE41-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE41-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; SSE41-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; SSE41-NEXT: retl
;
; AVX-32-LABEL: sext_i8:
; AVX-32-NEXT: vmovd %xmm0, %eax
; AVX-32-NEXT: vpextrw $2, %xmm0, %edx
; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-32-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; AVX-32-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; AVX-32-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX-32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-32-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; AVX-32-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: sext_i8:
; AVX-64-NEXT: vmovd %xmm0, %eax
; AVX-64-NEXT: vpextrw $2, %xmm0, %edx
; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
-; AVX-64-NEXT: # kill: %DX<def> %DX<kill> %EDX<kill>
-; AVX-64-NEXT: # kill: %CX<def> %CX<kill> %ECX<kill>
+; AVX-64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-64-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; AVX-64-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
; AVX-64-NEXT: retq
%2 = sext <3 x i8> %0 to <3 x i16>
ret <3 x i16> %2
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpslld $17, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%shl = shl <8 x i32> %a, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
; We need to make sure that rematerialization into a physical register marks the
; super- or sub-register as dead after this rematerialization since only the
; original register is actually used later. Largely irrelevant for a trivial
-; example like this, since EAX is never used again, but easy to test.
+; example like this, since eax is never used again, but easy to test.
define i8 @test_remat() {
ret i8 0
; CHECK: REGISTER COALESCING
-; CHECK: Remat: %EAX<def,dead> = MOV32r0 %EFLAGS<imp-def,dead>, %AL<imp-def>
+; CHECK: Remat: %eax<def,dead> = MOV32r0 %eflags<imp-def,dead>, %al<imp-def>
}
; On the other hand, if it's already the correct width, we really shouldn't be
define i32 @test_remat32() {
ret i32 0
; CHECK: REGISTER COALESCING
-; CHECK: Remat: %EAX<def> = MOV32r0 %EFLAGS<imp-def,dead>
+; CHECK: Remat: %eax<def> = MOV32r0 %eflags<imp-def,dead>
}
; CHECK: # BB#0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: addl %eax, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; CHECK-NEXT: retq
%1 = shl i64 %a, 48
%2 = ashr exact i64 %1, 47
; CHECK: # BB#0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: shrq %rax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; CHECK-NEXT: retq
%1 = shl i64 %a, 48
%2 = ashr exact i64 %1, 49
; CHECK: # BB#0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: addl %eax, %eax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; CHECK-NEXT: retq
%1 = shl i64 %a, 56
%2 = ashr exact i64 %1, 55
; CHECK: # BB#0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: shrq %rax
-; CHECK-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; CHECK-NEXT: retq
%1 = shl i64 %a, 56
%2 = ashr exact i64 %1, 57
; GENERIC-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsf16:
; ATOM-NEXT: bsfw (%rsi), %cx # sched: [16:8.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ATOM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsf16:
; SLM-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsf16:
; SANDY-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsf16:
; HASWELL-NEXT: bsfw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsf16:
; BROADWELL-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsf16:
; SKYLAKE-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsf16:
; SKX-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsf16:
; BTVER2-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsf16:
; ZNVER1-NEXT: bsfw (%rsi), %cx # sched: [7:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call { i16, i16 } asm sideeffect "bsf $2, $0 \0A\09 bsf $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
%2 = extractvalue { i16, i16 } %1, 0
; GENERIC-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsr16:
; ATOM-NEXT: bsrw (%rsi), %cx # sched: [16:8.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ATOM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsr16:
; SLM-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsr16:
; SANDY-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsr16:
; HASWELL-NEXT: bsrw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsr16:
; BROADWELL-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsr16:
; SKYLAKE-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsr16:
; SKX-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsr16:
; BTVER2-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsr16:
; ZNVER1-NEXT: bsrw (%rsi), %cx # sched: [7:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call { i16, i16 } asm sideeffect "bsr $2, $0 \0A\09 bsr $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
%2 = extractvalue { i16, i16 } %1, 0
; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx
-; MCU-NEXT: # kill: %AH<def> %AH<kill> %AX<kill>
+; MCU-NEXT: # kill: %ah<def> %ah<kill> %ax<kill>
; MCU-NEXT: sahf
; MCU-NEXT: seta %dl
; MCU-NEXT: movb (%ecx,%edx,4), %al
; GENERIC: ## BB#0: ## %entry
; GENERIC-NEXT: negw %di
; GENERIC-NEXT: sbbl %eax, %eax
-; GENERIC-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test17:
; ATOM: ## BB#0: ## %entry
; ATOM-NEXT: negw %di
; ATOM-NEXT: sbbl %eax, %eax
-; ATOM-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; ATOM-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; MCU: # BB#0: # %entry
; MCU-NEXT: negw %ax
; MCU-NEXT: sbbl %eax, %eax
-; MCU-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; MCU-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; MCU-NEXT: retl
entry:
%cmp = icmp ne i16 %x, 0
; MCU-NEXT: cmpl %eax, %ecx
; MCU-NEXT: fucom %st(0)
; MCU-NEXT: fnstsw %ax
-; MCU-NEXT: # kill: %AH<def> %AH<kill> %AX<kill>
+; MCU-NEXT: # kill: %ah<def> %ah<kill> %ax<kill>
; MCU-NEXT: sahf
; MCU-NEXT: jp .LBB24_4
; MCU-NEXT: # BB#5: # %CF244
; MCU-NEXT: negl %edx
; MCU-NEXT: andl $43, %edx
; MCU-NEXT: xorl %edx, %eax
-; MCU-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; MCU-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_neg1_zeroext:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 0, i32 -1
define i32 @select_Cplus1_C(i1 %cond) {
; CHECK-LABEL: select_Cplus1_C:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal 41(%rdi), %eax
; CHECK-NEXT: retq
define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_Cplus1_C_zeroext:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 41(%rdi), %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 42, i32 41
; CHECK-NEXT: cmpl $43, %edi
; CHECK-NEXT: setl %al
; CHECK-NEXT: leal -1(,%rax,4), %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sel = select i1 %cmp, i16 -1, i16 3
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: shll $6, %eax
; CHECK-NEXT: orl $7, %eax
-; CHECK-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%sel = select i1 %cond, i16 7, i16 71
ret i16 %sel
;
; KNL-32-LABEL: pr25080:
; KNL-32: # BB#0: # %entry
-; KNL-32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [8388607,8388607,8388607,8388607,8388607,8388607,8388607,8388607]
; KNL-32-NEXT: vptestnmd %zmm1, %zmm0, %k0
; KNL-32-NEXT: movb $15, %al
;
; X64-LABEL: select_0_or_1s:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: andl $1, %edi
; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: retq
;
; X64-LABEL: select_0_or_1s_zeroext:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: retq
%not = xor i1 %cond, 1
%0 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
ret <4 x i32> %0
; CHECK: test_sha256rnds2rr
- ; CHECK: movaps %xmm0, [[XMM_TMP1:%xmm[1-9][0-9]?]]
+ ; CHECK: movaps %xmm0, [[XMM_TMP1:%xmm[1-9][0-9]?]]
; CHECK: movaps %xmm2, %xmm0
- ; CHECK: sha256rnds2 %xmm0, %xmm1, [[XMM_TMP1]]
+ ; CHECK: sha256rnds2 %xmm0, %xmm1, [[XMM_TMP1]]
}
define <4 x i32> @test_sha256rnds2rm(<4 x i32> %a, <4 x i32>* %b, <4 x i32> %c) nounwind uwtable {
%1 = tail call <4 x i32> @llvm.x86.sha256rnds2(<4 x i32> %a, <4 x i32> %0, <4 x i32> %c)
ret <4 x i32> %1
; CHECK: test_sha256rnds2rm
- ; CHECK: movaps %xmm0, [[XMM_TMP2:%xmm[1-9][0-9]?]]
+ ; CHECK: movaps %xmm0, [[XMM_TMP2:%xmm[1-9][0-9]?]]
; CHECK: movaps %xmm1, %xmm0
- ; CHECK: sha256rnds2 %xmm0, (%rdi), [[XMM_TMP2]]
+ ; CHECK: sha256rnds2 %xmm0, (%rdi), [[XMM_TMP2]]
}
declare <4 x i32> @llvm.x86.sha256msg1(<4 x i32>, <4 x i32>) nounwind readnone
;
; X64-LABEL: test_lshr_and:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shrl $2, %edi
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl array(,%rdi,4), %eax
;
; X64-LABEL: test_exact4:
; X64: # BB#0:
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
;
; X64-LABEL: test_exact5:
; X64: # BB#0:
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
;
; X64-LABEL: test_exact6:
; X64: # BB#0:
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: leaq (%rsi,%rdx), %rax
; X64-NEXT: retq
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X86-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X86-NEXT: shldl %cl, %edx, %eax
; X86-NEXT: retl
;
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X86-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X86-NEXT: shrdl %cl, %edx, %eax
; X86-NEXT: retl
;
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB3_1
; CHECK-NEXT: # BB#2: # %lor.end
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB3_1: # %lor.rhs
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: retq
entry:
%tobool = icmp ne i32 %b, 0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-NEXT: kmovd %eax, %k1
; AVX512BWVL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BWVL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BWVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%strided.vec = shufflevector <32 x i8> %v, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK: movl $___gxx_personality_sj0, -40(%ebp)
; UFC.__lsda = $LSDA
; CHECK: movl $[[LSDA:GCC_except_table[0-9]+]], -36(%ebp)
-; UFC.__jbuf[0] = $EBP
+; UFC.__jbuf[0] = $ebp
; CHECK: movl %ebp, -32(%ebp)
-; UFC.__jbuf[2] = $ESP
+; UFC.__jbuf[2] = $esp
; CHECK: movl %esp, -24(%ebp)
-; UFC.__jbuf[1] = $EIP
+; UFC.__jbuf[1] = $eip
; CHECK: movl $[[RESUME:LBB[0-9]+_[0-9]+]], -28(%ebp)
; UFC.__lsda = $LSDA
; CHECK-X64: leaq [[LSDA:GCC_except_table[0-9]+]](%rip), %rax
; CHECK-X64: movq %rax, -272(%rbp)
-; UFC.__jbuf[0] = $RBP
+; UFC.__jbuf[0] = $rbp
; CHECK-X64: movq %rbp, -264(%rbp)
-; UFC.__jbuf[2] = $RSP
+; UFC.__jbuf[2] = $rsp
; CHECK-X64: movq %rsp, -248(%rbp)
-; UFC.__jbuf[1] = $RIP
+; UFC.__jbuf[1] = $rip
; CHECK-X64: leaq .[[RESUME:LBB[0-9]+_[0-9]+]](%rip), %rax
; LINUXOSX: movaps {{.*(%r(b|s)p).*}}, {{%xmm(1[2-5])}} {{#+}} 16-byte Reload
; LINUXOSX: retq
-;test calling conventions - input parameters, callee saved XMMs
+;test calling conventions - input parameters, callee saved xmms
define x86_regcallcc <16 x float> @testf32_inp(<16 x float> %a, <16 x float> %b, <16 x float> %c) nounwind {
%x1 = fadd <16 x float> %a, %b
%x2 = fmul <16 x float> %a, %b
; GENERIC-LABEL: test_pextrw:
; GENERIC: # BB#0:
; GENERIC-NEXT: pextrw $6, %xmm0, %eax # sched: [3:1.00]
-; GENERIC-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pextrw:
; ATOM: # BB#0:
; ATOM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:2.00]
-; ATOM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ATOM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pextrw:
; SLM: # BB#0:
; SLM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:1.00]
-; SLM-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrw:
; SANDY: # BB#0:
; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SANDY-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
; HASWELL: # BB#0:
; HASWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrw:
; BROADWELL: # BB#0:
; BROADWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; BROADWELL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrw:
; SKYLAKE: # BB#0:
; SKYLAKE-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKYLAKE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrw:
; SKX: # BB#0:
; SKX-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrw:
; BTVER2: # BB#0:
; BTVER2-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrw:
; ZNVER1: # BB#0:
; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <8 x i16> %a0, i32 6
ret i16 %1
; GENERIC-NEXT: movl $7, %eax # sched: [1:0.33]
; GENERIC-NEXT: movl $7, %edx # sched: [1:0.33]
; GENERIC-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; GENERIC-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; GENERIC-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; GENERIC-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-NEXT: movl $7, %edx # sched: [1:0.50]
; SLM-NEXT: movl %ecx, %esi # sched: [1:0.50]
; SLM-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [21:21.00]
-; SLM-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SLM-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SLM-NEXT: leal (%rcx,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-NEXT: movl $7, %eax # sched: [1:0.33]
; SANDY-NEXT: movl $7, %edx # sched: [1:0.33]
; SANDY-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; SANDY-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SANDY-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SANDY-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; HASWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; HASWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [18:4.00]
-; HASWELL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; HASWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; HASWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; BROADWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [23:4.00]
-; BROADWELL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; BROADWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; BROADWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-NEXT: movl $7, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: movl $7, %edx # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKYLAKE-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SKYLAKE-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SKYLAKE-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-NEXT: movl $7, %eax # sched: [1:0.25]
; SKX-NEXT: movl $7, %edx # sched: [1:0.25]
; SKX-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SKX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SKX-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-NEXT: movl $7, %edx # sched: [1:0.17]
; BTVER2-NEXT: movl %ecx, %esi # sched: [1:0.17]
; BTVER2-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [19:10.00]
-; BTVER2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; BTVER2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; BTVER2-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
; ZNVER1-NEXT: movl %ecx, %esi # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; ZNVER1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
; GENERIC-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; GENERIC-NEXT: movl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; GENERIC-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; GENERIC-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; GENERIC-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [17:17.00]
; SLM-NEXT: movl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:17.00]
-; SLM-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SLM-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SLM-NEXT: leal (%rcx,%rax), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; SANDY-NEXT: movl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; SANDY-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SANDY-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SANDY-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; HASWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [11:3.00]
-; HASWELL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; HASWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; HASWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; BROADWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; BROADWELL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; BROADWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; BROADWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKYLAKE-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKYLAKE-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SKYLAKE-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SKYLAKE-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKX-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; SKX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; SKX-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [7:2.00]
; BTVER2-NEXT: movl %ecx, %eax # sched: [1:0.17]
; BTVER2-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [12:2.00]
-; BTVER2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; BTVER2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; BTVER2-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: movl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
+; ZNVER1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
; CHECK-NEXT: .short 0
; 1 location
; CHECK-NEXT: .short 1
-; Loc 0: Direct RBP - ofs
+; Loc 0: Direct rbp - ofs
; CHECK-NEXT: .byte 2
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 8
; PATCH-NEXT: .short 0
; Num LiveOut Entries: 1
; PATCH-NEXT: .short 1
-; LiveOut Entry 1: %YMM2 (16 bytes) --> %XMM2
+; LiveOut Entry 1: %ymm2 (16 bytes) --> %xmm2
; PATCH-NEXT: .short 19
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 16
; PATCH-NEXT: .short 0
; Num LiveOut Entries: 5
; PATCH-NEXT: .short 5
-; LiveOut Entry 1: %RAX (1 bytes) --> %AL or %AH
+; LiveOut Entry 1: %rax (1 bytes) --> %al or %ah
; PATCH-NEXT: .short 0
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 1
-; LiveOut Entry 2: %R8 (8 bytes)
+; LiveOut Entry 2: %r8 (8 bytes)
; PATCH-NEXT: .short 8
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 8
-; LiveOut Entry 3: %YMM0 (32 bytes)
+; LiveOut Entry 3: %ymm0 (32 bytes)
; PATCH-NEXT: .short 17
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 32
-; LiveOut Entry 4: %YMM1 (32 bytes)
+; LiveOut Entry 4: %ymm1 (32 bytes)
; PATCH-NEXT: .short 18
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 32
-; LiveOut Entry 5: %YMM2 (16 bytes) --> %XMM2
+; LiveOut Entry 5: %ymm2 (16 bytes) --> %xmm2
; PATCH-NEXT: .short 19
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 16
; PATCH-NEXT: .short 0
; Num LiveOut Entries: 2
; PATCH-NEXT: .short 2
-; LiveOut Entry 1: %RSP (8 bytes)
+; LiveOut Entry 1: %rsp (8 bytes)
; PATCH-NEXT: .short 7
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 8
-; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
+; LiveOut Entry 2: %ymm2 (16 bytes) --> %xmm2
; PATCH-NEXT: .short 19
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 16
; PATCH-NEXT: .short 0
; Num LiveOut Entries: 2
; PATCH-NEXT: .short 2
-; LiveOut Entry 1: %RSP (8 bytes)
+; LiveOut Entry 1: %rsp (8 bytes)
; PATCH-NEXT: .short 7
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 8
-; LiveOut Entry 2: %YMM2 (16 bytes) --> %XMM2
+; LiveOut Entry 2: %ymm2 (16 bytes) --> %xmm2
; PATCH-NEXT: .short 19
; PATCH-NEXT: .byte 0
; PATCH-NEXT: .byte 16
; CHECK: .short 0
; CHECK: .short 0
; CHECK: .long 0
-; Direct Spill Slot [RSP+0]
+; Direct Spill Slot [rsp+0]
; CHECK: .byte 2
; CHECK: .byte 0
; CHECK: .short 8
; CHECK: .short 0
; CHECK: .short 0
; CHECK: .long 1
-; Direct Spill Slot [RSP+0]
+; Direct Spill Slot [rsp+0]
; CHECK: .byte 2
; CHECK: .byte 0
; CHECK: .short 8
define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
; X32-LABEL: reg_broadcast_2f64_4f64:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2f64_4f64:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
define <8 x double> @reg_broadcast_2f64_8f64(<2 x double> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2f64_8f64:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2f64_8f64:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2f64_8f64:
; X64-AVX: # BB#0:
-; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2f64_8f64:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
;
; X32-AVX512-LABEL: reg_broadcast_4f64_8f64:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
;
; X64-AVX512-LABEL: reg_broadcast_4f64_8f64:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
; X32-LABEL: reg_broadcast_2i64_4i64:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2i64_4i64:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
define <8 x i64> @reg_broadcast_2i64_8i64(<2 x i64> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2i64_8i64:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2i64_8i64:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2i64_8i64:
; X64-AVX: # BB#0:
-; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2i64_8i64:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
;
; X32-AVX512-LABEL: reg_broadcast_4i64_8i64:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
;
; X64-AVX512-LABEL: reg_broadcast_4i64_8i64:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
; X32-LABEL: reg_broadcast_4f32_8f32:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4f32_8f32:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
define <16 x float> @reg_broadcast_4f32_16f32(<4 x float> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4f32_16f32:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4f32_16f32:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4f32_16f32:
; X64-AVX: # BB#0:
-; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4f32_16f32:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
;
; X32-AVX512-LABEL: reg_broadcast_8f32_16f32:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
;
; X64-AVX512-LABEL: reg_broadcast_8f32_16f32:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
; X32-LABEL: reg_broadcast_4i32_8i32:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4i32_8i32:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
define <16 x i32> @reg_broadcast_4i32_16i32(<4 x i32> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4i32_16i32:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4i32_16i32:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4i32_16i32:
; X64-AVX: # BB#0:
-; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4i32_16i32:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
;
; X32-AVX512-LABEL: reg_broadcast_8i32_16i32:
; X32-AVX512: # BB#0:
-; X32-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
;
; X64-AVX512-LABEL: reg_broadcast_8i32_16i32:
; X64-AVX512: # BB#0:
-; X64-AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
; X32-LABEL: reg_broadcast_8i16_16i16:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_8i16_16i16:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
define <32 x i16> @reg_broadcast_8i16_32i16(<8 x i16> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512F: # BB#0:
-; X32-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512BW: # BB#0:
-; X32-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512DQ: # BB#0:
-; X32-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX: # BB#0:
-; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512F: # BB#0:
-; X64-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512BW: # BB#0:
-; X64-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512DQ: # BB#0:
-; X64-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
;
; X32-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
; X32-AVX512BW: # BB#0:
-; X32-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
;
; X64-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
; X64-AVX512BW: # BB#0:
-; X64-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
; X32-LABEL: reg_broadcast_16i8_32i8:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_16i8_32i8:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
define <64 x i8> @reg_broadcast_16i8_64i8(<16 x i8> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512F: # BB#0:
-; X32-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512BW: # BB#0:
-; X32-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512DQ: # BB#0:
-; X32-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX: # BB#0:
-; X64-AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512F: # BB#0:
-; X64-AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512BW: # BB#0:
-; X64-AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512DQ: # BB#0:
-; X64-AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
;
; X32-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
; X32-AVX512BW: # BB#0:
-; X32-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X32-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
;
; X64-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
; X64-AVX512BW: # BB#0:
-; X64-AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; X64-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; Check that we can fold an indexed load into a tail call instruction.
; CHECK: fold_indexed_load
-; CHECK: leaq (%rsi,%rsi,4), %[[RAX:r..]]
-; CHECK: jmpq *16(%{{r..}},%[[RAX]],8) ## TAILCALL
+; CHECK: leaq (%rsi,%rsi,4), %[[rax:r..]]
+; CHECK: jmpq *16(%{{r..}},%[[rax]],8) ## TAILCALL
%struct.funcs = type { i32 (i8*, i32*, i32)*, i32 (i8*)*, i32 (i8*)*, i32 (i8*, i32)*, i32 }
@func_table = external global [0 x %struct.funcs]
define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
;
; X64-LABEL: test__blcfill_u32:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: andl %edi, %eax
; X64-NEXT: retq
;
; X64-LABEL: test__blci_u32:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl $-1, %eax
; X64-NEXT: orl %edi, %eax
;
; X64-LABEL: test__blcmsk_u32:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl %edi, %eax
; X64-NEXT: retq
;
; X64-LABEL: test__blcs_u32:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: orl %edi, %eax
; X64-NEXT: retq
define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32_z2:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: testl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_z2:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: orl %edi, %eax
define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z2:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32_z2:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32_z2:
; CHECK: # BB#0:
-; CHECK-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
;
; X64-LABEL: test2:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: addl %esi, %edi
; X64-NEXT: leal (%rdi,%rdi), %eax
; X64-NEXT: retq
;
; X64-LABEL: test3:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: leal (%rdi,%rsi), %eax
; X64-NEXT: movl $4, %ecx
; X64-NEXT: mull %ecx
; CHECK-NEXT: shrl $12, %eax
; CHECK-NEXT: movzwl %ax, %eax
; CHECK-NEXT: movb $37, %dl
-; CHECK-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; CHECK-NEXT: mulb %dl
; CHECK-NEXT: subb %al, %cl
; CHECK-NEXT: movl %ecx, %eax
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: decl %eax
; X86-NEXT: andw {{[0-9]+}}(%esp), %ax
-; X86-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: shift_right_pow_2:
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: decl %eax
; X64-NEXT: andl %edi, %eax
-; X64-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
%shr = lshr i16 -32768, %y
%urem = urem i16 %x, %shr
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: andb $4, %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X86-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X86-NEXT: divb %cl
; X86-NEXT: movzbl %ah, %eax # NOREX
-; X86-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X86-NEXT: retl
;
; X64-LABEL: and_pow_2:
; X64: # BB#0:
; X64-NEXT: andb $4, %sil
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %EAX<def> %EAX<kill> %AX<def>
+; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
-; X64-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; X64-NEXT: retq
%and = and i8 %y, 4
%urem = urem i8 %x, %and
;
; AVX512DQ-LABEL: fptosi_2f64_to_2i64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX-LABEL: fptosi_4f64_to_2i32:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512DQ-LABEL: fptosi_4f64_to_4i64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f64_to_4i64:
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512F-LABEL: fptoui_2f64_to_4i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512F-NEXT: vzeroupper
;
; AVX512DQ-LABEL: fptoui_2f64_to_4i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
;
; AVX512F-LABEL: fptoui_2f64_to_2i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512F-LABEL: fptoui_4f64_to_2i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_2i32:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_2i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_2i32:
; AVX512VLDQ: # BB#0:
-; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i64:
;
; AVX512F-LABEL: fptoui_4f64_to_4i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512DQ-LABEL: fptosi_4f32_to_2i64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_2i64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <4 x float> %a to <4 x i64>
; AVX512DQ-LABEL: fptosi_4f32_to_4i64:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_4i64:
; AVX512DQ-LABEL: fptosi_8f32_to_4i64:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_8f32_to_4i64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512VLDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <8 x float> %a to <8 x i64>
%shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;
; AVX512F-LABEL: fptoui_2f32_to_2i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512F-NEXT: vzeroupper
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512DQ-NEXT: vzeroupper
;
; AVX512F-LABEL: fptoui_4f32_to_4i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
;
; AVX512DQ-LABEL: fptoui_4f32_to_4i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512DQ-LABEL: fptoui_4f32_to_2i64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_2i64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x float> %a to <4 x i64>
;
; AVX512F-LABEL: fptoui_8f32_to_8i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_8f32_to_8i32:
;
; AVX512DQ-LABEL: fptoui_8f32_to_8i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_8i32:
; AVX512DQ-LABEL: fptoui_4f32_to_4i64:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i64:
; AVX512DQ-LABEL: fptoui_8f32_to_4i64:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_4i64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512VLDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <8 x float> %a to <8 x i64>
%shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;
; X64-LABEL: t0:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl $76, -24(%rsp,%rdi,4)
;
; X64-LABEL: t1:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movl $76, %eax
; X64-NEXT: pinsrd $0, %eax, %xmm0
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
;
; X64-LABEL: t2:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: pinsrd $0, -24(%rsp,%rdi,4), %xmm0
;
; X64-LABEL: t3:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movss %xmm0, -24(%rsp,%rdi,4)
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: andq $-32, %rsp
; X64-NEXT: subq $64, %rsp
-; X64-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; X64-NEXT: movaps %xmm0, (%rsp)
; X64-NEXT: andl $7, %edi
;
; X64-LABEL: t1:
; X64: # BB#0:
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: shll $12, %edi
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+mmx,+sse4.2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+mmx,+sse4.2 | FileCheck %s --check-prefix=X64
-; MMX insertelement is not available; these are promoted to XMM.
+; MMX insertelement is not available; these are promoted to XMM.
; (Without SSE they are split to two ints, and the code is much better.)
define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
;
; X64-LABEL: var_insert:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
+; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %esi
; X64-NEXT: movl %edi, -24(%rsp,%rsi,4)
;
; X64-LABEL: var_extract:
; X64: # BB#0: # %entry
-; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl -24(%rsp,%rdi,4), %eax
; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse4.1 | FileCheck %s --check-prefix=X64
-; This is not an MMX operation; promoted to XMM.
+; This is not an MMX operation; promoted to XMM.
define x86_mmx @t0(i32 %A) nounwind {
; X32-LABEL: t0:
; X32: ## BB#0:
;
; X64-LABEL: t0:
; X64: ## BB#0:
-; X64-NEXT: ## kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
;
; AVX512DQ-LABEL: sitofp_2i64_to_2f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX-LABEL: sitofp_4i32_to_2f64:
; AVX: # BB#0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x double>
; AVX1: # BB#0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x double>
; AVX1: # BB#0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x double>
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f64:
; AVX512: # BB#0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; AVX512: # BB#0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512F-LABEL: uitofp_2i32_to_2f64:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
;
; AVX512DQ-LABEL: uitofp_2i32_to_2f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_2f64:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_2f64:
; AVX512VL: # BB#0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_2f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_2f64:
; AVX512VLDQ: # BB#0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x double>
; AVX1: # BB#0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x double>
; AVX1: # BB#0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x double>
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f64:
;
; AVX512F-LABEL: uitofp_4i32_to_4f64:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f64:
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f64:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f64:
; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32_zero:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32_undef:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # BB#0:
-; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x float>
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
;
; AVX512DQ-LABEL: uitofp_2i64_to_4f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # BB#0:
-; AVX512VLDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_4f32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x float>
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2: # BB#0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
;
; AVX512F-LABEL: uitofp_8i32_to_8f32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_8i32_to_8f32:
;
; AVX512DQ-LABEL: uitofp_8i32_to_8f32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_8i32_to_8f32:
; AVX512: # BB#0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f64:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f64:
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f64:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f64:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512F: # BB#0:
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i32_to_8f32:
; AVX512DQ: # BB#0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i32_to_8f32:
; X32-NEXT: minss LCPI0_2, %xmm0
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64-NEXT: minss {{.*}}(%rip), %xmm0
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test1:
; X32_AVX1-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32_AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test1:
; X64_AVX1-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX1-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64_AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test1:
; X32_AVX512-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32_AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test1:
; X64_AVX512-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX512-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64_AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64_AVX512-NEXT: retq
%tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
%tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64-NEXT: retq
;
; X32_AVX-LABEL: test2:
; X32_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X32_AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test2:
; X64_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; X64_AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; X64_AVX-NEXT: retq
%tmp28 = fsub float %f, 1.000000e+00 ; <float> [#uses=1]
%tmp37 = fmul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
-; XOP-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; XOP-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; XOP-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %b
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
; SSE: # BB#0:
-; SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: rolw $8, %di
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $3855, %eax # imm = 0xF0F
; SSE-NEXT: andl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: shrl %eax
; SSE-NEXT: leal (%rax,%rcx,2), %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: rolw $8, %di
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $3855, %eax # imm = 0xF0F
; AVX-NEXT: andl $43690, %eax # imm = 0xAAAA
; AVX-NEXT: shrl %eax
; AVX-NEXT: leal (%rax,%rcx,2), %eax
-; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
-; XOP-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; XOP-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; XOP-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
; SSE: # BB#0:
-; SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: bswapl %edi
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
;
; AVX-LABEL: test_bitreverse_i32:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: bswapl %edi
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX1-NEXT: movl $-1, %eax
; AVX1-NEXT: cmovnel %ecx, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX2-NEXT: movl $-1, %eax
; AVX2-NEXT: cmovnel %ecx, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: negl %eax
; AVX1-NEXT: sbbl %eax, %eax
-; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
; AVX512: # BB#0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <4 x double> %a0, %a1
; AVX512: # BB#0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <8 x float> %a0, %a1
; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a0, %a1
; AVX512: # BB#0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i16> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <8 x double> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x float> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <8 x i64> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i16> %a0, %a1
ret <32 x i1> %1
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vmovdqa %xmm4, %xmm2
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
; AVX512DQ-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vmovdqa %xmm4, %xmm2
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x double> %a0, %a1
; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x float> %a0, %a1
ret <32 x i1> %1
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i64> %a0, %a1
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i32> %a0, %a1
ret <32 x i1> %1
; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm6, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512F-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512DQ-NEXT: vpcmpgtb %ymm0, %ymm6, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
-; AVX512DQ-NEXT: # kill: %XMM2<def> %XMM2<kill> %YMM2<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x double> %a0, %a1
ret <32 x i1> %1
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i64> %a0, %a1
ret <32 x i1> %1
; X64-SSE-NEXT: movq %rsp, %rbp
; X64-SSE-NEXT: andq $-128, %rsp
; X64-SSE-NEXT: subq $256, %rsp # imm = 0x100
-; X64-SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X64-SSE-NEXT: xorps %xmm0, %xmm0
; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
; X64-AVX-NEXT: movq %rsp, %rbp
; X64-AVX-NEXT: andq $-128, %rsp
; X64-AVX-NEXT: subq $256, %rsp # imm = 0x100
-; X64-AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; X64-AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; X64-AVX-NEXT: vpermpd {{.*#+}} ymm0 = ymm3[3,1,2,3]
; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
; ALL-NEXT: movq %rdx, %r8
; ALL-NEXT: movq %rdx, %r10
; ALL-NEXT: movswl %dx, %r9d
-; ALL-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
+; ALL-NEXT: # kill: %edx<def> %edx<kill> %rdx<kill>
; ALL-NEXT: shrl $16, %edx
; ALL-NEXT: shrq $32, %r8
; ALL-NEXT: shrq $48, %r10
; ALL-NEXT: movq %rdi, %rax
; ALL-NEXT: movq %rdi, %rsi
; ALL-NEXT: movswl %di, %ecx
-; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
+; ALL-NEXT: # kill: %edi<def> %edi<kill> %rdi<kill>
; ALL-NEXT: shrl $16, %edi
; ALL-NEXT: shrq $32, %rax
; ALL-NEXT: shrq $48, %rsi
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm9
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm10
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm13
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm14
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm3
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm4
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm9
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm10
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm13
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm14
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm3
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm4
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm9
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm11
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm14
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm15
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm1
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm4
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm9
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm11
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm14
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm15
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm18
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm19
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
; ALL: # BB#0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; ALL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; ALL-NEXT: retq
%1 = fptrunc float %a0 to half
%2 = bitcast half %1 to i16
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r13d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r13d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r13d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
;
; AVX512CD-LABEL: testv2i64:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
;
; AVX512CD-LABEL: testv2i64u:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
;
; AVX512CD-LABEL: testv4i32:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
;
; AVX512CD-LABEL: testv4i32u:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
;
; AVX512CD-LABEL: testv4i64:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
;
; AVX512CD-LABEL: testv4i64u:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
;
; AVX512CD-LABEL: testv8i32:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
;
; AVX512CD-LABEL: testv8i32u:
; AVX512CD: # BB#0:
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
;
; AVX512VPOPCNTDQ-LABEL: testv2i64:
; AVX512VPOPCNTDQ: # BB#0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
;
; AVX512VPOPCNTDQ-LABEL: testv4i32:
; AVX512VPOPCNTDQ: # BB#0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
;
; BITALG_NOVLX-LABEL: testv8i16:
; BITALG_NOVLX: # BB#0:
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
;
; BITALG_NOVLX-LABEL: testv16i8:
; BITALG_NOVLX: # BB#0:
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
;
; AVX512VPOPCNTDQ-LABEL: testv4i64:
; AVX512VPOPCNTDQ: # BB#0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64:
;
; AVX512VPOPCNTDQ-LABEL: testv8i32:
; AVX512VPOPCNTDQ: # BB#0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32:
;
; BITALG_NOVLX-LABEL: testv16i16:
; BITALG_NOVLX: # BB#0:
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
;
; BITALG_NOVLX-LABEL: testv32i8:
; BITALG_NOVLX: # BB#0:
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
;
; AVX512BW-LABEL: var_rotate_v2i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: var_rotate_v4i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: var_rotate_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
;
; AVX512BW-LABEL: constant_rotate_v2i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,14]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: constant_rotate_v4i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: constant_rotate_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
;
; AVX512BW-LABEL: splatconstant_rotate_v2i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: splatconstant_rotate_v4i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v2i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
;
; AVX512BW-LABEL: var_rotate_v4i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v4i64:
;
; AVX512BW-LABEL: var_rotate_v8i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v8i32:
;
; AVX512BW-LABEL: var_rotate_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
;
; AVX512BW-LABEL: constant_rotate_v4i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v4i64:
;
; AVX512BW-LABEL: constant_rotate_v8i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v8i32:
;
; AVX512BW-LABEL: constant_rotate_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
;
; AVX512BW-LABEL: splatconstant_rotate_v4i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v4i64:
;
; AVX512BW-LABEL: splatconstant_rotate_v8i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v8i32:
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i64:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v8i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_4i1_to_4i64:
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i1_to_16i16:
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: kmovd (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_32i1_to_32i8:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: sext_32xi1_to_32xi8:
;
; AVX512-LABEL: var_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512-LABEL: splatvar_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
;
; AVX512-LABEL: constant_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512-LABEL: splatconstant_shift_v2i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
;
; AVX512-LABEL: var_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
;
; AVX512-LABEL: splatvar_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
;
; AVX512-LABEL: constant_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [1,7,31,62]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
;
; AVX512-LABEL: splatconstant_shift_v4i64:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM1<def> %XMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; AVX1-LABEL: insert_reg_and_zero_v4f64:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_and_zero_v4f64:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f64:
; AVX512VL: # BB#0:
-; AVX512VL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x i32> %a) {
; ALL-LABEL: mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
; ALL: # BB#0:
-; ALL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; ALL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; ALL-NEXT: retq
define <16 x float> @mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x float> %a) {
; ALL-LABEL: mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
; ALL: # BB#0:
-; ALL-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; ALL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; ALL-NEXT: retq
define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) {
; AVX512F-LABEL: shuffle_v2i64_v8i64_01010101:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2i64_v8i64_01010101:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
; AVX512F-LABEL: shuffle_v2f64_v8f64_01010101:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2f64_v8f64_01010101:
; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; AVX512F-32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
define <8 x float> @expand(<4 x float> %a) {
; SKX64-LABEL: expand:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $5, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
;
; SKX32-LABEL: expand:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $5, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
define <8 x float> @expand1(<4 x float> %a ) {
; SKX64-LABEL: expand1:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $-86, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
;
; KNL64-LABEL: expand1:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
;
; SKX32-LABEL: expand1:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $-86, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
;
; KNL32-LABEL: expand1:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
define <4 x double> @expand2(<2 x double> %a) {
; SKX64-LABEL: expand2:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
;
; KNL64-LABEL: expand2:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
;
; SKX32-LABEL: expand2:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
;
; KNL32-LABEL: expand2:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
define <8 x i32> @expand3(<4 x i32> %a ) {
; SKX64-LABEL: expand3:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
;
; SKX32-LABEL: expand3:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
define <4 x i64> @expand4(<2 x i64> %a ) {
; SKX64-LABEL: expand4:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
;
; KNL64-LABEL: expand4:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
;
; SKX32-LABEL: expand4:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
;
; KNL32-LABEL: expand4:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
define <16 x float> @expand7(<8 x float> %a) {
; SKX64-LABEL: expand7:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movw $1285, %ax # imm = 0x505
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
;
; KNL64-LABEL: expand7:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movw $1285, %ax # imm = 0x505
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
;
; SKX32-LABEL: expand7:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movw $1285, %ax # imm = 0x505
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
;
; KNL32-LABEL: expand7:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movw $1285, %ax # imm = 0x505
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
define <16 x float> @expand8(<8 x float> %a ) {
; SKX64-LABEL: expand8:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
;
; KNL64-LABEL: expand8:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
;
; SKX32-LABEL: expand8:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
;
; KNL32-LABEL: expand8:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
define <8 x double> @expand9(<4 x double> %a) {
; SKX64-LABEL: expand9:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
;
; KNL64-LABEL: expand9:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
;
; SKX32-LABEL: expand9:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
;
; KNL32-LABEL: expand9:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
define <16 x i32> @expand10(<8 x i32> %a ) {
; SKX64-LABEL: expand10:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
;
; KNL64-LABEL: expand10:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
;
; SKX32-LABEL: expand10:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
;
; KNL32-LABEL: expand10:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
define <8 x i64> @expand11(<4 x i64> %a) {
; SKX64-LABEL: expand11:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
;
; KNL64-LABEL: expand11:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
;
; SKX32-LABEL: expand11:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
;
; KNL32-LABEL: expand11:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
define <16 x float> @expand12(<8 x float> %a) {
; SKX64-LABEL: expand12:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
;
; KNL64-LABEL: expand12:
; KNL64: # BB#0:
-; KNL64-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
;
; SKX32-LABEL: expand12:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; SKX32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
;
; KNL32-LABEL: expand12:
; KNL32: # BB#0:
-; KNL32-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; KNL32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
define <8 x float> @expand14(<4 x float> %a) {
; SKX64-LABEL: expand14:
; SKX64: # BB#0:
-; SKX64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX64-NEXT: movb $20, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
;
; SKX32-LABEL: expand14:
; SKX32: # BB#0:
-; SKX32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; SKX32-NEXT: movb $20, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastb256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastb256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastw256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastw256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastd256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastd %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastd256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastd %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastq256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vpbroadcastq %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastq256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vpbroadcastq %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
define <8 x float> @combine_permps_as_vpbroadcastss256(<4 x float> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastss256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastss256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
define <4 x double> @combine_permps_as_vpbroadcastsd256(<2 x double> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastsd256:
; X32: # BB#0:
-; X32-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastsd256:
; X64: # BB#0:
-; X64-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%c = shufflevector <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1> %a, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 0>
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512VL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
+; VL_BW_DQ-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i16 %a to <16 x i1>
define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; SSE: # BB#0:
-; SSE-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $1, %esi
define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE2: # BB#0:
-; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSSE3: # BB#0:
-; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE41: # BB#0:
-; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # BB#0:
-; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # BB#0:
-; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # BB#0:
-; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE2: # BB#0:
-; SSE2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSE2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSSE3: # BB#0:
-; SSSE3-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSSE3-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE41: # BB#0:
-; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # BB#0:
-; SSE2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSE2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # BB#0:
-; SSSE3-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSSE3-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
-; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE41-NEXT: andl $15, %edi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; SSE: # BB#0:
-; SSE-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %edx
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $3, %edx
define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE2: # BB#0:
-; SSE2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSE2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSE2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSSE3: # BB#0:
-; SSSE3-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSSE3-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSSE3-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSSE3-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSSE3-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSSE3-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE41: # BB#0:
-; SSE41-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; SSE41-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; SSE41-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; SSE41-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; SSE41-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; SSE41-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX: # BB#0:
-; AVX-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
; ALL-NEXT: subq $64, %rsp
-; ALL-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; ALL-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; ALL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; ALL-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; ALL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ALL-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; ALL-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; ALL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; ALL-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; ALL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ALL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ALL-NEXT: andl $7, %edi
; ALL-NEXT: andl $7, %esi
; ALL-NEXT: andl $7, %edx
define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
; ALL: # BB#0:
-; ALL-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; ALL-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; ALL-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; ALL-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; ALL-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; ALL-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; ALL-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; ALL-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; ALL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; ALL-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; ALL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; ALL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; ALL-NEXT: andl $3, %edi
; ALL-NEXT: andl $3, %esi
; ALL-NEXT: andl $3, %edx
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; AVX1-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; AVX1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX1-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: andl $15, %edi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: movzwl (%rsp,%rdi,2), %eax
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; AVX2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; AVX2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: andl $15, %edi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: movzwl (%rsp,%rdi,2), %eax
define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX1: # BB#0:
-; AVX1-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX1-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX1-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX1-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX1-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; AVX1-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; AVX1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX1-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX1-NEXT: andl $7, %edi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movzwl -24(%rsp,%rdi,2), %eax
;
; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX2: # BB#0:
-; AVX2-NEXT: # kill: %R9D<def> %R9D<kill> %R9<def>
-; AVX2-NEXT: # kill: %R8D<def> %R8D<kill> %R8<def>
-; AVX2-NEXT: # kill: %ECX<def> %ECX<kill> %RCX<def>
-; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<def>
-; AVX2-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
-; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
+; AVX2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
+; AVX2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
+; AVX2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; AVX2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; AVX2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
; AVX2-NEXT: andl $7, %edi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movzwl -24(%rsp,%rdi,2), %eax
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <4 x i64> %a0, %a1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
;
; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, %a1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
;
; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
;
; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
; AVX512DQ: # BB#0:
-; AVX512DQ-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512DQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <4 x i64> %a0, %a1
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
+; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
;
; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <4 x i64> %a0, %a1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX2-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <4 x i64> %a0, %a1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX2-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <4 x i64> %a0, %a1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512: # BB#0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i32> %a0, %a1
; AVX512BW: # BB#0:
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
; AVX512: # BB#0:
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
;
; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
; AVX512BW: # BB#0:
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16:
; AVX512F: # BB#0: # %entry
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
;
; AVX512BW-LABEL: trunc8i32_8i16:
; AVX512BW: # BB#0: # %entry
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
+; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
;
; AVX512F-LABEL: trunc8i32_8i8:
; AVX512F: # BB#0: # %entry
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rax)
;
; AVX512BW-LABEL: trunc8i32_8i8:
; AVX512BW: # BB#0: # %entry
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rax)
;
; AVX512BW-LABEL: trunc16i16_16i8:
; AVX512BW: # BB#0: # %entry
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
;
; AVX512F-LABEL: trunc2x4i64_8i32:
; AVX512F: # BB#0: # %entry
-; AVX512F-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
;
; AVX512BW-LABEL: trunc2x4i64_8i32:
; AVX512BW: # BB#0: # %entry
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
;
; AVX512F-LABEL: trunc2x4i64_8i16:
; AVX512F: # BB#0: # %entry
-; AVX512F-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
;
; AVX512BW-LABEL: trunc2x4i64_8i16:
; AVX512BW: # BB#0: # %entry
-; AVX512BW-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512BW-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64:
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64u:
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32:
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32u:
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16u:
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8u:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
;
; AVX512F-LABEL: signbit_sel_v8i32:
; AVX512F: # BB#0:
-; AVX512F-NEXT: # kill: %YMM2<def> %YMM2<kill> %ZMM2<def>
-; AVX512F-NEXT: # kill: %YMM1<def> %YMM1<kill> %ZMM1<def>
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<def>
+; AVX512F-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1
; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v8i32:
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
-; X32-SSE-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
-; X32-SSE-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X32-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
+; X32-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_v3i8_as_i24:
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
-; X64-SSE-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
-; X64-SSE-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X64-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
+; X64-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
-; X32-SSE-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
-; X32-SSE-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X32-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
+; X32-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_v3i8_as_i24:
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
-; X64-SSE-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
-; X64-SSE-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X64-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
+; X64-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
-; X32-SSE-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
-; X32-SSE-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X32-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
+; X32-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_v3i8_as_i24:
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
-; X64-SSE-NEXT: # kill: %DL<def> %DL<kill> %EDX<kill>
-; X64-SSE-NEXT: # kill: %CL<def> %CL<kill> %ECX<kill>
+; X64-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
+; X64-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
; AVX512-NEXT: vpmovb2m %zmm0, %k1
; AVX512-NEXT: kxnorw %k1, %k0, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
-; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%wide.vec = load <64 x i8>, <64 x i8>* %ptr
; AVX512-NEXT: vpmovb2m %zmm0, %k1
; AVX512-NEXT: kxnord %k1, %k0, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
-; AVX512-NEXT: # kill: %YMM0<def> %YMM0<kill> %ZMM0<kill>
+; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
; AVX512-NEXT: retq
%wide.vec = load <128 x i8>, <128 x i8>* %ptr
%v1 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124>
; RUN: llc -verify-machineinstrs -mtriple=x86_64-apple-macosx -show-mc-encoding -mattr=+avx512f < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK64
; RUN: llc -verify-machineinstrs -mtriple=i386-apple-macosx -show-mc-encoding -mattr=+avx512f < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK32
-; Make sure we spill the high numbered ZMM registers and K registers with the right encoding.
+; Make sure we spill the high numbered zmm registers and K registers with the right encoding.
; CHECK-LABEL: foo
; CHECK: kmovq %k7, {{.+}}
; CHECK64: encoding: [0xc4,0xe1,0xf8,0x91,0xbc,0x24,0x68,0x08,0x00,0x00]
;; In functions with 'no_caller_saved_registers' attribute, all registers should
;; be preserved except for registers used for passing/returning arguments.
-;; In the following function registers %RDI, %RSI and %XMM0 are used to store
-;; arguments %a0, %a1 and %b0 accordingally. The value is returned in %RAX.
+;; In the following function, registers %rdi, %rsi and %xmm0 are used to store
+;; arguments %a0, %a1 and %b0 accordingly. The value is returned in %rax.
;; The above registers should not be preserved, however other registers
-;; (that are modified by the function) should be preserved (%RDX and %XMM1).
+;; (that are modified by the function) should be preserved (%rdx and %xmm1).
define x86_64_sysvcc i32 @bar(i32 %a0, i32 %a1, float %b0) #0 {
; CHECK-LABEL: bar:
; CHECK: # BB#0:
;; Because "bar" has 'no_caller_saved_registers' attribute, function "foo"
;; doesn't need to preserve registers except for the arguments passed
-;; to "bar" (%ESI, %EDI and %XMM0).
+;; to "bar" (%esi, %edi and %xmm0).
define x86_64_sysvcc float @foo(i32 %a0, i32 %a1, float %b0) {
; CHECK-LABEL: foo
; CHECK: movaps %xmm0, %xmm1
define <4 x i64> @broadcast128(<2 x i64> %src) {
; CHECK-LABEL: broadcast128:
; CHECK: ## BB#0:
-; CHECK-NEXT: ## kill: %XMM0<def> %XMM0<kill> %YMM0<def>
+; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
; RUN: llc -filetype=obj < %s \
; RUN: | llvm-dwarfdump -debug-info - | FileCheck %s --check-prefix=DWARF
;
-; CHECK: @DEBUG_VALUE: h:x <- [DW_OP_plus_uconst {{.*}}] [%R{{.*}}+0]
+; CHECK: @DEBUG_VALUE: h:x <- [DW_OP_plus_uconst {{.*}}] [%r{{.*}}+0]
; DWARF: DW_TAG_formal_parameter
; DWARF: DW_AT_location
; DWARF-NEXT: DW_OP_reg0 R0
; Function Attrs: optsize ssp
define i64 @_Z3foox(i64 returned) local_unnamed_addr #0 !dbg !13 {
tail call void @llvm.dbg.value(metadata i64 %0, metadata !17, metadata !DIExpression()), !dbg !18
- ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 0 32] %R5
- ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 32 32] %R4
+ ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 0 32] %r5
+ ; CHECK: @DEBUG_VALUE: foo:offset <- [DW_OP_LLVM_fragment 32 32] %r4
%2 = load i64, i64* @g, align 8, !dbg !19, !tbaa !21
%3 = icmp eq i64 %2, %0, !dbg !19
; ASM: pushl %esi
; ASM: .cv_fpo_pushreg %esi
; ASM: .cv_fpo_endprologue
-; ASM: #DEBUG_VALUE: csr1:a <- %ESI
+; ASM: #DEBUG_VALUE: csr1:a <- %esi
; ASM: retl
; ASM: .cv_fpo_endproc
; ASM: pushl %esi
; ASM: .cv_fpo_pushreg %esi
; ASM: .cv_fpo_endprologue
-; ASM: #DEBUG_VALUE: csr2:a <- %ESI
-; ASM: #DEBUG_VALUE: csr2:b <- %EDI
+; ASM: #DEBUG_VALUE: csr2:a <- %esi
+; ASM: #DEBUG_VALUE: csr2:b <- %edi
; ASM: retl
; ASM: .cv_fpo_endproc
; ASM: pushl %esi
; ASM: .cv_fpo_pushreg %esi
; ASM: .cv_fpo_endprologue
-; ASM: #DEBUG_VALUE: csr3:a <- %ESI
-; ASM: #DEBUG_VALUE: csr3:b <- %EDI
-; ASM: #DEBUG_VALUE: csr3:c <- %EBX
+; ASM: #DEBUG_VALUE: csr3:a <- %esi
+; ASM: #DEBUG_VALUE: csr3:b <- %edi
+; ASM: #DEBUG_VALUE: csr3:c <- %ebx
; ASM: retl
; ASM: .cv_fpo_endproc
; ASM: pushl %esi
; ASM: .cv_fpo_pushreg %esi
; ASM: .cv_fpo_endprologue
-; ASM: #DEBUG_VALUE: csr4:a <- %ESI
-; ASM: #DEBUG_VALUE: csr4:b <- %EDI
-; ASM: #DEBUG_VALUE: csr4:c <- %EBX
-; ASM: #DEBUG_VALUE: csr4:d <- %EBP
+; ASM: #DEBUG_VALUE: csr4:a <- %esi
+; ASM: #DEBUG_VALUE: csr4:b <- %edi
+; ASM: #DEBUG_VALUE: csr4:c <- %ebx
+; ASM: #DEBUG_VALUE: csr4:d <- %ebp
; ASM: retl
; ASM: .cv_fpo_endproc
; RUN: llc -mtriple=x86_64-windows-msvc < %s -filetype=obj | llvm-readobj -codeview - | FileCheck %s --check-prefix=OBJ
; This test attempts to exercise gaps in local variables. The local variable 'p'
-; will end up in some CSR (ESI), which will be used in both the BB scheduled
+; will end up in some CSR (esi), which will be used in both the BB scheduled
; discontiguously out of line and the normal return BB. The best way to encode
; this is to use a LocalVariableAddrGap. If the gap is too large, multiple
; ranges should be emitted.
; ASM: callq vardef
; ASM: movl %eax, %esi
; ASM: [[p_b1:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: p <- %ESI
+; ASM: #DEBUG_VALUE: p <- %esi
; ASM: callq barrier
; ASM: movl %esi, %ecx
; ASM: testl %eax, %eax
; ASM: jne .LBB0_5
; ASM: # BB#2: # %if.end
-; ASM: #DEBUG_VALUE: p <- %ESI
+; ASM: #DEBUG_VALUE: p <- %esi
; ASM: callq use
; ASM: jmp .LBB0_4
; ASM: [[p_e1:\.Ltmp[0-9]+]]:
; ASM: retq
; ASM: .LBB0_5: # %if.then4
; ASM: [[p_b2:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: p <- %ESI
+; ASM: #DEBUG_VALUE: p <- %esi
; ASM: callq call_noreturn
; ASM: ud2
; ASM: .Lfunc_end0:
; ASM: .p2align 4, 0x90
; ASM: .LBB0_3: # %for.body
; ASM: [[ox_start:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] %EDI
+; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] %edi
; ASM: .cv_loc 0 1 13 11 # t.c:13:11
; ASM: movl %edi, %ecx
; ASM: callq g
; ASM: movl %eax, %edi
; ASM: [[oy_start:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] %EDI
-; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] %ESI
+; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 0 32] %edi
+; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] %esi
; ASM: .cv_loc 0 1 14 11 # t.c:14:11
; ASM: movl %esi, %ecx
; ASM: callq g
; ASM: movl %eax, %esi
-; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] %ESI
+; ASM: #DEBUG_VALUE: loop_csr:o <- [DW_OP_LLVM_fragment 32 32] %esi
; ASM: cmpl n(%rip), %eax
; ASM: jl .LBB0_3
; ASM: [[oy_end:\.Ltmp[0-9]+]]:
; ASM-LABEL: pad_right: # @pad_right
-; ASM: #DEBUG_VALUE: pad_right:o <- [DW_OP_LLVM_fragment 32 32] %ECX
+; ASM: #DEBUG_VALUE: pad_right:o <- [DW_OP_LLVM_fragment 32 32] %ecx
; ASM: movl %ecx, %eax
; ASM: retq
; ASM-LABEL: pad_left: # @pad_left
-; ASM: #DEBUG_VALUE: pad_left:o <- [DW_OP_LLVM_fragment 0 32] %ECX
+; ASM: #DEBUG_VALUE: pad_left:o <- [DW_OP_LLVM_fragment 0 32] %ecx
; ASM: .cv_loc 2 1 24 3 # t.c:24:3
; ASM: movl %ecx, %eax
; ASM: retq
; ASM-LABEL: nested: # @nested
-; ASM: #DEBUG_VALUE: nested:o <- [DW_OP_deref] [%RCX+0]
+; ASM: #DEBUG_VALUE: nested:o <- [DW_OP_deref] [%rcx+0]
; ASM: movl 12(%rcx), %eax
; ASM: [[p_start:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: nested:p <- [DW_OP_LLVM_fragment 32 32] %EAX
+; ASM: #DEBUG_VALUE: nested:p <- [DW_OP_LLVM_fragment 32 32] %eax
; ASM: retq
; ASM-LABEL: bitpiece_spill: # @bitpiece_spill
; ASM: callq g
; ASM: movl %eax, [[offset_o_x:[0-9]+]](%rsp) # 4-byte Spill
; ASM: [[spill_o_x_start:\.Ltmp[0-9]+]]:
-; ASM: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offset_o_x]], DW_OP_LLVM_fragment 32 32] [%RSP+0]
+; ASM: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offset_o_x]], DW_OP_LLVM_fragment 32 32] [%rsp+0]
; ASM: #APP
; ASM: #NO_APP
; ASM: movl [[offset_o_x]](%rsp), %eax # 4-byte Reload
; ASM: # BB#0: # %entry
; ASM: pushq %rsi
; ASM: subq $32, %rsp
-; ASM: #DEBUG_VALUE: f:p <- %ECX
+; ASM: #DEBUG_VALUE: f:p <- %ecx
; ASM: movl %ecx, %esi
; ASM: [[p_ecx_esi:\.Ltmp.*]]:
-; ASM: #DEBUG_VALUE: f:p <- %ESI
+; ASM: #DEBUG_VALUE: f:p <- %esi
; ASM: callq getint
; ASM: [[after_getint:\.Ltmp.*]]:
-; ASM: #DEBUG_VALUE: a <- %EAX
-; ASM: #DEBUG_VALUE: inlineinc:a <- %EAX
-; ASM: #DEBUG_VALUE: c <- %EAX
+; ASM: #DEBUG_VALUE: a <- %eax
+; ASM: #DEBUG_VALUE: inlineinc:a <- %eax
+; ASM: #DEBUG_VALUE: c <- %eax
; ASM: testl %esi, %esi
; ASM: je .LBB0_2
; ASM: [[after_je:\.Ltmp.*]]:
; ASM: # BB#1: # %if.then
-; ASM-DAG: #DEBUG_VALUE: inlineinc:a <- %EAX
-; ASM-DAG: #DEBUG_VALUE: a <- %EAX
-; ASM-DAG: #DEBUG_VALUE: f:p <- %ESI
+; ASM-DAG: #DEBUG_VALUE: inlineinc:a <- %eax
+; ASM-DAG: #DEBUG_VALUE: a <- %eax
+; ASM-DAG: #DEBUG_VALUE: f:p <- %esi
; ASM: addl $1, %eax
; ASM: [[after_inc_eax:\.Ltmp.*]]:
-; ASM: #DEBUG_VALUE: inlineinc:b <- %EAX
-; ASM: #DEBUG_VALUE: b <- %EAX
+; ASM: #DEBUG_VALUE: inlineinc:b <- %eax
+; ASM: #DEBUG_VALUE: b <- %eax
; ASM: addl $1, x(%rip)
; ASM: [[after_if:\.Ltmp.*]]:
; ASM: .LBB0_2: # %if.else
-; ASM: #DEBUG_VALUE: f:p <- %ESI
+; ASM: #DEBUG_VALUE: f:p <- %esi
; ASM: movl %eax, %ecx
; ASM: addq $32, %rsp
; ASM: popq %rsi
...
# Let's verify that the slot index ranges for the unused variables argc/argv,
-# connected to physical regs %EDI and %RSI, does not overlap with the ranges
+# connected to physical regs %edi and %rsi, does not overlap with the ranges
# for %vreg2 and %vreg3. The register allocator is actually allocating the
-# virtual registers # to %EDI and %ESI, so the ranges for argc/argv should
+# virtual registers to %edi and %esi, so the ranges for argc/argv should
# not cover the whole BB.
#
# CHECKDBG-LABEL: ********** EMITTING LIVE DEBUG VARIABLES **********
-# CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=%EDI
+# CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=%edi
# CHECKDBG-NEXT: [0B;0e):0 BB#0-160B
-# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=%RSI
+# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=%rsi
# CHECKDBG-NEXT: [0B;0e):0 BB#0-160B
# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=%vreg2
# CHECKDBG-NEXT: [16r;64r):0 BB#0-160B
; ASM-LABEL: f: # @f
; ASM: movl %ecx, [[OFF_X:[0-9]+]](%rsp)
-; ASM: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%RSP+0]
+; ASM: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0]
; ASM: callq escape
; ASM: #DEBUG_VALUE: f:x <- 1
; ASM: movl $1, global(%rip)
; FIXME: Needs a fix to LiveDebugVariables
-; ASMX: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%RSP+0]
+; ASMX: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0]
; ASM: movl $2, [[OFF_X]](%rsp)
; ASM: callq escape
; ASM: retq
; is control-dependent.
; CHECK-LABEL: use_dbg_addr:
-; CHECK: #DEBUG_VALUE: use_dbg_addr:o <- [%RSP+0]
+; CHECK: #DEBUG_VALUE: use_dbg_addr:o <- [%rsp+0]
; FIXME: Avoid the use of a single-location location list and use
; DW_AT_start_offset instead.
; CHECK-LABEL: __OpenCL_test_kernel:
; CHECK-DAG: ##DEBUG_VALUE: __OpenCL_test_kernel:ip <-
; CHECK-DAG: ##DEBUG_VALUE: xxx <- 0
-; CHECK-DAG: ##DEBUG_VALUE: gid <- %E{{..$}}
-; CHECK-DAG: ##DEBUG_VALUE: idx <- %E{{..$}}
+; CHECK-DAG: ##DEBUG_VALUE: gid <- %e{{..$}}
+; CHECK-DAG: ##DEBUG_VALUE: idx <- %e{{..$}}
; CHECK-NOT: ##DEBUG_VALUE:
declare <4 x i32> @__amdil_get_global_id_int()
}
; CHECK-LABEL: test
-; CHECK: #DEBUG_VALUE: test:w <- [DW_OP_plus_uconst 8] [%RSP+0]
+; CHECK: #DEBUG_VALUE: test:w <- [DW_OP_plus_uconst 8] [%rsp+0]
; DWARF: DW_AT_location [DW_FORM_sec_offset] (
; DWARF-NEXT: {{.*}} - {{.*}}: DW_OP_breg7 RSP+8)
; of individual register def operands.
; ASM: main: # @main
-; ASM: #DEBUG_VALUE: main:argc <- %ECX
+; ASM: #DEBUG_VALUE: main:argc <- %ecx
; ASM: movl $1, x(%rip)
; ASM: callq clobber
; ASM-NEXT: [[argc_range_end:.Ltmp[0-9]+]]:
-; Previously LiveDebugValues would claim argc was still in ECX after the call.
+; Previously LiveDebugValues would claim argc was still in ecx after the call.
; ASM-NOT: #DEBUG_VALUE: main:argc
; argc is the first debug location.
; CHECK: movl %eax, %ecx
; CHECK: .LBB0_3: # %if.end
; Check that this DEBUG_VALUE comes before the left shift.
-; CHECK: #DEBUG_VALUE: bit_offset <- %ECX
+; CHECK: #DEBUG_VALUE: bit_offset <- %ecx
; CHECK: .cv_loc 0 1 8 28 # t.c:8:28
; CHECK: movl $1, %[[reg:[^ ]*]]
; CHECK: shll %cl, %[[reg]]
; with "clang++ -S -emit-llvm -mllvm -asan-skip-promotable-allocas=0 -fsanitize=address -O0 -g test.cc"
; The address of the (potentially now malloc'ed) alloca ends up
-; in RDI, after which it is spilled to the stack. We record the
+; in rdi, after which it is spilled to the stack. We record the
; spill OFFSET on the stack for checking the debug info below.
-; CHECK: #DEBUG_VALUE: bar:y <- [DW_OP_deref] [%RDI+0]
+; CHECK: #DEBUG_VALUE: bar:y <- [DW_OP_deref] [%rdi+0]
; CHECK: movq %rdi, [[OFFSET:[0-9]+]](%rsp)
; CHECK-NEXT: [[START_LABEL:.Ltmp[0-9]+]]
; CHECK-NEXT: #DEBUG_VALUE: bar:y <- [DW_OP_plus_uconst [[OFFSET]], DW_OP_deref, DW_OP_deref]
; DBG_VALUE for variable "n" is extended into BB#5 from its predecessors BB#3
; and BB#4.
; CHECK: .LBB0_5:
-; CHECK-NEXT: #DEBUG_VALUE: main:n <- %EBX
+; CHECK-NEXT: #DEBUG_VALUE: main:n <- %ebx
; Other register values have been clobbered.
; CHECK-NOT: #DEBUG_VALUE:
; CHECK: movl %ecx, m(%rip)
# CHECK-LABEL: f: # @f
# CHECK: movl %ecx, [[OFF_X:[0-9]+]](%rsp)
-# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%RSP+0]
+# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0]
# CHECK: leaq [[OFF_X]](%rsp), %rsi
# CHECK: callq escape
# CHECK: #DEBUG_VALUE: f:x <- 1
# CHECK: movl $1, global(%rip)
-# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%RSP+0]
+# CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[OFF_X]]] [%rsp+0]
# CHECK: movl $2, [[OFF_X]](%rsp)
# CHECK: callq escape
# CHECK: retq
; Check the DEBUG_VALUE comments for good measure.
; RUN: llc -O0 -mtriple=x86_64-apple-darwin %s -o - -filetype=asm | FileCheck %s -check-prefix=ASM-CHECK
; vla should have a register-indirect address at one point.
-; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [%RCX+0]
+; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [%rcx+0]
; ASM-CHECK: DW_OP_breg2
; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s --check-prefix=PRETTY-PRINT
; CHECK: callq g
; CHECK: movl %eax, [[offs:[0-9]+]](%rsp) # 4-byte Spill
; CHECK: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_LLVM_fragment 32 32] 0
-; CHECK: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offs]], DW_OP_LLVM_fragment 0 32] [%RSP+0]
+; CHECK: #DEBUG_VALUE: bitpiece_spill:o <- [DW_OP_plus_uconst [[offs]], DW_OP_LLVM_fragment 0 32] [%rsp+0]
; CHECK: #APP
; CHECK: #NO_APP
; CHECK: movl [[offs]](%rsp), %eax # 4-byte Reload
; RUN: llc -O0 -filetype=asm %s -o - | FileCheck %s
; Test large integral function arguments passed in multiple registers.
-; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 64 16] %AX
-; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 48 16] %R9W
-; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 32 16] %R10W
-; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 16 16] %R11W
-; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 0 16] %BX
+; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 64 16] %ax
+; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 48 16] %r9w
+; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 32 16] %r10w
+; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 16 16] %r11w
+; CHECK: DEBUG_VALUE: foo:bar <- [DW_OP_LLVM_fragment 0 16] %bx
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
; }
; CHECK-LABEL: _Z10get_stringv:
-; CHECK: #DEBUG_VALUE: get_string:result <- [%RDI+0]
+; CHECK: #DEBUG_VALUE: get_string:result <- [%rdi+0]
; CHECK: movq %rdi, [[OFFS:[0-9]+]](%rsp) # 8-byte Spill
-; CHECK: #DEBUG_VALUE: get_string:result <- [DW_OP_plus_uconst [[OFFS]], DW_OP_deref] [%RSP+0]
+; CHECK: #DEBUG_VALUE: get_string:result <- [DW_OP_plus_uconst [[OFFS]], DW_OP_deref] [%rsp+0]
; CHECK: callq _ZN6stringC1Ei
; CHECK: #APP
; CHECK: #NO_APP
; }
; CHECK-LABEL: _Z3foo10NonTrivial:
-; CHECK: #DEBUG_VALUE: foo:nt <- [%RDI+0]
+; CHECK: #DEBUG_VALUE: foo:nt <- [%rdi+0]
; CHECK: movq %rdi, -8(%rsp) # 8-byte Spill
-; CHECK: #DEBUG_VALUE: foo:nt <- [DW_OP_constu 8, DW_OP_minus, DW_OP_deref] [%RSP+0]
+; CHECK: #DEBUG_VALUE: foo:nt <- [DW_OP_constu 8, DW_OP_minus, DW_OP_deref] [%rsp+0]
; CHECK: #APP
; CHECK: #NO_APP
; CHECK: movq -8(%rsp), %rax # 8-byte Reload
; CHECK-LABEL: f: # @f
; CHECK: callq g
; CHECK: movl %eax, [[X_OFFS:[0-9]+]](%rsp) # 4-byte Spill
-; CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[X_OFFS]]] [%RSP+0]
+; CHECK: #DEBUG_VALUE: f:x <- [DW_OP_plus_uconst [[X_OFFS]]] [%rsp+0]
; CHECK: #APP
; CHECK: #NO_APP
; CHECK: callq g
; CHECK: movl %eax, %[[CSR:[^ ]*]]
-; CHECK: #DEBUG_VALUE: f:y <- %ESI
+; CHECK: #DEBUG_VALUE: f:y <- %esi
; CHECK: movl %[[CSR]], %ecx
; CHECK: callq g
; CHECK: movl %[[CSR]], %ecx
; CHECK: movl %[[CSR]], %ecx
; CHECK: callq g
; CHECK: movl [[X_OFFS]](%rsp), %eax # 4-byte Reload
-; CHECK: #DEBUG_VALUE: f:x <- %EAX
+; CHECK: #DEBUG_VALUE: f:x <- %eax
; CHECK: addl %[[CSR]], %eax
; DWARF: DW_TAG_variable
; RUN: llc -O0 -mtriple=x86_64-apple-darwin -filetype=asm %s -o - | FileCheck %s
; Ensure that we generate an indirect location for the variable length array a.
-; CHECK: ##DEBUG_VALUE: vla:a <- [DW_OP_deref] [%RCX+0]
+; CHECK: ##DEBUG_VALUE: vla:a <- [DW_OP_deref] [%rcx+0]
; CHECK: DW_OP_breg2
; rdar://problem/13658587
;
subl $8, %esp
leal -4(%ebp), %eax
Lvar_begin0:
- #DEBUG_VALUE: g:x <- %EAX
+ #DEBUG_VALUE: g:x <- %eax
.cv_loc 0 1 4 7 # <stdin>:4:7
movl $0, -4(%ebp)
.cv_loc 0 1 5 3 # <stdin>:5:3
// rdar://8456364
// CHECK: movw %cs, %ax
-mov %CS, %ax
+mov %cs, %ax
// rdar://8456391
fcmovb %st(1), %st(0) // CHECK: fcmovb %st(1), %st(0)
fdivrp %st(0), %st(1) // CHECK: encoding: [0xde,0xf9]
fdivrp %st(1), %st(0) // CHECK: encoding: [0xde,0xf9]
-fsubrp %ST(0), %ST(1) // CHECK: encoding: [0xde,0xe9]
-fsubrp %ST(1), %ST(0) // CHECK: encoding: [0xde,0xe9]
+fsubrp %st(0), %st(1) // CHECK: encoding: [0xde,0xe9]
+fsubrp %st(1), %st(0) // CHECK: encoding: [0xde,0xe9]
// also PR8861
fdivp %st(0), %st(1) // CHECK: encoding: [0xde,0xf1]