printAnnotation(OS, Annot);
}
-void VEInstPrinter::printOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(opNum);
+ const MCOperand &MO = MI->getOperand(OpNum);
if (MO.isReg()) {
printRegName(O, MO.getReg());
MO.getExpr()->print(O, &MAI);
}
-void VEInstPrinter::printMemASXOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printMemASXOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O, const char *Modifier) {
// If this is an ADD operand, emit it like normal operands.
if (Modifier && !strcmp(Modifier, "arith")) {
- printOperand(MI, opNum, STI, O);
+ printOperand(MI, OpNum, STI, O);
O << ", ";
- printOperand(MI, opNum + 1, STI, O);
+ printOperand(MI, OpNum + 1, STI, O);
return;
}
- const MCOperand &MO = MI->getOperand(opNum + 1);
- if (!MO.isImm() || MO.getImm() != 0) {
- printOperand(MI, opNum + 1, STI, O);
+ if (MI->getOperand(OpNum + 2).isImm() &&
+ MI->getOperand(OpNum + 2).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ printOperand(MI, OpNum + 2, STI, O);
+ }
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0 &&
+ MI->getOperand(OpNum).isImm() && MI->getOperand(OpNum).getImm() == 0) {
+ if (MI->getOperand(OpNum + 2).isImm() &&
+ MI->getOperand(OpNum + 2).getImm() == 0) {
+ O << "0";
+ } else {
+ // don't print "+0,+0"
+ }
+ } else {
+ O << "(";
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ printOperand(MI, OpNum + 1, STI, O);
+ }
+ if (MI->getOperand(OpNum).isImm() && MI->getOperand(OpNum).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ O << ", ";
+ printOperand(MI, OpNum, STI, O);
+ }
+ O << ")";
+ }
+}
+
+void VEInstPrinter::printMemASOperandASX(const MCInst *MI, int OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O, const char *Modifier) {
+ // If this is an ADD operand, emit it like normal operands.
+ if (Modifier && !strcmp(Modifier, "arith")) {
+ printOperand(MI, OpNum, STI, O);
+ O << ", ";
+ printOperand(MI, OpNum + 1, STI, O);
+ return;
+ }
+
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ printOperand(MI, OpNum + 1, STI, O);
+ }
+ if (MI->getOperand(OpNum).isImm() && MI->getOperand(OpNum).getImm() == 0) {
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0) {
+ O << "0";
+ } else {
+ // don't print "(0)"
+ }
+ } else {
+ O << "(, ";
+ printOperand(MI, OpNum, STI, O);
+ O << ")";
}
- O << "(,";
- printOperand(MI, opNum, STI, O);
- O << ")";
}
-void VEInstPrinter::printMemASOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printMemASOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O, const char *Modifier) {
// If this is an ADD operand, emit it like normal operands.
if (Modifier && !strcmp(Modifier, "arith")) {
- printOperand(MI, opNum, STI, O);
+ printOperand(MI, OpNum, STI, O);
O << ", ";
- printOperand(MI, opNum + 1, STI, O);
+ printOperand(MI, OpNum + 1, STI, O);
return;
}
- const MCOperand &MO = MI->getOperand(opNum + 1);
+ const MCOperand &MO = MI->getOperand(OpNum + 1);
if (!MO.isImm() || MO.getImm() != 0) {
- printOperand(MI, opNum + 1, STI, O);
+ printOperand(MI, OpNum + 1, STI, O);
}
O << "(";
- printOperand(MI, opNum, STI, O);
+ printOperand(MI, OpNum, STI, O);
O << ")";
}
-void VEInstPrinter::printCCOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printCCOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &O) {
- int CC = (int)MI->getOperand(opNum).getImm();
+ int CC = (int)MI->getOperand(OpNum).getImm();
O << VECondCodeToString((VECC::CondCode)CC);
}
raw_ostream &);
static const char *getRegisterName(unsigned RegNo);
- void printOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
+ void printOperand(const MCInst *MI, int OpNum, const MCSubtargetInfo &STI,
raw_ostream &OS);
- void printMemASXOperand(const MCInst *MI, int opNum,
+ void printMemASXOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &OS,
const char *Modifier = nullptr);
- void printMemASOperand(const MCInst *MI, int opNum,
+ void printMemASOperandASX(const MCInst *MI, int OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &OS,
+ const char *Modifier = nullptr);
+ void printMemASOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &OS,
const char *Modifier = nullptr);
- void printCCOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
+ void printCCOperand(const MCInst *MI, int OpNum, const MCSubtargetInfo &STI,
raw_ostream &OS);
};
} // namespace llvm
static void emitLEAzzi(MCStreamer &OutStreamer, MCOperand &Imm, MCOperand &RD,
const MCSubtargetInfo &STI) {
MCInst LEAInst;
- LEAInst.setOpcode(VE::LEAzzi);
+ LEAInst.setOpcode(VE::LEAzii);
LEAInst.addOperand(RD);
+ MCOperand CZero = MCOperand::createImm(0);
+ LEAInst.addOperand(CZero);
+ LEAInst.addOperand(CZero);
LEAInst.addOperand(Imm);
OutStreamer.emitInstruction(LEAInst, STI);
}
static void emitLEASLzzi(MCStreamer &OutStreamer, MCOperand &Imm, MCOperand &RD,
const MCSubtargetInfo &STI) {
MCInst LEASLInst;
- LEASLInst.setOpcode(VE::LEASLzzi);
+ LEASLInst.setOpcode(VE::LEASLzii);
LEASLInst.addOperand(RD);
+ MCOperand CZero = MCOperand::createImm(0);
+ LEASLInst.addOperand(CZero);
+ LEASLInst.addOperand(CZero);
LEASLInst.addOperand(Imm);
OutStreamer.emitInstruction(LEASLInst, STI);
}
MCInst LEAInst;
LEAInst.setOpcode(VE::LEAzii);
LEAInst.addOperand(RD);
+ MCOperand CZero = MCOperand::createImm(0);
+ LEAInst.addOperand(CZero);
LEAInst.addOperand(RS1);
LEAInst.addOperand(Imm);
OutStreamer.emitInstruction(LEAInst, STI);
const MCSubtargetInfo &STI) {
MCInst LEASLInst;
LEASLInst.setOpcode(VE::LEASLrri);
+ LEASLInst.addOperand(RD);
LEASLInst.addOperand(RS1);
LEASLInst.addOperand(RS2);
- LEASLInst.addOperand(RD);
LEASLInst.addOperand(Imm);
OutStreamer.emitInstruction(LEASLInst, STI);
}
// st %plt, 32(,%sp)
// or %fp, 0, %sp
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
.addImm(0)
+ .addImm(0)
.addReg(VE::SX9);
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
+ .addImm(0)
.addImm(8)
.addReg(VE::SX10);
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
+ .addImm(0)
.addImm(24)
.addReg(VE::SX15);
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
+ .addImm(0)
.addImm(32)
.addReg(VE::SX16);
BuildMI(MBB, MBBI, dl, TII.get(VE::ORri), VE::SX9)
BuildMI(MBB, MBBI, dl, TII.get(VE::ORri), VE::SX11)
.addReg(VE::SX9)
.addImm(0);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX16)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX16)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(32);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX15)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX15)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(24);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX10)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX10)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(8);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX9)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX9)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(0);
}
// lea %s13,%lo(NumBytes)
// and %s13,%s13,(32)0
// lea.sl %sp,%hi(NumBytes)(%sp, %s13)
- BuildMI(MBB, MBBI, dl, TII.get(VE::LEAzzi), VE::SX13)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LEAzii), VE::SX13)
+ .addImm(0)
+ .addImm(0)
.addImm(Lo_32(NumBytes));
BuildMI(MBB, MBBI, dl, TII.get(VE::ANDrm0), VE::SX13)
.addReg(VE::SX13)
void Select(SDNode *N) override;
// Complex Pattern Selectors.
- bool SelectADDRrr(SDValue N, SDValue &R1, SDValue &R2);
- bool SelectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
+ bool selectADDRrri(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRrii(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRzri(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRzii(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
StringRef getPassName() const override {
return "VE DAG->DAG Pattern Instruction Selection";
private:
SDNode *getGlobalBaseReg();
+
+ bool matchADDRrr(SDValue N, SDValue &Base, SDValue &Index);
+ bool matchADDRri(SDValue N, SDValue &Base, SDValue &Offset);
};
} // end anonymous namespace
-bool VEDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
+bool VEDAGToDAGISel::selectADDRrri(SDValue Addr, SDValue &Base, SDValue &Index,
+ SDValue &Offset) {
if (Addr.getOpcode() == ISD::FrameIndex)
return false;
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
return false; // direct calls.
- if (Addr.getOpcode() == ISD::ADD) {
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
- if (isInt<13>(CN->getSExtValue()))
- return false; // Let the reg+imm pattern catch this!
- if (Addr.getOperand(0).getOpcode() == VEISD::Lo ||
- Addr.getOperand(1).getOpcode() == VEISD::Lo)
- return false; // Let the reg+imm pattern catch this!
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
+ SDValue LHS, RHS;
+ if (matchADDRri(Addr, LHS, RHS)) {
+ if (matchADDRrr(LHS, Base, Index)) {
+ Offset = RHS;
+ return true;
+ }
+ // Return false to try selectADDRrii.
+ return false;
+ }
+ if (matchADDRrr(Addr, LHS, RHS)) {
+ if (matchADDRri(RHS, Index, Offset)) {
+ Base = LHS;
+ return true;
+ }
+ if (matchADDRri(LHS, Base, Offset)) {
+ Index = RHS;
+ return true;
+ }
+ Base = LHS;
+ Index = RHS;
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+ }
+ return false; // Let the reg+imm(=0) pattern catch this!
+}
+
+bool VEDAGToDAGISel::selectADDRrii(SDValue Addr, SDValue &Base, SDValue &Index,
+ SDValue &Offset) {
+ if (matchADDRri(Addr, Base, Offset)) {
+ Index = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
return true;
}
- return false; // Let the reg+imm pattern catch this!
+ Base = Addr;
+ Index = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+}
+
+bool VEDAGToDAGISel::selectADDRzri(SDValue Addr, SDValue &Index,
+                                   SDValue &Offset) {
+  // Deliberately never matches: selection should fall through to the
+  // catch-all ADDRrii form instead of the zero-base + register-index form.
+  // Prefer ADDRrii.
+  return false;
+}
+
+bool VEDAGToDAGISel::selectADDRzii(SDValue Addr, SDValue &Base, SDValue &Index,
+                                   SDValue &Offset) {
+  if (isa<FrameIndexSDNode>(Addr))
+    return false;
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress ||
+      Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+    return false; // direct calls.
+
+  // Match a plain 32-bit constant address as zero-base, zero-index plus
+  // displacement.  Use dyn_cast<> here: cast<> asserts on non-constant
+  // nodes, but this predicate must simply fail for them.
+  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr)) {
+    if (isInt<32>(CN->getSExtValue())) {
+      Base = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+      Index = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+      Offset =
+          CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr), MVT::i32);
+      return true;
+    }
+  }
+  return false;
+}
-bool VEDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base,
+bool VEDAGToDAGISel::selectADDRri(SDValue Addr, SDValue &Base,
SDValue &Offset) {
+ if (matchADDRri(Addr, Base, Offset))
+ return true;
+
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+}
+
+/// Try to decompose \p Addr into a (base register, index register) pair for
+/// the ASX addressing mode.  Accepts ISD::ADD, and ISD::OR when the operands
+/// share no set bits (an 'add' disguised as an 'or').  Refuses frame indexes,
+/// direct-call targets, and VEISD::Lo operands, which are matched by
+/// dedicated patterns instead.
+bool VEDAGToDAGISel::matchADDRrr(SDValue Addr, SDValue &Base, SDValue &Index) {
+  if (dyn_cast<FrameIndexSDNode>(Addr))
+    return false;
+  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+      Addr.getOpcode() == ISD::TargetGlobalAddress ||
+      Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+    return false; // direct calls.
+
+  if (Addr.getOpcode() == ISD::ADD) {
+    ; // Nothing to do here.
+  } else if (Addr.getOpcode() == ISD::OR) {
+    // We want to look through a transform in InstCombine and DAGCombiner that
+    // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
+    if (!CurDAG->haveNoCommonBitsSet(Addr.getOperand(0), Addr.getOperand(1)))
+      return false;
+  } else {
+    return false;
+  }
+
+  if (Addr.getOperand(0).getOpcode() == VEISD::Lo ||
+      Addr.getOperand(1).getOpcode() == VEISD::Lo)
+    return false; // Let the LEASL patterns catch this!
+
+  Base = Addr.getOperand(0);
+  Index = Addr.getOperand(1);
+  return true;
+}
+
+bool VEDAGToDAGISel::matchADDRri(SDValue Addr, SDValue &Base, SDValue &Offset) {
auto AddrTy = Addr->getValueType(0);
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), AddrTy);
if (CurDAG->isBaseWithConstantOffset(Addr)) {
ConstantSDNode *CN = cast<ConstantSDNode>(Addr.getOperand(1));
- if (isInt<13>(CN->getSExtValue())) {
+ if (isInt<32>(CN->getSExtValue())) {
if (FrameIndexSDNode *FIN =
dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
// Constant offset from frame ref.
return true;
}
}
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
- return true;
+ return false;
}
void VEDAGToDAGISel::Select(SDNode *N) {
/// any side effects other than loading from the stack slot.
unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (MI.getOpcode() == VE::LDSri || MI.getOpcode() == VE::LDLri ||
- MI.getOpcode() == VE::LDUri) {
+ if (MI.getOpcode() == VE::LDrii || // I64
+ MI.getOpcode() == VE::LDLSXrii || // I32
+ MI.getOpcode() == VE::LDUrii // F32
+ ) {
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
- MI.getOperand(2).getImm() == 0) {
+ MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
+ MI.getOperand(3).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
/// any side effects other than storing to the stack slot.
unsigned VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (MI.getOpcode() == VE::STSri || MI.getOpcode() == VE::STLri ||
- MI.getOpcode() == VE::STUri) {
+ if (MI.getOpcode() == VE::STrii || // I64
+ MI.getOpcode() == VE::STLrii || // I32
+ MI.getOpcode() == VE::STUrii // F32
+ ) {
if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
- MI.getOperand(1).getImm() == 0) {
+ MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(0).getIndex();
- return MI.getOperand(2).getReg();
+ return MI.getOperand(3).getReg();
}
}
return 0;
// On the order of operands here: think "[FrameIdx + 0] = SrcReg".
if (RC == &VE::I64RegClass) {
- BuildMI(MBB, I, DL, get(VE::STSri))
+ BuildMI(MBB, I, DL, get(VE::STrii))
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (RC == &VE::I32RegClass) {
- BuildMI(MBB, I, DL, get(VE::STLri))
+ BuildMI(MBB, I, DL, get(VE::STLrii))
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (RC == &VE::F32RegClass) {
- BuildMI(MBB, I, DL, get(VE::STUri))
+ BuildMI(MBB, I, DL, get(VE::STUrii))
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else
MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
if (RC == &VE::I64RegClass) {
- BuildMI(MBB, I, DL, get(VE::LDSri), DestReg)
+ BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addMemOperand(MMO);
} else if (RC == &VE::I32RegClass) {
- BuildMI(MBB, I, DL, get(VE::LDLri), DestReg)
+ BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addMemOperand(MMO);
} else if (RC == &VE::F32RegClass) {
- BuildMI(MBB, I, DL, get(VE::LDUri), DestReg)
+ BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addMemOperand(MMO);
} else
report_fatal_error("Can't load this register from stack slot");
// Update machine-CFG edges
BB->addSuccessor(sinkMBB);
- BuildMI(BB, dl, TII.get(VE::LDSri), VE::SX61)
+ BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
.addReg(VE::SX14)
+ .addImm(0)
.addImm(0x18);
BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
.addReg(VE::SX0)
.addImm(0);
- BuildMI(BB, dl, TII.get(VE::LEAzzi), VE::SX63)
+ BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
+ .addImm(0)
+ .addImm(0)
.addImm(0x13b);
BuildMI(BB, dl, TII.get(VE::SHMri))
.addReg(VE::SX61)
// Instruction Pattern Stuff
//===----------------------------------------------------------------------===//
+def LO7 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(SignExtend32(N->getSExtValue(), 7),
+ SDLoc(N), MVT::i32);
+}]>;
def simm7 : PatLeaf<(imm), [{ return isInt<7>(N->getSExtValue()); }]>;
def simm32 : PatLeaf<(imm), [{ return isInt<32>(N->getSExtValue()); }]>;
def uimm32 : PatLeaf<(imm), [{ return isUInt<32>(N->getZExtValue()); }]>;
}]>;
// Addressing modes.
-def ADDRrr : ComplexPattern<iPTR, 2, "SelectADDRrr", [], []>;
-def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
-
-// ASX format of memory address
-def MEMrr : Operand<iPTR> {
- let PrintMethod = "printMemASXOperand";
- let MIOperandInfo = (ops ptr_rc, ptr_rc);
-}
+def ADDRri : ComplexPattern<iPTR, 2, "selectADDRri", [frameindex], []>;
def MEMri : Operand<iPTR> {
- let PrintMethod = "printMemASXOperand";
+ let PrintMethod = "printMemASOperandASX";
let MIOperandInfo = (ops ptr_rc, i64imm);
}
let MIOperandInfo = (ops ptr_rc, i64imm);
}
+// Addressing modes.
+// SX-Aurora has following fields.
+// sz: register or 0
+// sy: register or immediate (-64 to 63)
+// disp: immediate (-2147483648 to 2147483647)
+//
+// There are two kinds of instruction.
+// ASX format uses sz + sy + disp.
+// AS format uses sz + disp.
+//
+// Moreover, there are four kinds of assembly instruction format.
+// ASX format uses "disp", "disp(, sz)", "disp(sy)", "disp(sy, sz)",
+// "(, sz)", "(sy)", or "(sy, sz)".
+// AS format uses "disp", "disp(, sz)", or "(, sz)" in general.
+// AS format in RRM format uses "disp", "disp(sz)", or "(sz)".
+// AS format in RRM format for host memory access uses "sz", "(sz)",
+// or "disp(sz)".
+//
+// We defined them below.
+//
+// ASX format:
+// MEMrri, MEMrii, MEMzri, MEMzii
+// AS format:
+// will be added later.
+
+def ADDRrri : ComplexPattern<iPTR, 3, "selectADDRrri", [frameindex], []>;
+def ADDRrii : ComplexPattern<iPTR, 3, "selectADDRrii", [frameindex], []>;
+def ADDRzri : ComplexPattern<iPTR, 3, "selectADDRzri", [], []>;
+def ADDRzii : ComplexPattern<iPTR, 3, "selectADDRzii", [], []>;
+//
+// ASX assembly instruction format:
+def VEMEMrriAsmOperand : AsmOperandClass {
+ let Name = "MEMrri";
+ let ParserMethod = "parseMEMOperand";
+}
+def VEMEMriiAsmOperand : AsmOperandClass {
+ let Name = "MEMrii";
+ let ParserMethod = "parseMEMOperand";
+}
+def VEMEMzriAsmOperand : AsmOperandClass {
+ let Name = "MEMzri";
+ let ParserMethod = "parseMEMOperand";
+}
+def VEMEMziiAsmOperand : AsmOperandClass {
+ let Name = "MEMzii";
+ let ParserMethod = "parseMEMOperand";
+}
+def MEMrri : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops ptr_rc, ptr_rc, i32imm);
+ let ParserMatchClass = VEMEMrriAsmOperand;
+}
+def MEMrii : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops ptr_rc, i32imm, i32imm);
+ let ParserMatchClass = VEMEMriiAsmOperand;
+}
+def MEMzri : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops i32imm /* = 0 */, ptr_rc, i32imm);
+ let ParserMatchClass = VEMEMzriAsmOperand;
+}
+def MEMzii : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops i32imm /* = 0 */, i32imm, i32imm);
+ let ParserMatchClass = VEMEMziiAsmOperand;
+}
+
// Branch targets have OtherVT type.
def brtarget32 : Operand<OtherVT> {
let EncoderMethod = "getBranchTarget32OpValue";
// VE Multiclasses for common instruction formats
//===----------------------------------------------------------------------===//
-multiclass RMm<string opcStr, bits<8>opc,
- RegisterClass RC, ValueType Ty,
- Operand immOp, Operand immOp2,
- SDPatternOperator OpNode=null_frag> {
- def rri : RM<
- opc, (outs RC:$sx), (ins RC:$sy, RC:$sz, immOp2:$imm32),
- !strconcat(opcStr, " $sx, ${imm32}($sy, ${sz})")> {
- let cy = 1;
- let cz = 1;
- let hasSideEffects = 0;
- }
- def rzi : RM<
- opc, (outs RC:$sx), (ins RC:$sz, immOp2:$imm32),
- !strconcat(opcStr, " $sx, ${imm32}(${sz})"),
- [(set Ty:$sx, (OpNode Ty:$sz, (Ty simm32:$imm32)))]> {
- let cy = 0;
- let sy = 0;
- let cz = 1;
- let hasSideEffects = 0;
- }
- def zii : RM<
- opc, (outs RC:$sx), (ins immOp:$sy, immOp2:$imm32),
- !strconcat(opcStr, " $sx, ${imm32}(${sy})"),
- [/* Not define DAG pattern here to avoid llvm uses LEAzii for all add
- instructions.
- (set Ty:$sx, (OpNode (Ty simm7:$sy), (Ty simm32:$imm32))) */]> {
- let cy = 0;
- let cz = 0;
- let sz = 0;
- let hasSideEffects = 0;
- }
- def zzi : RM<
- opc, (outs RC:$sx), (ins immOp2:$imm32),
- !strconcat(opcStr, " $sx, $imm32")> {
- let cy = 0;
- let sy = 0;
- let cz = 0;
- let sz = 0;
- let hasSideEffects = 0;
- }
-}
-
multiclass RRmrr<string opcStr, bits<8>opc,
RegisterClass RCo, ValueType Tyo,
RegisterClass RCi, ValueType Tyi,
// Instructions
//===----------------------------------------------------------------------===//
+//-----------------------------------------------------------------------------
+// Section 8.2 - Load/Store instructions
+//-----------------------------------------------------------------------------
+
+// Multiclass for generic RM instructions
+multiclass RMm<string opcStr, bits<8>opc, RegisterClass RC> {
+ def rri : RM<opc, (outs RC:$dest), (ins MEMrri:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+ let cy = 0 in
+ def rii : RM<opc, (outs RC:$dest), (ins MEMrii:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+ let cz = 0 in
+ def zri : RM<opc, (outs RC:$dest), (ins MEMzri:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+ let cy = 0, cz = 0 in
+ def zii : RM<opc, (outs RC:$dest), (ins MEMzii:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+}
+
+// Section 8.2.1 - LEA
+let cx = 0, DecoderMethod = "DecodeLoadI64" in
+defm LEA : RMm<"lea", 0x06, I64>;
+let cx = 1, DecoderMethod = "DecodeLoadI64" in
+defm LEASL : RMm<"lea.sl", 0x06, I64>;
+let cx = 0, DecoderMethod = "DecodeLoadI32", isCodeGenOnly = 1 in
+defm LEA32 : RMm<"lea", 0x06, I32>;
+
+def : Pat<(iPTR ADDRrri:$addr), (LEArri MEMrri:$addr)>;
+def : Pat<(iPTR ADDRrii:$addr), (LEArii MEMrii:$addr)>;
+def : Pat<(add I64:$base, simm32:$disp), (LEArii $base, 0, (LO32 $disp))>;
+def : Pat<(add I64:$base, lozero:$disp), (LEASLrii $base, 0, (HI32 $disp))>;
+def : Pat<(add I32:$base, simm32:$disp),
+ (LEA32rii (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $base, sub_i32), 0,
+ (LO32 $disp))>;
+
+def lea_add : PatFrags<(ops node:$base, node:$idx, node:$disp),
+ [(add (add node:$base, node:$idx), node:$disp),
+ (add (add node:$base, node:$disp), node:$idx)]>;
+def : Pat<(lea_add I64:$base, simm7:$idx, simm32:$disp),
+ (LEArii $base, (LO7 $idx), (LO32 $disp))>;
+def : Pat<(lea_add I64:$base, I64:$idx, simm32:$disp),
+ (LEArri $base, $idx, (LO32 $disp))>;
+def : Pat<(lea_add I64:$base, simm7:$idx, lozero:$disp),
+ (LEASLrii $base, (LO7 $idx), (HI32 $disp))>;
+def : Pat<(lea_add I64:$base, I64:$idx, lozero:$disp),
+ (LEASLrri $base, $idx, (HI32 $disp))>;
+
+// Multiclass for load instructions.
+let mayLoad = 1, hasSideEffects = 0 in
+multiclass LOADm<string opcStr, bits<8> opc, RegisterClass RC, ValueType Ty,
+ SDPatternOperator OpNode = null_frag> {
+ def rri : RM<opc, (outs RC:$dest), (ins MEMrri:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRrri:$addr))]>;
+ let cy = 0 in
+ def rii : RM<opc, (outs RC:$dest), (ins MEMrii:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRrii:$addr))]>;
+ let cz = 0 in
+ def zri : RM<opc, (outs RC:$dest), (ins MEMzri:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRzri:$addr))]>;
+ let cy = 0, cz = 0 in
+ def zii : RM<opc, (outs RC:$dest), (ins MEMzii:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRzii:$addr))]>;
+}
+
+// Section 8.2.2 - LDS
+let DecoderMethod = "DecodeLoadI64" in
+defm LD : LOADm<"ld", 0x01, I64, i64, load>;
+def : Pat<(f64 (load ADDRrri:$addr)), (LDrri MEMrri:$addr)>;
+def : Pat<(f64 (load ADDRrii:$addr)), (LDrii MEMrii:$addr)>;
+def : Pat<(f64 (load ADDRzri:$addr)), (LDzri MEMzri:$addr)>;
+def : Pat<(f64 (load ADDRzii:$addr)), (LDzii MEMzii:$addr)>;
+
+// Section 8.2.3 - LDU
+let DecoderMethod = "DecodeLoadF32" in
+defm LDU : LOADm<"ldu", 0x02, F32, f32, load>;
+
+// Section 8.2.4 - LDL
+let DecoderMethod = "DecodeLoadI32" in
+defm LDLSX : LOADm<"ldl.sx", 0x03, I32, i32, load>;
+let cx = 1, DecoderMethod = "DecodeLoadI32" in
+defm LDLZX : LOADm<"ldl.zx", 0x03, I32, i32, load>;
+
+// Section 8.2.5 - LD2B
+let DecoderMethod = "DecodeLoadI16" in
+defm LD2BSX : LOADm<"ld2b.sx", 0x04, I32, i32, sextloadi16>;
+let cx = 1, DecoderMethod = "DecodeLoadI16" in
+defm LD2BZX : LOADm<"ld2b.zx", 0x04, I32, i32, zextloadi16>;
+
+// Section 8.2.6 - LD1B
+let DecoderMethod = "DecodeLoadI8" in
+defm LD1BSX : LOADm<"ld1b.sx", 0x05, I32, i32, sextloadi8>;
+let cx = 1, DecoderMethod = "DecodeLoadI8" in
+defm LD1BZX : LOADm<"ld1b.zx", 0x05, I32, i32, zextloadi8>;
+
+// Multiclass for store instructions.
+let mayStore = 1 in
+multiclass STOREm<string opcStr, bits<8> opc, RegisterClass RC, ValueType Ty,
+ SDPatternOperator OpNode = null_frag> {
+ def rri : RM<opc, (outs), (ins MEMrri:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRrri:$addr)]>;
+ let cy = 0 in
+ def rii : RM<opc, (outs), (ins MEMrii:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRrii:$addr)]>;
+ let cz = 0 in
+ def zri : RM<opc, (outs), (ins MEMzri:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRzri:$addr)]>;
+ let cy = 0, cz = 0 in
+ def zii : RM<opc, (outs), (ins MEMzii:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRzii:$addr)]>;
+}
+
+// Section 8.2.7 - STS
+let DecoderMethod = "DecodeStoreI64" in
+defm ST : STOREm<"st", 0x11, I64, i64, store>;
+def : Pat<(store f64:$src, ADDRrri:$addr), (STrri MEMrri:$addr, $src)>;
+def : Pat<(store f64:$src, ADDRrii:$addr), (STrii MEMrii:$addr, $src)>;
+def : Pat<(store f64:$src, ADDRzri:$addr), (STzri MEMzri:$addr, $src)>;
+def : Pat<(store f64:$src, ADDRzii:$addr), (STzii MEMzii:$addr, $src)>;
+
+// Section 8.2.8 - STU
+let DecoderMethod = "DecodeStoreF32" in
+defm STU : STOREm<"stu", 0x12, F32, f32, store>;
+
+// Section 8.2.9 - STL
+let DecoderMethod = "DecodeStoreI32" in
+defm STL : STOREm<"stl", 0x13, I32, i32, store>;
+
+// Section 8.2.10 - ST2B
+let DecoderMethod = "DecodeStoreI16" in
+defm ST2B : STOREm<"st2b", 0x14, I32, i32, truncstorei16>;
+
+// Section 8.2.11 - ST1B
+let DecoderMethod = "DecodeStoreI8" in
+defm ST1B : STOREm<"st1b", 0x15, I32, i32, truncstorei8>;
+
// CMOV instructions
let cx = 0, cw = 0, cw2 = 0 in
defm CMOVL : RRCMOVm<"cmov.l.${cf}", 0x3B, I64, i64, simm7Op64, uimm6Op64>;
defm CMOVS : RRCMOVm<"cmov.s.${cf}", 0x3B, F32, f32, simm7Op64, uimm6Op32>;
-// LEA and LEASL instruction (load 32 bit imm to low or high part)
-let cx = 0 in
-defm LEA : RMm<"lea", 0x06, I64, i64, simm7Op64, simm32Op64, add>;
-let cx = 1 in
-defm LEASL : RMm<"lea.sl", 0x06, I64, i64, simm7Op64, simm32Op64>;
-let isCodeGenOnly = 1 in {
-let cx = 0 in
-defm LEA32 : RMm<"lea", 0x06, I32, i32, simm7Op32, simm32Op32, add>;
-}
-
-let cx = 0, cy = 1, cz = 0, sz = 0, hasSideEffects = 0 in {
- def LEAasx : RM<
- 0x06, (outs I64:$sx), (ins MEMri:$addr),
- "lea $sx,$addr", [(set iPTR:$sx, ADDRri:$addr)]>;
-}
-
// 5.3.2.2. Fixed-Point Arithmetic Operation Instructions
// ADD instruction
defm CVD : CVTm<"cvt.d.s", 0x0F, I64, f64, F32, f32, simm7Op32, fpextend>;
}
-// Load and Store instructions
-// As 1st step, only uses sz and imm32 to represent $addr
-let mayLoad = 1, hasSideEffects = 0 in {
-let cy = 0, sy = 0, cz = 1 in {
-let cx = 0 in
-def LDSri : RM<
- 0x01, (outs I64:$sx), (ins MEMri:$addr),
- "ld $sx, $addr",
- [(set i64:$sx, (load ADDRri:$addr))]>;
-let cx = 0 in
-def LDUri : RM<
- 0x02, (outs F32:$sx), (ins MEMri:$addr),
- "ldu $sx, $addr",
- [(set f32:$sx, (load ADDRri:$addr))]>;
-let cx = 0 in
-def LDLri : RM<
- 0x03, (outs I32:$sx), (ins MEMri:$addr),
- "ldl.sx $sx, $addr",
- [(set i32:$sx, (load ADDRri:$addr))]>;
-let cx = 1 in
-def LDLUri : RM<
- 0x03, (outs I32:$sx), (ins MEMri:$addr),
- "ldl.zx $sx, $addr",
- [(set i32:$sx, (load ADDRri:$addr))]>;
-let cx = 0 in
-def LD2Bri : RM<
- 0x04, (outs I32:$sx), (ins MEMri:$addr),
- "ld2b.sx $sx, $addr",
- [(set i32:$sx, (sextloadi16 ADDRri:$addr))]>;
-let cx = 1 in
-def LD2BUri : RM<
- 0x04, (outs I32:$sx), (ins MEMri:$addr),
- "ld2b.zx $sx, $addr",
- [(set i32:$sx, (zextloadi16 ADDRri:$addr))]>;
-let cx = 0 in
-def LD1Bri : RM<
- 0x05, (outs I32:$sx), (ins MEMri:$addr),
- "ld1b.sx $sx, $addr",
- [(set i32:$sx, (sextloadi8 ADDRri:$addr))]>;
-let cx = 1 in
-def LD1BUri : RM<
- 0x05, (outs I32:$sx), (ins MEMri:$addr),
- "ld1b.zx $sx, $addr",
- [(set i32:$sx, (zextloadi8 ADDRri:$addr))]>;
-}
-}
-
-let mayStore = 1, hasSideEffects = 0 in {
-let cx = 0, cy = 0, sy = 0, cz = 1 in {
-def STSri : RM<
- 0x11, (outs), (ins MEMri:$addr, I64:$sx),
- "st $sx, $addr",
- [(store i64:$sx, ADDRri:$addr)]>;
-def STUri : RM<
- 0x12, (outs), (ins MEMri:$addr, F32:$sx),
- "stu $sx, $addr",
- [(store f32:$sx, ADDRri:$addr)]>;
-def STLri : RM<
- 0x13, (outs), (ins MEMri:$addr, I32:$sx),
- "stl $sx, $addr",
- [(store i32:$sx, ADDRri:$addr)]>;
-def ST2Bri : RM<
- 0x14, (outs), (ins MEMri:$addr, I32:$sx),
- "st2b $sx, $addr",
- [(truncstorei16 i32:$sx, ADDRri:$addr)]>;
-def ST1Bri : RM<
- 0x15, (outs), (ins MEMri:$addr, I32:$sx),
- "st1b $sx, $addr",
- [(truncstorei8 i32:$sx, ADDRri:$addr)]>;
-}
-}
-
-def : Pat<(f64 (load ADDRri:$addr)), (LDSri ADDRri:$addr)>;
-def : Pat<(store f64:$sx, ADDRri:$addr), (STSri ADDRri:$addr, $sx)>;
-
// Control-flow
// Jump instruction
cz = 1,
isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1,
hasDelaySlot = 1, isCodeGenOnly = 1, hasSideEffects = 0 in {
-def BArr : CF<
- 0x19, (outs), (ins MEMrr:$addr),
- "b.l $addr",
- [(brind ADDRrr:$addr)]>;
def BAri : CF<
0x19, (outs), (ins MEMri:$addr),
"b.l $addr",
def : Pat<(i32 simm7:$val), (OR32im1 imm:$val, 0)>;
def : Pat<(i64 simm7:$val), (ORim1 imm:$val, 0)>;
// Medium immediates.
-def : Pat<(i32 simm32:$val), (LEA32zzi imm:$val)>;
-def : Pat<(i64 simm32:$val), (LEAzzi imm:$val)>;
-def : Pat<(i64 uimm32:$val), (ANDrm0 (LEAzzi imm:$val), 32)>;
+def : Pat<(i32 simm32:$val), (LEA32zii 0, 0, (LO32 $val))>;
+def : Pat<(i64 simm32:$val), (LEAzii 0, 0, (LO32 $val))>;
+def : Pat<(i64 uimm32:$val), (ANDrm0 (LEAzii 0, 0, (LO32 $val)), 32)>;
// Arbitrary immediates.
def : Pat<(i64 lozero:$val),
- (LEASLzzi (HI32 imm:$val))>;
+ (LEASLzii 0, 0, (HI32 imm:$val))>;
def : Pat<(i64 lomsbzero:$val),
- (LEASLrzi (LEAzzi (LO32 imm:$val)), (HI32 imm:$val))>;
+ (LEASLrii (LEAzii 0, 0, (LO32 imm:$val)), 0, (HI32 imm:$val))>;
def : Pat<(i64 imm:$val),
- (LEASLrzi (ANDrm0 (LEAzzi (LO32 imm:$val)), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, (LO32 imm:$val)), 32), 0,
(HI32 imm:$val))>;
// floating point
def : Pat<(f32 fpimm:$val),
- (COPY_TO_REGCLASS (LEASLzzi (LOFP32 $val)), F32)>;
+ (COPY_TO_REGCLASS (LEASLzii 0, 0, (LOFP32 $val)), F32)>;
def : Pat<(f64 fplozero:$val),
- (LEASLzzi (HIFP32 $val))>;
+ (LEASLzii 0, 0, (HIFP32 $val))>;
def : Pat<(f64 fplomsbzero:$val),
- (LEASLrzi (LEAzzi (LOFP32 $val)), (HIFP32 $val))>;
+ (LEASLrii (LEAzii 0, 0, (LOFP32 $val)), 0, (HIFP32 $val))>;
def : Pat<(f64 fpimm:$val),
- (LEASLrzi (ANDrm0 (LEAzzi (LOFP32 $val)), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, (LOFP32 $val)), 32), 0,
(HIFP32 $val))>;
// The same integer registers are used for i32 and i64 values.
-// When registers hold i32 values, the high bits are unused.
+// When registers hold i32 values, the high bits are unused.
// TODO Use standard expansion for shift-based lowering of sext_inreg
// extload, sextload and zextload stuff
-def : Pat<(i64 (sextloadi8 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1Bri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (zextloadi8 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (sextloadi16 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2Bri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (zextloadi16 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (sextloadi32 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (zextloadi32 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (extloadi8 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (extloadi16 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (extloadi32 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
+multiclass EXT64m<SDPatternOperator from,
+ SDPatternOperator torri,
+ SDPatternOperator torii,
+ SDPatternOperator tozri,
+ SDPatternOperator tozii> {
+ def : Pat<(i64 (from ADDRrri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (torri MEMrri:$addr),
+ sub_i32)>;
+ def : Pat<(i64 (from ADDRrii:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (torii MEMrii:$addr),
+ sub_i32)>;
+ def : Pat<(i64 (from ADDRzri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (tozri MEMzri:$addr),
+ sub_i32)>;
+ def : Pat<(i64 (from ADDRzii:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (tozii MEMzii:$addr),
+ sub_i32)>;
+}
+defm : EXT64m<sextloadi8, LD1BSXrri, LD1BSXrii, LD1BSXzri, LD1BSXzii>;
+defm : EXT64m<zextloadi8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : EXT64m<extloadi8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : EXT64m<sextloadi16, LD2BSXrri, LD2BSXrii, LD2BSXzri, LD2BSXzii>;
+defm : EXT64m<zextloadi16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
+defm : EXT64m<extloadi16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
+defm : EXT64m<sextloadi32, LDLSXrri, LDLSXrii, LDLSXzri, LDLSXzii>;
+defm : EXT64m<zextloadi32, LDLZXrri, LDLZXrii, LDLZXzri, LDLZXzii>;
+defm : EXT64m<extloadi32, LDLSXrri, LDLSXrii, LDLSXzri, LDLSXzii>;
// anyextload
-def : Pat<(extloadi8 ADDRri:$addr), (LD1BUri MEMri:$addr)>;
-def : Pat<(extloadi16 ADDRri:$addr), (LD2BUri MEMri:$addr)>;
+multiclass EXT32m<SDPatternOperator from,
+ SDPatternOperator torri,
+ SDPatternOperator torii,
+ SDPatternOperator tozri,
+ SDPatternOperator tozii> {
+ def : Pat<(from ADDRrri:$addr), (torri MEMrri:$addr)>;
+ def : Pat<(from ADDRrii:$addr), (torii MEMrii:$addr)>;
+ def : Pat<(from ADDRzri:$addr), (tozri MEMzri:$addr)>;
+ def : Pat<(from ADDRzii:$addr), (tozii MEMzii:$addr)>;
+}
+defm : EXT32m<extloadi8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : EXT32m<extloadi16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
// truncstore
-def : Pat<(truncstorei8 i64:$src, ADDRri:$addr),
- (ST1Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
-def : Pat<(truncstorei16 i64:$src, ADDRri:$addr),
- (ST2Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
-def : Pat<(truncstorei32 i64:$src, ADDRri:$addr),
- (STLri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+multiclass TRUNC64m<SDPatternOperator from,
+ SDPatternOperator torri,
+ SDPatternOperator torii,
+ SDPatternOperator tozri,
+ SDPatternOperator tozii> {
+ def : Pat<(from i64:$src, ADDRrri:$addr),
+ (torri MEMrri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+ def : Pat<(from i64:$src, ADDRrii:$addr),
+ (torii MEMrii:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+ def : Pat<(from i64:$src, ADDRzri:$addr),
+ (tozri MEMzri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+ def : Pat<(from i64:$src, ADDRzii:$addr),
+ (tozii MEMzii:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+}
+defm : TRUNC64m<truncstorei8, ST1Brri, ST1Brii, ST1Bzri, ST1Bzii>;
+defm : TRUNC64m<truncstorei16, ST2Brri, ST2Brii, ST2Bzri, ST2Bzii>;
+defm : TRUNC64m<truncstorei32, STLrri, STLrii, STLzri, STLzii>;
// Address calculation and its optimization
-def : Pat<(VEhi tglobaladdr:$in), (LEASLzzi tglobaladdr:$in)>;
-def : Pat<(VElo tglobaladdr:$in), (ANDrm0 (LEAzzi tglobaladdr:$in), 32)>;
+def : Pat<(VEhi tglobaladdr:$in), (LEASLzii 0, 0, tglobaladdr:$in)>;
+def : Pat<(VElo tglobaladdr:$in), (ANDrm0 (LEAzii 0, 0, tglobaladdr:$in), 32)>;
def : Pat<(add (VEhi tglobaladdr:$in1), (VElo tglobaladdr:$in2)),
- (LEASLrzi (ANDrm0 (LEAzzi tglobaladdr:$in2), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, tglobaladdr:$in2), 32), 0,
(tglobaladdr:$in1))>;
// GlobalTLS address calculation and its optimization
-def : Pat<(VEhi tglobaltlsaddr:$in), (LEASLzzi tglobaltlsaddr:$in)>;
-def : Pat<(VElo tglobaltlsaddr:$in), (ANDrm0 (LEAzzi tglobaltlsaddr:$in), 32)>;
+def : Pat<(VEhi tglobaltlsaddr:$in), (LEASLzii 0, 0, tglobaltlsaddr:$in)>;
+def : Pat<(VElo tglobaltlsaddr:$in),
+ (ANDrm0 (LEAzii 0, 0, tglobaltlsaddr:$in), 32)>;
def : Pat<(add (VEhi tglobaltlsaddr:$in1), (VElo tglobaltlsaddr:$in2)),
- (LEASLrzi (ANDrm0 (LEAzzi tglobaltlsaddr:$in2), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, tglobaltlsaddr:$in2), 32), 0,
(tglobaltlsaddr:$in1))>;
// Address calculation and its optimization
-def : Pat<(VEhi texternalsym:$in), (LEASLzzi texternalsym:$in)>;
-def : Pat<(VElo texternalsym:$in), (ANDrm0 (LEAzzi texternalsym:$in), 32)>;
+def : Pat<(VEhi texternalsym:$in), (LEASLzii 0, 0, texternalsym:$in)>;
+def : Pat<(VElo texternalsym:$in),
+ (ANDrm0 (LEAzii 0, 0, texternalsym:$in), 32)>;
def : Pat<(add (VEhi texternalsym:$in1), (VElo texternalsym:$in2)),
- (LEASLrzi (ANDrm0 (LEAzzi texternalsym:$in2), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, texternalsym:$in2), 32), 0,
(texternalsym:$in1))>;
// Calls
// VE has 32 bit offset field, so no need to expand a target instruction.
// Directly encode it.
MI.getOperand(FIOperandNum).ChangeToRegister(FramePtr, false);
- MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ MI.getOperand(FIOperandNum + 2).ChangeToImmediate(Offset);
}
void VERegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int Offset;
Offset = TFI->getFrameIndexReference(MF, FrameIndex, FrameReg);
- Offset += MI.getOperand(FIOperandNum + 1).getImm();
+ Offset += MI.getOperand(FIOperandNum + 2).getImm();
replaceFI(MF, II, MI, dl, FIOperandNum, Offset, FrameReg);
}
define signext i8 @func13(i8 signext %0) {
; CHECK-LABEL: func13:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 24
; CHECK-NEXT: sra.w.sx %s0, %s0, 24
; CHECK-NEXT: or %s11, 0, %s9
define signext i16 @func14(i16 signext %0) {
; CHECK-LABEL: func14:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 16
; CHECK-NEXT: sra.w.sx %s0, %s0, 16
; CHECK-NEXT: or %s11, 0, %s9
define i32 @func15(i32 %0) {
; CHECK-LABEL: func15:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add nsw i32 %0, 5
ret i32 %2
define i64 @func16(i64 %0) {
; CHECK-LABEL: func16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: lea %s0, 5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = add nsw i64 %0, 5
ret i64 %2
define zeroext i8 @func18(i8 zeroext %0) {
; CHECK-LABEL: func18:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: and %s0, %s0, (56)0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i8 %0, 5
define zeroext i16 @func19(i16 zeroext %0) {
; CHECK-LABEL: func19:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i16 %0, 5
define i32 @func20(i32 %0) {
; CHECK-LABEL: func20:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i32 %0, 5
ret i32 %2
define i64 @func21(i64 %0) {
; CHECK-LABEL: func21:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: lea %s0, 5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i64 %0, 5
ret i64 %2
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB0_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB1_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB2_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB3_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB4_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB5_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB6_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB7_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB8_3
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB9_3
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, sample_add@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, sample_add@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, sample_add@hi(, %s0)
; CHECK-NEXT: or %s0, 1, (0)1
; CHECK-NEXT: or %s1, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-LABEL: stack_call_int:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: or %s0, 10, (0)1
-; CHECK-NEXT: stl %s0, 248(,%s11)
+; CHECK-NEXT: stl %s0, 248(, %s11)
; CHECK-NEXT: or %s34, 9, (0)1
; CHECK-NEXT: lea %s0, stack_callee_int@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_int@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, stack_callee_int@hi(, %s0)
; CHECK-NEXT: or %s0, 1, (0)1
; CHECK-NEXT: or %s1, 2, (0)1
; CHECK-NEXT: or %s2, 3, (0)1
; CHECK-NEXT: or %s5, 6, (0)1
; CHECK-NEXT: or %s6, 7, (0)1
; CHECK-NEXT: or %s7, 8, (0)1
-; CHECK-NEXT: stl %s34, 240(,%s11)
+; CHECK-NEXT: stl %s34, 240(, %s11)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i32 @stack_callee_int(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10)
; CHECK-LABEL: stack_call_int_szext:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: or %s0, -1, (0)1
-; CHECK-NEXT: stl %s0, 248(,%s11)
+; CHECK-NEXT: stl %s0, 248(, %s11)
; CHECK-NEXT: lea %s34, 65535
; CHECK-NEXT: lea %s1, stack_callee_int_szext@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_int_szext@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, stack_callee_int_szext@hi(, %s1)
; CHECK-NEXT: lea %s1, 255
; CHECK-NEXT: or %s2, 3, (0)1
; CHECK-NEXT: or %s3, 4, (0)1
; CHECK-NEXT: or %s5, 6, (0)1
; CHECK-NEXT: or %s6, 7, (0)1
; CHECK-NEXT: or %s7, 8, (0)1
-; CHECK-NEXT: stl %s34, 240(,%s11)
+; CHECK-NEXT: stl %s34, 240(, %s11)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i32 @stack_callee_int_szext(i1 -1, i8 -1, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i16 -1, i8 -1)
; CHECK-LABEL: stack_call_float:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 1092616192
-; CHECK-NEXT: stl %s0, 252(,%s11)
+; CHECK-NEXT: stl %s0, 252(, %s11)
; CHECK-NEXT: lea %s0, 1091567616
; CHECK-NEXT: lea %s1, stack_callee_float@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(, %s1)
; CHECK-NEXT: lea.sl %s1, 1065353216
; CHECK-NEXT: lea.sl %s2, 1073741824
; CHECK-NEXT: lea.sl %s3, 1077936128
; CHECK-NEXT: lea.sl %s6, 1086324736
; CHECK-NEXT: lea.sl %s7, 1088421888
; CHECK-NEXT: lea.sl %s34, 1090519040
-; CHECK-NEXT: stl %s0, 244(,%s11)
+; CHECK-NEXT: stl %s0, 244(, %s11)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: or %s1, 0, %s2
; CHECK-NEXT: or %s2, 0, %s3
define float @stack_call_float2(float %p0) {
; CHECK-LABEL: stack_call_float2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 252(,%s11)
+; CHECK-NEXT: stu %s0, 252(, %s11)
; CHECK-NEXT: lea %s1, stack_callee_float@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(%s1)
-; CHECK-NEXT: stu %s0, 244(,%s11)
+; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(, %s1)
+; CHECK-NEXT: stu %s0, 244(, %s11)
; CHECK-NEXT: or %s1, 0, %s0
; CHECK-NEXT: or %s2, 0, %s0
; CHECK-NEXT: or %s3, 0, %s0
define i32 @stack_stack_arg_i32_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
; CHECK-LABEL: stack_stack_arg_i32_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 424(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 424(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret i32 %9
}
define i64 @stack_stack_arg_i64_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8, i64 %9) {
; CHECK-LABEL: stack_stack_arg_i64_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 424(,%s11)
+; CHECK-NEXT: ld %s0, 424(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 %9
}
define float @stack_stack_arg_f32_r9(float %p0, float %p1, float %p2, float %p3, float %p4, float %p5, float %p6, float %p7, float %s0, float %s1) {
; CHECK-LABEL: stack_stack_arg_f32_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 428(,%s11)
+; CHECK-NEXT: ldu %s0, 428(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret float %s1
}
define i32 @stack_stack_arg_i32f32_r8(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
; CHECK-LABEL: stack_stack_arg_i32f32_r8:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 416(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 416(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret i32 %s0
}
define float @stack_stack_arg_i32f32_r9(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
; CHECK-LABEL: stack_stack_arg_i32f32_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 428(,%s11)
+; CHECK-NEXT: ldu %s0, 428(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret float %s1
}
define void @fun(%struct.a* noalias nocapture sret %a, i32 %p1, i32 %p2) {
; CHECK-LABEL: fun:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s1, (,%s0)
-; CHECK-NEXT: stl %s2, 4(,%s0)
+; CHECK-NEXT: stl %s1, (, %s0)
+; CHECK-NEXT: stl %s2, 4(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%a.zero = getelementptr inbounds %struct.a, %struct.a* %a, i64 0, i32 0
store i32 %p1, i32* %a.zero, align 4
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, callee@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, callee@hi(%s0)
-; CHECK-NEXT: lea %s0,-8(,%s9)
+; CHECK-NEXT: lea.sl %s12, callee@hi(, %s0)
+; CHECK-NEXT: lea %s0, -8(, %s9)
; CHECK-NEXT: or %s1, 3, (0)1
; CHECK-NEXT: or %s2, 4, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: ld %s0, -8(,%s9)
+; CHECK-NEXT: ld %s0, -8(, %s9)
; CHECK-NEXT: lea %s1, A@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, A@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, A@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i64, align 8
%a.bc = bitcast i64* %a to %struct.a*
; CHECK-NEXT: lea.sl %s2, 1160773632
; CHECK-NEXT: or %s1, %s1, %s2
; CHECK-NEXT: lea %s2, 1048576
-; CHECK-NEXT: lea.sl %s2, -986710016(%s2)
+; CHECK-NEXT: lea.sl %s2, -986710016(, %s2)
; CHECK-NEXT: fadd.d %s1, %s1, %s2
; CHECK-NEXT: lea %s2, -1
; CHECK-NEXT: and %s2, %s2, (32)0
; CHECK-LABEL: p15032385535i64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 2147483647
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385535
}
; CHECK-LABEL: p15032385535si64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 2147483647
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385535
}
; CHECK-LABEL: p15032385535zi64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 2147483647
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385535
}
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -2147483648
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385536
}
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -2147483648
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385536
}
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -2147483648
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385536
}
; CHECK-LABEL: p2p3f64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 1717986918
-; CHECK-NEXT: lea.sl %s0, 1073899110(%s0)
+; CHECK-NEXT: lea.sl %s0, 1073899110(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret double 2.3
}
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -1717986918
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 1080035737(%s0)
+; CHECK-NEXT: lea.sl %s0, 1080035737(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret double 128.3
}
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: sll %s0, %s0, 32
; CHECK-NEXT: ldz %s0, %s0
-; CHECK-NEXT: lea %s0, -16(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -16, %s0
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i16 @llvm.ctlz.i16(i16 %p, i1 true)
ret i16 %r
; CHECK-NEXT: and %s0, %s0, (56)0
; CHECK-NEXT: sll %s0, %s0, 32
; CHECK-NEXT: ldz %s0, %s0
-; CHECK-NEXT: lea %s0, -24(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -24, %s0
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i8 @llvm.ctlz.i8(i8 %p, i1 true)
ret i8 %r
define i64 @func1(i64 %p) {
; CHECK-LABEL: func1:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: lea %s1, -1(, %s0)
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: pcnt %s0, %s0
define i32 @func2(i32 %p) {
; CHECK-LABEL: func2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: adds.w.sx %s1, -1, %s0
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: and %s0, %s0, (32)0
define i16 @func3(i16 %p) {
; CHECK-LABEL: func3:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: adds.w.sx %s1, -1, %s0
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: and %s0, %s0, (32)0
define i8 @func4(i8 %p) {
; CHECK-LABEL: func4:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: adds.w.sx %s1, -1, %s0
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: and %s0, %s0, (32)0
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, 2146435071(%s1)
+; CHECK-NEXT: lea.sl %s1, 2146435071(, %s1)
; CHECK-NEXT: fadd.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fadd double %a, 0x7FEFFFFFFFFFFFFF
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, 2146435071(%s1)
+; CHECK-NEXT: lea.sl %s1, 2146435071(, %s1)
; CHECK-NEXT: fdiv.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fdiv double %a, 0x7FEFFFFFFFFFFFFF
define float @func_i16fp32(i16* %a) {
; CHECK-LABEL: func_i16fp32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%a.val = load i16, i16* %a, align 4
define double @func_i16fp64(i16* %a) {
; CHECK-LABEL: func_i16fp64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: cvt.d.s %s0, %s0
; CHECK-NEXT: or %s11, 0, %s9
define float @func_fp16fp32(half* %a) {
; CHECK-LABEL: func_fp16fp32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%a.val = load half, half* %a, align 4
define double @func_fp16fp64(half* %a) {
; CHECK-LABEL: func_fp16fp64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: cvt.d.s %s0, %s0
; CHECK-NEXT: or %s11, 0, %s9
define void @func_fp32i16(i16* %fl.ptr, float %val) {
; CHECK-LABEL: func_fp32i16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __gnu_f2h_ieee@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee@hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s0, (,%s18)
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s0, (, %s18)
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = call i16 @llvm.convert.to.fp16.f32(float %val)
store i16 %val.asf, i16* %fl.ptr
define half @func_fp32fp16(half* %fl.ptr, float %a) {
; CHECK-LABEL: func_fp32fp16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
-; CHECK-NEXT: st %s19, 56(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s19, 56(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __gnu_f2h_ieee@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee@hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s19, 0, %s0
; CHECK-NEXT: lea %s0, __gnu_h2f_ieee@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee@hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s19
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s19, (,%s18)
-; CHECK-NEXT: ld %s19, 56(,%s9) # 8-byte Folded Reload
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s19, (, %s18)
+; CHECK-NEXT: ld %s19, 56(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%a.asd = fptrunc float %a to half
store half %a.asd, half* %fl.ptr
define double @func_fp32fp64(float* %a) {
; CHECK-LABEL: func_fp32fp64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: cvt.d.s %s0, %s0
; CHECK-NEXT: or %s11, 0, %s9
%a.val = load float, float* %a, align 4
define void @func_fp64i16(i16* %fl.ptr, double %val) {
; CHECK-LABEL: func_fp64i16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __truncdfhf2@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __truncdfhf2@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __truncdfhf2@hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s0, (,%s18)
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s0, (, %s18)
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = call i16 @llvm.convert.to.fp16.f64(double %val)
store i16 %val.asf, i16* %fl.ptr
define void @func_fp64fp16(half* %fl.ptr, double %val) {
; CHECK-LABEL: func_fp64fp16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __truncdfhf2@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __truncdfhf2@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __truncdfhf2@hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s0, (,%s18)
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s0, (, %s18)
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = fptrunc double %val to half
store half %val.asf, half* %fl.ptr
; CHECK-LABEL: func_fp64fp32:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: cvt.s.d %s1, %s1
-; CHECK-NEXT: stu %s1, (,%s0)
+; CHECK-NEXT: stu %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = fptrunc double %val to float
store float %val.asf, float* %fl.ptr
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, 2146435071(%s1)
+; CHECK-NEXT: lea.sl %s1, 2146435071(, %s1)
; CHECK-NEXT: fmul.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fmul double %a, 0x7FEFFFFFFFFFFFFF
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, -1048577(%s1)
+; CHECK-NEXT: lea.sl %s1, -1048577(, %s1)
; CHECK-NEXT: fadd.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fadd double %a, 0xFFEFFFFFFFFFFFFF
; CHECK-NEXT: lea.sl %s2, 1160773632
; CHECK-NEXT: or %s1, %s1, %s2
; CHECK-NEXT: lea %s2, 1048576
-; CHECK-NEXT: lea.sl %s2, -986710016(%s2)
+; CHECK-NEXT: lea.sl %s2, -986710016(, %s2)
; CHECK-NEXT: fadd.d %s1, %s1, %s2
; CHECK-NEXT: lea %s2, -1
; CHECK-NEXT: and %s2, %s2, (32)0
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 1
%1 = load double, double* %addr, align 1
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: ldu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 1
%1 = load float, float* %addr, align 1
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 1
%1 = load i64, i64* %addr, align 1
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 1
%1 = load i32, i32* %addr, align 1
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 1
%1 = load i16, i16* %addr, align 1
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 1
%1 = load i8, i8* %addr, align 1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 1
ret double %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32@hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 1
ret float %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 1
ret i64 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32@hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 1
ret i32 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16@hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 1
ret i16 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8@hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 1
ret i8 %1
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 2
%1 = load double, double* %addr, align 2
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: ldu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 2
%1 = load float, float* %addr, align 2
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 2
%1 = load i64, i64* %addr, align 2
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 2
%1 = load i32, i32* %addr, align 2
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 2
%1 = load i16, i16* %addr, align 2
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 2
%1 = load i8, i8* %addr, align 2
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 2
ret double %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32@hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 2
ret float %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 2
ret i64 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32@hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 2
ret i32 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16@hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 2
ret i16 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8@hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 2
ret i8 %1
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 4
%1 = load double, double* %addr, align 4
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: ldu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 4
%1 = load float, float* %addr, align 4
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 4
%1 = load i64, i64* %addr, align 4
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 4
%1 = load i32, i32* %addr, align 4
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 188(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 4
%1 = load i16, i16* %addr, align 4
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 188(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 4
%1 = load i8, i8* %addr, align 4
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 4
ret double %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32@hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 4
ret float %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 4
ret i64 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32@hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 4
ret i32 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16@hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 4
ret i16 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8@hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 4
ret i8 %1
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 8
%1 = load double, double* %addr, align 8
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 184(,%s11)
+; CHECK-NEXT: ldu %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 8
%1 = load float, float* %addr, align 8
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 8
%1 = load i64, i64* %addr, align 8
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 184(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 8
%1 = load i32, i32* %addr, align 8
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 184(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 8
%1 = load i16, i16* %addr, align 8
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 184(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 8
%1 = load i8, i8* %addr, align 8
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 8
ret double %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32@hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 8
ret float %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 8
ret i64 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32@hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 8
ret i32 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16@hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 8
ret i16 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8@hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 8
ret i8 %1
define double @loadf64(double* nocapture readonly %0) {
; CHECK-LABEL: loadf64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load double, double* %0, align 16
ret double %2
define float @loadf32(float* nocapture readonly %0) {
; CHECK-LABEL: loadf32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load float, float* %0, align 16
ret float %2
define i64 @loadi64(i64* nocapture readonly %0) {
; CHECK-LABEL: loadi64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i64, i64* %0, align 16
ret i64 %2
define i32 @loadi32(i32* nocapture readonly %0) {
; CHECK-LABEL: loadi32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i32, i32* %0, align 16
ret i32 %2
define i64 @loadi32sext(i32* nocapture readonly %0) {
; CHECK-LABEL: loadi32sext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i32, i32* %0, align 16
%3 = sext i32 %2 to i64
define i64 @loadi32zext(i32* nocapture readonly %0) {
; CHECK-LABEL: loadi32zext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.zx %s0, (,%s0)
+; CHECK-NEXT: ldl.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i32, i32* %0, align 16
%3 = zext i32 %2 to i64
define i16 @loadi16(i16* nocapture readonly %0) {
; CHECK-LABEL: loadi16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i16, i16* %0, align 16
ret i16 %2
define i64 @loadi16sext(i16* nocapture readonly %0) {
; CHECK-LABEL: loadi16sext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, (,%s0)
+; CHECK-NEXT: ld2b.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i16, i16* %0, align 16
%3 = sext i16 %2 to i64
define i64 @loadi16zext(i16* nocapture readonly %0) {
; CHECK-LABEL: loadi16zext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i16, i16* %0, align 16
%3 = zext i16 %2 to i64
define i8 @loadi8(i8* nocapture readonly %0) {
; CHECK-LABEL: loadi8:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i8, i8* %0, align 16
ret i8 %2
define i64 @loadi8sext(i8* nocapture readonly %0) {
; CHECK-LABEL: loadi8sext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, (,%s0)
+; CHECK-NEXT: ld1b.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i8, i8* %0, align 16
%3 = sext i8 %2 to i64
define i64 @loadi8zext(i8* nocapture readonly %0) {
; CHECK-LABEL: loadi8zext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i8, i8* %0, align 16
%3 = zext i8 %2 to i64
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 176(,%s11)
+; CHECK-NEXT: ld %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 16
%1 = load double, double* %addr, align 16
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 176(,%s11)
+; CHECK-NEXT: ldu %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 16
%1 = load float, float* %addr, align 16
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 176(,%s11)
+; CHECK-NEXT: ld %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 16
%1 = load i64, i64* %addr, align 16
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 176(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 16
%1 = load i32, i32* %addr, align 16
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 176(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 16
%1 = load i16, i16* %addr, align 16
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 176(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 16
%1 = load i8, i8* %addr, align 16
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 8
ret double %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32@hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32@hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 4
ret float %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64@hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64@hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 8
ret i64 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32@hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32@hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 4
ret i32 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16@hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16@hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 2
ret i16 %1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8@hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8@hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 1
ret i8 %1
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, dst@got_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, dst@got_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld %s1, (,%s0)
+; CHECK-NEXT: lea.sl %s0, dst@got_hi(, %s0)
+; CHECK-NEXT: ld %s1, (%s0, %s15)
; CHECK-NEXT: lea %s0, ptr@got_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, ptr@got_hi(%s0)
; CHECK-NEXT: lea %s2, src@got_lo
; CHECK-NEXT: and %s2, %s2, (32)0
-; CHECK-NEXT: lea.sl %s2, src@got_hi(%s2)
-; CHECK-NEXT: adds.l %s2, %s15, %s2
-; CHECK-NEXT: ld %s2, (,%s2)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld %s0, (,%s0)
-; CHECK-NEXT: ldl.sx %s2, (,%s2)
-; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: lea.sl %s2, src@got_hi(, %s2)
+; CHECK-NEXT: ld %s2, (%s2, %s15)
+; CHECK-NEXT: lea.sl %s0, ptr@got_hi(, %s0)
+; CHECK-NEXT: ld %s0, (%s0, %s15)
+; CHECK-NEXT: ldl.sx %s2, (, %s2)
+; CHECK-NEXT: st %s1, (, %s0)
; CHECK-NEXT: or %s0, 1, (0)1
-; CHECK-NEXT: stl %s2, (,%s1)
+; CHECK-NEXT: stl %s2, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32* @dst, i32** @ptr, align 8
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, src@gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, src@gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, src@gotoff_hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (%s0, %s15)
; CHECK-NEXT: or %s1, 0, (0)1
; CHECK-NEXT: lea %s2, 100
; CHECK-NEXT: cmov.w.ne %s1, %s2, %s0
; CHECK-NEXT: lea %s0, dst@gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, dst@gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: lea.sl %s0, dst@gotoff_hi(, %s0)
+; CHECK-NEXT: stl %s1, (%s0, %s15)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i1, i1* @src, align 4
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, src@gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, src@gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
+; CHECK-NEXT: lea.sl %s0, src@gotoff_hi(, %s0)
; CHECK-NEXT: or %s1, 1, (0)1
-; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: st1b %s1, (%s0, %s15)
; CHECK-NEXT: lea %s12, func@plt_lo(-24)
; CHECK-NEXT: and %s12, %s12, (32)0
; CHECK-NEXT: sic %s16
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: lea %s0, dst@gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, dst@gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ldl.sx %s1, (,%s0)
-; CHECK-NEXT: stl %s1, 184(,%s11)
+; CHECK-NEXT: lea.sl %s0, dst@gotoff_hi(, %s0)
+; CHECK-NEXT: ldl.sx %s1, (%s0, %s15)
+; CHECK-NEXT: stl %s1, 184(, %s11)
; CHECK-NEXT: lea %s0, .L.str@gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, .L.str@gotoff_hi(%s0)
+; CHECK-NEXT: lea.sl %s0, .L.str@gotoff_hi(, %s0)
; CHECK-NEXT: adds.l %s0, %s15, %s0
; CHECK-NEXT: lea %s12, printf@plt_lo(-24)
; CHECK-NEXT: and %s12, %s12, (32)0
; CHECK-NEXT: sic %s16
; CHECK-NEXT: lea.sl %s12, printf@plt_hi(%s16, %s12)
-; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: st %s0, 176(, %s11)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s0, 0, (0)1
; CHECK-NEXT: or %s11, 0, %s9
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_@pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, function@got_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, function@got_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, function@got_hi(, %s0)
+; CHECK-NEXT: ld %s0, (%s0, %s15)
; CHECK-NEXT: lea %s1, ptr@got_lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, ptr@got_hi(%s1)
-; CHECK-NEXT: adds.l %s1, %s15, %s1
-; CHECK-NEXT: ld %s1, (,%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, ptr@got_hi(, %s1)
+; CHECK-NEXT: ld %s1, (%s1, %s15)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s12, 0, %s0
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
define signext i16 @func1() {
; CHECK-LABEL: func1:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define i32 @func2() {
; CHECK-LABEL: func2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define i64 @func3() {
; CHECK-LABEL: func3:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define zeroext i16 @func5() {
; CHECK-LABEL: func5:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
define i32 @func6() {
; CHECK-LABEL: func6:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define i64 @func7() {
; CHECK-LABEL: func7:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define signext i16 @func9() {
; CHECK-LABEL: func9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define i32 @func10() {
; CHECK-LABEL: func10:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define i64 @func11() {
; CHECK-LABEL: func11:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define zeroext i16 @func13() {
; CHECK-LABEL: func13:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define zeroext i16 @func14() {
; CHECK-LABEL: func14:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define i64 @func15() {
; CHECK-LABEL: func15:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
define i32 @func17() {
; CHECK-LABEL: func17:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.sx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
define i64 @func18() {
; CHECK-LABEL: func18:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.sx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
define zeroext i16 @func20() {
; CHECK-LABEL: func20:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.conv = load i16, i16* %a, align 2
define i64 @func21() {
; CHECK-LABEL: func21:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.sx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
define i32 @func23() {
; CHECK-LABEL: func23:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
define i64 @func24() {
; CHECK-LABEL: func24:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
define zeroext i16 @func26() {
; CHECK-LABEL: func26:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.conv = load i16, i16* %a, align 2
define i64 @func27() {
; CHECK-LABEL: func27:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
define i64 @func29() {
; CHECK-LABEL: func29:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
define i64 @func31() {
; CHECK-LABEL: func31:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
define i64 @func33() {
; CHECK-LABEL: func33:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.zx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
define i64 @func35() {
; CHECK-LABEL: func35:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.zx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
define signext i8 @func37() {
; CHECK-LABEL: func37:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sla.w.sx %s0, %s0, 31
; CHECK-NEXT: sra.w.sx %s0, %s0, 31
; CHECK-NEXT: or %s11, 0, %s9
define signext i16 @func38() {
; CHECK-LABEL: func38:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sla.w.sx %s0, %s0, 31
; CHECK-NEXT: sra.w.sx %s0, %s0, 31
; CHECK-NEXT: or %s11, 0, %s9
define signext i32 @func39() {
; CHECK-LABEL: func39:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sla.w.sx %s0, %s0, 31
; CHECK-NEXT: sra.w.sx %s0, %s0, 31
; CHECK-NEXT: or %s11, 0, %s9
define signext i64 @func40() {
; CHECK-LABEL: func40:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sll %s0, %s0, 63
; CHECK-NEXT: sra.l %s0, %s0, 63
; CHECK-NEXT: or %s11, 0, %s9
define signext i8 @func42() {
; CHECK-LABEL: func42:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
define signext i16 @func43() {
; CHECK-LABEL: func43:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
define signext i32 @func44() {
; CHECK-LABEL: func44:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
define signext i64 @func45() {
; CHECK-LABEL: func45:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
define void @func() {
; CHECK-LABEL: func:
; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (,%s11)
-; CHECK-NEXT: st %s10, 8(,%s11)
-; CHECK-NEXT: st %s15, 24(,%s11)
-; CHECK-NEXT: st %s16, 32(,%s11)
+; CHECK-NEXT: st %s9, (, %s11)
+; CHECK-NEXT: st %s10, 8(, %s11)
+; CHECK-NEXT: st %s15, 24(, %s11)
+; CHECK-NEXT: st %s16, 32(, %s11)
; CHECK-NEXT: or %s9, 0, %s11
; CHECK-NEXT: lea %s13, -176
; CHECK-NEXT: and %s13, %s13, (32)0
-; CHECK-NEXT: lea.sl %s11, -1(%s11, %s13)
+; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11)
; CHECK-NEXT: brge.l %s11, %s8, .LBB0_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ld %s61, 24(,%s14)
+; CHECK-NEXT: ld %s61, 24(, %s14)
; CHECK-NEXT: or %s62, 0, %s0
; CHECK-NEXT: lea %s63, 315
; CHECK-NEXT: shm.l %s63, (%s61)
; CHECK-NEXT: or %s0, 0, %s62
; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(,%s11)
-; CHECK-NEXT: ld %s15, 24(,%s11)
-; CHECK-NEXT: ld %s10, 8(,%s11)
-; CHECK-NEXT: ld %s9, (,%s11)
+; CHECK-NEXT: ld %s16, 32(, %s11)
+; CHECK-NEXT: ld %s15, 24(, %s11)
+; CHECK-NEXT: ld %s10, 8(, %s11)
+; CHECK-NEXT: ld %s9, (, %s11)
; CHECK-NEXT: b.l (,%lr)
ret void
}
define i64 @func1(i64) {
; CHECK-LABEL: func1:
; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (,%s11)
-; CHECK-NEXT: st %s10, 8(,%s11)
-; CHECK-NEXT: st %s15, 24(,%s11)
-; CHECK-NEXT: st %s16, 32(,%s11)
+; CHECK-NEXT: st %s9, (, %s11)
+; CHECK-NEXT: st %s10, 8(, %s11)
+; CHECK-NEXT: st %s15, 24(, %s11)
+; CHECK-NEXT: st %s16, 32(, %s11)
; CHECK-NEXT: or %s9, 0, %s11
; CHECK-NEXT: lea %s13, -176
; CHECK-NEXT: and %s13, %s13, (32)0
-; CHECK-NEXT: lea.sl %s11, -1(%s11, %s13)
+; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11)
; CHECK-NEXT: brge.l %s11, %s8, .LBB1_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ld %s61, 24(,%s14)
+; CHECK-NEXT: ld %s61, 24(, %s14)
; CHECK-NEXT: or %s62, 0, %s0
; CHECK-NEXT: lea %s63, 315
; CHECK-NEXT: shm.l %s63, (%s61)
; CHECK-NEXT: or %s0, 0, %s62
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(,%s11)
-; CHECK-NEXT: ld %s15, 24(,%s11)
-; CHECK-NEXT: ld %s10, 8(,%s11)
-; CHECK-NEXT: ld %s9, (,%s11)
+; CHECK-NEXT: ld %s16, 32(, %s11)
+; CHECK-NEXT: ld %s15, 24(, %s11)
+; CHECK-NEXT: ld %s10, 8(, %s11)
+; CHECK-NEXT: ld %s9, (, %s11)
; CHECK-NEXT: b.l (,%lr)
ret i64 %0
}
define i64 @func2(i64, i64, i64, i64, i64) {
; CHECK-LABEL: func2:
; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (,%s11)
-; CHECK-NEXT: st %s10, 8(,%s11)
-; CHECK-NEXT: st %s15, 24(,%s11)
-; CHECK-NEXT: st %s16, 32(,%s11)
+; CHECK-NEXT: st %s9, (, %s11)
+; CHECK-NEXT: st %s10, 8(, %s11)
+; CHECK-NEXT: st %s15, 24(, %s11)
+; CHECK-NEXT: st %s16, 32(, %s11)
; CHECK-NEXT: or %s9, 0, %s11
; CHECK-NEXT: lea %s13, -176
; CHECK-NEXT: and %s13, %s13, (32)0
-; CHECK-NEXT: lea.sl %s11, -1(%s11, %s13)
+; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11)
; CHECK-NEXT: brge.l %s11, %s8, .LBB2_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ld %s61, 24(,%s14)
+; CHECK-NEXT: ld %s61, 24(, %s14)
; CHECK-NEXT: or %s62, 0, %s0
; CHECK-NEXT: lea %s63, 315
; CHECK-NEXT: shm.l %s63, (%s61)
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: or %s0, 0, %s4
; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(,%s11)
-; CHECK-NEXT: ld %s15, 24(,%s11)
-; CHECK-NEXT: ld %s10, 8(,%s11)
-; CHECK-NEXT: ld %s9, (,%s11)
+; CHECK-NEXT: ld %s16, 32(, %s11)
+; CHECK-NEXT: ld %s15, 24(, %s11)
+; CHECK-NEXT: ld %s10, 8(, %s11)
+; CHECK-NEXT: ld %s9, (, %s11)
; CHECK-NEXT: b.l (,%lr)
ret i64 %4
}
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 1
store double %0, double* %addr, align 1
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: stu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 1
store float %0, float* %addr, align 1
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 1
store i64 %0, i64* %addr, align 1
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: stl %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 1
store i32 %0, i32* %addr, align 1
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 190(,%s11)
+; CHECK-NEXT: st2b %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 1
store i16 %0, i16* %addr, align 1
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 191(,%s11)
+; CHECK-NEXT: st1b %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 1
store i8 %0, i8* %addr, align 1
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 1
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32@hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 1
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 1
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32@hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 1
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16@hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 1
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8@hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 1
ret void
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 2
store double %0, double* %addr, align 2
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: stu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 2
store float %0, float* %addr, align 2
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 2
store i64 %0, i64* %addr, align 2
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: stl %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 2
store i32 %0, i32* %addr, align 2
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 190(,%s11)
+; CHECK-NEXT: st2b %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 2
store i16 %0, i16* %addr, align 2
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 190(,%s11)
+; CHECK-NEXT: st1b %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 2
store i8 %0, i8* %addr, align 2
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 2
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32@hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 2
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 2
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32@hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 2
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16@hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 2
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8@hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 2
ret void
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 4
store double %0, double* %addr, align 4
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: stu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 4
store float %0, float* %addr, align 4
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 4
store i64 %0, i64* %addr, align 4
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: stl %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 4
store i32 %0, i32* %addr, align 4
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 188(,%s11)
+; CHECK-NEXT: st2b %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 4
store i16 %0, i16* %addr, align 4
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 188(,%s11)
+; CHECK-NEXT: st1b %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 4
store i8 %0, i8* %addr, align 4
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 4
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32@hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 4
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 4
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32@hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 4
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16@hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 4
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8@hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 4
ret void
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 8
store double %0, double* %addr, align 8
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 184(,%s11)
+; CHECK-NEXT: stu %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 8
store float %0, float* %addr, align 8
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 8
store i64 %0, i64* %addr, align 8
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 184(,%s11)
+; CHECK-NEXT: stl %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 8
store i32 %0, i32* %addr, align 8
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 184(,%s11)
+; CHECK-NEXT: st2b %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 8
store i16 %0, i16* %addr, align 8
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 184(,%s11)
+; CHECK-NEXT: st1b %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 8
store i8 %0, i8* %addr, align 8
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 8
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32@hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 8
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 8
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32@hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 8
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16@hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 8
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8@hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 8
ret void
define void @storef64(double* nocapture %0, double %1) {
; CHECK-LABEL: storef64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: st %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store double %1, double* %0, align 16
ret void
define void @storef32(float* nocapture %0, float %1) {
; CHECK-LABEL: storef32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s1, (,%s0)
+; CHECK-NEXT: stu %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store float %1, float* %0, align 16
ret void
define void @storei64(i64* nocapture %0, i64 %1) {
; CHECK-LABEL: storei64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: st %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %1, i64* %0, align 16
ret void
define void @storei32(i32* nocapture %0, i32 %1) {
; CHECK-LABEL: storei32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: stl %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %1, i32* %0, align 16
ret void
define void @storei32tr(i32* nocapture %0, i64 %1) {
; CHECK-LABEL: storei32tr:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: stl %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = trunc i64 %1 to i32
store i32 %3, i32* %0, align 16
define void @storei16(i16* nocapture %0, i16 %1) {
; CHECK-LABEL: storei16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s1, (,%s0)
+; CHECK-NEXT: st2b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %1, i16* %0, align 16
ret void
define void @storei16tr(i16* nocapture %0, i64 %1) {
; CHECK-LABEL: storei16tr:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s1, (,%s0)
+; CHECK-NEXT: st2b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = trunc i64 %1 to i16
store i16 %3, i16* %0, align 16
define void @storei8(i8* nocapture %0, i8 %1) {
; CHECK-LABEL: storei8:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: st1b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %1, i8* %0, align 16
ret void
define void @storei8tr(i8* nocapture %0, i64 %1) {
; CHECK-LABEL: storei8tr:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: st1b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = trunc i64 %1 to i8
store i8 %3, i8* %0, align 16
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: st %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 16
store double %0, double* %addr, align 16
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 176(,%s11)
+; CHECK-NEXT: stu %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 16
store float %0, float* %addr, align 16
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: st %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 16
store i64 %0, i64* %addr, align 16
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 176(,%s11)
+; CHECK-NEXT: stl %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 16
store i32 %0, i32* %addr, align 16
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 176(,%s11)
+; CHECK-NEXT: st2b %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 16
store i16 %0, i16* %addr, align 16
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 176(,%s11)
+; CHECK-NEXT: st1b %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 16
store i8 %0, i8* %addr, align 16
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 8
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32@hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32@hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 4
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64@hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64@hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 8
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32@hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32@hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 4
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16@hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16@hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 2
ret void
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8@hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8@hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 1
ret void
define signext i8 @func13(i8 signext %0, i8 signext %1) {
; CHECK-LABEL: func13:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 24
; CHECK-NEXT: sra.w.sx %s0, %s0, 24
; CHECK-NEXT: or %s11, 0, %s9
define signext i16 @func14(i16 signext %0, i16 signext %1) {
; CHECK-LABEL: func14:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 16
; CHECK-NEXT: sra.w.sx %s0, %s0, 16
; CHECK-NEXT: or %s11, 0, %s9
define i32 @func15(i32 %0, i32 %1) {
; CHECK-LABEL: func15:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add nsw i32 %0, -5
ret i32 %3
define i64 @func16(i64 %0, i64 %1) {
; CHECK-LABEL: func16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: lea %s0, -5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = add nsw i64 %0, -5
ret i64 %3
define zeroext i8 @func18(i8 zeroext %0, i8 zeroext %1) {
; CHECK-LABEL: func18:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: and %s0, %s0, (56)0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i8 %0, -5
define zeroext i16 @func19(i16 zeroext %0, i16 zeroext %1) {
; CHECK-LABEL: func19:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i16 %0, -5
define i32 @func20(i32 %0, i32 %1) {
; CHECK-LABEL: func20:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i32 %0, -5
ret i32 %3
define i64 @func21(i64 %0, i64 %1) {
; CHECK-LABEL: func21:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: lea %s0, -5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i64 %0, -5
ret i64 %3
define i64 @func26(i64 %0, i64 %1) {
; CHECK-LABEL: func26:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -2147483648(%s0)
+; CHECK-NEXT: lea %s0, -2147483648(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = add nsw i64 %0, -2147483648
ret i64 %3
; Function Attrs: norecurse nounwind readnone
define nonnull i32* @get_global() {
; GENDYN-LABEL: get_global:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB0_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB0_2: # %entry
+; GENDYN: .LBB{{[0-9]+}}_2:
; GENDYN-NEXT: lea %s0, x@tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
; GENDYN-NEXT: sic %s10
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: get_global:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB0_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB0_2: # %entry
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
; GENDYNPIC-NEXT: sic %s16
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: get_global:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, x@tpoff_lo
; Function Attrs: norecurse nounwind readnone
define nonnull i32* @get_local() {
; GENDYN-LABEL: get_local:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB1_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB1_2: # %entry
+; GENDYN: .LBB{{[0-9]+}}_2:
; GENDYN-NEXT: lea %s0, y@tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
; GENDYN-NEXT: sic %s10
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: get_local:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB1_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB1_2: # %entry
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
; GENDYNPIC-NEXT: sic %s16
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: get_local:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, y@tpoff_lo
; Function Attrs: norecurse nounwind
define void @set_global(i32 %v) {
; GENDYN-LABEL: set_global:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB2_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB2_2: # %entry
-; GENDYN-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYN: .LBB{{[0-9]+}}_2:
+; GENDYN-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYN-NEXT: or %s18, 0, %s0
; GENDYN-NEXT: lea %s0, x@tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
; GENDYN-NEXT: and %s12, %s12, (32)0
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
-; GENDYN-NEXT: stl %s18, (,%s0)
-; GENDYN-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYN-NEXT: stl %s18, (, %s0)
+; GENDYN-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: set_global:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB2_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB2_2: # %entry
-; GENDYNPIC-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
+; GENDYNPIC-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYNPIC-NEXT: or %s18, 0, %s0
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
; GENDYNPIC-NEXT: and %s12, %s12, (32)0
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
-; GENDYNPIC-NEXT: stl %s18, (,%s0)
-; GENDYNPIC-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYNPIC-NEXT: stl %s18, (, %s0)
+; GENDYNPIC-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: set_global:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, x@tpoff_lo
; LOCAL-NEXT: and %s34, %s34, (32)0
; LOCAL-NEXT: lea.sl %s34, x@tpoff_hi(%s34)
; LOCAL-NEXT: adds.l %s34, %s14, %s34
-; LOCAL-NEXT: stl %s0, (,%s34)
+; LOCAL-NEXT: stl %s0, (, %s34)
; LOCAL-NEXT: or %s11, 0, %s9
entry:
store i32 %v, i32* @x, align 4
; Function Attrs: norecurse nounwind
define void @set_local(i32 %v) {
; GENDYN-LABEL: set_local:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB3_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB3_2: # %entry
-; GENDYN-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYN: .LBB{{[0-9]+}}_2:
+; GENDYN-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYN-NEXT: or %s18, 0, %s0
; GENDYN-NEXT: lea %s0, y@tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
; GENDYN-NEXT: and %s12, %s12, (32)0
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
-; GENDYN-NEXT: stl %s18, (,%s0)
-; GENDYN-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYN-NEXT: stl %s18, (, %s0)
+; GENDYN-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: set_local:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB3_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB3_2: # %entry
-; GENDYNPIC-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
+; GENDYNPIC-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYNPIC-NEXT: or %s18, 0, %s0
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_@pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
; GENDYNPIC-NEXT: and %s12, %s12, (32)0
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr@plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
-; GENDYNPIC-NEXT: stl %s18, (,%s0)
-; GENDYNPIC-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYNPIC-NEXT: stl %s18, (, %s0)
+; GENDYNPIC-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: set_local:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, y@tpoff_lo
; LOCAL-NEXT: and %s34, %s34, (32)0
; LOCAL-NEXT: lea.sl %s34, y@tpoff_hi(%s34)
; LOCAL-NEXT: adds.l %s34, %s14, %s34
-; LOCAL-NEXT: stl %s0, (,%s34)
+; LOCAL-NEXT: stl %s0, (, %s34)
; LOCAL-NEXT: or %s11, 0, %s9
entry:
store i32 %v, i32* @y, align 4
define void @func0(i1 signext %p, i8* %a) {
; CHECK-LABEL: func0:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i1 %p to i8
store i8 %p.conv, i8* %a, align 2
define void @func1(i8 signext %p, i16* %a) {
; CHECK-LABEL: func1:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i8 %p to i16
store i16 %p.conv, i16* %a, align 2
define void @func2(i8 signext %p, i32* %a) {
; CHECK-LABEL: func2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i8 %p to i32
store i32 %p.conv, i32* %a, align 4
; CHECK-LABEL: func3:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i8 %p to i64
store i64 %p.conv, i64* %a, align 8
define void @func5(i16 signext %p, i32* %a) {
; CHECK-LABEL: func5:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i16 %p to i32
store i32 %p.conv, i32* %a, align 4
; CHECK-LABEL: func6:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i16 %p to i64
store i64 %p.conv, i64* %a, align 8
; CHECK-LABEL: func8:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i32 %p to i64
store i64 %p.conv, i64* %a, align 8
define i32 @func_vainout(i32, ...) {
; CHECK-LABEL: func_vainout:
-; CHECK: ldl.sx %s1, 184(,%s9)
-; CHECK: ld2b.sx %s18, 192(,%s9)
-; CHECK: ld1b.sx %s19, 200(,%s9)
-; CHECK: ldl.sx %s20, 208(,%s9)
-; CHECK: ld2b.zx %s21, 216(,%s9)
-; CHECK: ld1b.zx %s22, 224(,%s9)
-; CHECK: ldu %s23, 236(,%s9)
-; CHECK: ld %s24, 240(,%s9)
-; CHECK: ld %s25, 248(,%s9)
-; CHECK: ld %s26, 256(,%s9)
+; CHECK: ldl.sx %s1, 184(, %s9)
+; CHECK: ld2b.sx %s18, 192(, %s9)
+; CHECK: ld1b.sx %s19, 200(, %s9)
+; CHECK: ldl.sx %s20, 208(, %s9)
+; CHECK: ld2b.zx %s21, 216(, %s9)
+; CHECK: ld1b.zx %s22, 224(, %s9)
+; CHECK: ldu %s23, 236(, %s9)
+; CHECK: ld %s24, 240(, %s9)
+; CHECK: ld %s25, 248(, %s9)
+; CHECK: ld %s26, 256(, %s9)
%a = alloca i8*, align 8
%a8 = bitcast i8** %a to i8*
define i32 @va_func(i32, ...) {
; CHECK-LABEL: va_func:
-; CHECK: ldl.sx %s0, 184(,%s9)
-; CHECK: ld2b.sx %s18, 192(,%s9)
-; CHECK: ld1b.sx %s19, 200(,%s9)
-; CHECK: ldl.sx %s20, 208(,%s9)
-; CHECK: ld2b.zx %s21, 216(,%s9)
-; CHECK: ld1b.zx %s22, 224(,%s9)
-; CHECK: ldu %s23, 236(,%s9)
-; CHECK: ld %s24, 240(,%s9)
-; CHECK: ld %s25, 248(,%s9)
+; CHECK: ldl.sx %s0, 184(, %s9)
+; CHECK: ld2b.sx %s18, 192(, %s9)
+; CHECK: ld1b.sx %s19, 200(, %s9)
+; CHECK: ldl.sx %s20, 208(, %s9)
+; CHECK: ld2b.zx %s21, 216(, %s9)
+; CHECK: ld1b.zx %s22, 224(, %s9)
+; CHECK: ldu %s23, 236(, %s9)
+; CHECK: ld %s24, 240(, %s9)
+; CHECK: ld %s25, 248(, %s9)
%va = alloca i8*, align 8
%va.i8 = bitcast i8** %va to i8*
define i32 @caller() {
; CHECK-LABEL: caller:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s7, 0, (0)1
-; CHECK-NEXT: st %s7, 280(,%s11)
+; CHECK-NEXT: st %s7, 280(, %s11)
; CHECK-NEXT: or %s0, 11, (0)1
-; CHECK-NEXT: st %s0, 272(,%s11)
-; CHECK-NEXT: st %s7, 264(,%s11)
+; CHECK-NEXT: st %s0, 272(, %s11)
+; CHECK-NEXT: st %s7, 264(, %s11)
; CHECK-NEXT: or %s0, 10, (0)1
-; CHECK-NEXT: st %s0, 256(,%s11)
+; CHECK-NEXT: st %s0, 256(, %s11)
; CHECK-NEXT: lea.sl %s0, 1075970048
-; CHECK-NEXT: st %s0, 248(,%s11)
+; CHECK-NEXT: st %s0, 248(, %s11)
; CHECK-NEXT: or %s0, 8, (0)1
-; CHECK-NEXT: st %s0, 240(,%s11)
-; CHECK-NEXT: st %s7, 232(,%s11)
+; CHECK-NEXT: st %s0, 240(, %s11)
+; CHECK-NEXT: st %s7, 232(, %s11)
; CHECK-NEXT: lea %s0, 1086324736
-; CHECK-NEXT: stl %s0, 228(,%s11)
+; CHECK-NEXT: stl %s0, 228(, %s11)
; CHECK-NEXT: or %s5, 5, (0)1
-; CHECK-NEXT: stl %s5, 216(,%s11)
+; CHECK-NEXT: stl %s5, 216(, %s11)
; CHECK-NEXT: or %s4, 4, (0)1
-; CHECK-NEXT: stl %s4, 208(,%s11)
+; CHECK-NEXT: stl %s4, 208(, %s11)
; CHECK-NEXT: or %s3, 3, (0)1
-; CHECK-NEXT: stl %s3, 200(,%s11)
+; CHECK-NEXT: stl %s3, 200(, %s11)
; CHECK-NEXT: or %s2, 2, (0)1
-; CHECK-NEXT: stl %s2, 192(,%s11)
+; CHECK-NEXT: stl %s2, 192(, %s11)
; CHECK-NEXT: or %s1, 1, (0)1
-; CHECK-NEXT: stl %s1, 184(,%s11)
+; CHECK-NEXT: stl %s1, 184(, %s11)
; CHECK-NEXT: or %s18, 0, (0)1
; CHECK-NEXT: lea %s0, func@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, func@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, func@hi(, %s0)
; CHECK-NEXT: lea.sl %s0, 1086324736
-; CHECK-NEXT: stl %s18, 176(,%s11)
+; CHECK-NEXT: stl %s18, 176(, %s11)
; CHECK-NEXT: or %s6, 0, %s0
; CHECK-NEXT: or %s0, 0, %s18
-; CHECK-NEXT: bsic %lr, (,%s12)
+; CHECK-NEXT: bsic %lr, (, %s12)
; CHECK-NEXT: or %s0, 0, %s18
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
call i32 (i32, ...) @func(i32 0, i16 1, i8 2, i32 3, i16 4, i8 5, float 6.0, i8* null, i64 8, double 9.0, i128 10, i128 11)
ret i32 0