RISCVLegalizerInfo.cpp
RISCVMCInstLower.cpp
RISCVMergeBaseOffset.cpp
+ RISCVRedundantCopyElimination.cpp
RISCVRegisterBankInfo.cpp
RISCVRegisterInfo.cpp
RISCVSExtWRemoval.cpp
FunctionPass *createRISCVInsertVSETVLIPass();
void initializeRISCVInsertVSETVLIPass(PassRegistry &);
+FunctionPass *createRISCVRedundantCopyEliminationPass();
+void initializeRISCVRedundantCopyEliminationPass(PassRegistry &);
+
InstructionSelector *createRISCVInstructionSelector(const RISCVTargetMachine &,
RISCVSubtarget &,
RISCVRegisterBankInfo &);
--- /dev/null
+//=- RISCVRedundantCopyElimination.cpp - Remove useless copy for RISCV ------=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass removes unnecessary zero copies in BBs that are targets of
+// beqz/bnez instructions. For instance, the copy instruction in the code below
+// can be removed because the beqz jumps to BB#2 when a0 is zero.
+// BB#1:
+// beqz %a0, <BB#2>
+// BB#2:
+// %a0 = COPY %x0
+// This pass should be run after register allocation.
+//
+// This pass is based on the earliest versions of
+// AArch64RedundantCopyElimination.
+//
+// FIXME: Support compares with constants other than zero? This is harder to
+// do on RISC-V since branches can't have immediates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-copyelim"
+
+STATISTIC(NumCopiesRemoved, "Number of copies removed.");
+
+namespace {
+// Post-RA machine pass that deletes "%rd = COPY %x0" instructions in a block
+// that is only reachable when %rd is already known to be zero, as established
+// by a BEQ/BNE against X0 in the block's single predecessor.
+class RISCVRedundantCopyElimination : public MachineFunctionPass {
+  const MachineRegisterInfo *MRI;
+  const TargetRegisterInfo *TRI;
+
+public:
+  static char ID;
+  RISCVRedundantCopyElimination() : MachineFunctionPass(ID) {
+    initializeRISCVRedundantCopyEliminationPass(
+        *PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  // The pass runs after register allocation, so require NoVRegs.
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
+
+  StringRef getPassName() const override {
+    return "RISCV Redundant Copy Elimination";
+  }
+
+private:
+  // Attempt the optimization on one block; returns true if anything changed.
+  bool optimizeBlock(MachineBasicBlock &MBB);
+};
+
+} // end anonymous namespace
+
+char RISCVRedundantCopyElimination::ID = 0;
+
+INITIALIZE_PASS(RISCVRedundantCopyElimination, "riscv-copyelim",
+ "RISCV redundant copy elimination pass", false, false)
+
+// Return true if MI is a conditional branch that guarantees its first source
+// register is zero on entry to MBB:
+//   - BEQ %reg, %x0, <MBB>:   the branch to MBB is taken only when %reg == 0.
+//   - BNE %reg, %x0, <Other>: MBB is the fall-through, reached when %reg == 0.
+// NOTE(review): only getOperand(1) is compared against X0, so a branch written
+// as "beq %x0, %reg" (X0 in the first operand) is not recognized — confirm
+// whether the commuted form can occur here.
+static bool guaranteesZeroRegInBlock(const MachineInstr &MI,
+                                     const MachineBasicBlock &MBB) {
+  unsigned Opc = MI.getOpcode();
+  if (Opc == RISCV::BEQ && MI.getOperand(1).getReg() == RISCV::X0 &&
+      &MBB == MI.getOperand(2).getMBB())
+    return true;
+  if (Opc == RISCV::BNE && MI.getOperand(1).getReg() == RISCV::X0 &&
+      &MBB != MI.getOperand(2).getMBB())
+    return true;
+
+  return false;
+}
+
+// Remove COPYs of X0 in MBB that are provably redundant because MBB's single
+// predecessor branches on the copied register being zero. Returns true if any
+// instruction was deleted.
+bool RISCVRedundantCopyElimination::optimizeBlock(MachineBasicBlock &MBB) {
+  // Check if the current basic block has a single predecessor.
+  if (MBB.pred_size() != 1)
+    return false;
+
+  // Check if the predecessor has two successors, implying the block ends in a
+  // conditional branch.
+  MachineBasicBlock *PredMBB = *MBB.pred_begin();
+  if (PredMBB->succ_size() != 2)
+    return false;
+
+  MachineBasicBlock::iterator CondBr = PredMBB->getLastNonDebugInstr();
+  if (CondBr == PredMBB->end())
+    return false;
+
+  // Walk the predecessor's terminators backwards looking for a BEQ/BNE
+  // against X0 that proves a register is zero on entry to MBB.
+  while (true) {
+    // If we run out of terminators, give up.
+    if (!CondBr->isTerminator())
+      return false;
+    // If we found a branch with X0, stop searching and try to remove copies.
+    // TODO: Handle multiple branches with different registers.
+    if (guaranteesZeroRegInBlock(*CondBr, MBB))
+      break;
+    // If we reached the beginning of the basic block, give up.
+    if (CondBr == PredMBB->begin())
+      return false;
+    --CondBr;
+  }
+
+  // Operand 0 of the BEQ/BNE is the register now known to be zero in MBB.
+  Register TargetReg = CondBr->getOperand(0).getReg();
+  if (!TargetReg)
+    return false;
+
+  bool Changed = false;
+  MachineBasicBlock::iterator LastChange = MBB.begin();
+  // Remove redundant Copy instructions unless TargetReg is modified.
+  // Note: I is advanced before any erase, so iteration remains valid.
+  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
+    MachineInstr *MI = &*I;
+    ++I;
+    if (MI->isCopy() && MI->getOperand(0).isReg() &&
+        MI->getOperand(1).isReg()) {
+      Register DefReg = MI->getOperand(0).getReg();
+      Register SrcReg = MI->getOperand(1).getReg();
+
+      // "%TargetReg = COPY %x0" is a no-op here: TargetReg already holds zero.
+      if (SrcReg == RISCV::X0 && !MRI->isReserved(DefReg) &&
+          TargetReg == DefReg) {
+        LLVM_DEBUG(dbgs() << "Remove redundant Copy : ");
+        LLVM_DEBUG(MI->print(dbgs()));
+
+        MI->eraseFromParent();
+        Changed = true;
+        LastChange = I;
+        ++NumCopiesRemoved;
+        continue;
+      }
+    }
+
+    // Once TargetReg is redefined the zero guarantee no longer holds; any
+    // later COPY of X0 into it is not redundant.
+    if (MI->modifiesRegister(TargetReg, TRI))
+      break;
+  }
+
+  if (!Changed)
+    return false;
+
+  // Otherwise, we have to fixup the use-def chain, starting with the
+  // BEQ/BNE. Conservatively mark as much as we can live.
+  CondBr->clearRegisterKills(TargetReg, TRI);
+
+  // Add newly used reg to the block's live-in list if it isn't there already.
+  if (!MBB.isLiveIn(TargetReg))
+    MBB.addLiveIn(TargetReg);
+
+  // Clear any kills of TargetReg between CondBr and the last removed COPY.
+  for (MachineInstr &MMI : make_range(MBB.begin(), LastChange))
+    MMI.clearRegisterKills(TargetReg, TRI);
+
+  return true;
+}
+
+// Pass entry point: run optimizeBlock over every block in the function.
+bool RISCVRedundantCopyElimination::runOnMachineFunction(MachineFunction &MF) {
+  // Respect optnone / -opt-bisect-limit skipping.
+  if (skipFunction(MF.getFunction()))
+    return false;
+
+  TRI = MF.getSubtarget().getRegisterInfo();
+  MRI = &MF.getRegInfo();
+
+  bool Changed = false;
+  for (MachineBasicBlock &MBB : MF)
+    Changed |= optimizeBlock(MBB);
+
+  return Changed;
+}
+
+// Factory used by the target's pass pipeline to instantiate this pass.
+FunctionPass *llvm::createRISCVRedundantCopyEliminationPass() {
+  return new RISCVRedundantCopyElimination();
+}
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
+// Command-line escape hatch for the post-RA redundant copy elimination pass;
+// enabled by default, hidden from -help.
+static cl::opt<bool> EnableRedundantCopyElimination(
+    "riscv-enable-copyelim",
+    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
+    cl::Hidden);
+
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
void addPreSched2() override;
void addMachineSSAOptimization() override;
void addPreRegAlloc() override;
+ void addPostRegAlloc() override;
};
} // namespace
addPass(createRISCVMergeBaseOffsetOptPass());
addPass(createRISCVInsertVSETVLIPass());
}
+
+// Schedule the redundant copy elimination pass after register allocation,
+// skipped at -O0 or when disabled via -riscv-enable-copyelim=false.
+void RISCVPassConfig::addPostRegAlloc() {
+  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
+    addPass(createRISCVRedundantCopyEliminationPass());
+}
; RV32IFD-LABEL: fcvt_w_d_sat:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB3_2
-; RV32IFD-NEXT: # %bb.1: # %start
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB3_2:
+; RV32IFD-NEXT: beqz a0, .LBB3_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
+; RV32IFD-NEXT: .LBB3_2: # %start
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_w_d_sat:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB3_2
-; RV64IFD-NEXT: # %bb.1: # %start
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB3_2:
+; RV64IFD-NEXT: beqz a0, .LBB3_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB3_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_d_sat:
; RV32IFD-LABEL: fcvt_wu_d_sat:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB6_2
-; RV32IFD-NEXT: # %bb.1: # %start
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB6_2:
+; RV32IFD-NEXT: beqz a0, .LBB6_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
+; RV32IFD-NEXT: .LBB6_2: # %start
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_wu_d_sat:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB6_2
-; RV64IFD-NEXT: # %bb.1: # %start
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB6_2:
+; RV64IFD-NEXT: beqz a0, .LBB6_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB6_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_d_sat:
; RV64IFD-LABEL: fcvt_l_d_sat:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB12_2
-; RV64IFD-NEXT: # %bb.1: # %start
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB12_2:
+; RV64IFD-NEXT: beqz a0, .LBB12_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB12_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_l_d_sat:
; RV64IFD-LABEL: fcvt_lu_d_sat:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB14_2
-; RV64IFD-NEXT: # %bb.1: # %start
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB14_2:
+; RV64IFD-NEXT: beqz a0, .LBB14_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB14_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_lu_d_sat:
; RV32IFD-LABEL: fcvt_w_s_sat_i16:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB26_2
-; RV32IFD-NEXT: # %bb.1: # %start
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB26_2:
+; RV32IFD-NEXT: beqz a0, .LBB26_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: lui a0, %hi(.LCPI26_0)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI26_0)(a0)
; RV32IFD-NEXT: lui a0, %hi(.LCPI26_1)
; RV32IFD-NEXT: fmax.d ft0, fa0, ft0
; RV32IFD-NEXT: fmin.d ft0, ft0, ft1
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB26_2: # %start
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_w_s_sat_i16:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB26_2
-; RV64IFD-NEXT: # %bb.1: # %start
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB26_2:
+; RV64IFD-NEXT: beqz a0, .LBB26_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: lui a0, %hi(.LCPI26_0)
; RV64IFD-NEXT: fld ft0, %lo(.LCPI26_0)(a0)
; RV64IFD-NEXT: lui a0, %hi(.LCPI26_1)
; RV64IFD-NEXT: fmax.d ft0, fa0, ft0
; RV64IFD-NEXT: fmin.d ft0, ft0, ft1
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT: .LBB26_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i16:
; RV32IFD-LABEL: fcvt_w_s_sat_i8:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB30_2
-; RV32IFD-NEXT: # %bb.1: # %start
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB30_2:
+; RV32IFD-NEXT: beqz a0, .LBB30_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: lui a0, %hi(.LCPI30_0)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI30_0)(a0)
; RV32IFD-NEXT: lui a0, %hi(.LCPI30_1)
; RV32IFD-NEXT: fmax.d ft0, fa0, ft0
; RV32IFD-NEXT: fmin.d ft0, ft0, ft1
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB30_2: # %start
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_w_s_sat_i8:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB30_2
-; RV64IFD-NEXT: # %bb.1: # %start
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB30_2:
+; RV64IFD-NEXT: beqz a0, .LBB30_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: lui a0, %hi(.LCPI30_0)
; RV64IFD-NEXT: fld ft0, %lo(.LCPI30_0)(a0)
; RV64IFD-NEXT: lui a0, %hi(.LCPI30_1)
; RV64IFD-NEXT: fmax.d ft0, fa0, ft0
; RV64IFD-NEXT: fmin.d ft0, ft0, ft1
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT: .LBB30_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i8:
; RV32IFD-LABEL: test_floor_si32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB0_2
+; RV32IFD-NEXT: beqz a0, .LBB0_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB0_2:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rdn
+; RV32IFD-NEXT: .LBB0_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_floor_si32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB0_2
+; RV64IFD-NEXT: beqz a0, .LBB0_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB0_2:
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rdn
+; RV64IFD-NEXT: .LBB0_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.floor.f64(double %x)
%b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_floor_si64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB1_2
+; RV64IFD-NEXT: beqz a0, .LBB1_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB1_2:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rdn
+; RV64IFD-NEXT: .LBB1_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.floor.f64(double %x)
%b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_floor_ui32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB2_2
+; RV32IFD-NEXT: beqz a0, .LBB2_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB2_2:
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rdn
+; RV32IFD-NEXT: .LBB2_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_floor_ui32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB2_2
+; RV64IFD-NEXT: beqz a0, .LBB2_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB2_2:
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rdn
+; RV64IFD-NEXT: .LBB2_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.floor.f64(double %x)
%b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_floor_ui64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB3_2
+; RV64IFD-NEXT: beqz a0, .LBB3_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB3_2:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rdn
+; RV64IFD-NEXT: .LBB3_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.floor.f64(double %x)
%b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_ceil_si32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB4_2
+; RV32IFD-NEXT: beqz a0, .LBB4_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB4_2:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rup
+; RV32IFD-NEXT: .LBB4_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_ceil_si32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB4_2
+; RV64IFD-NEXT: beqz a0, .LBB4_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB4_2:
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rup
+; RV64IFD-NEXT: .LBB4_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.ceil.f64(double %x)
%b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_ceil_si64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB5_2
+; RV64IFD-NEXT: beqz a0, .LBB5_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB5_2:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rup
+; RV64IFD-NEXT: .LBB5_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.ceil.f64(double %x)
%b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_ceil_ui32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB6_2
+; RV32IFD-NEXT: beqz a0, .LBB6_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB6_2:
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rup
+; RV32IFD-NEXT: .LBB6_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_ceil_ui32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB6_2
+; RV64IFD-NEXT: beqz a0, .LBB6_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB6_2:
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rup
+; RV64IFD-NEXT: .LBB6_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.ceil.f64(double %x)
%b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_ceil_ui64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB7_2
+; RV64IFD-NEXT: beqz a0, .LBB7_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB7_2:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rup
+; RV64IFD-NEXT: .LBB7_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.ceil.f64(double %x)
%b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_trunc_si32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB8_2
+; RV32IFD-NEXT: beqz a0, .LBB8_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB8_2:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
+; RV32IFD-NEXT: .LBB8_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_trunc_si32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB8_2
+; RV64IFD-NEXT: beqz a0, .LBB8_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB8_2:
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB8_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.trunc.f64(double %x)
%b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_trunc_si64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB9_2
+; RV64IFD-NEXT: beqz a0, .LBB9_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB9_2:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB9_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.trunc.f64(double %x)
%b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_trunc_ui32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB10_2
+; RV32IFD-NEXT: beqz a0, .LBB10_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB10_2:
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
+; RV32IFD-NEXT: .LBB10_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_trunc_ui32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB10_2
+; RV64IFD-NEXT: beqz a0, .LBB10_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB10_2:
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB10_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.trunc.f64(double %x)
%b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_trunc_ui64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB11_2
+; RV64IFD-NEXT: beqz a0, .LBB11_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB11_2:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
+; RV64IFD-NEXT: .LBB11_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.trunc.f64(double %x)
%b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_round_si32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB12_2
+; RV32IFD-NEXT: beqz a0, .LBB12_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB12_2:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rmm
+; RV32IFD-NEXT: .LBB12_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_round_si32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB12_2
+; RV64IFD-NEXT: beqz a0, .LBB12_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB12_2:
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rmm
+; RV64IFD-NEXT: .LBB12_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.round.f64(double %x)
%b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_round_si64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB13_2
+; RV64IFD-NEXT: beqz a0, .LBB13_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB13_2:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm
+; RV64IFD-NEXT: .LBB13_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.round.f64(double %x)
%b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_round_ui32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB14_2
+; RV32IFD-NEXT: beqz a0, .LBB14_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB14_2:
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rmm
+; RV32IFD-NEXT: .LBB14_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_round_ui32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB14_2
+; RV64IFD-NEXT: beqz a0, .LBB14_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB14_2:
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rmm
+; RV64IFD-NEXT: .LBB14_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.round.f64(double %x)
%b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_round_ui64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB15_2
+; RV64IFD-NEXT: beqz a0, .LBB15_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB15_2:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rmm
+; RV64IFD-NEXT: .LBB15_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.round.f64(double %x)
%b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_roundeven_si32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB16_2
+; RV32IFD-NEXT: beqz a0, .LBB16_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB16_2:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rne
+; RV32IFD-NEXT: .LBB16_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_roundeven_si32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB16_2
+; RV64IFD-NEXT: beqz a0, .LBB16_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB16_2:
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rne
+; RV64IFD-NEXT: .LBB16_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.roundeven.f64(double %x)
%b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_roundeven_si64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB17_2
+; RV64IFD-NEXT: beqz a0, .LBB17_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB17_2:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rne
+; RV64IFD-NEXT: .LBB17_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.roundeven.f64(double %x)
%b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
; RV32IFD-LABEL: test_roundeven_ui32:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: bnez a0, .LBB18_2
+; RV32IFD-NEXT: beqz a0, .LBB18_2
; RV32IFD-NEXT: # %bb.1:
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB18_2:
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rne
+; RV32IFD-NEXT: .LBB18_2:
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_roundeven_ui32:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB18_2
+; RV64IFD-NEXT: beqz a0, .LBB18_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB18_2:
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rne
+; RV64IFD-NEXT: .LBB18_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.roundeven.f64(double %x)
%b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
; RV64IFD-LABEL: test_roundeven_ui64:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: bnez a0, .LBB19_2
+; RV64IFD-NEXT: beqz a0, .LBB19_2
; RV64IFD-NEXT: # %bb.1:
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB19_2:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rne
+; RV64IFD-NEXT: .LBB19_2:
; RV64IFD-NEXT: ret
%a = call double @llvm.roundeven.f64(double %x)
%b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
; RV32IF-LABEL: fcvt_w_s_sat:
; RV32IF: # %bb.0: # %start
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB1_2
-; RV32IF-NEXT: # %bb.1: # %start
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB1_2:
+; RV32IF-NEXT: beqz a0, .LBB1_2
+; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32IF-NEXT: .LBB1_2: # %start
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: fcvt_w_s_sat:
; RV64IF: # %bb.0: # %start
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB1_2
-; RV64IF-NEXT: # %bb.1: # %start
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB1_2:
+; RV64IF-NEXT: beqz a0, .LBB1_2
+; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB1_2: # %start
; RV64IF-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat:
; RV32IF-LABEL: fcvt_wu_s_sat:
; RV32IF: # %bb.0: # %start
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB4_2
-; RV32IF-NEXT: # %bb.1: # %start
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB4_2:
+; RV32IF-NEXT: beqz a0, .LBB4_2
+; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
+; RV32IF-NEXT: .LBB4_2: # %start
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: fcvt_wu_s_sat:
; RV64IF: # %bb.0: # %start
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB4_2
-; RV64IF-NEXT: # %bb.1: # %start
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB4_2:
+; RV64IF-NEXT: beqz a0, .LBB4_2
+; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB4_2: # %start
; RV64IF-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_s_sat:
; RV64IF-LABEL: fcvt_l_s_sat:
; RV64IF: # %bb.0: # %start
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB12_2
-; RV64IF-NEXT: # %bb.1: # %start
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB12_2:
+; RV64IF-NEXT: beqz a0, .LBB12_2
+; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB12_2: # %start
; RV64IF-NEXT: ret
;
; RV32I-LABEL: fcvt_l_s_sat:
; RV64IF-LABEL: fcvt_lu_s_sat:
; RV64IF: # %bb.0: # %start
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB14_2
-; RV64IF-NEXT: # %bb.1: # %start
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB14_2:
+; RV64IF-NEXT: beqz a0, .LBB14_2
+; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB14_2: # %start
; RV64IF-NEXT: ret
;
; RV32I-LABEL: fcvt_lu_s_sat:
; RV32IF-LABEL: fcvt_w_s_sat_i16:
; RV32IF: # %bb.0: # %start
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB24_2
-; RV32IF-NEXT: # %bb.1: # %start
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB24_2:
+; RV32IF-NEXT: beqz a0, .LBB24_2
+; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: lui a0, %hi(.LCPI24_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
; RV32IF-NEXT: lui a0, %hi(.LCPI24_1)
; RV32IF-NEXT: fmax.s ft0, fa0, ft0
; RV32IF-NEXT: fmin.s ft0, ft0, ft1
; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT: .LBB24_2: # %start
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: fcvt_w_s_sat_i16:
; RV64IF: # %bb.0: # %start
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB24_2
-; RV64IF-NEXT: # %bb.1: # %start
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB24_2:
+; RV64IF-NEXT: beqz a0, .LBB24_2
+; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: lui a0, %hi(.LCPI24_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
; RV64IF-NEXT: lui a0, %hi(.LCPI24_1)
; RV64IF-NEXT: fmax.s ft0, fa0, ft0
; RV64IF-NEXT: fmin.s ft0, ft0, ft1
; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT: .LBB24_2: # %start
; RV64IF-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i16:
; RV32IF-LABEL: fcvt_w_s_sat_i8:
; RV32IF: # %bb.0: # %start
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB28_2
-; RV32IF-NEXT: # %bb.1: # %start
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB28_2:
+; RV32IF-NEXT: beqz a0, .LBB28_2
+; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: lui a0, %hi(.LCPI28_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI28_0)(a0)
; RV32IF-NEXT: lui a0, %hi(.LCPI28_1)
; RV32IF-NEXT: fmax.s ft0, fa0, ft0
; RV32IF-NEXT: fmin.s ft0, ft0, ft1
; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IF-NEXT: .LBB28_2: # %start
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: fcvt_w_s_sat_i8:
; RV64IF: # %bb.0: # %start
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB28_2
-; RV64IF-NEXT: # %bb.1: # %start
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB28_2:
+; RV64IF-NEXT: beqz a0, .LBB28_2
+; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: lui a0, %hi(.LCPI28_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI28_0)(a0)
; RV64IF-NEXT: lui a0, %hi(.LCPI28_1)
; RV64IF-NEXT: fmax.s ft0, fa0, ft0
; RV64IF-NEXT: fmin.s ft0, ft0, ft1
; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IF-NEXT: .LBB28_2: # %start
; RV64IF-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i8:
; RV32IF-LABEL: test_floor_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB0_2
+; RV32IF-NEXT: beqz a0, .LBB0_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB0_2:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32IF-NEXT: .LBB0_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB0_2
+; RV64IF-NEXT: beqz a0, .LBB0_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB0_2:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64IF-NEXT: .LBB0_2:
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
; RV64IF-LABEL: test_floor_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB1_2
+; RV64IF-NEXT: beqz a0, .LBB1_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB1_2:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
+; RV64IF-NEXT: .LBB1_2:
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
; RV32IF-LABEL: test_floor_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB2_2
+; RV32IF-NEXT: beqz a0, .LBB2_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB2_2:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
+; RV32IF-NEXT: .LBB2_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB2_2
+; RV64IF-NEXT: beqz a0, .LBB2_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB2_2:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rdn
+; RV64IF-NEXT: .LBB2_2:
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
; RV64IF-LABEL: test_floor_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB3_2
+; RV64IF-NEXT: beqz a0, .LBB3_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB3_2:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
+; RV64IF-NEXT: .LBB3_2:
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
; RV32IF-LABEL: test_ceil_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB4_2
+; RV32IF-NEXT: beqz a0, .LBB4_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB4_2:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
+; RV32IF-NEXT: .LBB4_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB4_2
+; RV64IF-NEXT: beqz a0, .LBB4_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB4_2:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
+; RV64IF-NEXT: .LBB4_2:
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
; RV64IF-LABEL: test_ceil_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB5_2
+; RV64IF-NEXT: beqz a0, .LBB5_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB5_2:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
+; RV64IF-NEXT: .LBB5_2:
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
; RV32IF-LABEL: test_ceil_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB6_2
+; RV32IF-NEXT: beqz a0, .LBB6_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB6_2:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
+; RV32IF-NEXT: .LBB6_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB6_2
+; RV64IF-NEXT: beqz a0, .LBB6_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB6_2:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rup
+; RV64IF-NEXT: .LBB6_2:
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
; RV64IF-LABEL: test_ceil_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB7_2
+; RV64IF-NEXT: beqz a0, .LBB7_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB7_2:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
+; RV64IF-NEXT: .LBB7_2:
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
; RV32IF-LABEL: test_trunc_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB8_2
+; RV32IF-NEXT: beqz a0, .LBB8_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB8_2:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32IF-NEXT: .LBB8_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB8_2
+; RV64IF-NEXT: beqz a0, .LBB8_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB8_2:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB8_2:
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
; RV64IF-LABEL: test_trunc_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB9_2
+; RV64IF-NEXT: beqz a0, .LBB9_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB9_2:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB9_2:
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
; RV32IF-LABEL: test_trunc_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB10_2
+; RV32IF-NEXT: beqz a0, .LBB10_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB10_2:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
+; RV32IF-NEXT: .LBB10_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB10_2
+; RV64IF-NEXT: beqz a0, .LBB10_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB10_2:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB10_2:
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
; RV64IF-LABEL: test_trunc_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB11_2
+; RV64IF-NEXT: beqz a0, .LBB11_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB11_2:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
+; RV64IF-NEXT: .LBB11_2:
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
; RV32IF-LABEL: test_round_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB12_2
+; RV32IF-NEXT: beqz a0, .LBB12_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB12_2:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32IF-NEXT: .LBB12_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB12_2
+; RV64IF-NEXT: beqz a0, .LBB12_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB12_2:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64IF-NEXT: .LBB12_2:
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
; RV64IF-LABEL: test_round_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB13_2
+; RV64IF-NEXT: beqz a0, .LBB13_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB13_2:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
+; RV64IF-NEXT: .LBB13_2:
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
; RV32IF-LABEL: test_round_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB14_2
+; RV32IF-NEXT: beqz a0, .LBB14_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB14_2:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
+; RV32IF-NEXT: .LBB14_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB14_2
+; RV64IF-NEXT: beqz a0, .LBB14_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB14_2:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rmm
+; RV64IF-NEXT: .LBB14_2:
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
; RV64IF-LABEL: test_round_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB15_2
+; RV64IF-NEXT: beqz a0, .LBB15_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB15_2:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
+; RV64IF-NEXT: .LBB15_2:
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
; RV32IF-LABEL: test_roundeven_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB16_2
+; RV32IF-NEXT: beqz a0, .LBB16_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB16_2:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
+; RV32IF-NEXT: .LBB16_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB16_2
+; RV64IF-NEXT: beqz a0, .LBB16_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB16_2:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
+; RV64IF-NEXT: .LBB16_2:
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
; RV64IF-LABEL: test_roundeven_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB17_2
+; RV64IF-NEXT: beqz a0, .LBB17_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB17_2:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
+; RV64IF-NEXT: .LBB17_2:
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
; RV32IF-LABEL: test_roundeven_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: bnez a0, .LBB18_2
+; RV32IF-NEXT: beqz a0, .LBB18_2
; RV32IF-NEXT: # %bb.1:
-; RV32IF-NEXT: li a0, 0
-; RV32IF-NEXT: ret
-; RV32IF-NEXT: .LBB18_2:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
+; RV32IF-NEXT: .LBB18_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB18_2
+; RV64IF-NEXT: beqz a0, .LBB18_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB18_2:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rne
+; RV64IF-NEXT: .LBB18_2:
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
; RV64IF-LABEL: test_roundeven_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: bnez a0, .LBB19_2
+; RV64IF-NEXT: beqz a0, .LBB19_2
; RV64IF-NEXT: # %bb.1:
-; RV64IF-NEXT: li a0, 0
-; RV64IF-NEXT: ret
-; RV64IF-NEXT: .LBB19_2:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
+; RV64IF-NEXT: .LBB19_2:
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft0, ft0
-; RV32IFD-NEXT: bnez a0, .LBB0_2
-; RV32IFD-NEXT: # %bb.1: # %entry
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: addi sp, sp, 16
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB0_2:
+; RV32IFD-NEXT: beqz a0, .LBB0_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB0_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft0, ft0
-; RV32IFD-NEXT: bnez a0, .LBB1_2
-; RV32IFD-NEXT: # %bb.1: # %entry
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: addi sp, sp, 16
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB1_2:
+; RV32IFD-NEXT: beqz a0, .LBB1_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB1_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft0, ft0
-; RV32IFD-NEXT: bnez a0, .LBB2_2
-; RV32IFD-NEXT: # %bb.1: # %entry
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: addi sp, sp, 16
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB2_2:
+; RV32IFD-NEXT: beqz a0, .LBB2_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB2_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: feq.s a0, ft0, ft0
-; RV32-NEXT: bnez a0, .LBB3_2
-; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: li a0, 0
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB3_2:
+; RV32-NEXT: beqz a0, .LBB3_2
+; RV32-NEXT: # %bb.1:
; RV32-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32-NEXT: .LBB3_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: feq.s a0, ft0, ft0
-; RV32-NEXT: bnez a0, .LBB4_2
-; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: li a0, 0
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB4_2:
+; RV32-NEXT: beqz a0, .LBB4_2
+; RV32-NEXT: # %bb.1:
; RV32-NEXT: fcvt.wu.s a0, ft0, rtz
+; RV32-NEXT: .LBB4_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: feq.s a0, ft0, ft0
-; RV32-NEXT: bnez a0, .LBB5_2
-; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: li a0, 0
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB5_2:
+; RV32-NEXT: beqz a0, .LBB5_2
+; RV32-NEXT: # %bb.1:
; RV32-NEXT: fcvt.wu.s a0, ft0, rtz
+; RV32-NEXT: .LBB5_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i32:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: feq.d a0, ft0, ft0
-; RV64IFD-NEXT: bnez a0, .LBB18_2
-; RV64IFD-NEXT: # %bb.1: # %entry
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB18_2:
+; RV64IFD-NEXT: beqz a0, .LBB18_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT: .LBB18_2: # %entry
; RV64IFD-NEXT: ret
entry:
%conv = fptosi double %x to i128
; RV32-NEXT: .LBB19_5: # %entry
; RV32-NEXT: bnez a0, .LBB19_7
; RV32-NEXT: # %bb.6: # %entry
-; RV32-NEXT: li a0, 0
; RV32-NEXT: li a1, 0
; RV32-NEXT: j .LBB19_8
; RV32-NEXT: .LBB19_7:
; RV32-NEXT: bnez a1, .LBB20_9
; RV32-NEXT: # %bb.6: # %entry
; RV32-NEXT: li a0, 0
-; RV32-NEXT: li a1, 0
; RV32-NEXT: li a2, 0
; RV32-NEXT: li a3, 1
; RV32-NEXT: bnez a2, .LBB20_10
; RV64: # %bb.0: # %entry
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: feq.s a0, ft0, ft0
-; RV64-NEXT: bnez a0, .LBB21_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB21_2:
+; RV64-NEXT: beqz a0, .LBB21_2
+; RV64-NEXT: # %bb.1:
; RV64-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64-NEXT: .LBB21_2: # %entry
; RV64-NEXT: ret
entry:
%conv = fptosi float %x to i128
; RV32-NEXT: .LBB22_5: # %entry
; RV32-NEXT: bnez a0, .LBB22_7
; RV32-NEXT: # %bb.6: # %entry
-; RV32-NEXT: li a0, 0
; RV32-NEXT: li a1, 0
; RV32-NEXT: j .LBB22_8
; RV32-NEXT: .LBB22_7:
; RV32-NEXT: bnez a1, .LBB23_9
; RV32-NEXT: # %bb.6: # %entry
; RV32-NEXT: li a0, 0
-; RV32-NEXT: li a1, 0
; RV32-NEXT: li a2, 0
; RV32-NEXT: li a3, 1
; RV32-NEXT: bnez a2, .LBB23_10
; RV32-NEXT: .LBB25_5: # %entry
; RV32-NEXT: bnez a0, .LBB25_7
; RV32-NEXT: # %bb.6: # %entry
-; RV32-NEXT: li a0, 0
; RV32-NEXT: li a1, 0
; RV32-NEXT: j .LBB25_8
; RV32-NEXT: .LBB25_7:
; RV32-NEXT: bnez a1, .LBB26_9
; RV32-NEXT: # %bb.6: # %entry
; RV32-NEXT: li a0, 0
-; RV32-NEXT: li a1, 0
; RV32-NEXT: li a2, 0
; RV32-NEXT: li a3, 1
; RV32-NEXT: bnez a2, .LBB26_10
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft0, ft0
-; RV32IFD-NEXT: bnez a0, .LBB27_2
-; RV32IFD-NEXT: # %bb.1: # %entry
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: addi sp, sp, 16
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB27_2:
+; RV32IFD-NEXT: beqz a0, .LBB27_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB27_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft0, ft0
-; RV32IFD-NEXT: bnez a0, .LBB28_2
-; RV32IFD-NEXT: # %bb.1: # %entry
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: addi sp, sp, 16
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB28_2:
+; RV32IFD-NEXT: beqz a0, .LBB28_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB28_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: fld ft0, 8(sp)
; RV32IFD-NEXT: feq.d a0, ft0, ft0
-; RV32IFD-NEXT: bnez a0, .LBB29_2
-; RV32IFD-NEXT: # %bb.1: # %entry
-; RV32IFD-NEXT: li a0, 0
-; RV32IFD-NEXT: addi sp, sp, 16
-; RV32IFD-NEXT: ret
-; RV32IFD-NEXT: .LBB29_2:
+; RV32IFD-NEXT: beqz a0, .LBB29_2
+; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz
+; RV32IFD-NEXT: .LBB29_2: # %entry
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: feq.s a0, ft0, ft0
-; RV32-NEXT: bnez a0, .LBB30_2
-; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: li a0, 0
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB30_2:
+; RV32-NEXT: beqz a0, .LBB30_2
+; RV32-NEXT: # %bb.1:
; RV32-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32-NEXT: .LBB30_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: feq.s a0, ft0, ft0
-; RV32-NEXT: bnez a0, .LBB31_2
-; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: li a0, 0
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB31_2:
+; RV32-NEXT: beqz a0, .LBB31_2
+; RV32-NEXT: # %bb.1:
; RV32-NEXT: fcvt.wu.s a0, ft0, rtz
+; RV32-NEXT: .LBB31_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: feq.s a0, ft0, ft0
-; RV32-NEXT: bnez a0, .LBB32_2
-; RV32-NEXT: # %bb.1: # %entry
-; RV32-NEXT: li a0, 0
-; RV32-NEXT: ret
-; RV32-NEXT: .LBB32_2:
+; RV32-NEXT: beqz a0, .LBB32_2
+; RV32-NEXT: # %bb.1:
; RV32-NEXT: fcvt.wu.s a0, ft0, rtz
+; RV32-NEXT: .LBB32_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i32_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: feq.d a0, ft0, ft0
-; RV64IFD-NEXT: bnez a0, .LBB45_2
-; RV64IFD-NEXT: # %bb.1: # %entry
-; RV64IFD-NEXT: li a0, 0
-; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB45_2:
+; RV64IFD-NEXT: beqz a0, .LBB45_2
+; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
+; RV64IFD-NEXT: .LBB45_2: # %entry
; RV64IFD-NEXT: ret
entry:
%conv = fptosi double %x to i128
; RV64: # %bb.0: # %entry
; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: feq.s a0, ft0, ft0
-; RV64-NEXT: bnez a0, .LBB48_2
-; RV64-NEXT: # %bb.1: # %entry
-; RV64-NEXT: li a0, 0
-; RV64-NEXT: ret
-; RV64-NEXT: .LBB48_2:
+; RV64-NEXT: beqz a0, .LBB48_2
+; RV64-NEXT: # %bb.1:
; RV64-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64-NEXT: .LBB48_2: # %entry
; RV64-NEXT: ret
entry:
%conv = fptosi float %x to i128
; RV32IZFH: # %bb.0: # %start
; RV32IZFH-NEXT: fcvt.s.h ft0, fa0
; RV32IZFH-NEXT: feq.s a0, ft0, ft0
-; RV32IZFH-NEXT: bnez a0, .LBB1_2
-; RV32IZFH-NEXT: # %bb.1: # %start
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB1_2:
+; RV32IZFH-NEXT: beqz a0, .LBB1_2
+; RV32IZFH-NEXT: # %bb.1:
; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_0)
; RV32IZFH-NEXT: flw ft1, %lo(.LCPI1_0)(a0)
; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_1)
; RV32IZFH-NEXT: fmax.s ft0, ft0, ft1
; RV32IZFH-NEXT: fmin.s ft0, ft0, ft2
; RV32IZFH-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IZFH-NEXT: .LBB1_2: # %start
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_si_h_sat:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: fcvt.s.h ft0, fa0
; RV64IZFH-NEXT: feq.s a0, ft0, ft0
-; RV64IZFH-NEXT: bnez a0, .LBB1_2
-; RV64IZFH-NEXT: # %bb.1: # %start
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB1_2:
+; RV64IZFH-NEXT: beqz a0, .LBB1_2
+; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: lui a0, %hi(.LCPI1_0)
; RV64IZFH-NEXT: flw ft1, %lo(.LCPI1_0)(a0)
; RV64IZFH-NEXT: lui a0, %hi(.LCPI1_1)
; RV64IZFH-NEXT: fmax.s ft0, ft0, ft1
; RV64IZFH-NEXT: fmin.s ft0, ft0, ft2
; RV64IZFH-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IZFH-NEXT: .LBB1_2: # %start
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_si_h_sat:
; RV32IDZFH: # %bb.0: # %start
; RV32IDZFH-NEXT: fcvt.s.h ft0, fa0
; RV32IDZFH-NEXT: feq.s a0, ft0, ft0
-; RV32IDZFH-NEXT: bnez a0, .LBB1_2
-; RV32IDZFH-NEXT: # %bb.1: # %start
-; RV32IDZFH-NEXT: li a0, 0
-; RV32IDZFH-NEXT: ret
-; RV32IDZFH-NEXT: .LBB1_2:
+; RV32IDZFH-NEXT: beqz a0, .LBB1_2
+; RV32IDZFH-NEXT: # %bb.1:
; RV32IDZFH-NEXT: lui a0, %hi(.LCPI1_0)
; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI1_0)(a0)
; RV32IDZFH-NEXT: lui a0, %hi(.LCPI1_1)
; RV32IDZFH-NEXT: fmax.s ft0, ft0, ft1
; RV32IDZFH-NEXT: fmin.s ft0, ft0, ft2
; RV32IDZFH-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IDZFH-NEXT: .LBB1_2: # %start
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_si_h_sat:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: fcvt.s.h ft0, fa0
; RV64IDZFH-NEXT: feq.s a0, ft0, ft0
-; RV64IDZFH-NEXT: bnez a0, .LBB1_2
-; RV64IDZFH-NEXT: # %bb.1: # %start
-; RV64IDZFH-NEXT: li a0, 0
-; RV64IDZFH-NEXT: ret
-; RV64IDZFH-NEXT: .LBB1_2:
+; RV64IDZFH-NEXT: beqz a0, .LBB1_2
+; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: lui a0, %hi(.LCPI1_0)
; RV64IDZFH-NEXT: flw ft1, %lo(.LCPI1_0)(a0)
; RV64IDZFH-NEXT: lui a0, %hi(.LCPI1_1)
; RV64IDZFH-NEXT: fmax.s ft0, ft0, ft1
; RV64IDZFH-NEXT: fmin.s ft0, ft0, ft2
; RV64IDZFH-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IDZFH-NEXT: .LBB1_2: # %start
; RV64IDZFH-NEXT: ret
;
; RV32I-LABEL: fcvt_si_h_sat:
; RV32IZFH-LABEL: fcvt_w_h_sat:
; RV32IZFH: # %bb.0: # %start
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB5_2
-; RV32IZFH-NEXT: # %bb.1: # %start
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB5_2:
+; RV32IZFH-NEXT: beqz a0, .LBB5_2
+; RV32IZFH-NEXT: # %bb.1:
; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rtz
+; RV32IZFH-NEXT: .LBB5_2: # %start
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_w_h_sat:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB5_2
-; RV64IZFH-NEXT: # %bb.1: # %start
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB5_2:
+; RV64IZFH-NEXT: beqz a0, .LBB5_2
+; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB5_2: # %start
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_w_h_sat:
; RV32IDZFH: # %bb.0: # %start
; RV32IDZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IDZFH-NEXT: bnez a0, .LBB5_2
-; RV32IDZFH-NEXT: # %bb.1: # %start
-; RV32IDZFH-NEXT: li a0, 0
-; RV32IDZFH-NEXT: ret
-; RV32IDZFH-NEXT: .LBB5_2:
+; RV32IDZFH-NEXT: beqz a0, .LBB5_2
+; RV32IDZFH-NEXT: # %bb.1:
; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rtz
+; RV32IDZFH-NEXT: .LBB5_2: # %start
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_w_h_sat:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IDZFH-NEXT: bnez a0, .LBB5_2
-; RV64IDZFH-NEXT: # %bb.1: # %start
-; RV64IDZFH-NEXT: li a0, 0
-; RV64IDZFH-NEXT: ret
-; RV64IDZFH-NEXT: .LBB5_2:
+; RV64IDZFH-NEXT: beqz a0, .LBB5_2
+; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: fcvt.w.h a0, fa0, rtz
+; RV64IDZFH-NEXT: .LBB5_2: # %start
; RV64IDZFH-NEXT: ret
;
; RV32I-LABEL: fcvt_w_h_sat:
; RV32IZFH-LABEL: fcvt_wu_h_sat:
; RV32IZFH: # %bb.0: # %start
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB8_2
-; RV32IZFH-NEXT: # %bb.1: # %start
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB8_2:
+; RV32IZFH-NEXT: beqz a0, .LBB8_2
+; RV32IZFH-NEXT: # %bb.1:
; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV32IZFH-NEXT: .LBB8_2: # %start
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_wu_h_sat:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB8_2
-; RV64IZFH-NEXT: # %bb.1: # %start
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB8_2:
+; RV64IZFH-NEXT: beqz a0, .LBB8_2
+; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB8_2: # %start
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_wu_h_sat:
; RV32IDZFH: # %bb.0: # %start
; RV32IDZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IDZFH-NEXT: bnez a0, .LBB8_2
-; RV32IDZFH-NEXT: # %bb.1: # %start
-; RV32IDZFH-NEXT: li a0, 0
-; RV32IDZFH-NEXT: ret
-; RV32IDZFH-NEXT: .LBB8_2:
+; RV32IDZFH-NEXT: beqz a0, .LBB8_2
+; RV32IDZFH-NEXT: # %bb.1:
; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV32IDZFH-NEXT: .LBB8_2: # %start
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_wu_h_sat:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IDZFH-NEXT: bnez a0, .LBB8_2
-; RV64IDZFH-NEXT: # %bb.1: # %start
-; RV64IDZFH-NEXT: li a0, 0
-; RV64IDZFH-NEXT: ret
-; RV64IDZFH-NEXT: .LBB8_2:
+; RV64IDZFH-NEXT: beqz a0, .LBB8_2
+; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV64IDZFH-NEXT: .LBB8_2: # %start
; RV64IDZFH-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_h_sat:
; RV64IZFH-LABEL: fcvt_l_h_sat:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB10_2
-; RV64IZFH-NEXT: # %bb.1: # %start
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB10_2:
+; RV64IZFH-NEXT: beqz a0, .LBB10_2
+; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB10_2: # %start
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_l_h_sat:
; RV64IDZFH-LABEL: fcvt_l_h_sat:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IDZFH-NEXT: bnez a0, .LBB10_2
-; RV64IDZFH-NEXT: # %bb.1: # %start
-; RV64IDZFH-NEXT: li a0, 0
-; RV64IDZFH-NEXT: ret
-; RV64IDZFH-NEXT: .LBB10_2:
+; RV64IDZFH-NEXT: beqz a0, .LBB10_2
+; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz
+; RV64IDZFH-NEXT: .LBB10_2: # %start
; RV64IDZFH-NEXT: ret
;
; RV32I-LABEL: fcvt_l_h_sat:
; RV64IZFH-LABEL: fcvt_lu_h_sat:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB12_2
-; RV64IZFH-NEXT: # %bb.1: # %start
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB12_2:
+; RV64IZFH-NEXT: beqz a0, .LBB12_2
+; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB12_2: # %start
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_lu_h_sat:
; RV64IDZFH-LABEL: fcvt_lu_h_sat:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IDZFH-NEXT: bnez a0, .LBB12_2
-; RV64IDZFH-NEXT: # %bb.1: # %start
-; RV64IDZFH-NEXT: li a0, 0
-; RV64IDZFH-NEXT: ret
-; RV64IDZFH-NEXT: .LBB12_2:
+; RV64IDZFH-NEXT: beqz a0, .LBB12_2
+; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz
+; RV64IDZFH-NEXT: .LBB12_2: # %start
; RV64IDZFH-NEXT: ret
;
; RV32I-LABEL: fcvt_lu_h_sat:
; RV32IZFH: # %bb.0: # %start
; RV32IZFH-NEXT: fcvt.s.h ft0, fa0
; RV32IZFH-NEXT: feq.s a0, ft0, ft0
-; RV32IZFH-NEXT: bnez a0, .LBB32_2
-; RV32IZFH-NEXT: # %bb.1: # %start
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB32_2:
+; RV32IZFH-NEXT: beqz a0, .LBB32_2
+; RV32IZFH-NEXT: # %bb.1:
; RV32IZFH-NEXT: lui a0, %hi(.LCPI32_0)
; RV32IZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0)
; RV32IZFH-NEXT: lui a0, %hi(.LCPI32_1)
; RV32IZFH-NEXT: fmax.s ft0, ft0, ft1
; RV32IZFH-NEXT: fmin.s ft0, ft0, ft2
; RV32IZFH-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IZFH-NEXT: .LBB32_2: # %start
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_w_s_sat_i16:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: fcvt.s.h ft0, fa0
; RV64IZFH-NEXT: feq.s a0, ft0, ft0
-; RV64IZFH-NEXT: bnez a0, .LBB32_2
-; RV64IZFH-NEXT: # %bb.1: # %start
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB32_2:
+; RV64IZFH-NEXT: beqz a0, .LBB32_2
+; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: lui a0, %hi(.LCPI32_0)
; RV64IZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0)
; RV64IZFH-NEXT: lui a0, %hi(.LCPI32_1)
; RV64IZFH-NEXT: fmax.s ft0, ft0, ft1
; RV64IZFH-NEXT: fmin.s ft0, ft0, ft2
; RV64IZFH-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IZFH-NEXT: .LBB32_2: # %start
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_w_s_sat_i16:
; RV32IDZFH: # %bb.0: # %start
; RV32IDZFH-NEXT: fcvt.s.h ft0, fa0
; RV32IDZFH-NEXT: feq.s a0, ft0, ft0
-; RV32IDZFH-NEXT: bnez a0, .LBB32_2
-; RV32IDZFH-NEXT: # %bb.1: # %start
-; RV32IDZFH-NEXT: li a0, 0
-; RV32IDZFH-NEXT: ret
-; RV32IDZFH-NEXT: .LBB32_2:
+; RV32IDZFH-NEXT: beqz a0, .LBB32_2
+; RV32IDZFH-NEXT: # %bb.1:
; RV32IDZFH-NEXT: lui a0, %hi(.LCPI32_0)
; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0)
; RV32IDZFH-NEXT: lui a0, %hi(.LCPI32_1)
; RV32IDZFH-NEXT: fmax.s ft0, ft0, ft1
; RV32IDZFH-NEXT: fmin.s ft0, ft0, ft2
; RV32IDZFH-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IDZFH-NEXT: .LBB32_2: # %start
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_w_s_sat_i16:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: fcvt.s.h ft0, fa0
; RV64IDZFH-NEXT: feq.s a0, ft0, ft0
-; RV64IDZFH-NEXT: bnez a0, .LBB32_2
-; RV64IDZFH-NEXT: # %bb.1: # %start
-; RV64IDZFH-NEXT: li a0, 0
-; RV64IDZFH-NEXT: ret
-; RV64IDZFH-NEXT: .LBB32_2:
+; RV64IDZFH-NEXT: beqz a0, .LBB32_2
+; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: lui a0, %hi(.LCPI32_0)
; RV64IDZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0)
; RV64IDZFH-NEXT: lui a0, %hi(.LCPI32_1)
; RV64IDZFH-NEXT: fmax.s ft0, ft0, ft1
; RV64IDZFH-NEXT: fmin.s ft0, ft0, ft2
; RV64IDZFH-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IDZFH-NEXT: .LBB32_2: # %start
; RV64IDZFH-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i16:
; RV32IZFH: # %bb.0: # %start
; RV32IZFH-NEXT: fcvt.s.h ft0, fa0
; RV32IZFH-NEXT: feq.s a0, ft0, ft0
-; RV32IZFH-NEXT: bnez a0, .LBB36_2
-; RV32IZFH-NEXT: # %bb.1: # %start
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB36_2:
+; RV32IZFH-NEXT: beqz a0, .LBB36_2
+; RV32IZFH-NEXT: # %bb.1:
; RV32IZFH-NEXT: lui a0, %hi(.LCPI36_0)
; RV32IZFH-NEXT: flw ft1, %lo(.LCPI36_0)(a0)
; RV32IZFH-NEXT: lui a0, %hi(.LCPI36_1)
; RV32IZFH-NEXT: fmax.s ft0, ft0, ft1
; RV32IZFH-NEXT: fmin.s ft0, ft0, ft2
; RV32IZFH-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IZFH-NEXT: .LBB36_2: # %start
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_w_s_sat_i8:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: fcvt.s.h ft0, fa0
; RV64IZFH-NEXT: feq.s a0, ft0, ft0
-; RV64IZFH-NEXT: bnez a0, .LBB36_2
-; RV64IZFH-NEXT: # %bb.1: # %start
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB36_2:
+; RV64IZFH-NEXT: beqz a0, .LBB36_2
+; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: lui a0, %hi(.LCPI36_0)
; RV64IZFH-NEXT: flw ft1, %lo(.LCPI36_0)(a0)
; RV64IZFH-NEXT: lui a0, %hi(.LCPI36_1)
; RV64IZFH-NEXT: fmax.s ft0, ft0, ft1
; RV64IZFH-NEXT: fmin.s ft0, ft0, ft2
; RV64IZFH-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IZFH-NEXT: .LBB36_2: # %start
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_w_s_sat_i8:
; RV32IDZFH: # %bb.0: # %start
; RV32IDZFH-NEXT: fcvt.s.h ft0, fa0
; RV32IDZFH-NEXT: feq.s a0, ft0, ft0
-; RV32IDZFH-NEXT: bnez a0, .LBB36_2
-; RV32IDZFH-NEXT: # %bb.1: # %start
-; RV32IDZFH-NEXT: li a0, 0
-; RV32IDZFH-NEXT: ret
-; RV32IDZFH-NEXT: .LBB36_2:
+; RV32IDZFH-NEXT: beqz a0, .LBB36_2
+; RV32IDZFH-NEXT: # %bb.1:
; RV32IDZFH-NEXT: lui a0, %hi(.LCPI36_0)
; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI36_0)(a0)
; RV32IDZFH-NEXT: lui a0, %hi(.LCPI36_1)
; RV32IDZFH-NEXT: fmax.s ft0, ft0, ft1
; RV32IDZFH-NEXT: fmin.s ft0, ft0, ft2
; RV32IDZFH-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32IDZFH-NEXT: .LBB36_2: # %start
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_w_s_sat_i8:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: fcvt.s.h ft0, fa0
; RV64IDZFH-NEXT: feq.s a0, ft0, ft0
-; RV64IDZFH-NEXT: bnez a0, .LBB36_2
-; RV64IDZFH-NEXT: # %bb.1: # %start
-; RV64IDZFH-NEXT: li a0, 0
-; RV64IDZFH-NEXT: ret
-; RV64IDZFH-NEXT: .LBB36_2:
+; RV64IDZFH-NEXT: beqz a0, .LBB36_2
+; RV64IDZFH-NEXT: # %bb.1:
; RV64IDZFH-NEXT: lui a0, %hi(.LCPI36_0)
; RV64IDZFH-NEXT: flw ft1, %lo(.LCPI36_0)(a0)
; RV64IDZFH-NEXT: lui a0, %hi(.LCPI36_1)
; RV64IDZFH-NEXT: fmax.s ft0, ft0, ft1
; RV64IDZFH-NEXT: fmin.s ft0, ft0, ft2
; RV64IDZFH-NEXT: fcvt.l.s a0, ft0, rtz
+; RV64IDZFH-NEXT: .LBB36_2: # %start
; RV64IDZFH-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i8:
; RV32IZFH-LABEL: test_floor_si32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB0_2
+; RV32IZFH-NEXT: beqz a0, .LBB0_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB0_2:
; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rdn
+; RV32IZFH-NEXT: .LBB0_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_floor_si32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB0_2
+; RV64IZFH-NEXT: beqz a0, .LBB0_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB0_2:
; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rdn
+; RV64IZFH-NEXT: .LBB0_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.floor.f16(half %x)
%b = call i32 @llvm.fptosi.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_floor_si64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB1_2
+; RV64IZFH-NEXT: beqz a0, .LBB1_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB1_2:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rdn
+; RV64IZFH-NEXT: .LBB1_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.floor.f16(half %x)
%b = call i64 @llvm.fptosi.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_floor_ui32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB2_2
+; RV32IZFH-NEXT: beqz a0, .LBB2_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB2_2:
; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rdn
+; RV32IZFH-NEXT: .LBB2_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_floor_ui32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB2_2
+; RV64IZFH-NEXT: beqz a0, .LBB2_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB2_2:
; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rdn
+; RV64IZFH-NEXT: .LBB2_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.floor.f16(half %x)
%b = call i32 @llvm.fptoui.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_floor_ui64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB3_2
+; RV64IZFH-NEXT: beqz a0, .LBB3_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB3_2:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rdn
+; RV64IZFH-NEXT: .LBB3_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.floor.f16(half %x)
%b = call i64 @llvm.fptoui.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_ceil_si32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB4_2
+; RV32IZFH-NEXT: beqz a0, .LBB4_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB4_2:
; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rup
+; RV32IZFH-NEXT: .LBB4_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_ceil_si32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB4_2
+; RV64IZFH-NEXT: beqz a0, .LBB4_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB4_2:
; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rup
+; RV64IZFH-NEXT: .LBB4_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.ceil.f16(half %x)
%b = call i32 @llvm.fptosi.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_ceil_si64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB5_2
+; RV64IZFH-NEXT: beqz a0, .LBB5_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB5_2:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rup
+; RV64IZFH-NEXT: .LBB5_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.ceil.f16(half %x)
%b = call i64 @llvm.fptosi.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_ceil_ui32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB6_2
+; RV32IZFH-NEXT: beqz a0, .LBB6_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB6_2:
; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rup
+; RV32IZFH-NEXT: .LBB6_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_ceil_ui32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB6_2
+; RV64IZFH-NEXT: beqz a0, .LBB6_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB6_2:
; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rup
+; RV64IZFH-NEXT: .LBB6_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.ceil.f16(half %x)
%b = call i32 @llvm.fptoui.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_ceil_ui64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB7_2
+; RV64IZFH-NEXT: beqz a0, .LBB7_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB7_2:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rup
+; RV64IZFH-NEXT: .LBB7_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.ceil.f16(half %x)
%b = call i64 @llvm.fptoui.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_trunc_si32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB8_2
+; RV32IZFH-NEXT: beqz a0, .LBB8_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB8_2:
; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rtz
+; RV32IZFH-NEXT: .LBB8_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_trunc_si32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB8_2
+; RV64IZFH-NEXT: beqz a0, .LBB8_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB8_2:
; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB8_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.trunc.f16(half %x)
%b = call i32 @llvm.fptosi.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_trunc_si64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB9_2
+; RV64IZFH-NEXT: beqz a0, .LBB9_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB9_2:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB9_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.trunc.f16(half %x)
%b = call i64 @llvm.fptosi.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_trunc_ui32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB10_2
+; RV32IZFH-NEXT: beqz a0, .LBB10_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB10_2:
; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV32IZFH-NEXT: .LBB10_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_trunc_ui32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB10_2
+; RV64IZFH-NEXT: beqz a0, .LBB10_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB10_2:
; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB10_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.trunc.f16(half %x)
%b = call i32 @llvm.fptoui.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_trunc_ui64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB11_2
+; RV64IZFH-NEXT: beqz a0, .LBB11_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB11_2:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz
+; RV64IZFH-NEXT: .LBB11_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.trunc.f16(half %x)
%b = call i64 @llvm.fptoui.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_round_si32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB12_2
+; RV32IZFH-NEXT: beqz a0, .LBB12_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB12_2:
; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rmm
+; RV32IZFH-NEXT: .LBB12_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_round_si32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB12_2
+; RV64IZFH-NEXT: beqz a0, .LBB12_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB12_2:
; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rmm
+; RV64IZFH-NEXT: .LBB12_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.round.f16(half %x)
%b = call i32 @llvm.fptosi.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_round_si64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB13_2
+; RV64IZFH-NEXT: beqz a0, .LBB13_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB13_2:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rmm
+; RV64IZFH-NEXT: .LBB13_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.round.f16(half %x)
%b = call i64 @llvm.fptosi.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_round_ui32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB14_2
+; RV32IZFH-NEXT: beqz a0, .LBB14_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB14_2:
; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rmm
+; RV32IZFH-NEXT: .LBB14_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_round_ui32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB14_2
+; RV64IZFH-NEXT: beqz a0, .LBB14_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB14_2:
; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rmm
+; RV64IZFH-NEXT: .LBB14_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.round.f16(half %x)
%b = call i32 @llvm.fptoui.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_round_ui64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB15_2
+; RV64IZFH-NEXT: beqz a0, .LBB15_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB15_2:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rmm
+; RV64IZFH-NEXT: .LBB15_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.round.f16(half %x)
%b = call i64 @llvm.fptoui.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_roundeven_si32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB16_2
+; RV32IZFH-NEXT: beqz a0, .LBB16_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB16_2:
; RV32IZFH-NEXT: fcvt.w.h a0, fa0, rne
+; RV32IZFH-NEXT: .LBB16_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_roundeven_si32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB16_2
+; RV64IZFH-NEXT: beqz a0, .LBB16_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB16_2:
; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rne
+; RV64IZFH-NEXT: .LBB16_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.roundeven.f16(half %x)
%b = call i32 @llvm.fptosi.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_roundeven_si64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB17_2
+; RV64IZFH-NEXT: beqz a0, .LBB17_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB17_2:
; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rne
+; RV64IZFH-NEXT: .LBB17_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.roundeven.f16(half %x)
%b = call i64 @llvm.fptosi.sat.i64.f16(half %a)
; RV32IZFH-LABEL: test_roundeven_ui32:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: bnez a0, .LBB18_2
+; RV32IZFH-NEXT: beqz a0, .LBB18_2
; RV32IZFH-NEXT: # %bb.1:
-; RV32IZFH-NEXT: li a0, 0
-; RV32IZFH-NEXT: ret
-; RV32IZFH-NEXT: .LBB18_2:
; RV32IZFH-NEXT: fcvt.wu.h a0, fa0, rne
+; RV32IZFH-NEXT: .LBB18_2:
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: test_roundeven_ui32:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB18_2
+; RV64IZFH-NEXT: beqz a0, .LBB18_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB18_2:
; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rne
+; RV64IZFH-NEXT: .LBB18_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.roundeven.f16(half %x)
%b = call i32 @llvm.fptoui.sat.i32.f16(half %a)
; RV64IZFH-LABEL: test_roundeven_ui64:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: bnez a0, .LBB19_2
+; RV64IZFH-NEXT: beqz a0, .LBB19_2
; RV64IZFH-NEXT: # %bb.1:
-; RV64IZFH-NEXT: li a0, 0
-; RV64IZFH-NEXT: ret
-; RV64IZFH-NEXT: .LBB19_2:
; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rne
+; RV64IZFH-NEXT: .LBB19_2:
; RV64IZFH-NEXT: ret
%a = call half @llvm.roundeven.f16(half %x)
%b = call i64 @llvm.fptoui.sat.i64.f16(half %a)
; RV32I-NEXT: xori a1, a1, 136
; RV32I-NEXT: xori a0, a0, 7
; RV32I-NEXT: or a0, a1, a0
-; RV32I-NEXT: bnez a0, .LBB0_2
-; RV32I-NEXT: # %bb.1: # %if.end
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: ret
-; RV32I-NEXT: .LBB0_2: # %if.then
+; RV32I-NEXT: beqz a0, .LBB0_2
+; RV32I-NEXT: # %bb.1: # %if.then
; RV32I-NEXT: li a0, 1
+; RV32I-NEXT: .LBB0_2: # %if.end
; RV32I-NEXT: ret
entry:
%0 = load i8, i8* getelementptr inbounds ([5 x i8], [5 x i8]* @bytes, i32 0, i32 0), align 1
; RV32I-NEXT: xor a1, a1, a2
; RV32I-NEXT: xori a0, a0, 7
; RV32I-NEXT: or a0, a1, a0
-; RV32I-NEXT: bnez a0, .LBB1_2
-; RV32I-NEXT: # %bb.1: # %if.end
-; RV32I-NEXT: li a0, 0
-; RV32I-NEXT: ret
-; RV32I-NEXT: .LBB1_2: # %if.then
+; RV32I-NEXT: beqz a0, .LBB1_2
+; RV32I-NEXT: # %bb.1: # %if.then
; RV32I-NEXT: li a0, 1
+; RV32I-NEXT: .LBB1_2: # %if.end
; RV32I-NEXT: ret
entry:
%0 = load i16, i16* getelementptr inbounds ([5 x i16], [5 x i16]* @shorts, i32 0, i32 0), align 2