//
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// LoongArch specific DAG Nodes.
+//===----------------------------------------------------------------------===//
+
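+// These nodes carry the 32-bit GPR<->FPR moves and the in-FPR float-to-int
+// truncation so that they remain selectable after type legalization; on
+// LA64 the i32 operands involved are widened to i64.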
+def SDT_LoongArchMOVGR2FR_W_LA64
+ : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i64>]>;
+def SDT_LoongArchMOVFR2GR_S_LA64
+ : SDTypeProfile<1, 1, [SDTCisVT<0, i64>, SDTCisVT<1, f32>]>;
+def SDT_LoongArchFTINT : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisFP<1>]>;
+
+def loongarch_movgr2fr_w_la64
+ : SDNode<"LoongArchISD::MOVGR2FR_W_LA64", SDT_LoongArchMOVGR2FR_W_LA64>;
+def loongarch_movfr2gr_s_la64
+ : SDNode<"LoongArchISD::MOVFR2GR_S_LA64", SDT_LoongArchMOVFR2GR_S_LA64>;
+def loongarch_ftint : SDNode<"LoongArchISD::FTINT", SDT_LoongArchFTINT>;
+
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
def : Pat<(f32 fpimm0), (MOVGR2FR_W R0)>;
def : Pat<(f32 fpimm0neg), (FNEG_S (MOVGR2FR_W R0))>;
def : Pat<(f32 fpimm1), (FFINT_S_W (MOVGR2FR_W (ADDI_W R0, 1)))>;
+
+// FP Conversion
+def : Pat<(loongarch_ftint FPR32:$src), (FTINTRZ_W_S FPR32:$src)>;
} // Predicates = [HasBasicF]
+
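+// On LA64, i32 is not a legal type, so i32<->f32 bitcasts cannot be matched
+// as plain (bitconvert ...) patterns; lowering rewrites them to the *_LA64
+// nodes below, which take or produce an i64-typed GPR.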
+let Predicates = [HasBasicF, IsLA64] in {
+// GPR -> FPR
+def : Pat<(loongarch_movgr2fr_w_la64 GPR:$src), (MOVGR2FR_W GPR:$src)>;
+// FPR -> GPR
+def : Pat<(loongarch_movfr2gr_s_la64 FPR32:$src),
+ (MOVFR2GR_S FPR32:$src)>;
+// int -> f32
+def : Pat<(f32 (sint_to_fp GPR:$src)), (FFINT_S_W (MOVGR2FR_W GPR:$src))>;
+} // Predicates = [HasBasicF, IsLA64]
+
+let Predicates = [HasBasicF, IsLA32] in {
+// GPR -> FPR
+def : Pat<(bitconvert (i32 GPR:$src)), (MOVGR2FR_W GPR:$src)>;
+// FPR -> GPR
+def : Pat<(i32 (bitconvert FPR32:$src)), (MOVFR2GR_S FPR32:$src)>;
+// int -> f32
+def : Pat<(f32 (sint_to_fp (i32 GPR:$src))), (FFINT_S_W (MOVGR2FR_W GPR:$src))>;
+} // Predicates = [HasBasicF, IsLA32]
/// Stores
defm : StPat<store, FST_D, FPR64, f64>;
+
+/// FP conversion operations
+
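+// loongarch_ftint keeps its integer result in an FPR; the operand and
+// result register classes select among the ftintrz.{w,l}.{s,d} variants.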
+def : Pat<(loongarch_ftint FPR64:$src), (FTINTRZ_W_D FPR64:$src)>;
+def : Pat<(f64 (loongarch_ftint FPR64:$src)), (FTINTRZ_L_D FPR64:$src)>;
+def : Pat<(f64 (loongarch_ftint FPR32:$src)), (FTINTRZ_L_S FPR32:$src)>;
+
+// f64 -> f32
+def : Pat<(f32 (fpround FPR64:$src)), (FCVT_S_D FPR64:$src)>;
+// f32 -> f64
+def : Pat<(f64 (fpextend FPR32:$src)), (FCVT_D_S FPR32:$src)>;
} // Predicates = [HasBasicD]
/// Floating point constants
def : Pat<(f64 fpimm0), (MOVGR2FR_D R0)>;
def : Pat<(f64 fpimm0neg), (FNEG_D (MOVGR2FR_D R0))>;
def : Pat<(f64 fpimm1), (FFINT_D_L (MOVGR2FR_D (ADDI_D R0, 1)))>;
+
+// Convert int to FP
+def : Pat<(f64 (sint_to_fp (i64 (sexti32 (i64 GPR:$src))))),
+ (FFINT_D_W (MOVGR2FR_W GPR:$src))>;
+def : Pat<(f64 (sint_to_fp GPR:$src)), (FFINT_D_L (MOVGR2FR_D GPR:$src))>;
+
+// GPR -> FPR
+def : Pat<(bitconvert GPR:$src), (MOVGR2FR_D GPR:$src)>;
+
+// FPR -> GPR
+def : Pat<(bitconvert FPR64:$src), (MOVFR2GR_D FPR64:$src)>;
} // Predicates = [HasBasicD, IsLA64]
let Predicates = [HasBasicD, IsLA32] in {
def : Pat<(f64 fpimm0), (MOVGR2FRH_W (MOVGR2FR_W_64 R0), R0)>;
def : Pat<(f64 fpimm0neg), (FNEG_D (MOVGR2FRH_W (MOVGR2FR_W_64 R0), R0))>;
def : Pat<(f64 fpimm1), (FCVT_D_S (FFINT_S_W (MOVGR2FR_W (ADDI_W R0, 1))))>;
+
+// Convert int to FP
+def : Pat<(f64 (sint_to_fp (i32 GPR:$src))), (FFINT_D_W (MOVGR2FR_W GPR:$src))>;
} // Predicates = [HasBasicD, IsLA32]
return true;
}
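+
+// Match a value that is guaranteed to be sign-extended from i32: either an
+// explicit sign_extend_inreg from i32 (the extension is peeled off), or any
+// node whose upper bits are all known to be sign bits.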
+bool LoongArchDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
+ if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
+ cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
+ Val = N.getOperand(0);
+ return true;
+ }
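+  // No explicit extension; accept N as-is if everything above bit 31 is a
+  // copy of the sign bit (at least 33 sign bits for an i64 value).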
+ MVT VT = N.getSimpleValueType();
+ if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
+ Val = N;
+ return true;
+ }
+
+ return false;
+}
+
// This pass converts a legalized DAG into a LoongArch-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createLoongArchISelDag(LoongArchTargetMachine &TM) {
return selectShiftMask(N, 32, ShAmt);
}
+ bool selectSExti32(SDValue N, SDValue &Val);
+
// Include the pieces autogenerated from the target description.
#include "LoongArchGenDAGISel.inc"
};
setOperationAction(ISD::SHL_PARTS, GRLenVT, Custom);
setOperationAction(ISD::SRA_PARTS, GRLenVT, Custom);
setOperationAction(ISD::SRL_PARTS, GRLenVT, Custom);
+ setOperationAction(ISD::FP_TO_SINT, GRLenVT, Custom);
setOperationAction({ISD::GlobalAddress, ISD::ConstantPool}, GRLenVT, Custom);
setOperationAction(ISD::SHL, MVT::i32, Custom);
setOperationAction(ISD::SRA, MVT::i32, Custom);
setOperationAction(ISD::SRL, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+ setOperationAction(ISD::BITCAST, MVT::i32, Custom);
}
static const ISD::CondCode FPCCToExpand[] = {ISD::SETOGT, ISD::SETOGE,
setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
}
setOperationAction(ISD::BR_CC, GRLenVT, Expand);
return SDValue();
case ISD::ConstantPool:
return lowerConstantPool(Op, DAG);
+ case ISD::FP_TO_SINT:
+ return lowerFP_TO_SINT(Op, DAG);
+ case ISD::BITCAST:
+ return lowerBITCAST(Op, DAG);
}
}
+SDValue LoongArchTargetLowering::lowerBITCAST(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue Op0 = Op.getOperand(0);
+
+ if (Op.getValueType() == MVT::f32 && Op0.getValueType() == MVT::i32 &&
+ Subtarget.is64Bit() && Subtarget.hasBasicF()) {
+ SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
+ return DAG.getNode(LoongArchISD::MOVGR2FR_W_LA64, DL, MVT::f32, NewOp0);
+ }
+ return Op;
+}
+
+SDValue LoongArchTargetLowering::lowerFP_TO_SINT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+
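+  // An i64 result with only basic F available cannot use an f64 temporary:
+  // truncate to a 32-bit integer inside the FPR (ftintrz.w.s) and move it
+  // out with the sign-extending movfr2gr.s.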
+ if (Op.getValueSizeInBits() > 32 && Subtarget.hasBasicF() &&
+ !Subtarget.hasBasicD()) {
+ SDValue Dst =
+ DAG.getNode(LoongArchISD::FTINT, DL, MVT::f32, Op.getOperand(0));
+ return DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Dst);
+ }
+
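+  // Otherwise convert within the FP domain and move the same-sized integer
+  // result out of the FPR with a bitcast.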
+ EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
+ SDValue Trunc = DAG.getNode(LoongArchISD::FTINT, DL, FPTy, Op.getOperand(0));
+ return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Trunc);
+}
+
SDValue LoongArchTargetLowering::lowerConstantPool(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
break;
}
break;
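+  // An illegal i32 fptosi result: convert inside an f32 register, then
+  // bitcast to i32; that bitcast is itself custom-legalized by the
+  // ISD::BITCAST case below.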
+ case ISD::FP_TO_SINT: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+ SDValue Src = N->getOperand(0);
+ EVT VT = EVT::getFloatingPointVT(N->getValueSizeInBits(0));
+ SDValue Dst = DAG.getNode(LoongArchISD::FTINT, DL, VT, Src);
+ Results.push_back(DAG.getNode(ISD::BITCAST, DL, N->getValueType(0), Dst));
+ break;
+ }
+ case ISD::BITCAST: {
+ EVT VT = N->getValueType(0);
+ SDValue Src = N->getOperand(0);
+ EVT SrcVT = Src.getValueType();
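+    // On LA64 an f32 -> i32 bitcast is widened: move the FPR to a 64-bit
+    // GPR with MOVFR2GR_S_LA64 (sign-extending) and truncate back to i32.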
+ if (VT == MVT::i32 && SrcVT == MVT::f32 && Subtarget.is64Bit() &&
+ Subtarget.hasBasicF()) {
+ SDValue Dst =
+ DAG.getNode(LoongArchISD::MOVFR2GR_S_LA64, DL, MVT::i64, Src);
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Dst));
+ }
+ break;
+ }
}
}
NODE_NAME_CASE(SRA_W)
NODE_NAME_CASE(SRL_W)
NODE_NAME_CASE(BSTRPICK)
+ NODE_NAME_CASE(MOVGR2FR_W_LA64)
+ NODE_NAME_CASE(MOVFR2GR_S_LA64)
+ NODE_NAME_CASE(FTINT)
}
#undef NODE_NAME_CASE
return nullptr;
SRA_W,
SRL_W,
+ // FPR<->GPR transfer operations
+ MOVGR2FR_W_LA64,
+ MOVFR2GR_S_LA64,
+
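+  // Float-to-int conversion with round-toward-zero semantics; the integer
+  // result is produced in an FP register.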
+ FTINT,
+
BSTRPICK,
};
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const override;
SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
bool isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const override;
: ComplexPattern<GRLenVT, 1, "selectShiftMaskGRLen", [], [], 0>;
def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;
+def sexti32 : ComplexPattern<i64, 1, "selectSExti32">;
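+// Match an i64 value sign-extended from i32 (see selectSExti32), so that
+// FFINT_D_W can consume a 32-bit source without an explicit extension.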
+
class shiftop<SDPatternOperator operator>
: PatFrag<(ops node:$val, node:$count),
(operator node:$val, (GRLenVT (shiftMaskGRLen node:$count)))>;
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
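+; On LA32, i64 conversions become libcalls and i64 bitcasts go through a
+; stack slot; LA64 handles all of them with single moves and conversions.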
+
+define float @convert_double_to_float(double %a) nounwind {
+; LA32-LABEL: convert_double_to_float:
+; LA32: # %bb.0:
+; LA32-NEXT: fcvt.s.d $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_double_to_float:
+; LA64: # %bb.0:
+; LA64-NEXT: fcvt.s.d $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fptrunc double %a to float
+ ret float %1
+}
+
+define double @convert_float_to_double(float %a) nounwind {
+; LA32-LABEL: convert_float_to_double:
+; LA32: # %bb.0:
+; LA32-NEXT: fcvt.d.s $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_float_to_double:
+; LA64: # %bb.0:
+; LA64-NEXT: fcvt.d.s $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fpext float %a to double
+ ret double %1
+}
+
+define double @convert_i8_to_double(i8 signext %a) nounwind {
+; LA32-LABEL: convert_i8_to_double:
+; LA32: # %bb.0:
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ffint.d.w $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i8_to_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ffint.d.w $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i8 %a to double
+ ret double %1
+}
+
+define double @convert_i16_to_double(i16 signext %a) nounwind {
+; LA32-LABEL: convert_i16_to_double:
+; LA32: # %bb.0:
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ffint.d.w $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i16_to_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ffint.d.w $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i16 %a to double
+ ret double %1
+}
+
+define double @convert_i32_to_double(i32 %a) nounwind {
+; LA32-LABEL: convert_i32_to_double:
+; LA32: # %bb.0:
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ffint.d.w $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i32_to_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ffint.d.w $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i32 %a to double
+ ret double %1
+}
+
+define double @convert_i64_to_double(i64 %a) nounwind {
+; LA32-LABEL: convert_i64_to_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: bl __floatdidf
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i64_to_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.d $fa0, $a0
+; LA64-NEXT: ffint.d.l $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i64 %a to double
+ ret double %1
+}
+
+define i32 @convert_double_to_i32(double %a) nounwind {
+; LA32-LABEL: convert_double_to_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: ftintrz.w.d $fa0, $fa0
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_double_to_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: ftintrz.w.d $fa0, $fa0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fptosi double %a to i32
+ ret i32 %1
+}
+
+define i64 @convert_double_to_i64(double %a) nounwind {
+; LA32-LABEL: convert_double_to_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: bl __fixdfdi
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_double_to_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ftintrz.l.d $fa0, $fa0
+; LA64-NEXT: movfr2gr.d $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fptosi double %a to i64
+ ret i64 %1
+}
+
+define i64 @bitcast_double_to_i64(double %a) nounwind {
+; LA32-LABEL: bitcast_double_to_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: fst.d $fa0, $sp, 8
+; LA32-NEXT: addi.w $a0, $sp, 8
+; LA32-NEXT: ori $a0, $a0, 4
+; LA32-NEXT: ld.w $a1, $a0, 0
+; LA32-NEXT: ld.w $a0, $sp, 8
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: bitcast_double_to_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.d $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = bitcast double %a to i64
+ ret i64 %1
+}
+
+define double @bitcast_i64_to_double(i64 %a) nounwind {
+; LA32-LABEL: bitcast_i64_to_double:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: addi.w $a2, $sp, 8
+; LA32-NEXT: ori $a2, $a2, 4
+; LA32-NEXT: st.w $a1, $a2, 0
+; LA32-NEXT: st.w $a0, $sp, 8
+; LA32-NEXT: fld.d $fa0, $sp, 8
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: bitcast_i64_to_double:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.d $fa0, $a0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = bitcast i64 %a to double
+ ret double %1
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
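+; Without the D extension, LA64 can only use the 32-bit conversions
+; (ftintrz.w.s/ffint.s.w) even for i64 operands; LA32 uses libcalls.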
+
+define signext i8 @convert_float_to_i8(float %a) nounwind {
+; LA32-LABEL: convert_float_to_i8:
+; LA32: # %bb.0:
+; LA32-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_float_to_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fptosi float %a to i8
+ ret i8 %1
+}
+
+define signext i16 @convert_float_to_i16(float %a) nounwind {
+; LA32-LABEL: convert_float_to_i16:
+; LA32: # %bb.0:
+; LA32-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_float_to_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fptosi float %a to i16
+ ret i16 %1
+}
+
+define i32 @convert_float_to_i32(float %a) nounwind {
+; LA32-LABEL: convert_float_to_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: ftintrz.w.s $fa0, $fa0
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_float_to_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fptosi float %a to i32
+ ret i32 %1
+}
+
+define i64 @convert_float_to_i64(float %a) nounwind {
+; LA32-LABEL: convert_float_to_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: bl __fixsfdi
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_float_to_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ftintrz.w.s $fa0, $fa0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = fptosi float %a to i64
+ ret i64 %1
+}
+
+define float @convert_i8_to_float(i8 signext %a) nounwind {
+; LA32-LABEL: convert_i8_to_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ffint.s.w $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i8_to_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ffint.s.w $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i8 %a to float
+ ret float %1
+}
+
+define float @convert_i16_to_float(i16 signext %a) nounwind {
+; LA32-LABEL: convert_i16_to_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ffint.s.w $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i16_to_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ffint.s.w $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i16 %a to float
+ ret float %1
+}
+
+define float @convert_i32_to_float(i32 %a) nounwind {
+; LA32-LABEL: convert_i32_to_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ffint.s.w $fa0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i32_to_float:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.w $a0, $a0, 0
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ffint.s.w $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i32 %a to float
+ ret float %1
+}
+
+define float @convert_i64_to_float(i64 %a) nounwind {
+; LA32-LABEL: convert_i64_to_float:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: bl __floatdisf
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_i64_to_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ffint.s.w $fa0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = sitofp i64 %a to float
+ ret float %1
+}
+
+define i32 @bitcast_float_to_i32(float %a) nounwind {
+; LA32-LABEL: bitcast_float_to_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: bitcast_float_to_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = bitcast float %a to i32
+ ret i32 %1
+}
+
+define float @bitcast_i32_to_float(i32 %a) nounwind {
+; LA32-LABEL: bitcast_i32_to_float:
+; LA32: # %bb.0:
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: bitcast_i32_to_float:
+; LA64: # %bb.0:
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %1 = bitcast i32 %a to float
+ ret float %1
+}