defm : StPat<store, FST_S, FPR32, f32>;
+/// Floating point constants
+
+// Materialize +0.0 by moving the integer zero register into an FPR.
+def : Pat<(f32 fpimm0), (MOVGR2FR_W R0)>;
+// Materialize -0.0 by negating +0.0 (flips only the sign bit).
+def : Pat<(f32 fpimm0neg), (FNEG_S (MOVGR2FR_W R0))>;
+// Materialize +1.0: move integer 1 into an FPR, then int->float convert it.
+def : Pat<(f32 fpimm1), (FFINT_S_W (MOVGR2FR_W (ADDI_W R0, 1)))>;
} // Predicates = [HasBasicF]
def MOVFR2GR_D : FP_MOV<0b0000000100010100101110, "movfr2gr.d", GPR, FPR64>;
} // Predicates = [HasBasicD, IsLA64]
+// Instructions only available on LA32
+let Predicates = [HasBasicD, IsLA32], isCodeGenOnly = 1 in {
+// Codegen-only variant of movgr2fr.w whose destination class is FPR64, so
+// LA32 (no 64-bit GPRs) can write the low 32 bits of a 64-bit FPR; pairs
+// with MOVGR2FRH_W to assemble a full f64 from two 32-bit halves.
+// NOTE(review): encoding mirrors MOVGR2FR_W — confirm against the ISA manual.
+def MOVGR2FR_W_64 : FP_MOV<0b0000000100010100101001, "movgr2fr.w", FPR64, GPR>;
+} // Predicates = [HasBasicD, IsLA32], isCodeGenOnly = 1
+
//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//
/// Stores
defm : StPat<store, FST_D, FPR64, f64>;
-
} // Predicates = [HasBasicD]
+
+/// Floating point constants
+
+let Predicates = [HasBasicD, IsLA64] in {
+// Materialize +0.0 via a direct 64-bit GPR->FPR move of $zero.
+def : Pat<(f64 fpimm0), (MOVGR2FR_D R0)>;
+// -0.0: negate +0.0 (sign-bit flip only).
+def : Pat<(f64 fpimm0neg), (FNEG_D (MOVGR2FR_D R0))>;
+// +1.0: move integer 1 into an FPR, then 64-bit int -> double convert.
+def : Pat<(f64 fpimm1), (FFINT_D_L (MOVGR2FR_D (ADDI_D R0, 1)))>;
+} // Predicates = [HasBasicD, IsLA64]
+
+let Predicates = [HasBasicD, IsLA32] in {
+// LA32 has no 64-bit GPR->FPR move: build +0.0 by writing both 32-bit
+// halves of the FPR ($zero into the low half, then $zero into the high half).
+def : Pat<(f64 fpimm0), (MOVGR2FRH_W (MOVGR2FR_W_64 R0), R0)>;
+// -0.0: negate +0.0 assembled the same way.
+def : Pat<(f64 fpimm0neg), (FNEG_D (MOVGR2FRH_W (MOVGR2FR_W_64 R0), R0))>;
+// +1.0: build single-precision 1.0 from integer 1, then widen to double.
+def : Pat<(f64 fpimm1), (FCVT_D_S (FFINT_S_W (MOVGR2FR_W (ADDI_W R0, 1))))>;
+} // Predicates = [HasBasicD, IsLA32]
setOperationAction(ISD::SRA_PARTS, GRLenVT, Custom);
setOperationAction(ISD::SRL_PARTS, GRLenVT, Custom);
- setOperationAction(ISD::GlobalAddress, GRLenVT, Custom);
+ setOperationAction({ISD::GlobalAddress, ISD::ConstantPool}, GRLenVT, Custom);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::SHL, MVT::i32, Custom);
if (Subtarget.hasBasicD()) {
setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
}
setOperationAction(ISD::BR_CC, GRLenVT, Expand);
assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
"Unexpected custom legalisation");
return SDValue();
+ case ISD::ConstantPool:
+ return lowerConstantPool(Op, DAG);
}
}
+// Lower an ISD::ConstantPool node to a PC-relative address of the pool
+// entry, computed with a PCALAU12I (page hi20) + ADDI_{W,D} (low offset)
+// machine-node pair. PIC is not handled yet and aborts.
+SDValue LoongArchTargetLowering::lowerConstantPool(SDValue Op,
+                                                   SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT Ty = Op.getValueType();
+  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
+
+  // FIXME: Only support PC-relative addressing to access the symbol.
+  // Target flags will be added later.
+  if (!isPositionIndependent()) {
+    // Re-wrap the pool entry as a *target* constant-pool node so instruction
+    // selection treats it as an already-lowered operand.
+    SDValue ConstantN = DAG.getTargetConstantPool(
+        N->getConstVal(), Ty, N->getAlign(), N->getOffset());
+    SDValue AddrHi(DAG.getMachineNode(LoongArch::PCALAU12I, DL, Ty, ConstantN),
+                   0);
+    // ADDI width must match GRLen, hence the is64Bit() selection.
+    SDValue Addr(DAG.getMachineNode(Subtarget.is64Bit() ? LoongArch::ADDI_D
+                                                        : LoongArch::ADDI_W,
+                                    DL, Ty, AddrHi, ConstantN),
+                 0);
+    return Addr;
+  }
+  // TODO: position-independent lowering.
+  report_fatal_error("Unable to lower ConstantPool");
+}
+
SDValue LoongArchTargetLowering::lowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(LoongArchISD::RET, DL, MVT::Other, RetOps);
}
+
+// Report which FP immediates are cheap to materialize inline (no
+// constant-pool load): requires the matching FP feature, and only the
+// values the .td patterns can build — ±0.0 (isZero accepts both signs)
+// and +1.0.
+bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
+                                           bool ForCodeSize) const {
+  assert((VT == MVT::f32 || VT == MVT::f64) && "Unexpected VT");
+
+  if (VT == MVT::f32 && !Subtarget.hasBasicF())
+    return false;
+  if (VT == MVT::f64 && !Subtarget.hasBasicD())
+    return false;
+  return (Imm.isZero() || Imm.isExactlyValue(+1.0));
+}
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const override;
+  /// Custom lowering for ISD::ConstantPool nodes.
+  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+
+  /// Report which FP immediates can be materialized without a load.
+  bool isFPImmLegal(const APFloat &Imm, EVT VT,
+                    bool ForCodeSize) const override;
};
} // end namespace llvm
N->getValueType(0));
}]>;
+// FP immediate patterns.
+// Pattern leaves for the three FP constants with inline materialization
+// sequences. isExactlyValue compares bitwise, so +0.0 and -0.0 are distinct.
+def fpimm0 : PatLeaf<(fpimm), [{return N->isExactlyValue(+0.0);}]>;
+def fpimm0neg : PatLeaf<(fpimm), [{return N->isExactlyValue(-0.0);}]>;
+def fpimm1 : PatLeaf<(fpimm), [{return N->isExactlyValue(+1.0);}]>;
+
def CallSymbol: AsmOperandClass {
let Name = "CallSymbol";
let RenderMethod = "addImmOperands";
case MachineOperand::MO_Immediate:
MCOp = MCOperand::createImm(MO.getImm());
break;
+  // Constant-pool references lower to their .LCPI* symbol.
+  case MachineOperand::MO_ConstantPoolIndex:
+    MCOp = lowerSymbolOperand(MO, AP.GetCPISymbol(MO.getIndex()), AP);
+    break;
case MachineOperand::MO_GlobalAddress:
MCOp = lowerSymbolOperand(MO, AP.getSymbolPreferLocal(*MO.getGlobal()), AP);
break;
break;
// TODO: lower special operands
case MachineOperand::MO_BlockAddress:
- case MachineOperand::MO_ConstantPoolIndex:
case MachineOperand::MO_JumpTableIndex:
break;
}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64
+
+; Materialization of f64 immediates: +-0.0 and +1.0 are built inline with
+; moves/converts; any other constant (pi below) is loaded from the constant
+; pool via pcalau12i/addi + fld.d.
+
+define double @f64_positive_zero() nounwind {
+; LA32-LABEL: f64_positive_zero:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $zero
+; LA32-NEXT:    movgr2frh.w $fa0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_positive_zero:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.d $fa0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret double 0.0
+}
+
+define double @f64_negative_zero() nounwind {
+; LA32-LABEL: f64_negative_zero:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $zero
+; LA32-NEXT:    movgr2frh.w $fa0, $zero
+; LA32-NEXT:    fneg.d $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_negative_zero:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.d $fa0, $zero
+; LA64-NEXT:    fneg.d $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret double -0.0
+}
+
+; Not one of the inline-materializable values: expect a constant-pool load.
+define double @f64_constant_pi() nounwind {
+; LA32-LABEL: f64_constant_pi:
+; LA32:       # %bb.0:
+; LA32-NEXT:    pcalau12i $a0, .LCPI2_0
+; LA32-NEXT:    addi.w $a0, $a0, .LCPI2_0
+; LA32-NEXT:    fld.d $fa0, $a0, 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_constant_pi:
+; LA64:       # %bb.0:
+; LA64-NEXT:    pcalau12i $a0, .LCPI2_0
+; LA64-NEXT:    addi.d $a0, $a0, .LCPI2_0
+; LA64-NEXT:    fld.d $fa0, $a0, 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret double 3.1415926535897931159979634685441851615905761718750
+}
+
+define double @f64_add_fimm1(double %a) nounwind {
+; LA32-LABEL: f64_add_fimm1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $zero, 1
+; LA32-NEXT:    movgr2fr.w $fa1, $a0
+; LA32-NEXT:    ffint.s.w $fa1, $fa1
+; LA32-NEXT:    fcvt.d.s $fa1, $fa1
+; LA32-NEXT:    fadd.d $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_add_fimm1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $zero, 1
+; LA64-NEXT:    movgr2fr.d $fa1, $a0
+; LA64-NEXT:    ffint.d.l $fa1, $fa1
+; LA64-NEXT:    fadd.d $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = fadd double %a, 1.0
+  ret double %1
+}
+
+define double @f64_positive_fimm1() nounwind {
+; LA32-LABEL: f64_positive_fimm1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $zero, 1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ffint.s.w $fa0, $fa0
+; LA32-NEXT:    fcvt.d.s $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f64_positive_fimm1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $a0, $zero, 1
+; LA64-NEXT:    movgr2fr.d $fa0, $a0
+; LA64-NEXT:    ffint.d.l $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret double 1.0
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+
+; Materialization of f32 immediates: +-0.0 and +1.0 are built inline with
+; moves/converts; any other constant (pi below) is loaded from the constant
+; pool via pcalau12i/addi + fld.s.
+
+define float @f32_positive_zero() nounwind {
+; LA32-LABEL: f32_positive_zero:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $zero
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_positive_zero:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.w $fa0, $zero
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret float 0.0
+}
+
+define float @f32_negative_zero() nounwind {
+; LA32-LABEL: f32_negative_zero:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $zero
+; LA32-NEXT:    fneg.s $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_negative_zero:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.w $fa0, $zero
+; LA64-NEXT:    fneg.s $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret float -0.0
+}
+
+; Not one of the inline-materializable values: expect a constant-pool load.
+define float @f32_constant_pi() nounwind {
+; LA32-LABEL: f32_constant_pi:
+; LA32:       # %bb.0:
+; LA32-NEXT:    pcalau12i $a0, .LCPI2_0
+; LA32-NEXT:    addi.w $a0, $a0, .LCPI2_0
+; LA32-NEXT:    fld.s $fa0, $a0, 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_constant_pi:
+; LA64:       # %bb.0:
+; LA64-NEXT:    pcalau12i $a0, .LCPI2_0
+; LA64-NEXT:    addi.d $a0, $a0, .LCPI2_0
+; LA64-NEXT:    fld.s $fa0, $a0, 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret float 3.14159274101257324218750
+}
+
+define float @f32_add_fimm1(float %a) nounwind {
+; LA32-LABEL: f32_add_fimm1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $zero, 1
+; LA32-NEXT:    movgr2fr.w $fa1, $a0
+; LA32-NEXT:    ffint.s.w $fa1, $fa1
+; LA32-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_add_fimm1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.w $a0, $zero, 1
+; LA64-NEXT:    movgr2fr.w $fa1, $a0
+; LA64-NEXT:    ffint.s.w $fa1, $fa1
+; LA64-NEXT:    fadd.s $fa0, $fa0, $fa1
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = fadd float %a, 1.0
+  ret float %1
+}
+
+define float @f32_positive_fimm1() nounwind {
+; LA32-LABEL: f32_positive_fimm1:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $a0, $zero, 1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ffint.s.w $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: f32_positive_fimm1:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.w $a0, $zero, 1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ffint.s.w $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  ret float 1.0
+}