setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
+
+ // Mark the f128 round-to-integral nodes Legal: instead of expanding to a
+ // libcall, instruction selection matches them directly to the Power9
+ // quad-precision round instructions (xsrqpi/xsrqpix).
+ setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
+ setOperationAction(ISD::FRINT, MVT::f128, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
+ setOperationAction(ISD::FCEIL, MVT::f128, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
+ setOperationAction(ISD::FROUND, MVT::f128, Legal);
+
setOperationAction(ISD::SELECT, MVT::f128, Expand);
setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
def XSRQPI : Z23_VT5_R1_VB5_RMC2_EX1<63, 5, 0, "xsrqpi" , []>;
def XSRQPIX : Z23_VT5_R1_VB5_RMC2_EX1<63, 5, 1, "xsrqpix", []>;
+ // Map the f128 round-to-integral SDNodes onto XSRQPI/XSRQPIX. In each
+ // pattern the first immediate is the R bit and the last is the RMC field
+ // (per the Z23_VT5_R1_VB5_RMC2_EX1 operand layout); together they select
+ // the rounding mode named in the per-pattern comment below.
+ // Use current rounding mode
+ def : Pat<(f128 (fnearbyint f128:$vB)), (f128 (XSRQPI 0, $vB, 3))>;
+ // Round to nearest, ties away from zero
+ def : Pat<(f128 (fround f128:$vB)), (f128 (XSRQPI 0, $vB, 0))>;
+ // Round towards Zero
+ def : Pat<(f128 (ftrunc f128:$vB)), (f128 (XSRQPI 1, $vB, 1))>;
+ // Round towards +Inf
+ def : Pat<(f128 (fceil f128:$vB)), (f128 (XSRQPI 1, $vB, 2))>;
+ // Round towards -Inf
+ def : Pat<(f128 (ffloor f128:$vB)), (f128 (XSRQPI 1, $vB, 3))>;
+
+ // Use current rounding mode, [with Inexact] — the X form differs from
+ // XSRQPI only in the EX bit (1 vs 0) of the encoding above.
+ def : Pat<(f128 (frint f128:$vB)), (f128 (XSRQPIX 0, $vB, 3))>;
+
// Round Quad-Precision to Double-Extended Precision (fp80)
def XSRQPXP : Z23_VT5_R1_VB5_RMC2_EX1<63, 37, 0, "xsrqpxp", []>;
--- /dev/null
+; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
+; RUN: -enable-ppc-quad-precision -verify-machineinstrs < %s | FileCheck %s
+
+
+; ftrunc(f128) must select xsrqpi with R=1, RMC=1 (round toward zero).
+define void @qp_trunc(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %val = load fp128, fp128* %a, align 16
+  %trunc = tail call fp128 @llvm.trunc.f128(fp128 %val)
+  store fp128 %trunc, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_trunc
+; CHECK: xsrqpi 1, {{[0-9]+}}, {{[0-9]+}}, 1
+; CHECK: blr
+}
+declare fp128 @llvm.trunc.f128(fp128)
+
+; frint(f128) must select xsrqpix 0, ..., 3 (current rounding mode, with
+; Inexact).
+define void @qp_rint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %val = load fp128, fp128* %a, align 16
+  %rint = tail call fp128 @llvm.rint.f128(fp128 %val)
+  store fp128 %rint, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_rint
+; CHECK: xsrqpix 0, {{[0-9]+}}, {{[0-9]+}}, 3
+; CHECK: blr
+}
+declare fp128 @llvm.rint.f128(fp128)
+
+; fnearbyint(f128) must select xsrqpi with R=0, RMC=3 (current rounding mode).
+define void @qp_nearbyint(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %val = load fp128, fp128* %a, align 16
+  %near = tail call fp128 @llvm.nearbyint.f128(fp128 %val)
+  store fp128 %near, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_nearbyint
+; CHECK: xsrqpi 0, {{[0-9]+}}, {{[0-9]+}}, 3
+; CHECK: blr
+}
+declare fp128 @llvm.nearbyint.f128(fp128)
+
+; fround(f128) must select xsrqpi with R=0, RMC=0 (round to nearest, ties
+; away from zero).
+define void @qp_round(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %val = load fp128, fp128* %a, align 16
+  %rounded = tail call fp128 @llvm.round.f128(fp128 %val)
+  store fp128 %rounded, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_round
+; CHECK: xsrqpi 0, {{[0-9]+}}, {{[0-9]+}}, 0
+; CHECK: blr
+}
+declare fp128 @llvm.round.f128(fp128)
+
+; ffloor(f128) must select xsrqpi with R=1, RMC=3 (round toward -Inf).
+define void @qp_floor(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %val = load fp128, fp128* %a, align 16
+  %floored = tail call fp128 @llvm.floor.f128(fp128 %val)
+  store fp128 %floored, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_floor
+; CHECK: xsrqpi 1, {{[0-9]+}}, {{[0-9]+}}, 3
+; CHECK: blr
+}
+declare fp128 @llvm.floor.f128(fp128)
+
+; fceil(f128) must select xsrqpi with R=1, RMC=2 (round toward +Inf).
+define void @qp_ceil(fp128* nocapture readonly %a, fp128* nocapture %res) {
+entry:
+  %val = load fp128, fp128* %a, align 16
+  %ceiled = tail call fp128 @llvm.ceil.f128(fp128 %val)
+  store fp128 %ceiled, fp128* %res, align 16
+  ret void
+; CHECK-LABEL: qp_ceil
+; CHECK: xsrqpi 1, {{[0-9]+}}, {{[0-9]+}}, 2
+; CHECK: blr
+}
+declare fp128 @llvm.ceil.f128(fp128)
+