}
; Function Attrs: norecurse nounwind
+define void @sdwConv2qp_04(fp128* nocapture %a, i1 %b) {
+; sitofp i1 -> fp128. P9: select -1/0 into a GPR, then mtvsrwa/xscvsdqp.
+; P8 has no quad-precision FP hardware, so it isolates the low bit
+; (clrldi), negates it to get 0/-1, and calls the __floatsikf libcall.
+; CHECK-LABEL: sdwConv2qp_04:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: andi. r4, r4, 1
+; CHECK-NEXT: li r4, 0
+; CHECK-NEXT: li r5, -1
+; CHECK-NEXT: iselgt r4, r5, r4
+; CHECK-NEXT: mtvsrwa v2, r4
+; CHECK-NEXT: xscvsdqp v2, v2
+; CHECK-NEXT: stxv v2, 0(r3)
+; CHECK-NEXT: blr
+;
+; CHECK-P8-LABEL: sdwConv2qp_04:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: mflr r0
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: .cfi_offset r30, -16
+; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT: std r0, 16(r1)
+; CHECK-P8-NEXT: stdu r1, -48(r1)
+; CHECK-P8-NEXT: mr r30, r3
+; CHECK-P8-NEXT: clrldi r3, r4, 63
+; CHECK-P8-NEXT: neg r3, r3
+; CHECK-P8-NEXT: bl __floatsikf
+; CHECK-P8-NEXT: nop
+; CHECK-P8-NEXT: std r4, 8(r30)
+; CHECK-P8-NEXT: std r3, 0(r30)
+; CHECK-P8-NEXT: addi r1, r1, 48
+; CHECK-P8-NEXT: ld r0, 16(r1)
+; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT: mtlr r0
+; CHECK-P8-NEXT: blr
+entry:
+ %conv = sitofp i1 %b to fp128
+ store fp128 %conv, fp128* %a, align 16
+ ret void
+
+}
+
+; Function Attrs: norecurse nounwind
define void @udwConv2qp(fp128* nocapture %a, i64 %b) {
; CHECK-LABEL: udwConv2qp:
; CHECK: # %bb.0: # %entry
}
; Function Attrs: norecurse nounwind
+define void @udwConv2qp_04(fp128* nocapture %a, i1 %b) {
+; uitofp i1 -> fp128. P9 zero-extends the low bit (clrlwi); the signed
+; convert xscvsdqp is still correct because 0 and 1 are non-negative.
+; P8 calls the __floatunsikf libcall on the masked bit.
+; CHECK-LABEL: udwConv2qp_04:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: clrlwi r4, r4, 31
+; CHECK-NEXT: mtvsrwa v2, r4
+; CHECK-NEXT: xscvsdqp v2, v2
+; CHECK-NEXT: stxv v2, 0(r3)
+; CHECK-NEXT: blr
+;
+; CHECK-P8-LABEL: udwConv2qp_04:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: mflr r0
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 48
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: .cfi_offset r30, -16
+; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-P8-NEXT: std r0, 16(r1)
+; CHECK-P8-NEXT: stdu r1, -48(r1)
+; CHECK-P8-NEXT: mr r30, r3
+; CHECK-P8-NEXT: clrldi r3, r4, 63
+; CHECK-P8-NEXT: bl __floatunsikf
+; CHECK-P8-NEXT: nop
+; CHECK-P8-NEXT: std r4, 8(r30)
+; CHECK-P8-NEXT: std r3, 0(r30)
+; CHECK-P8-NEXT: addi r1, r1, 48
+; CHECK-P8-NEXT: ld r0, 16(r1)
+; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-P8-NEXT: mtlr r0
+; CHECK-P8-NEXT: blr
+entry:
+ %conv = uitofp i1 %b to fp128
+ store fp128 %conv, fp128* %a, align 16
+ ret void
+
+}
+
+; Function Attrs: norecurse nounwind
define fp128* @sdwConv2qp_testXForm(fp128* returned %sink,
; CHECK-LABEL: sdwConv2qp_testXForm:
; CHECK: # %bb.0: # %entry
ret i128 %conv
}
+; Function Attrs: norecurse nounwind readonly
+; fptoui fp128 -> i1. P9 uses the signed truncating convert
+; xscvqpswz; only the low bit of r3 is meaningful to callers.
+; P8 calls the __fixunskfsi libcall on the loaded fp128 value.
+define i1 @qpConv2ui1(fp128* nocapture readonly %a) {
+; CHECK-LABEL: qpConv2ui1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv v2, 0(r3)
+; CHECK-NEXT: xscvqpswz v2, v2
+; CHECK-NEXT: mfvsrwz r3, v2
+; CHECK-NEXT: blr
+;
+; CHECK-P8-LABEL: qpConv2ui1:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: mflr r0
+; CHECK-P8-NEXT: std r0, 16(r1)
+; CHECK-P8-NEXT: stdu r1, -32(r1)
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: ld r5, 0(r3)
+; CHECK-P8-NEXT: ld r4, 8(r3)
+; CHECK-P8-NEXT: mr r3, r5
+; CHECK-P8-NEXT: bl __fixunskfsi
+; CHECK-P8-NEXT: nop
+; CHECK-P8-NEXT: addi r1, r1, 32
+; CHECK-P8-NEXT: ld r0, 16(r1)
+; CHECK-P8-NEXT: mtlr r0
+; CHECK-P8-NEXT: blr
+entry:
+ %0 = load fp128, fp128* %a, align 16
+ %conv = fptoui fp128 %0 to i1
+ ret i1 %conv
+}
+
+; Function Attrs: norecurse nounwind readonly
+; fptosi fp128 -> i1. Same P9 sequence as the unsigned case
+; (xscvqpswz + mfvsrwz); P8 calls the signed __fixkfsi libcall.
+define i1 @qpConv2si1(fp128* nocapture readonly %a) {
+; CHECK-LABEL: qpConv2si1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv v2, 0(r3)
+; CHECK-NEXT: xscvqpswz v2, v2
+; CHECK-NEXT: mfvsrwz r3, v2
+; CHECK-NEXT: blr
+;
+; CHECK-P8-LABEL: qpConv2si1:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: mflr r0
+; CHECK-P8-NEXT: std r0, 16(r1)
+; CHECK-P8-NEXT: stdu r1, -32(r1)
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: ld r5, 0(r3)
+; CHECK-P8-NEXT: ld r4, 8(r3)
+; CHECK-P8-NEXT: mr r3, r5
+; CHECK-P8-NEXT: bl __fixkfsi
+; CHECK-P8-NEXT: nop
+; CHECK-P8-NEXT: addi r1, r1, 32
+; CHECK-P8-NEXT: ld r0, 16(r1)
+; CHECK-P8-NEXT: mtlr r0
+; CHECK-P8-NEXT: blr
+entry:
+ %0 = load fp128, fp128* %a, align 16
+ %conv = fptosi fp128 %0 to i1
+ ret i1 %conv
+}
; RUN: < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx \
; RUN: | FileCheck %s -check-prefix=NOVSX
+declare i1 @llvm.experimental.constrained.fptosi.i1.f128(fp128, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f128(fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f128(fp128, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f128(fp128, metadata)
+declare i1 @llvm.experimental.constrained.fptoui.i1.f128(fp128, metadata)
+declare i1 @llvm.experimental.constrained.fptosi.i1.ppcf128(ppc_fp128, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.ppcf128(ppc_fp128, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.ppcf128(ppc_fp128, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128, metadata)
+declare i1 @llvm.experimental.constrained.fptoui.i1.ppcf128(ppc_fp128, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.ppcf128(ppc_fp128, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.ppcf128(ppc_fp128, metadata)
declare i128 @llvm.experimental.constrained.fptosi.i128.f128(fp128, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f128(fp128, metadata)
+declare fp128 @llvm.experimental.constrained.sitofp.f128.i1(i1, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i32(i32, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sitofp.f128.i64(i64, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.uitofp.f128.i1(i1, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i32(i32, metadata, metadata)
declare fp128 @llvm.experimental.constrained.uitofp.f128.i64(i64, metadata, metadata)
+declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i1(i1, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i32(i32, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i64(i64, metadata, metadata)
+declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i1(i1, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i32(i32, metadata, metadata)
declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i64(i64, metadata, metadata)
ret i128 %conv
}
+define i1 @q_to_s1(fp128 %m) #0 {
+; Strict (constrained) fptosi fp128 -> i1 with fpexcept.strict.
+; P9 uses the hardware truncating convert; P8 and NOVSX fall back
+; to the __fixkfsi libcall.
+; P8-LABEL: q_to_s1:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mflr r0
+; P8-NEXT: std r0, 16(r1)
+; P8-NEXT: stdu r1, -112(r1)
+; P8-NEXT: .cfi_def_cfa_offset 112
+; P8-NEXT: .cfi_offset lr, 16
+; P8-NEXT: bl __fixkfsi
+; P8-NEXT: nop
+; P8-NEXT: addi r1, r1, 112
+; P8-NEXT: ld r0, 16(r1)
+; P8-NEXT: mtlr r0
+; P8-NEXT: blr
+;
+; P9-LABEL: q_to_s1:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscvqpswz v2, v2
+; P9-NEXT: mfvsrwz r3, v2
+; P9-NEXT: blr
+;
+; NOVSX-LABEL: q_to_s1:
+; NOVSX: # %bb.0: # %entry
+; NOVSX-NEXT: mflr r0
+; NOVSX-NEXT: std r0, 16(r1)
+; NOVSX-NEXT: stdu r1, -32(r1)
+; NOVSX-NEXT: .cfi_def_cfa_offset 32
+; NOVSX-NEXT: .cfi_offset lr, 16
+; NOVSX-NEXT: bl __fixkfsi
+; NOVSX-NEXT: nop
+; NOVSX-NEXT: addi r1, r1, 32
+; NOVSX-NEXT: ld r0, 16(r1)
+; NOVSX-NEXT: mtlr r0
+; NOVSX-NEXT: blr
+entry:
+ %conv = tail call i1 @llvm.experimental.constrained.fptosi.i1.f128(fp128 %m, metadata !"fpexcept.strict") #0
+ ret i1 %conv
+}
+
+define i1 @q_to_u1(fp128 %m) #0 {
+; Strict (constrained) fptoui fp128 -> i1 with fpexcept.strict.
+; P9 reuses the signed truncating convert (the i1 result only needs
+; the low bit); P8 and NOVSX call the __fixunskfsi libcall.
+; P8-LABEL: q_to_u1:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mflr r0
+; P8-NEXT: std r0, 16(r1)
+; P8-NEXT: stdu r1, -112(r1)
+; P8-NEXT: .cfi_def_cfa_offset 112
+; P8-NEXT: .cfi_offset lr, 16
+; P8-NEXT: bl __fixunskfsi
+; P8-NEXT: nop
+; P8-NEXT: addi r1, r1, 112
+; P8-NEXT: ld r0, 16(r1)
+; P8-NEXT: mtlr r0
+; P8-NEXT: blr
+;
+; P9-LABEL: q_to_u1:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xscvqpswz v2, v2
+; P9-NEXT: mfvsrwz r3, v2
+; P9-NEXT: blr
+;
+; NOVSX-LABEL: q_to_u1:
+; NOVSX: # %bb.0: # %entry
+; NOVSX-NEXT: mflr r0
+; NOVSX-NEXT: std r0, 16(r1)
+; NOVSX-NEXT: stdu r1, -32(r1)
+; NOVSX-NEXT: .cfi_def_cfa_offset 32
+; NOVSX-NEXT: .cfi_offset lr, 16
+; NOVSX-NEXT: bl __fixunskfsi
+; NOVSX-NEXT: nop
+; NOVSX-NEXT: addi r1, r1, 32
+; NOVSX-NEXT: ld r0, 16(r1)
+; NOVSX-NEXT: mtlr r0
+; NOVSX-NEXT: blr
+entry:
+ %conv = tail call i1 @llvm.experimental.constrained.fptoui.i1.f128(fp128 %m, metadata !"fpexcept.strict") #0
+ ret i1 %conv
+}
+
define i128 @ppcq_to_i128(ppc_fp128 %m) #0 {
; P8-LABEL: ppcq_to_i128:
; P8: # %bb.0: # %entry
; P8-NEXT: .cfi_def_cfa_offset 128
; P8-NEXT: .cfi_offset lr, 16
; P8-NEXT: .cfi_offset r30, -16
-; P8-NEXT: addis r3, r2, .LCPI11_0@toc@ha
+; P8-NEXT: addis r3, r2, .LCPI13_0@toc@ha
; P8-NEXT: xxlxor f3, f3, f3
; P8-NEXT: std r30, 112(r1) # 8-byte Folded Spill
-; P8-NEXT: lfs f0, .LCPI11_0@toc@l(r3)
+; P8-NEXT: lfs f0, .LCPI13_0@toc@l(r3)
; P8-NEXT: fcmpo cr0, f2, f3
; P8-NEXT: lis r3, -32768
; P8-NEXT: xxlxor f3, f3, f3
; P8-NEXT: crandc 4*cr5+gt, 4*cr1+lt, 4*cr1+eq
; P8-NEXT: cror 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
; P8-NEXT: isel r30, 0, r3, 4*cr5+lt
-; P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P8-NEXT: bc 12, 4*cr5+lt, .LBB13_2
; P8-NEXT: # %bb.1: # %entry
; P8-NEXT: fmr f3, f0
-; P8-NEXT: .LBB11_2: # %entry
+; P8-NEXT: .LBB13_2: # %entry
; P8-NEXT: xxlxor f4, f4, f4
; P8-NEXT: bl __gcc_qsub
; P8-NEXT: nop
; P9-NEXT: std r30, -16(r1) # 8-byte Folded Spill
; P9-NEXT: std r0, 16(r1)
; P9-NEXT: stdu r1, -48(r1)
-; P9-NEXT: addis r3, r2, .LCPI11_0@toc@ha
+; P9-NEXT: addis r3, r2, .LCPI13_0@toc@ha
; P9-NEXT: xxlxor f3, f3, f3
-; P9-NEXT: lfs f0, .LCPI11_0@toc@l(r3)
+; P9-NEXT: lfs f0, .LCPI13_0@toc@l(r3)
; P9-NEXT: fcmpo cr1, f2, f3
; P9-NEXT: lis r3, -32768
; P9-NEXT: fcmpo cr0, f1, f0
; P9-NEXT: crandc 4*cr5+gt, lt, eq
; P9-NEXT: cror 4*cr5+lt, 4*cr5+gt, 4*cr5+lt
; P9-NEXT: isel r30, 0, r3, 4*cr5+lt
-; P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
+; P9-NEXT: bc 12, 4*cr5+lt, .LBB13_2
; P9-NEXT: # %bb.1: # %entry
; P9-NEXT: fmr f3, f0
-; P9-NEXT: .LBB11_2: # %entry
+; P9-NEXT: .LBB13_2: # %entry
; P9-NEXT: xxlxor f4, f4, f4
; P9-NEXT: bl __gcc_qsub
; P9-NEXT: nop
; NOVSX-NEXT: .cfi_def_cfa_offset 48
; NOVSX-NEXT: .cfi_offset lr, 16
; NOVSX-NEXT: .cfi_offset cr2, 8
-; NOVSX-NEXT: addis r3, r2, .LCPI11_0@toc@ha
-; NOVSX-NEXT: addis r4, r2, .LCPI11_1@toc@ha
-; NOVSX-NEXT: lfs f0, .LCPI11_0@toc@l(r3)
-; NOVSX-NEXT: lfs f4, .LCPI11_1@toc@l(r4)
+; NOVSX-NEXT: addis r3, r2, .LCPI13_0@toc@ha
+; NOVSX-NEXT: addis r4, r2, .LCPI13_1@toc@ha
+; NOVSX-NEXT: lfs f0, .LCPI13_0@toc@l(r3)
+; NOVSX-NEXT: lfs f4, .LCPI13_1@toc@l(r4)
; NOVSX-NEXT: fcmpo cr0, f1, f0
; NOVSX-NEXT: fcmpo cr1, f2, f4
; NOVSX-NEXT: fmr f3, f4
; NOVSX-NEXT: crand 4*cr5+lt, eq, 4*cr1+lt
; NOVSX-NEXT: crandc 4*cr5+gt, lt, eq
; NOVSX-NEXT: cror 4*cr2+lt, 4*cr5+gt, 4*cr5+lt
-; NOVSX-NEXT: bc 12, 4*cr2+lt, .LBB11_2
+; NOVSX-NEXT: bc 12, 4*cr2+lt, .LBB13_2
; NOVSX-NEXT: # %bb.1: # %entry
; NOVSX-NEXT: fmr f3, f0
-; NOVSX-NEXT: .LBB11_2: # %entry
+; NOVSX-NEXT: .LBB13_2: # %entry
; NOVSX-NEXT: bl __gcc_qsub
; NOVSX-NEXT: nop
; NOVSX-NEXT: mffs f0
ret i32 %conv
}
+define fp128 @i1_to_q(i1 signext %m) #0 {
+; Strict (constrained) sitofp i1 -> fp128. The signext ABI attribute
+; means r3 arrives already sign-extended, so P9 can convert directly
+; with mtvsrwa/xscvsdqp; P8 and NOVSX call __floatsikf.
+; P8-LABEL: i1_to_q:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mflr r0
+; P8-NEXT: std r0, 16(r1)
+; P8-NEXT: stdu r1, -112(r1)
+; P8-NEXT: .cfi_def_cfa_offset 112
+; P8-NEXT: .cfi_offset lr, 16
+; P8-NEXT: bl __floatsikf
+; P8-NEXT: nop
+; P8-NEXT: addi r1, r1, 112
+; P8-NEXT: ld r0, 16(r1)
+; P8-NEXT: mtlr r0
+; P8-NEXT: blr
+;
+; P9-LABEL: i1_to_q:
+; P9: # %bb.0: # %entry
+; P9-NEXT: mtvsrwa v2, r3
+; P9-NEXT: xscvsdqp v2, v2
+; P9-NEXT: blr
+;
+; NOVSX-LABEL: i1_to_q:
+; NOVSX: # %bb.0: # %entry
+; NOVSX-NEXT: mflr r0
+; NOVSX-NEXT: std r0, 16(r1)
+; NOVSX-NEXT: stdu r1, -32(r1)
+; NOVSX-NEXT: .cfi_def_cfa_offset 32
+; NOVSX-NEXT: .cfi_offset lr, 16
+; NOVSX-NEXT: bl __floatsikf
+; NOVSX-NEXT: nop
+; NOVSX-NEXT: addi r1, r1, 32
+; NOVSX-NEXT: ld r0, 16(r1)
+; NOVSX-NEXT: mtlr r0
+; NOVSX-NEXT: blr
+entry:
+ %conv = tail call fp128 @llvm.experimental.constrained.sitofp.f128.i1(i1 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define fp128 @u1_to_q(i1 zeroext %m) #0 {
+; Strict (constrained) uitofp i1 -> fp128. The zeroext ABI attribute
+; guarantees r3 is 0 or 1, so the signed convert (mtvsrwa/xscvsdqp)
+; is safe on P9; P8 and NOVSX call __floatunsikf.
+; P8-LABEL: u1_to_q:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mflr r0
+; P8-NEXT: std r0, 16(r1)
+; P8-NEXT: stdu r1, -112(r1)
+; P8-NEXT: .cfi_def_cfa_offset 112
+; P8-NEXT: .cfi_offset lr, 16
+; P8-NEXT: bl __floatunsikf
+; P8-NEXT: nop
+; P8-NEXT: addi r1, r1, 112
+; P8-NEXT: ld r0, 16(r1)
+; P8-NEXT: mtlr r0
+; P8-NEXT: blr
+;
+; P9-LABEL: u1_to_q:
+; P9: # %bb.0: # %entry
+; P9-NEXT: mtvsrwa v2, r3
+; P9-NEXT: xscvsdqp v2, v2
+; P9-NEXT: blr
+;
+; NOVSX-LABEL: u1_to_q:
+; NOVSX: # %bb.0: # %entry
+; NOVSX-NEXT: mflr r0
+; NOVSX-NEXT: std r0, 16(r1)
+; NOVSX-NEXT: stdu r1, -32(r1)
+; NOVSX-NEXT: .cfi_def_cfa_offset 32
+; NOVSX-NEXT: .cfi_offset lr, 16
+; NOVSX-NEXT: bl __floatunsikf
+; NOVSX-NEXT: nop
+; NOVSX-NEXT: addi r1, r1, 32
+; NOVSX-NEXT: ld r0, 16(r1)
+; NOVSX-NEXT: mtlr r0
+; NOVSX-NEXT: blr
+entry:
+ %conv = tail call fp128 @llvm.experimental.constrained.uitofp.f128.i1(i1 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %conv
+}
+
+define ppc_fp128 @i1_to_ppcq(i1 signext %m) #0 {
+; Strict (constrained) sitofp i1 -> ppc_fp128 (double-double). The low
+; double (f2) is zeroed since an i1 value converts exactly into the
+; high double (f1).
+; P8-LABEL: i1_to_ppcq:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mtfprwa f0, r3
+; P8-NEXT: xxlxor f2, f2, f2
+; P8-NEXT: xscvsxddp f1, f0
+; P8-NEXT: blr
+;
+; P9-LABEL: i1_to_ppcq:
+; P9: # %bb.0: # %entry
+; P9-NEXT: mtfprwa f0, r3
+; P9-NEXT: xxlxor f2, f2, f2
+; P9-NEXT: xscvsxddp f1, f0
+; P9-NEXT: blr
+;
+; NOVSX-LABEL: i1_to_ppcq:
+; NOVSX: # %bb.0: # %entry
+; NOVSX-NEXT: addi r4, r1, -4
+; NOVSX-NEXT: stw r3, -4(r1)
+; NOVSX-NEXT: addis r3, r2, .LCPI16_0@toc@ha
+; NOVSX-NEXT: lfiwax f0, 0, r4
+; NOVSX-NEXT: lfs f2, .LCPI16_0@toc@l(r3)
+; NOVSX-NEXT: fcfid f1, f0
+; NOVSX-NEXT: blr
+entry:
+ %conv = tail call ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i1(i1 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret ppc_fp128 %conv
+}
+
+define ppc_fp128 @u1_to_ppcq(i1 zeroext %m) #0 {
+; Strict (constrained) uitofp i1 -> ppc_fp128. zeroext guarantees r3
+; is 0 or 1, so the signed sequence (mtfprwa/xscvsxddp) is reused;
+; the low double (f2) is zeroed.
+; P8-LABEL: u1_to_ppcq:
+; P8: # %bb.0: # %entry
+; P8-NEXT: mtfprwa f0, r3
+; P8-NEXT: xxlxor f2, f2, f2
+; P8-NEXT: xscvsxddp f1, f0
+; P8-NEXT: blr
+;
+; P9-LABEL: u1_to_ppcq:
+; P9: # %bb.0: # %entry
+; P9-NEXT: mtfprwa f0, r3
+; P9-NEXT: xxlxor f2, f2, f2
+; P9-NEXT: xscvsxddp f1, f0
+; P9-NEXT: blr
+;
+; NOVSX-LABEL: u1_to_ppcq:
+; NOVSX: # %bb.0: # %entry
+; NOVSX-NEXT: addi r4, r1, -4
+; NOVSX-NEXT: stw r3, -4(r1)
+; NOVSX-NEXT: addis r3, r2, .LCPI17_0@toc@ha
+; NOVSX-NEXT: lfiwax f0, 0, r4
+; NOVSX-NEXT: lfs f2, .LCPI17_0@toc@l(r3)
+; NOVSX-NEXT: fcfid f1, f0
+; NOVSX-NEXT: blr
+entry:
+ %conv = tail call ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i1(i1 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret ppc_fp128 %conv
+}
+
define fp128 @i32_to_q(i32 signext %m) #0 {
; P8-LABEL: i32_to_q:
; P8: # %bb.0: # %entry
}
+; Function Attrs: nounwind readnone
+; fptosi double -> i1: converted with fctiwz/xscvdpsxws like a full
+; i32; callers only consume the low bit of r3.
+define i1 @f64_to_si1(double %X) #0 {
+; FPCVT-LABEL: f64_to_si1:
+; FPCVT: # %bb.0: # %entry
+; FPCVT-NEXT: fctiwz 0, 1
+; FPCVT-NEXT: addi 3, 1, -4
+; FPCVT-NEXT: stfiwx 0, 0, 3
+; FPCVT-NEXT: lwz 3, -4(1)
+; FPCVT-NEXT: blr
+;
+; PPC64-LABEL: f64_to_si1:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: addi 3, 1, -4
+; PPC64-NEXT: fctiwz 0, 1
+; PPC64-NEXT: stfiwx 0, 0, 3
+; PPC64-NEXT: lwz 3, -4(1)
+; PPC64-NEXT: blr
+;
+; PWR9-LABEL: f64_to_si1:
+; PWR9: # %bb.0: # %entry
+; PWR9-NEXT: xscvdpsxws 0, 1
+; PWR9-NEXT: mffprwz 3, 0
+; PWR9-NEXT: blr
+entry:
+ %conv = fptosi double %X to i1
+ ret i1 %conv
+
+}
+
+; Function Attrs: nounwind readnone
+; fptoui double -> i1: the same signed-convert sequence as the signed
+; variant is emitted, since only the low bit of the result matters.
+define i1 @f64_to_ui1(double %X) #0 {
+; FPCVT-LABEL: f64_to_ui1:
+; FPCVT: # %bb.0: # %entry
+; FPCVT-NEXT: fctiwz 0, 1
+; FPCVT-NEXT: addi 3, 1, -4
+; FPCVT-NEXT: stfiwx 0, 0, 3
+; FPCVT-NEXT: lwz 3, -4(1)
+; FPCVT-NEXT: blr
+;
+; PPC64-LABEL: f64_to_ui1:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: addi 3, 1, -4
+; PPC64-NEXT: fctiwz 0, 1
+; PPC64-NEXT: stfiwx 0, 0, 3
+; PPC64-NEXT: lwz 3, -4(1)
+; PPC64-NEXT: blr
+;
+; PWR9-LABEL: f64_to_ui1:
+; PWR9: # %bb.0: # %entry
+; PWR9-NEXT: xscvdpsxws 0, 1
+; PWR9-NEXT: mffprwz 3, 0
+; PWR9-NEXT: blr
+entry:
+ %conv = fptoui double %X to i1
+ ret i1 %conv
+
+}
+
+; Function Attrs: nounwind readnone
+; sitofp i1 -> double: the bit is materialized as 0/-1 in a GPR
+; (andi. + isel, or a branchy select on plain PPC64), then converted
+; via lfiwax/fcfid or mtfprwa/xscvsxddp.
+define double @si1_to_f64(i1 %X) #0 {
+; FPCVT-LABEL: si1_to_f64:
+; FPCVT: # %bb.0: # %entry
+; FPCVT-NEXT: andi. 3, 3, 1
+; FPCVT-NEXT: li 4, 0
+; FPCVT-NEXT: li 3, -1
+; FPCVT-NEXT: iselgt 3, 3, 4
+; FPCVT-NEXT: addi 4, 1, -4
+; FPCVT-NEXT: stw 3, -4(1)
+; FPCVT-NEXT: lfiwax 0, 0, 4
+; FPCVT-NEXT: fcfid 1, 0
+; FPCVT-NEXT: blr
+;
+; PPC64-LABEL: si1_to_f64:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: andi. 3, 3, 1
+; PPC64-NEXT: li 4, -1
+; PPC64-NEXT: li 3, 0
+; PPC64-NEXT: bc 12, 1, .LBB6_1
+; PPC64-NEXT: b .LBB6_2
+; PPC64-NEXT: .LBB6_1: # %entry
+; PPC64-NEXT: addi 3, 4, 0
+; PPC64-NEXT: .LBB6_2: # %entry
+; PPC64-NEXT: std 3, -8(1)
+; PPC64-NEXT: lfd 0, -8(1)
+; PPC64-NEXT: fcfid 1, 0
+; PPC64-NEXT: blr
+;
+; PWR9-LABEL: si1_to_f64:
+; PWR9: # %bb.0: # %entry
+; PWR9-NEXT: andi. 3, 3, 1
+; PWR9-NEXT: li 3, 0
+; PWR9-NEXT: li 4, -1
+; PWR9-NEXT: iselgt 3, 4, 3
+; PWR9-NEXT: mtfprwa 0, 3
+; PWR9-NEXT: xscvsxddp 1, 0
+; PWR9-NEXT: blr
+entry:
+ %conv = sitofp i1 %X to double
+ ret double %conv
+
+}
+
+; Function Attrs: nounwind readnone
+; uitofp i1 -> double: the low bit is zero-extended (clrlwi/clrldi)
+; and converted with a signed convert, which is exact for 0/1.
+define double @ui1_to_f64(i1 %X) #0 {
+; FPCVT-LABEL: ui1_to_f64:
+; FPCVT: # %bb.0: # %entry
+; FPCVT-NEXT: clrlwi 3, 3, 31
+; FPCVT-NEXT: addi 4, 1, -4
+; FPCVT-NEXT: stw 3, -4(1)
+; FPCVT-NEXT: lfiwax 0, 0, 4
+; FPCVT-NEXT: fcfid 1, 0
+; FPCVT-NEXT: blr
+;
+; PPC64-LABEL: ui1_to_f64:
+; PPC64: # %bb.0: # %entry
+; PPC64-NEXT: clrldi 3, 3, 63
+; PPC64-NEXT: std 3, -8(1)
+; PPC64-NEXT: lfd 0, -8(1)
+; PPC64-NEXT: fcfid 1, 0
+; PPC64-NEXT: blr
+;
+; PWR9-LABEL: ui1_to_f64:
+; PWR9: # %bb.0: # %entry
+; PWR9-NEXT: clrlwi 3, 3, 31
+; PWR9-NEXT: mtfprwa 0, 3
+; PWR9-NEXT: xscvsxddp 1, 0
+; PWR9-NEXT: blr
+entry:
+ %conv = uitofp i1 %X to double
+ ret double %conv
+
+}
attributes #0 = { nounwind readnone "no-signed-zeros-fp-math"="true" }