; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=ppc32 -mtriple=powerpc-unknown-linux-gnu < %s | FileCheck %s

; Plain i1 -> double conversion: lowered on ppc32 as a select between two
; constant-pool loads (0.0 / 1.0), keyed off "andi." of the low bit.
define double @test(i1 %X) {
; CHECK-LABEL: test:
; CHECK: # %bb.0:
; CHECK-NEXT: li 4, .LCPI0_0@l
; CHECK-NEXT: andi. 3, 3, 1
; CHECK-NEXT: addis 3, 4, .LCPI0_0@ha
; CHECK-NEXT: li 4, .LCPI0_1@l
; CHECK-NEXT: addis 4, 4, .LCPI0_1@ha
; CHECK-NEXT: bc 12, 1, .LBB0_1
; CHECK-NEXT: b .LBB0_2
; CHECK-NEXT: .LBB0_1:
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: lfs 1, 0(3)
; CHECK-NEXT: blr
  %Y = uitofp i1 %X to double
  ret double %Y
}

; Verify the cases won't crash because of missing chains

@foo = dso_local global double 0.000000e+00, align 8

; Strict-FP (constrained) unsigned i1 -> double, with a volatile store after
; the conversion: regression test that the conversion's chain is preserved.
define double @u1tofp(i1 %i, double %d) #0 {
; CHECK-LABEL: u1tofp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, .LCPI1_0@l
; CHECK-NEXT: andi. 3, 3, 1
; CHECK-NEXT: addis 3, 4, .LCPI1_0@ha
; CHECK-NEXT: li 4, .LCPI1_1@l
; CHECK-NEXT: addis 4, 4, .LCPI1_1@ha
; CHECK-NEXT: bc 12, 1, .LBB1_1
; CHECK-NEXT: b .LBB1_2
; CHECK-NEXT: .LBB1_1: # %entry
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: .LBB1_2: # %entry
; CHECK-NEXT: fmr 0, 1
; CHECK-NEXT: lfs 1, 0(3)
; CHECK-NEXT: lis 3, foo@ha
; CHECK-NEXT: stfd 0, foo@l(3)
; CHECK-NEXT: blr
entry:
  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i1(i1 %i, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  store volatile double %d, double* @foo, align 8
  ret double %conv
}

; Strict-FP (constrained) signed i1 -> double: same chain-preservation check
; as u1tofp, for the signed intrinsic.
define double @s1tofp(i1 %i, double %d) #0 {
; CHECK-LABEL: s1tofp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: li 4, .LCPI2_0@l
; CHECK-NEXT: andi. 3, 3, 1
; CHECK-NEXT: addis 3, 4, .LCPI2_0@ha
; CHECK-NEXT: li 4, .LCPI2_1@l
; CHECK-NEXT: addis 4, 4, .LCPI2_1@ha
; CHECK-NEXT: bc 12, 1, .LBB2_1
; CHECK-NEXT: b .LBB2_2
; CHECK-NEXT: .LBB2_1: # %entry
; CHECK-NEXT: addi 3, 4, 0
; CHECK-NEXT: .LBB2_2: # %entry
; CHECK-NEXT: fmr 0, 1
; CHECK-NEXT: lfs 1, 0(3)
; CHECK-NEXT: lis 3, foo@ha
; CHECK-NEXT: stfd 0, foo@l(3)
; CHECK-NEXT: blr
entry:
  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i1(i1 %i, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  store volatile double %d, double* @foo, align 8
  ret double %conv
}

declare double @llvm.experimental.constrained.uitofp.f64.i1(i1, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i1(i1, metadata, metadata)

attributes #0 = { strictfp }