STW %2:gprc, %0:gprc, 100
BLR8 implicit $lr8, implicit $rm
...
+---
+# MIR test: an RLWINM whose result feeds an ANDI_rec.
+# The CHECK lines below expect BOTH instructions to survive, i.e. the
+# rotate/mask is NOT folded into the and-immediate for this operand
+# combination (RLWINM mask 28-31 vs. ANDI mask 4).
+# NOTE(review): presumably exercises the PPC MI peephole's RLWINM
+# folding — confirm which pass the containing test file runs.
+name: testFoldRLWINMAndANDI
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x3
+ ; CHECK-LABEL: name: testFoldRLWINMAndANDI
+ ; CHECK: liveins: $x3
+ ; CHECK: [[COPY:%[0-9]+]]:g8rc = COPY $x3
+ ; CHECK: [[COPY1:%[0-9]+]]:gprc = COPY [[COPY]].sub_32
+ ; CHECK: [[RLWINM:%[0-9]+]]:gprc = RLWINM [[COPY1]], 4, 28, 31
+ ; CHECK: [[ANDI_rec:%[0-9]+]]:gprc = ANDI_rec [[RLWINM]], 4, implicit-def $cr0
+ ; CHECK: BLR8 implicit $lr8, implicit $rm
+ %0:g8rc = COPY $x3
+ %1:gprc = COPY %0.sub_32:g8rc
+ %2:gprc = RLWINM %1:gprc, 4, 28, 31
+ %3:gprc = ANDI_rec %2:gprc, 4, implicit-def $cr0
+ BLR8 implicit $lr8, implicit $rm
+...
ret i32 %0
}
declare i32 @llvm.ppc.vsx.xvtsqrtsp(<4 x float>)
+
+; Tests the xvtdivdp intrinsic result masked with `and 2` and used in an
+; eq-compare/select. The CHECK lines (autogenerated-style) pin the codegen:
+; read CR0 via mfocrf, shift the field into the low nibble, test bit 1 with
+; andi., then iseleq picks 222 (masked bit clear) or 22 (bit set).
+define i32 @xvtdivdp_andi(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: xvtdivdp_andi:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvtdivdp cr0, v2, v3
+; CHECK-NEXT: li r4, 222
+; CHECK-NEXT: mfocrf r3, 128
+; CHECK-NEXT: srwi r3, r3, 28
+; CHECK-NEXT: andi. r3, r3, 2
+; CHECK-NEXT: li r3, 22
+; CHECK-NEXT: iseleq r3, r4, r3
+; CHECK-NEXT: blr
+ entry:
+ %0 = tail call i32 @llvm.ppc.vsx.xvtdivdp(<2 x double> %a, <2 x double> %b)
+ %1 = and i32 %0, 2
+ %cmp.not = icmp eq i32 %1, 0
+ %retval.0 = select i1 %cmp.not, i32 222, i32 22
+ ret i32 %retval.0
+}
+
+; Tests the xvtdivdp intrinsic result shifted right by 4 and masked to one
+; bit ((%0 >> 4) & 1). CHECK lines pin the emitted sequence: mfocrf/srwi
+; to materialize the CR0 field, then a single rlwinm (rotate-left 28 ==
+; logical shift right 4, keep bit 31) for the shift+mask — i.e. the shift
+; and the and fold into one rotate-and-mask instruction.
+define i32 @xvtdivdp_shift(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: xvtdivdp_shift:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvtdivdp cr0, v2, v3
+; CHECK-NEXT: mfocrf r3, 128
+; CHECK-NEXT: srwi r3, r3, 28
+; CHECK-NEXT: rlwinm r3, r3, 28, 31, 31
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i32 @llvm.ppc.vsx.xvtdivdp(<2 x double> %a, <2 x double> %b)
+ %1 = lshr i32 %0, 4
+ %.lobit = and i32 %1, 1
+ ret i32 %.lobit
+}