ret i32 %1
}
-; This test is similar to the type legalized versio of the fshl/fshr tests, but
+; This test is similar to the type legalized version of the fshl/fshr tests, but
; instead of having the same input to both shifts it has different inputs. Make
; sure we don't match it as a roriw.
; FIXME: We're currently missing a check that the inputs are the same.
ret i32 %c
}
+; This is similar to the type legalized roriw pattern, but the and mask is more
+; than 32 bits so the lshr doesn't shift zeroes into the lower 32 bits. Make
+; sure we don't match it to roriw.
+; FIXME: We are currently truncating the mask to 32 bits before checking.
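+; With this mask (0xFFFFFFFFFFFFFFFE), bit 32 of %b stays live, so the lshr
+; puts %x[32] into bit 31 and %d[31] = %x[0] | %x[32]; a roriw would leave only
+; %x[0] there before the sign extension.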
+define i64 @roriw_bug(i64 %x) nounwind {
+; RV64I-LABEL: roriw_bug:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a0, 31
+; RV64I-NEXT: andi a0, a0, -2
+; RV64I-NEXT: srli a2, a0, 1
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64IB-LABEL: roriw_bug:
+; RV64IB: # %bb.0:
+; RV64IB-NEXT: andi a1, a0, -2
+; RV64IB-NEXT: roriw a0, a0, 1
+; RV64IB-NEXT: xor a0, a1, a0
+; RV64IB-NEXT: ret
+;
+; RV64IBB-LABEL: roriw_bug:
+; RV64IBB: # %bb.0:
+; RV64IBB-NEXT: andi a1, a0, -2
+; RV64IBB-NEXT: roriw a0, a0, 1
+; RV64IBB-NEXT: xor a0, a1, a0
+; RV64IBB-NEXT: ret
+;
+; RV64IBP-LABEL: roriw_bug:
+; RV64IBP: # %bb.0:
+; RV64IBP-NEXT: andi a1, a0, -2
+; RV64IBP-NEXT: roriw a0, a0, 1
+; RV64IBP-NEXT: xor a0, a1, a0
+; RV64IBP-NEXT: ret
+ %a = shl i64 %x, 31
+ %b = and i64 %x, 18446744073709551614
+ %c = lshr i64 %b, 1
+ %d = or i64 %a, %c
+ %e = shl i64 %d, 32
+ %f = ashr i64 %e, 32 ; shl+ashr by 32 sign-extends the low 32 bits of %d.
+ %g = xor i64 %b, %f ; to increase the use count on %b to disable SimplifyDemandedBits.
+ ret i64 %g
+}
+
define i64 @rori_i64_fshl(i64 %a) nounwind {
; RV64I-LABEL: rori_i64_fshl:
; RV64I: # %bb.0: