ret i16 %ret
}
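+
+; Rotate right of an i64 where the i32 shift amount is masked to 0..63 and
+; zero-extended, and the opposite shift uses the masked negation of the
+; amount. As the checks below show, this pattern is currently left as-is and
+; not folded to a funnel shift (@llvm.fshr).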
+define i64 @rotateright_64_zext_neg_mask_amount(i64 %0, i32 %1) {
+; CHECK-LABEL: @rotateright_64_zext_neg_mask_amount(
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP1:%.*]], 63
+; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP0:%.*]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub nsw i32 0, [[TMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = and i32 [[TMP6]], 63
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = shl i64 [[TMP0]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP5]], [[TMP9]]
+; CHECK-NEXT: ret i64 [[TMP10]]
+;
+ %3 = and i32 %1, 63
+ %4 = zext i32 %3 to i64
+ %5 = lshr i64 %0, %4
+ %6 = sub nsw i32 0, %1
+ %7 = and i32 %6, 63
+ %8 = zext i32 %7 to i64
+ %9 = shl i64 %0, %8
+ %10 = or i64 %5, %9
+ ret i64 %10
+}
+
define i8 @rotateleft_8_neg_mask_wide_amount(i8 %v, i32 %shamt) {
; CHECK-LABEL: @rotateleft_8_neg_mask_wide_amount(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[SHAMT:%.*]] to i8
ret i8 %ret
}
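+
+; Rotate-left counterpart of the zext'd negated-mask test above: the
+; zero-extended masked amount feeds the shl and the masked negation feeds the
+; lshr. As the checks show, no @llvm.fshl call is formed.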
+define i64 @rotateleft_64_zext_neg_mask_amount(i64 %0, i32 %1) {
+; CHECK-LABEL: @rotateleft_64_zext_neg_mask_amount(
+; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP1:%.*]], 63
+; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP0:%.*]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub nsw i32 0, [[TMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = and i32 [[TMP6]], 63
+; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = lshr i64 [[TMP0]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = or i64 [[TMP5]], [[TMP9]]
+; CHECK-NEXT: ret i64 [[TMP10]]
+;
+ %3 = and i32 %1, 63
+ %4 = zext i32 %3 to i64
+ %5 = shl i64 %0, %4
+ %6 = sub nsw i32 0, %1
+ %7 = and i32 %6, 63
+ %8 = zext i32 %7 to i64
+ %9 = lshr i64 %0, %8
+ %10 = or i64 %5, %9
+ ret i64 %10
+}
+
; Non-power-of-2 types. This could be transformed, but it's not a typical rotate pattern.
define i9 @rotateleft_9_neg_mask_wide_amount_commute(i9 %v, i33 %shamt) {