-; NOTE: Assertions have been autogenerated by update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
; Division-by-zero is immediate undefined behavior. UB in any vector lane means the whole op is undefined.
; Negative test: srem of two arbitrary i32 values has no simplification;
; the autogenerated assertions record that the instruction is kept.
; NOTE(review): body appears truncated here (no 'ret'/closing '}' before the
; next define) — likely dropped diff context lines; verify against upstream.
define i32 @rem1(i32 %x, i32 %n) {
; CHECK-LABEL: @rem1(
-; CHECK-NEXT: [[MOD:%.*]] = srem i32 %x, %n
+; CHECK-NEXT: [[MOD:%.*]] = srem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT: ret i32 [[MOD]]
;
%mod = srem i32 %x, %n
; Negative test: urem of two arbitrary i32 values has no simplification;
; the autogenerated assertions record that the instruction is kept.
; NOTE(review): body appears truncated (no 'ret'/closing '}') — likely
; dropped diff context lines; verify against upstream.
define i32 @rem2(i32 %x, i32 %n) {
; CHECK-LABEL: @rem2(
-; CHECK-NEXT: [[MOD:%.*]] = urem i32 %x, %n
+; CHECK-NEXT: [[MOD:%.*]] = urem i32 [[X:%.*]], [[N:%.*]]
; CHECK-NEXT: ret i32 [[MOD]]
;
%mod = urem i32 %x, %n
; Negative test: an srem followed by a urem of its result — per the recorded
; assertions neither instruction is removed.
; NOTE(review): body appears truncated (the urem and 'ret' lines are missing
; before the next define) — verify against upstream.
define i32 @rem3(i32 %x, i32 %n) {
; CHECK-LABEL: @rem3(
-; CHECK-NEXT: [[MOD:%.*]] = srem i32 %x, %n
-; CHECK-NEXT: [[MOD1:%.*]] = urem i32 [[MOD]], %n
+; CHECK-NEXT: [[MOD:%.*]] = srem i32 [[X:%.*]], [[N:%.*]]
+; CHECK-NEXT: [[MOD1:%.*]] = urem i32 [[MOD]], [[N]]
; CHECK-NEXT: ret i32 [[MOD1]]
;
%mod = srem i32 %x, %n
; The dividend is masked to at most 250; per the recorded assertions the
; urem is simplified away and the masked value is returned directly.
; NOTE(review): body appears truncated (the urem and 'ret' lines are missing
; before the next define) — verify against upstream.
define i32 @urem_dividend_known_smaller_than_constant_divisor(i32 %x) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_constant_divisor(
-; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 250
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 250
; CHECK-NEXT: ret i32 [[AND]]
;
%and = and i32 %x, 250
; Negative test: masking with 251 still allows the dividend to equal a
; divisor of 251, so the recorded assertions keep the urem.
; NOTE(review): the IR body lines are entirely missing here (truncated diff
; context) — verify against upstream.
define i32 @not_urem_dividend_known_smaller_than_constant_divisor(i32 %x) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_constant_divisor(
-; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 251
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 251
; CHECK-NEXT: [[R:%.*]] = urem i32 [[AND]], 251
; CHECK-NEXT: ret i32 [[R]]
;
; Negative test: or'ing with 251 only guarantees divisor >= 251, which does
; not prove the constant dividend 251 is strictly smaller, so the recorded
; assertions keep the urem.
; NOTE(review): the IR body lines are entirely missing here (truncated diff
; context) — verify against upstream.
define i32 @not_urem_constant_dividend_known_smaller_than_divisor(i32 %x) {
; CHECK-LABEL: @not_urem_constant_dividend_known_smaller_than_divisor(
-; CHECK-NEXT: [[OR:%.*]] = or i32 %x, 251
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], 251
; CHECK-NEXT: [[R:%.*]] = urem i32 251, [[OR]]
; CHECK-NEXT: ret i32 [[R]]
;
; Dividend masked to <= 250, divisor or'd to >= 251; the autogenerated
; assertions at this revision still keep the urem (no cross-operand
; known-bits fold is recorded here).
; NOTE(review): the IR body lines are entirely missing here (truncated diff
; context) — verify against upstream.
define i32 @urem_dividend_known_smaller_than_divisor(i32 %x, i32 %y) {
; CHECK-LABEL: @urem_dividend_known_smaller_than_divisor(
-; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 250
-; CHECK-NEXT: [[OR:%.*]] = or i32 %y, 251
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 250
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 251
; CHECK-NEXT: [[R:%.*]] = urem i32 [[AND]], [[OR]]
; CHECK-NEXT: ret i32 [[R]]
;
; Negative test: with both the mask and the or using 251, the dividend can
; equal the divisor, so the recorded assertions keep the urem.
define i32 @not_urem_dividend_known_smaller_than_divisor(i32 %x, i32 %y) {
; CHECK-LABEL: @not_urem_dividend_known_smaller_than_divisor(
-; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 251
-; CHECK-NEXT: [[OR:%.*]] = or i32 %y, 251
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 251
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 251
; CHECK-NEXT: [[R:%.*]] = urem i32 [[AND]], [[OR]]
; CHECK-NEXT: ret i32 [[R]]
;
; NOTE(review): the IR body is missing here (only the closing brace
; survives) — likely dropped diff context lines; verify against upstream.
}
; !0 is a 2-element range descriptor (i32 values in [0, 3)); presumably it
; was attached via !range in one of the truncated bodies — confirm.
!0 = !{i32 0, i32 3}
+
+; srem of a left shift that cannot signed-wrap ('nsw') by the shifted value
+; (scalar form). The autogenerated assertions record that instsimplify
+; leaves both instructions in place at this revision.
+define i32 @rem5(i32 %x, i32 %y) {
+; CHECK-LABEL: @rem5(
+; CHECK-NEXT: [[SHL:%.*]] = shl nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[MOD:%.*]] = srem i32 [[SHL]], [[X]]
+; CHECK-NEXT: ret i32 [[MOD]]
+;
+ %shl = shl nsw i32 %x, %y
+ %mod = srem i32 %shl, %x
+ ret i32 %mod
+}
+
+; srem of an 'nsw' left shift by the shifted value — <2 x i32> vector form.
+; The autogenerated assertions record that instsimplify leaves both
+; instructions in place at this revision.
+define <2 x i32> @rem6(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @rem6(
+; CHECK-NEXT: [[SHL:%.*]] = shl nsw <2 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[MOD:%.*]] = srem <2 x i32> [[SHL]], [[X]]
+; CHECK-NEXT: ret <2 x i32> [[MOD]]
+;
+ %shl = shl nsw <2 x i32> %x, %y
+ %mod = srem <2 x i32> %shl, %x
+ ret <2 x i32> %mod
+}
+
+; make sure the previous fold doesn't take place for wrapped shifts
+
+; Negative test: without 'nsw' the shift may wrap, so the shifted result is
+; not provably a multiple of %x; the assertions keep the srem.
+define i32 @rem7(i32 %x, i32 %y) {
+; CHECK-LABEL: @rem7(
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[MOD:%.*]] = srem i32 [[SHL]], [[X]]
+; CHECK-NEXT: ret i32 [[MOD]]
+;
+ %shl = shl i32 %x, %y
+ %mod = srem i32 %shl, %x
+ ret i32 %mod
+}
+
+; urem of a left shift that cannot unsigned-wrap ('nuw') by the shifted
+; value (scalar form). The autogenerated assertions record that instsimplify
+; leaves both instructions in place at this revision.
+define i32 @rem8(i32 %x, i32 %y) {
+; CHECK-LABEL: @rem8(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[MOD:%.*]] = urem i32 [[SHL]], [[X]]
+; CHECK-NEXT: ret i32 [[MOD]]
+;
+ %shl = shl nuw i32 %x, %y
+ %mod = urem i32 %shl, %x
+ ret i32 %mod
+}
+
+; urem of an 'nuw' left shift by the shifted value — <2 x i32> vector form.
+; The autogenerated assertions record that instsimplify leaves both
+; instructions in place at this revision.
+define <2 x i32> @rem9(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @rem9(
+; CHECK-NEXT: [[SHL:%.*]] = shl nuw <2 x i32> [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[MOD:%.*]] = urem <2 x i32> [[SHL]], [[X]]
+; CHECK-NEXT: ret <2 x i32> [[MOD]]
+;
+ %shl = shl nuw <2 x i32> %x, %y
+ %mod = urem <2 x i32> %shl, %x
+ ret <2 x i32> %mod
+}
+
+; make sure the previous fold doesn't take place for wrapped shifts
+
+; Negative test: without 'nuw' the shift may wrap, so the shifted result is
+; not provably a multiple of %x; the assertions keep the urem.
+define i32 @rem10(i32 %x, i32 %y) {
+; CHECK-LABEL: @rem10(
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[MOD:%.*]] = urem i32 [[SHL]], [[X]]
+; CHECK-NEXT: ret i32 [[MOD]]
+;
+ %shl = shl i32 %x, %y
+ %mod = urem i32 %shl, %x
+ ret i32 %mod
+}