%or1 = or i32 %xor, %neg
ret i32 %or1
}
+
+; Shift amount equals bitwidth-1 (7 for i8), so %sign is a sign-splat: 0 or -1.
+; NOTE(review): the CHECK lines show the 'or' surviving unchanged -- presumably
+; baseline output recorded before an "(ashr X, BW-1) | Y" fold fires; confirm
+; against the current InstCombine behavior when regenerating checks.
+define i8 @ashr_bitwidth_mask(i8 %x, i8 %y) {
+; CHECK-LABEL: @ashr_bitwidth_mask(
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
+; CHECK-NEXT: [[R:%.*]] = or i8 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %sign = ashr i8 %x, 7
+ %r = or i8 %sign, %y
+ ret i8 %r
+}
+
+; Vector variant of the sign-splat pattern with the 'or' operands commuted:
+; the splat shift amount is bitwidth-1 (<i8 7, i8 7> for <2 x i8>), and the
+; mul forces %y to be the more "complex" operand so it lands first in the or.
+define <2 x i8> @ashr_bitwidth_mask_vec_commute(<2 x i8> %x, <2 x i8> %py) {
+; CHECK-LABEL: @ashr_bitwidth_mask_vec_commute(
+; CHECK-NEXT: [[Y:%.*]] = mul <2 x i8> [[PY:%.*]], <i8 42, i8 2>
+; CHECK-NEXT: [[SIGN:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 7, i8 7>
+; CHECK-NEXT: [[R:%.*]] = or <2 x i8> [[Y]], [[SIGN]]
+; CHECK-NEXT: ret <2 x i8> [[R]]
+;
+ %y = mul <2 x i8> %py, <i8 42, i8 2> ; thwart complexity-based ordering
+ %sign = ashr <2 x i8> %x, <i8 7, i8 7>
+ %r = or <2 x i8> %y, %sign
+ ret <2 x i8> %r
+}
+
+; Multi-use variant: %sign feeds both @use (declared elsewhere in this file)
+; and the 'or'. The extra use typically gates folds that would otherwise
+; replace the shift; the CHECK lines show both instructions retained.
+define i32 @ashr_bitwidth_mask_use(i32 %x, i32 %y) {
+; CHECK-LABEL: @ashr_bitwidth_mask_use(
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i32 [[X:%.*]], 7
+; CHECK-NEXT: call void @use(i32 [[SIGN]])
+; CHECK-NEXT: [[R:%.*]] = or i32 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[R]]
+;
+ %sign = ashr i32 %x, 7
+ call void @use(i32 %sign)
+ %r = or i32 %sign, %y
+ ret i32 %r
+}
+
+; Negative test: shift amount 6 is less than bitwidth-1, so %sign is not a
+; pure sign-splat (it keeps a data bit besides the sign bits). The sign-mask
+; fold must not apply; the CHECK lines confirm the IR is unchanged.
+define i8 @ashr_not_bitwidth_mask(i8 %x, i8 %y) {
+; CHECK-LABEL: @ashr_not_bitwidth_mask(
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 6
+; CHECK-NEXT: [[R:%.*]] = or i8 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %sign = ashr i8 %x, 6
+ %r = or i8 %sign, %y
+ ret i8 %r
+}
+
+; Negative test: lshr (zero-fill) by bitwidth-1 yields 0 or 1, not the 0/-1
+; mask that ashr produces, so the sign-mask fold must not apply here.
+; The CHECK lines confirm the IR is unchanged.
+define i8 @lshr_bitwidth_mask(i8 %x, i8 %y) {
+; CHECK-LABEL: @lshr_bitwidth_mask(
+; CHECK-NEXT: [[SIGN:%.*]] = lshr i8 [[X:%.*]], 7
+; CHECK-NEXT: [[R:%.*]] = or i8 [[SIGN]], [[Y:%.*]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %sign = lshr i8 %x, 7
+ %r = or i8 %sign, %y
+ ret i8 %r
+}