}
; If all bits affected by the add are included
-; in the mask, do the add before the mask op.
+; in the mask, do the mask op before the add.
define i8 @masked_add(i8 %x) {
; CHECK-LABEL: @masked_add(
-; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], 96
-; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP1]], -16
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], -16
+; CHECK-NEXT: [[R:%.*]] = add i8 [[AND]], 96
; CHECK-NEXT: ret i8 [[R]]
;
%and = and i8 %x, 240 ; 0xf0
define <2 x i8> @masked_add_splat(<2 x i8> %x) {
; CHECK-LABEL: @masked_add_splat(
-; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 64, i8 64>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[TMP1]], <i8 -64, i8 -64>
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[X:%.*]], <i8 -64, i8 -64>
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[AND]], <i8 64, i8 64>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%and = and <2 x i8> %x, <i8 192, i8 192> ; 0xc0
; Multi-use variant: the mask result %and has an extra use (passed to @use),
; so the transform must still produce the and-before-add form without
; duplicating work. The -/+ lines below are diff hunks updating the FileCheck
; expectations from the old add-then-mask output to the new mask-then-add
; output, and retargeting the extra use from %x to %and.
; NOTE(review): this is a diff excerpt of a FileCheck test — the `-`/`+`
; markers are patch syntax, not IR; confirm against the full .ll file.
define i8 @masked_add_multi_use(i8 %x) {
; CHECK-LABEL: @masked_add_multi_use(
-; CHECK-NEXT: [[TMP:%.*]] = add i8 [[X:%.*]], 96
-; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP:%.*]], -16
-; CHECK-NEXT: call void @use(i8 [[X]])
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], -16
+; CHECK-NEXT: [[R:%.*]] = add i8 [[AND]], 96
+; CHECK-NEXT: call void @use(i8 [[AND]])
; CHECK-NEXT: ret i8 [[R]]
;
  %and = and i8 %x, -16 ; 0xf0
  %r = add i8 %and, 96 ; 0x60
- call void @use(i8 %x) ; extra use
+ call void @use(i8 %and) ; extra use
  ret i8 %r
}
; CHECK-LABEL: @n9_wrong_x0(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X_0:%.*]], 15
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X_0]], 16
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X_0]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X_1:%.*]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]
;
; CHECK-LABEL: @n9_wrong_x1(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X_0:%.*]], 15
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X_1:%.*]], 16
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X_1:%.*]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X_0]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]
;
; CHECK-LABEL: @n9_wrong_x2(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X_1:%.*]], 15
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X_0:%.*]], 16
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X_0:%.*]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X_0]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]
;
; CHECK-LABEL: @n10_wrong_low_bit_mask(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 31
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]
;
; CHECK-LABEL: @n12_wrong_bias(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 15
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X]], 32
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 32
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]
;
; CHECK-LABEL: @n14_wrong_comparison_constant(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 15
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 1
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]
;
; CHECK-LABEL: @n15_wrong_comparison_predicate_and_constant(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 14
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]
;
; CHECK-LABEL: @n16_oneuse(
; CHECK-NEXT: [[X_LOWBITS:%.*]] = and i8 [[X:%.*]], 15
; CHECK-NEXT: [[X_LOWBITS_ARE_ZERO:%.*]] = icmp eq i8 [[X_LOWBITS]], 0
-; CHECK-NEXT: [[X_BIASED:%.*]] = add i8 [[X]], 16
-; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = and i8 [[X_BIASED]], -16
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], -16
+; CHECK-NEXT: [[X_BIASED_HIGHBITS:%.*]] = add i8 [[TMP1]], 16
; CHECK-NEXT: call void @use.i8(i8 [[X_BIASED_HIGHBITS]])
; CHECK-NEXT: [[X_ROUNDEDUP:%.*]] = select i1 [[X_LOWBITS_ARE_ZERO]], i8 [[X]], i8 [[X_BIASED_HIGHBITS]]
; CHECK-NEXT: ret i8 [[X_ROUNDEDUP]]