operands[2] = gen_lowpart (QImode, operands[0]);
})
-(define_insn_and_split "*setcc_si_1_and"
- [(set (match_operand:SI 0 "register_operand" "=q")
- (match_operator:SI 1 "ix86_comparison_operator"
+(define_insn_and_split "*setcc_<mode>_1_and"
+ [(set (match_operand:SWI24 0 "register_operand" "=q")
+ (match_operator:SWI24 1 "ix86_comparison_operator"
[(reg FLAGS_REG) (const_int 0)]))
(clobber (reg:CC FLAGS_REG))]
 "!TARGET_PARTIAL_REG_STALL
   && TARGET_ZERO_EXTEND_WITH_AND && optimize_function_for_speed_p (cfun)"
"#"
"&& reload_completed"
[(set (match_dup 2) (match_dup 1))
- (parallel [(set (match_dup 0) (zero_extend:SI (match_dup 2)))
+ (parallel [(set (match_dup 0) (zero_extend:SWI24 (match_dup 2)))
(clobber (reg:CC FLAGS_REG))])]
{
 operands[1] = shallow_copy_rtx (operands[1]);
 PUT_MODE (operands[1], QImode);
operands[2] = gen_lowpart (QImode, operands[0]);
})
-(define_insn_and_split "*setcc_si_1_movzbl"
- [(set (match_operand:SI 0 "register_operand" "=q")
- (match_operator:SI 1 "ix86_comparison_operator"
+(define_insn_and_split "*setcc_<mode>_1_movzbl"
+ [(set (match_operand:SWI24 0 "register_operand" "=q")
+ (match_operator:SWI24 1 "ix86_comparison_operator"
[(reg FLAGS_REG) (const_int 0)]))]
"!TARGET_PARTIAL_REG_STALL
&& (!TARGET_ZERO_EXTEND_WITH_AND || optimize_function_for_size_p (cfun))"
"#"
"&& reload_completed"
[(set (match_dup 2) (match_dup 1))
- (set (match_dup 0) (zero_extend:SI (match_dup 2)))]
+ (set (match_dup 0) (zero_extend:SWI24 (match_dup 2)))]
{
operands[1] = shallow_copy_rtx (operands[1]);
PUT_MODE (operands[1], QImode);
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr95950.c
+/* PR target/95950 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -mtune=generic" } */
+/* { dg-final { scan-assembler-times "\tseta\t" 4 } } */
+/* { dg-final { scan-assembler-times "\tseto\t" 16 } } */
+/* { dg-final { scan-assembler-times "\tsetc\t" 4 } } */
+/* { dg-final { scan-assembler-not "\tjn?a\t" } } */
+/* { dg-final { scan-assembler-not "\tjn?o\t" } } */
+/* { dg-final { scan-assembler-not "\tjn?c\t" } } */
+
+char
+f1 (short a, short b)
+{
+ return __builtin_mul_overflow_p (a, b, (short) 0);
+}
+
+char
+f2 (short a, short b)
+{
+ return __builtin_add_overflow_p (a, b, (short) 0);
+}
+
+char
+f3 (short a, short b)
+{
+ return __builtin_sub_overflow_p (a, b, (short) 0);
+}
+
+char
+f4 (unsigned short a, unsigned short b)
+{
+ return __builtin_mul_overflow_p (a, b, (unsigned short) 0);
+}
+
+char
+f5 (unsigned short a, unsigned short b)
+{
+ return __builtin_add_overflow_p (a, b, (unsigned short) 0);
+}
+
+char
+f6 (unsigned short a, unsigned short b)
+{
+ return __builtin_sub_overflow_p (a, b, (unsigned short) 0);
+}
+
+char
+f7 (short a, short b)
+{
+ return __builtin_mul_overflow_p (a, b, (short) 0);
+}
+
+char
+f8 (short a, short b)
+{
+ return __builtin_add_overflow_p (a, b, (short) 0);
+}
+
+char
+f9 (short a, short b)
+{
+ return __builtin_sub_overflow_p (a, b, (short) 0);
+}
+
+char
+f10 (unsigned short a, unsigned short b)
+{
+ return __builtin_mul_overflow_p (a, b, (unsigned short) 0);
+}
+
+char
+f11 (unsigned short a, unsigned short b)
+{
+ return __builtin_add_overflow_p (a, b, (unsigned short) 0);
+}
+
+char
+f12 (unsigned short a, unsigned short b)
+{
+ return __builtin_sub_overflow_p (a, b, (unsigned short) 0);
+}
+
+unsigned short
+f13 (short a, short b)
+{
+ return __builtin_mul_overflow_p (a, b, (short) 0);
+}
+
+unsigned short
+f14 (short a, short b)
+{
+ return __builtin_add_overflow_p (a, b, (short) 0);
+}
+
+unsigned short
+f15 (short a, short b)
+{
+ return __builtin_sub_overflow_p (a, b, (short) 0);
+}
+
+unsigned short
+f16 (unsigned short a, unsigned short b)
+{
+ return __builtin_mul_overflow_p (a, b, (unsigned short) 0);
+}
+
+unsigned short
+f17 (unsigned short a, unsigned short b)
+{
+ return __builtin_add_overflow_p (a, b, (unsigned short) 0);
+}
+
+unsigned short
+f18 (unsigned short a, unsigned short b)
+{
+ return __builtin_sub_overflow_p (a, b, (unsigned short) 0);
+}
+
+unsigned short
+f19 (short a, short b)
+{
+ return __builtin_mul_overflow_p (a, b, (short) 0);
+}
+
+unsigned short
+f20 (short a, short b)
+{
+ return __builtin_add_overflow_p (a, b, (short) 0);
+}
+
+unsigned short
+f21 (short a, short b)
+{
+ return __builtin_sub_overflow_p (a, b, (short) 0);
+}
+
+unsigned short
+f22 (unsigned short a, unsigned short b)
+{
+ return __builtin_mul_overflow_p (a, b, (unsigned short) 0);
+}
+
+unsigned short
+f23 (unsigned short a, unsigned short b)
+{
+ return __builtin_add_overflow_p (a, b, (unsigned short) 0);
+}
+
+unsigned short
+f24 (unsigned short a, unsigned short b)
+{
+ return __builtin_sub_overflow_p (a, b, (unsigned short) 0);
+}