return true;
})
+;; Return true if OP is a parallel for a pmovz{bw,wd,dq} vec_select,
+;; where one of the two operands of the vec_concat is const0_operand.
+(define_predicate "pmovzx_parallel"
+ (and (match_code "parallel")
+ (match_code "const_int" "a"))
+{
+ int nelt = XVECLEN (op, 0);
+ int elt, i;
+
+ if (nelt < 2)
+ return false;
+
+ /* Check that the permutation is suitable for pmovz{bw,wd,dq}.
+ For example { 0 16 1 17 2 18 3 19 4 20 5 21 6 22 7 23 }. */
+ elt = INTVAL (XVECEXP (op, 0, 0));
+ if (elt == 0)
+ {
+ for (i = 1; i < nelt; ++i)
+ if ((i & 1) != 0)
+ {
+ if (INTVAL (XVECEXP (op, 0, i)) < nelt)
+ return false;
+ }
+ else if (INTVAL (XVECEXP (op, 0, i)) != i / 2)
+ return false;
+ }
+ else
+ return false;
+
+ return true;
+})
+
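In C terms the predicate accepts exactly the interleave-low-with-zero
selection.  A minimal standalone model of the check (illustration only,
not part of the patch; sel/nelt mirror the PARALLEL contents):

#include <stdbool.h>

/* Lanes < NELT select from operand 1, lanes >= NELT from the zero
   operand, so { 0, 16, 1, 17, ... } zero-extends the low half.  */
static bool
pmovzx_parallel_p (const int *sel, int nelt)
{
  if (nelt < 2 || sel[0] != 0)
    return false;
  for (int i = 1; i < nelt; ++i)
    if (i & 1)
      {
        if (sel[i] < nelt)        /* Odd lanes must be zero lanes.  */
          return false;
      }
    else if (sel[i] != i / 2)     /* Even lanes walk the low half.  */
      return false;
  return true;
}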
;; Return true if OP is a parallel for a vbroadcast permute.
(define_predicate "avx_vbroadcast_operand"
 (and (match_code "parallel")
@@ ... @@
       (any_extend:V8HI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V8QImode, 0);")
+(define_insn_and_split "*sse4_1_zero_extendv8qiv8hi2_3"
+ [(set (match_operand:V16QI 0 "register_operand" "=Yr,*x,v")
+ (vec_select:V16QI
+ (vec_concat:V32QI
+ (match_operand:V16QI 1 "vector_operand" "Yrm,*xm,vm")
+ (match_operand:V16QI 2 "const0_operand" "C,C,C"))
+ (match_parallel 3 "pmovzx_parallel"
+ [(match_operand 4 "const_int_operand" "n,n,n")])))]
+ "TARGET_SSE4_1"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:V8HI
+ (vec_select:V8QI
+ (match_dup 1)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)
+ (const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)]))))]
+{
+  /* Operand 0 was matched as V16QI; view it as V8HI to build the
+     zero_extend form used by the split.  */
+  operands[0] = lowpart_subreg (V8HImode, operands[0], V16QImode);
+  if (MEM_P (operands[1]))
+    {
+      /* For a memory source, emit the zero extension from a narrower
+         V8QI memory reference directly.  */
+      operands[1] = lowpart_subreg (V8QImode, operands[1], V16QImode);
+      operands[1] = gen_rtx_ZERO_EXTEND (V8HImode, operands[1]);
+      emit_insn (gen_rtx_SET (operands[0], operands[1]));
+      DONE;
+    }
+})
+
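What the splitter above recognizes, written with SSE4.1 intrinsics (an
illustrative equivalence, not code from the patch): interleaving the low
bytes of x with zeros is an unsigned widening, so the whole shuffle is a
single pmovzxbw.  The v4hiv4si and v2siv2di patterns below are the same
trick for pmovzxwd (_mm_cvtepu16_epi32) and pmovzxdq (_mm_cvtepu32_epi64).

#include <smmintrin.h>

/* __builtin_shuffle (x, zero, { 0, 16, 1, 17, ... }) is equivalent to
   zero-extending the low eight bytes, i.e. pmovzxbw.  */
__m128i
zext_low_bytes (__m128i x)
{
  return _mm_cvtepu8_epi16 (x);
}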
(define_expand "<insn>v8qiv8hi2"
[(set (match_operand:V8HI 0 "register_operand")
 (any_extend:V8HI
@@ ... @@
 }
})
+(define_insn_and_split "*sse4_1_zero_extendv4hiv4si2_3"
+ [(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,v")
+ (vec_select:V8HI
+ (vec_concat:V16HI
+ (match_operand:V8HI 1 "vector_operand" "Yrm,*xm,vm")
+ (match_operand:V8HI 2 "const0_operand" "C,C,C"))
+ (match_parallel 3 "pmovzx_parallel"
+ [(match_operand 4 "const_int_operand" "n,n,n")])))]
+ "TARGET_SSE4_1"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:V4SI
+ (vec_select:V4HI
+ (match_dup 1)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)]))))]
+{
+ operands[0] = lowpart_subreg (V4SImode, operands[0], V8HImode);
+ if (MEM_P (operands[1]))
+ {
+ operands[1] = lowpart_subreg (V4HImode, operands[1], V8HImode);
+ operands[1] = gen_rtx_ZERO_EXTEND (V4SImode, operands[1]);
+ emit_insn (gen_rtx_SET (operands[0], operands[1]));
+ DONE;
+ }
+})
+
(define_insn "avx512f_<code>v8qiv8di2<mask_name>"
[(set (match_operand:V8DI 0 "register_operand" "=v")
 (any_extend:V8DI
@@ ... @@
   (any_extend:V2DI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V2SImode, 0);")
+(define_insn_and_split "*sse4_1_zero_extendv2siv2di2_3"
+ [(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v")
+ (vec_select:V4SI
+ (vec_concat:V8SI
+ (match_operand:V4SI 1 "vector_operand" "Yrm,*xm,vm")
+ (match_operand:V4SI 2 "const0_operand" "C,C,C"))
+ (match_parallel 3 "pmovzx_parallel"
+ [(match_operand 4 "const_int_operand" "n,n,n")])))]
+ "TARGET_SSE4_1"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:V2DI
+ (vec_select:V2SI (match_dup 1)
+ (parallel [(const_int 0) (const_int 1)]))))]
+{
+ operands[0] = lowpart_subreg (V2DImode, operands[0], V4SImode);
+ if (MEM_P (operands[1]))
+ {
+ operands[1] = lowpart_subreg (V2SImode, operands[1], V4SImode);
+ operands[1] = gen_rtx_ZERO_EXTEND (V2DImode, operands[1]);
+ emit_insn (gen_rtx_SET (operands[0], operands[1]));
+ DONE;
+ }
+})
+
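For a memory source each of these splitters narrows the operand and emits
the zero_extend directly, so the resulting pmovzx uses its memory form and
reads only the low half of the original vector.  A hedged intrinsics sketch
of the v2siv2di case (the explicit _mm_loadl_epi64 is for illustration; the
compiler folds the load into pmovzxdq):

#include <smmintrin.h>

/* pmovzxdq with a memory operand reads just 64 bits; model the
   narrowed access with an explicit low-half load.  */
__m128i
zext_low_dwords_mem (__m128i const *p)
{
  __m128i lo = _mm_loadl_epi64 (p);   /* 8-byte load */
  return _mm_cvtepu32_epi64 (lo);     /* pmovzxdq */
}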
(define_expand "<insn>v2siv2di2"
[(set (match_operand:V2DI 0 "register_operand")
(any_extend:V2DI
--- /dev/null
+/* PR target/95905 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse4.1" } */
+/* { dg-final { scan-assembler "\tv?pmovzxbw\t" } } */
+/* { dg-final { scan-assembler "\tv?pmovzxwd\t" } } */
+/* { dg-final { scan-assembler "\tv?pmovzxdq\t" } } */
+
+typedef unsigned char V1 __attribute__((vector_size (16)));
+typedef unsigned short V2 __attribute__((vector_size (16)));
+typedef unsigned int V3 __attribute__((vector_size (16)));
+
+V1
+f1 (V1 x)
+{
+ return __builtin_shuffle (x, (V1) {}, (V1) { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+}
+
+V2
+f2 (V2 x)
+{
+ return __builtin_shuffle (x, (V2) {}, (V2) { 0, 8, 1, 9, 2, 10, 3, 11 });
+}
+
+V3
+f3 (V3 x)
+{
+ return __builtin_shuffle (x, (V3) {}, (V3) { 0, 4, 1, 5 });
+}
+
+V1
+f4 (V1 *x)
+{
+ return __builtin_shuffle (*x, (V1) {}, (V1) { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 });
+}
+
+V2
+f5 (V2 *x)
+{
+ return __builtin_shuffle (*x, (V2) {}, (V2) { 0, 8, 1, 9, 2, 10, 3, 11 });
+}
+
+V3
+f6 (V3 *x)
+{
+ return __builtin_shuffle (*x, (V3) {}, (V3) { 0, 4, 1, 5 });
+}
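+
Outside the DejaGnu harness the expectation can be checked by compiling the
test with gcc -O2 -msse4.1 -S and inspecting the assembly: each of the six
functions should compile to pmovzxbw, pmovzxwd or pmovzxdq (the v-prefixed
forms when VEX encoding is in use), matching the scan-assembler patterns in
the header.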