typedef _Float16 __m256h __attribute__ ((__vector_size__ (32), __may_alias__));
typedef _Float16 __m512h __attribute__ ((__vector_size__ (64), __may_alias__));
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_ph (_Float16 __A7, _Float16 __A6, _Float16 __A5,
+ _Float16 __A4, _Float16 __A3, _Float16 __A2,
+ _Float16 __A1, _Float16 __A0)
+{
+ return __extension__ (__m128h)(__v8hf){ __A0, __A1, __A2, __A3,
+ __A4, __A5, __A6, __A7 };
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_set_ph (_Float16 __A15, _Float16 __A14, _Float16 __A13,
+ _Float16 __A12, _Float16 __A11, _Float16 __A10,
+ _Float16 __A9, _Float16 __A8, _Float16 __A7,
+ _Float16 __A6, _Float16 __A5, _Float16 __A4,
+ _Float16 __A3, _Float16 __A2, _Float16 __A1,
+ _Float16 __A0)
+{
+ return __extension__ (__m256h)(__v16hf){ __A0, __A1, __A2, __A3,
+ __A4, __A5, __A6, __A7,
+ __A8, __A9, __A10, __A11,
+ __A12, __A13, __A14, __A15 };
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_set_ph (_Float16 __A31, _Float16 __A30, _Float16 __A29,
+ _Float16 __A28, _Float16 __A27, _Float16 __A26,
+ _Float16 __A25, _Float16 __A24, _Float16 __A23,
+ _Float16 __A22, _Float16 __A21, _Float16 __A20,
+ _Float16 __A19, _Float16 __A18, _Float16 __A17,
+ _Float16 __A16, _Float16 __A15, _Float16 __A14,
+ _Float16 __A13, _Float16 __A12, _Float16 __A11,
+ _Float16 __A10, _Float16 __A9, _Float16 __A8,
+ _Float16 __A7, _Float16 __A6, _Float16 __A5,
+ _Float16 __A4, _Float16 __A3, _Float16 __A2,
+ _Float16 __A1, _Float16 __A0)
+{
+ return __extension__ (__m512h)(__v32hf){ __A0, __A1, __A2, __A3,
+ __A4, __A5, __A6, __A7,
+ __A8, __A9, __A10, __A11,
+ __A12, __A13, __A14, __A15,
+ __A16, __A17, __A18, __A19,
+ __A20, __A21, __A22, __A23,
+ __A24, __A25, __A26, __A27,
+ __A28, __A29, __A30, __A31 };
+}
+
+/* Create vectors with the elements in reversed order relative to the
+   _mm_set_ph, _mm256_set_ph and _mm512_set_ph functions. */
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2,
+ _Float16 __A3, _Float16 __A4, _Float16 __A5,
+ _Float16 __A6, _Float16 __A7)
+{
+ return _mm_set_ph (__A7, __A6, __A5, __A4, __A3, __A2, __A1, __A0);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2,
+ _Float16 __A3, _Float16 __A4, _Float16 __A5,
+ _Float16 __A6, _Float16 __A7, _Float16 __A8,
+ _Float16 __A9, _Float16 __A10, _Float16 __A11,
+ _Float16 __A12, _Float16 __A13, _Float16 __A14,
+ _Float16 __A15)
+{
+ return _mm256_set_ph (__A15, __A14, __A13, __A12, __A11, __A10, __A9,
+ __A8, __A7, __A6, __A5, __A4, __A3, __A2, __A1,
+ __A0);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_setr_ph (_Float16 __A0, _Float16 __A1, _Float16 __A2,
+ _Float16 __A3, _Float16 __A4, _Float16 __A5,
+ _Float16 __A6, _Float16 __A7, _Float16 __A8,
+ _Float16 __A9, _Float16 __A10, _Float16 __A11,
+ _Float16 __A12, _Float16 __A13, _Float16 __A14,
+ _Float16 __A15, _Float16 __A16, _Float16 __A17,
+ _Float16 __A18, _Float16 __A19, _Float16 __A20,
+ _Float16 __A21, _Float16 __A22, _Float16 __A23,
+ _Float16 __A24, _Float16 __A25, _Float16 __A26,
+ _Float16 __A27, _Float16 __A28, _Float16 __A29,
+ _Float16 __A30, _Float16 __A31)
+{
+ return _mm512_set_ph (__A31, __A30, __A29, __A28, __A27, __A26, __A25,
+ __A24, __A23, __A22, __A21, __A20, __A19, __A18,
+ __A17, __A16, __A15, __A14, __A13, __A12, __A11,
+ __A10, __A9, __A8, __A7, __A6, __A5, __A4, __A3,
+ __A2, __A1, __A0);
+}
+
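As a usage sketch (not part of the patch): _mm_set_ph lists its arguments from the highest element down to element 0, while _mm_setr_ph takes them in ascending element order, so the two calls below build identical vectors.  This assumes the complete avx512fp16intrin.h is reachable via <immintrin.h> and compilation with -mavx512fp16.

#include <immintrin.h>

void
set_order_example (_Float16 *out)
{
  /* Illustration only: element 0 is 0.0 and element 7 is 7.0 in both
     vectors; _mm_set_ph lists arguments high-to-low, _mm_setr_ph
     low-to-high.  */
  __m128h a = _mm_set_ph (7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f);
  __m128h b = _mm_setr_ph (0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f);

  /* _mm_store_sh (defined further down in this patch) writes element 0.  */
  _mm_store_sh (out, a);      /* *out == 0.0  */
  _mm_store_sh (out + 1, b);  /* out[1] == 0.0  */
}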
+/* Broadcast _Float16 to vector. */
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set1_ph (_Float16 __A)
+{
+ return _mm_set_ph (__A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_set1_ph (_Float16 __A)
+{
+ return _mm256_set_ph (__A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_set1_ph (_Float16 __A)
+{
+ return _mm512_set_ph (__A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A, __A, __A, __A);
+}
+
+/* Create a vector with all zeros. */
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_setzero_ph (void)
+{
+ return _mm_set1_ph (0.0f);
+}
+
+extern __inline __m256h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm256_setzero_ph (void)
+{
+ return _mm256_set1_ph (0.0f);
+}
+
+extern __inline __m512h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm512_setzero_ph (void)
+{
+ return _mm512_set1_ph (0.0f);
+}
+
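A minimal sketch of the broadcast helper (illustration only, not part of the patch); it assumes _mm512_storeu_ph from the rest of the header, and _mm512_setzero_ph is the analogous all-zero constructor.

#include <immintrin.h>

/* Hypothetical helper for illustration: fill the first n halves,
   32 at a time.  */
void
fill_ph (_Float16 *dst, _Float16 value, int n)
{
  __m512h v = _mm512_set1_ph (value);   /* VALUE in all 32 lanes.  */
  for (int i = 0; i + 32 <= n; i += 32)
    _mm512_storeu_ph (dst + i, v);      /* unaligned 512-bit store  */
}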
+/* Create a vector with element 0 as F and the rest zero. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_set_sh (_Float16 __F)
+{
+ return _mm_set_ph (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, __F);
+}
+
+/* Create a vector with element 0 as *P and the rest zero. */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_load_sh (void const *__P)
+{
+ return _mm_set_ph (0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ *(_Float16 const *) __P);
+}
+
+/* Store the lower _Float16 value to *P. */
+extern __inline void
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_store_sh (void *__P, __m128h __A)
+{
+ *(_Float16 *) __P = ((__v8hf)__A)[0];
+}
+
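A sketch of the scalar helpers above (illustration only; assumes <immintrin.h> and -mavx512fp16):

#include <immintrin.h>

void
scalar_sh_example (_Float16 *dst, const _Float16 *src)
{
  __m128h s = _mm_set_sh (1.5f);   /* { 1.5, 0, 0, 0, 0, 0, 0, 0 }  */
  __m128h t = _mm_load_sh (src);   /* { *src, 0, 0, 0, 0, 0, 0, 0 }  */

  _mm_store_sh (dst, t);           /* writes only the low element  */
  _mm_store_sh (dst + 1, s);       /* dst[1] == 1.5  */
}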
#ifdef __DISABLE_AVX512FP16__
#undef __DISABLE_AVX512FP16__
#pragma GCC pop_options
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
(V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX") V1TI
+ (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
[V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
+(define_mode_iterator VI12HF_AVX512VL
+ [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
+ V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
+ (V32HF "TARGET_AVX512FP16")
+ (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
+ (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")])
+
;; Same iterator, but without assuming TARGET_AVX512BW
(define_mode_iterator VI12_AVX512VLBW
[(V64QI "TARGET_AVX512BW") (V16QI "TARGET_AVX512VL")
(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512FP16") (V16HF "TARGET_AVX512FP16")
+ (V8HF "TARGET_AVX512FP16")
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
(define_mode_iterator V_256_512
[V32QI V16HI V8SI V4DI V8SF V4DF
(V64QI "TARGET_AVX512F") (V32HI "TARGET_AVX512F") (V16SI "TARGET_AVX512F")
- (V8DI "TARGET_AVX512F") (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")])
+ (V8DI "TARGET_AVX512F") (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
+ (V16HF "TARGET_AVX512FP16") (V32HF "TARGET_AVX512FP16")])
;; All vector float modes
(define_mode_iterator VF
(define_mode_iterator VF_128
[V4SF (V2DF "TARGET_SSE2")])
+;; All 128bit vector HF/SF/DF modes
+(define_mode_iterator VFH_128
+ [(V8HF "TARGET_AVX512FP16")
+ V4SF (V2DF "TARGET_SSE2")])
+
;; All 256bit vector float modes
(define_mode_iterator VF_256
[V8SF V4DF])
(define_mode_iterator VF1_AVX512VL
[V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")])
+(define_mode_iterator VF_AVX512FP16
+ [V32HF V16HF V8HF])
+
;; All vector integer modes
(define_mode_iterator VI
[(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
+;; All vector integer and HF modes
+(define_mode_iterator VIHF
+ [(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
+ (V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
+ (V8SI "TARGET_AVX") V4SI
+ (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512FP16") (V16HF "TARGET_AVX512FP16")
+ (V8HF "TARGET_AVX512FP16")])
+
(define_mode_iterator VI_AVX2
[(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
(V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")
(V4SI "avx512vl") (V8SI "avx512vl") (V16SI "avx512f")
(V2DI "avx512vl") (V4DI "avx512vl") (V8DI "avx512f")
+ (V8HF "avx512fp16") (V16HF "avx512vl") (V32HF "avx512bw")
(V4SF "avx512vl") (V8SF "avx512vl") (V16SF "avx512f")
(V2DF "avx512vl") (V4DF "avx512vl") (V8DF "avx512f")])
(V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")])
(define_mode_attr shuffletype
- [(V16SF "f") (V16SI "i") (V8DF "f") (V8DI "i")
- (V8SF "f") (V8SI "i") (V4DF "f") (V4DI "i")
- (V4SF "f") (V4SI "i") (V2DF "f") (V2DI "i")
- (V32HI "i") (V16HI "i") (V8HI "i")
- (V64QI "i") (V32QI "i") (V16QI "i")
- (V4TI "i") (V2TI "i") (V1TI "i")])
+ [(V32HF "f") (V16HF "f") (V8HF "f")
+ (V16SF "f") (V16SI "i") (V8DF "f") (V8DI "i")
+ (V8SF "f") (V8SI "i") (V4DF "f") (V4DI "i")
+ (V4SF "f") (V4SI "i") (V2DF "f") (V2DI "i")
+ (V32HI "i") (V16HI "i") (V8HI "i")
+ (V64QI "i") (V32QI "i") (V16QI "i")
+ (V4TI "i") (V2TI "i") (V1TI "i")])
(define_mode_attr ssequartermode
[(V16SF "V4SF") (V8DF "V2DF") (V16SI "V4SI") (V8DI "V2DI")])
;; All 128 and 256bit vector integer modes
(define_mode_iterator VI_128_256 [V16QI V8HI V4SI V2DI V32QI V16HI V8SI V4DI])
+;; All 256bit vector integer and HF modes
+(define_mode_iterator VIHF_256 [V32QI V16HI V8SI V4DI V16HF])
;; Various 128bit vector integer mode combinations
(define_mode_iterator VI12_128 [V16QI V8HI])
(define_mode_iterator VI4_256_8_512 [V8SI V8DI])
(define_mode_iterator VI_AVX512BW
[V16SI V8DI (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")])
+(define_mode_iterator VIHF_AVX512BW
+ [V16SI V8DI (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")
+ (V32HF "TARGET_AVX512FP16")])
;; Int-float size matches
(define_mode_iterator VI4F_128 [V4SI V4SF])
(V8SF "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
V16SF V8DF])
+(define_mode_iterator V16_256 [V16HI V16HF])
+(define_mode_iterator V32_512 [V32HI V32HF])
+
(define_mode_attr avx512bcst
[(V4SI "%{1to4%}") (V2DI "%{1to2%}")
(V8SI "%{1to8%}") (V4DI "%{1to4%}")
;; Mapping from float mode to required SSE level
(define_mode_attr sse
- [(SF "sse") (DF "sse2")
+ [(SF "sse") (DF "sse2") (HF "avx512fp16")
(V4SF "sse") (V2DF "sse2")
+ (V32HF "avx512fp16") (V16HF "avx512fp16")
+ (V8HF "avx512fp16")
(V16SF "avx512f") (V8SF "avx")
(V8DF "avx512f") (V4DF "avx")])
(V16SF "V16SF") (V8DF "V8DF")
(V8SF "V8SF") (V4DF "V4DF")
(V4SF "V4SF") (V2DF "V2DF")
+ (V8HF "TI") (V16HF "OI") (V32HF "XI")
(TI "TI")])
;; SSE constant -1 constraint
[(V64QI "BC") (V32HI "BC") (V16SI "BC") (V8DI "BC") (V4TI "BC")
(V32QI "BC") (V16HI "BC") (V8SI "BC") (V4DI "BC") (V2TI "BC")
(V16QI "BC") (V8HI "BC") (V4SI "BC") (V2DI "BC") (V1TI "BC")
- (V16SF "BF") (V8DF "BF")
- (V8SF "BF") (V4DF "BF")
- (V4SF "BF") (V2DF "BF")])
+ (V32HF "BF") (V16SF "BF") (V8DF "BF")
+ (V16HF "BF") (V8SF "BF") (V4DF "BF")
+ (V8HF "BF") (V4SF "BF") (V2DF "BF")])
+
+;; SSE integer instruction suffix for various modes
+(define_mode_attr sseintmodesuffix
+ [(V16QI "b") (V8HI "w") (V4SI "d") (V2DI "q")
+ (V32QI "b") (V16HI "w") (V8SI "d") (V4DI "q")
+ (V64QI "b") (V32HI "w") (V16SI "d") (V8DI "q")
+ (V8HF "w") (V16HF "w") (V32HF "w")])
;; Mapping of vector modes to corresponding mask size
(define_mode_attr avx512fmaskmode
(V32HI "SI") (V16HI "HI") (V8HI "QI") (V4HI "QI")
(V16SI "HI") (V8SI "QI") (V4SI "QI")
(V8DI "QI") (V4DI "QI") (V2DI "QI")
+ (V32HF "SI") (V16HF "HI") (V8HF "QI")
(V16SF "HI") (V8SF "QI") (V4SF "QI")
(V8DF "QI") (V4DF "QI") (V2DF "QI")])
(V32HI "si") (V16HI "hi") (V8HI "qi") (V4HI "qi")
(V16SI "hi") (V8SI "qi") (V4SI "qi")
(V8DI "qi") (V4DI "qi") (V2DI "qi")
+ (V32HF "si") (V16HF "hi") (V8HF "qi")
(V16SF "hi") (V8SF "qi") (V4SF "qi")
(V8DF "qi") (V4DF "qi") (V2DF "qi")])
(V16QI "V32QI") (V8HI "V16HI") (V4SI "V8SI") (V2DI "V4DI")
(V16SF "V32SF") (V8DF "V16DF")
(V8SF "V16SF") (V4DF "V8DF")
- (V4SF "V8SF") (V2DF "V4DF")])
+ (V4SF "V8SF") (V2DF "V4DF")
+ (V32HF "V64HF") (V16HF "V32HF") (V8HF "V16HF")])
;; Mapping of vector modes to a vector mode of half size
;; instead of V1DI/V1DF, DI/DF are used for V2DI/V2DF although they are scalar.
(V16QI "V8QI") (V8HI "V4HI") (V4SI "V2SI") (V2DI "DI")
(V16SF "V8SF") (V8DF "V4DF")
(V8SF "V4SF") (V4DF "V2DF")
- (V4SF "V2SF") (V2DF "DF")])
+ (V4SF "V2SF") (V2DF "DF")
+ (V32HF "V16HF") (V16HF "V8HF") (V8HF "V4HF")])
(define_mode_attr ssehalfvecmodelower
[(V64QI "v32qi") (V32HI "v16hi") (V16SI "v8si") (V8DI "v4di") (V4TI "v2ti")
(V16QI "v8qi") (V8HI "v4hi") (V4SI "v2si")
(V16SF "v8sf") (V8DF "v4df")
(V8SF "v4sf") (V4DF "v2df")
- (V4SF "v2sf")])
+ (V4SF "v2sf")
+ (V32HF "v16hf") (V16HF "v8hf") (V8HF "v4hf")])
-;; Mapping of vector modes ti packed single mode of the same size
+;; Mapping of vector modes to packed single mode of the same size
(define_mode_attr ssePSmode
[(V16SI "V16SF") (V8DF "V16SF")
(V16SF "V16SF") (V8DI "V16SF")
(V4DI "V8SF") (V2DI "V4SF")
(V4TI "V16SF") (V2TI "V8SF") (V1TI "V4SF")
(V8SF "V8SF") (V4SF "V4SF")
- (V4DF "V8SF") (V2DF "V4SF")])
+ (V4DF "V8SF") (V2DF "V4SF")
+ (V32HF "V16SF") (V16HF "V8SF") (V8HF "V4SF")])
(define_mode_attr ssePSmode2
[(V8DI "V8SF") (V4DI "V4SF")])
(V32HI "HI") (V16HI "HI") (V8HI "HI")
(V16SI "SI") (V8SI "SI") (V4SI "SI")
(V8DI "DI") (V4DI "DI") (V2DI "DI")
+ (V32HF "HF") (V16HF "HF") (V8HF "HF")
(V16SF "SF") (V8SF "SF") (V4SF "SF")
(V8DF "DF") (V4DF "DF") (V2DF "DF")
(V4TI "TI") (V2TI "TI")])
(V32HI "hi") (V16HI "hi") (V8HI "hi")
(V16SI "si") (V8SI "si") (V4SI "si")
(V8DI "di") (V4DI "di") (V2DI "di")
+ (V32HF "hf") (V16HF "hf") (V8HF "hf")
(V16SF "sf") (V8SF "sf") (V4SF "sf")
(V8DF "df") (V4DF "df") (V2DF "df")
(V4TI "ti") (V2TI "ti")])
(V32HI "V8HI") (V16HI "V8HI") (V8HI "V8HI")
(V16SI "V4SI") (V8SI "V4SI") (V4SI "V4SI")
(V8DI "V2DI") (V4DI "V2DI") (V2DI "V2DI")
+ (V32HF "V8HF") (V16HF "V8HF") (V8HF "V8HF")
(V16SF "V4SF") (V8SF "V4SF") (V4SF "V4SF")
(V8DF "V2DF") (V4DF "V2DF") (V2DF "V2DF")])
(V16SI "d") (V8SI "d") (V4SI "d")
(V16SF "d") (V8SF "d") (V4SF "d")
(V32HI "d") (V16HI "d") (V8HI "d")
+ (V32HF "d") (V16HF "d") (V8HF "d")
(V64QI "d") (V32QI "d") (V16QI "d")])
;; Number of scalar elements in each vector type
(V64QI "8") (V32QI "8") (V16QI "8")
(V32HI "16") (V16HI "16") (V8HI "16")
(V16SI "32") (V8SI "32") (V4SI "32")
+ (V32HF "16") (V16HF "16") (V8HF "16")
(V16SF "32") (V8SF "32") (V4SF "32")
(V8DF "64") (V4DF "64") (V2DF "64")])
-;; SSE prefix for integer vector modes
+;; SSE prefix for integer and HF vector modes
(define_mode_attr sseintprefix
[(V2DI "p") (V2DF "")
(V4DI "p") (V4DF "")
(V4SI "p") (V4SF "")
(V8SI "p") (V8SF "")
(V16SI "p") (V16SF "")
- (V16QI "p") (V8HI "p")
- (V32QI "p") (V16HI "p")
- (V64QI "p") (V32HI "p")])
+ (V16QI "p") (V8HI "p") (V8HF "p")
+ (V32QI "p") (V16HI "p") (V16HF "p")
+ (V64QI "p") (V32HI "p") (V32HF "p")])
;; SSE scalar suffix for vector modes
(define_mode_attr ssescalarmodesuffix
- [(SF "ss") (DF "sd")
- (V16SF "ss") (V8DF "sd")
- (V8SF "ss") (V4DF "sd")
- (V4SF "ss") (V2DF "sd")
+ [(HF "sh") (SF "ss") (DF "sd")
+ (V32HF "sh") (V16SF "ss") (V8DF "sd")
+ (V16HF "sh") (V8SF "ss") (V4DF "sd")
+ (V8HF "sh") (V4SF "ss") (V2DF "sd")
(V16SI "d") (V8DI "q")
(V8SI "d") (V4DI "q")
(V4SI "d") (V2DI "q")])
;; i128 for integer vectors and TARGET_AVX2, f128 otherwise.
;; i64x4 or f64x4 for 512bit modes.
(define_mode_attr i128
- [(V16SF "f64x4") (V8SF "f128") (V8DF "f64x4") (V4DF "f128")
+ [(V16HF "%~128") (V32HF "i64x4") (V16SF "f64x4") (V8SF "f128")
+ (V8DF "f64x4") (V4DF "f128")
(V64QI "i64x4") (V32QI "%~128") (V32HI "i64x4") (V16HI "%~128")
(V16SI "i64x4") (V8SI "%~128") (V8DI "i64x4") (V4DI "%~128")])
(V32HI "w") (V16HI "w") (V8HI "w")
(V16SI "d") (V8SI "d") (V4SI "d")
(V8DI "q") (V4DI "q") (V2DI "q")
+ (V32HF "w") (V16HF "w") (V8HF "w")
(V16SF "ss") (V8SF "ss") (V4SF "ss")
(V8DF "sd") (V4DF "sd") (V2DF "sd")])
;; Tie mode of assembler operand to mode iterator
(define_mode_attr xtg_mode
- [(V16QI "x") (V8HI "x") (V4SI "x") (V2DI "x") (V4SF "x") (V2DF "x")
- (V32QI "t") (V16HI "t") (V8SI "t") (V4DI "t") (V8SF "t") (V4DF "t")
- (V64QI "g") (V32HI "g") (V16SI "g") (V8DI "g") (V16SF "g") (V8DF "g")])
+ [(V16QI "x") (V8HI "x") (V4SI "x") (V2DI "x")
+ (V8HF "x") (V4SF "x") (V2DF "x")
+ (V32QI "t") (V16HI "t") (V8SI "t") (V4DI "t")
+ (V16HF "t") (V8SF "t") (V4DF "t")
+ (V64QI "g") (V32HI "g") (V16SI "g") (V8DI "g")
+ (V32HF "g") (V16SF "g") (V8DF "g")])
;; Half mask mode for unpacks
(define_mode_attr HALFMASKMODE
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
+(define_insn "<avx512>_blendm<mode>"
+ [(set (match_operand:VF_AVX512FP16 0 "register_operand" "=v,v")
+ (vec_merge:VF_AVX512FP16
+ (match_operand:VF_AVX512FP16 2 "nonimmediate_operand" "vm,vm")
+ (match_operand:VF_AVX512FP16 1 "nonimm_or_0_operand" "0C,v")
+ (match_operand:<avx512fmaskmode> 3 "register_operand" "Yk,Yk")))]
+ "TARGET_AVX512BW"
+ "@
+ vmovdqu<ssescalarsize>\t{%2, %0%{%3%}%N1|%0%{%3%}%N1, %2}
+ vpblendmw\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "<sseinsnmode>")])
+
(define_insn "<avx512>_store<mode>_mask"
[(set (match_operand:V48_AVX512VL 0 "memory_operand" "=m")
(vec_merge:V48_AVX512VL
;; Standard scalar operation patterns which preserve the rest of the
;; vector for combiner.
(define_insn "*<sse>_vm<insn><mode>3"
- [(set (match_operand:VF_128 0 "register_operand" "=x,v")
- (vec_merge:VF_128
- (vec_duplicate:VF_128
+ [(set (match_operand:VFH_128 0 "register_operand" "=x,v")
+ (vec_merge:VFH_128
+ (vec_duplicate:VFH_128
(plusminus:<ssescalarmode>
(vec_select:<ssescalarmode>
- (match_operand:VF_128 1 "register_operand" "0,v")
+ (match_operand:VFH_128 1 "register_operand" "0,v")
(parallel [(const_int 0)]))
(match_operand:<ssescalarmode> 2 "nonimmediate_operand" "xm,vm")))
(match_dup 1)
v<plusminus_mnemonic><ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
- (set_attr "prefix" "orig,vex")
+ (set (attr "prefix")
+ (cond [(eq_attr "alternative" "0")
+ (const_string "orig")
+ (eq_attr "alternative" "1")
+ (if_then_else
+ (match_test "<MODE>mode == V8HFmode")
+ (const_string "evex")
+ (const_string "vex"))
+ ]
+ (const_string "*")))
(set_attr "mode" "<ssescalarmode>")])
(define_insn "<sse>_vm<insn><mode>3<mask_scalar_name><round_scalar_name>"
;; Standard scalar operation patterns which preserve the rest of the
;; vector for combiner.
(define_insn "*<sse>_vm<multdiv_mnemonic><mode>3"
- [(set (match_operand:VF_128 0 "register_operand" "=x,v")
- (vec_merge:VF_128
- (vec_duplicate:VF_128
+ [(set (match_operand:VFH_128 0 "register_operand" "=x,v")
+ (vec_merge:VFH_128
+ (vec_duplicate:VFH_128
(multdiv:<ssescalarmode>
(vec_select:<ssescalarmode>
- (match_operand:VF_128 1 "register_operand" "0,v")
+ (match_operand:VFH_128 1 "register_operand" "0,v")
(parallel [(const_int 0)]))
(match_operand:<ssescalarmode> 2 "nonimmediate_operand" "xm,vm")))
(match_dup 1)
v<multdiv_mnemonic><ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse<multdiv_mnemonic>")
- (set_attr "prefix" "orig,vex")
+ (set (attr "prefix")
+ (cond [(eq_attr "alternative" "0")
+ (const_string "orig")
+ (eq_attr "alternative" "1")
+ (if_then_else
+ (match_test "<MODE>mode == V8HFmode")
+ (const_string "evex")
+ (const_string "vex"))
+ ]
+ (const_string "*")))
(set_attr "btver2_decode" "direct,double")
(set_attr "mode" "<ssescalarmode>")])
;; Standard scalar operation patterns which preserve the rest of the
;; vector for combiner.
(define_insn "*ieee_<ieee_maxmin><mode>3"
- [(set (match_operand:VF_128 0 "register_operand" "=x,v")
- (vec_merge:VF_128
- (vec_duplicate:VF_128
+ [(set (match_operand:VFH_128 0 "register_operand" "=x,v")
+ (vec_merge:VFH_128
+ (vec_duplicate:VFH_128
(unspec:<ssescalarmode>
[(vec_select:<ssescalarmode>
- (match_operand:VF_128 1 "register_operand" "0,v")
+ (match_operand:VFH_128 1 "register_operand" "0,v")
(parallel [(const_int 0)]))
(match_operand:<ssescalarmode> 2 "nonimmediate_operand" "xm,vm")]
IEEE_MAXMIN))
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "btver2_sse_attr" "maxmin")
- (set_attr "prefix" "orig,vex")
+ (set (attr "prefix")
+ (cond [(eq_attr "alternative" "0")
+ (const_string "orig")
+ (eq_attr "alternative" "1")
+ (if_then_else
+ (match_test "<MODE>mode == V8HFmode")
+ (const_string "evex")
+ (const_string "vex"))
+ ]
+ (const_string "*")))
(set_attr "mode" "<ssescalarmode>")])
(define_insn "<sse>_vm<code><mode>3<mask_scalar_name><round_saeonly_scalar_name>"
]
(symbol_ref "true")))])
+;; vmovw also clears the higher bits
+(define_insn "vec_set<mode>_0"
+ [(set (match_operand:VF_AVX512FP16 0 "register_operand" "=v,v")
+ (vec_merge:VF_AVX512FP16
+ (vec_duplicate:VF_AVX512FP16
+ (match_operand:HF 2 "nonimmediate_operand" "r,m"))
+ (match_operand:VF_AVX512FP16 1 "const0_operand" "C,C")
+ (const_int 1)))]
+ "TARGET_AVX512FP16"
+ "@
+ vmovw\t{%k2, %x0|%x0, %k2}
+ vmovw\t{%2, %x0|%x0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "HF")])
+
+(define_insn "*avx512fp16_movsh"
+ [(set (match_operand:V8HF 0 "register_operand" "=v")
+ (vec_merge:V8HF
+ (vec_duplicate:V8HF
+ (match_operand:HF 2 "register_operand" "v"))
+ (match_operand:V8HF 1 "register_operand" "v")
+ (const_int 1)))]
+ "TARGET_AVX512FP16"
+ "vmovsh\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "HF")])
+
+(define_insn "avx512fp16_movsh"
+ [(set (match_operand:V8HF 0 "register_operand" "=v")
+ (vec_merge:V8HF
+ (match_operand:V8HF 2 "register_operand" "v")
+ (match_operand:V8HF 1 "register_operand" "v")
+ (const_int 1)))]
+ "TARGET_AVX512FP16"
+ "vmovsh\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "HF")])
+
;; A subset is vec_setv4sf.
(define_insn "*vec_setv4sf_sse4_1"
[(set (match_operand:V4SF 0 "register_operand" "=Yr,*x,v")
DONE;
})
+(define_expand "vec_setv8hf"
+ [(match_operand:V8HF 0 "register_operand")
+ (match_operand:HF 1 "register_operand")
+ (match_operand 2 "vec_setm_sse41_operand")]
+ "TARGET_SSE"
+{
+ if (CONST_INT_P (operands[2]))
+ ix86_expand_vector_set (false, operands[0], operands[1],
+ INTVAL (operands[2]));
+ else
+ ix86_expand_vector_set_var (operands[0], operands[1], operands[2]);
+ DONE;
+})
+
(define_expand "vec_set<mode>"
[(match_operand:V_256_512 0 "register_operand")
(match_operand:<ssescalarmode> 1 "register_operand")
(set_attr "length_immediate" "1")
(set_attr "mode" "<sseinsnmode>")])
-(define_insn_and_split "vec_extract_lo_v32hi"
- [(set (match_operand:V16HI 0 "nonimmediate_operand" "=v,v,m")
- (vec_select:V16HI
- (match_operand:V32HI 1 "nonimmediate_operand" "v,m,v")
+(define_insn_and_split "vec_extract_lo_<mode>"
+ [(set (match_operand:<ssehalfvecmode> 0 "nonimmediate_operand" "=v,v,m")
+ (vec_select:<ssehalfvecmode>
+ (match_operand:V32_512 1 "nonimmediate_operand" "v,m,v")
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
if (!TARGET_AVX512VL
&& REG_P (operands[0])
&& EXT_REX_SSE_REG_P (operands[1]))
- operands[0] = lowpart_subreg (V32HImode, operands[0], V16HImode);
+ operands[0] = lowpart_subreg (<MODE>mode, operands[0],
+ <ssehalfvecmode>mode);
else
- operands[1] = gen_lowpart (V16HImode, operands[1]);
+ operands[1] = gen_lowpart (<ssehalfvecmode>mode, operands[1]);
}
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
-(define_insn "vec_extract_hi_v32hi"
- [(set (match_operand:V16HI 0 "nonimmediate_operand" "=vm")
- (vec_select:V16HI
- (match_operand:V32HI 1 "register_operand" "v")
+(define_insn "vec_extract_hi_<mode>"
+ [(set (match_operand:<ssehalfvecmode> 0 "nonimmediate_operand" "=vm")
+ (vec_select:<ssehalfvecmode>
+ (match_operand:V32_512 1 "register_operand" "v")
(parallel [(const_int 16) (const_int 17)
(const_int 18) (const_int 19)
(const_int 20) (const_int 21)
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
-(define_insn_and_split "vec_extract_lo_v16hi"
- [(set (match_operand:V8HI 0 "nonimmediate_operand" "=v,m")
- (vec_select:V8HI
- (match_operand:V16HI 1 "nonimmediate_operand" "vm,v")
+(define_insn_and_split "vec_extract_lo_<mode>"
+ [(set (match_operand:<ssehalfvecmode> 0 "nonimmediate_operand" "=v,m")
+ (vec_select:<ssehalfvecmode>
+ (match_operand:V16_256 1 "nonimmediate_operand" "vm,v")
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
"#"
"&& reload_completed"
[(set (match_dup 0) (match_dup 1))]
- "operands[1] = gen_lowpart (V8HImode, operands[1]);")
+ "operands[1] = gen_lowpart (<ssehalfvecmode>mode, operands[1]);")
-(define_insn "vec_extract_hi_v16hi"
- [(set (match_operand:V8HI 0 "nonimmediate_operand" "=xm,vm,vm")
- (vec_select:V8HI
- (match_operand:V16HI 1 "register_operand" "x,v,v")
+(define_insn "vec_extract_hi_<mode>"
+ [(set (match_operand:<ssehalfvecmode> 0 "nonimmediate_operand" "=xm,vm,vm")
+ (vec_select:<ssehalfvecmode>
+ (match_operand:V16_256 1 "register_operand" "x,v,v")
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(set_attr "prefix" "vex,evex,evex")
(set_attr "mode" "OI")])
+;; NB: *vec_extract<mode>_0 must be placed before *vec_extracthf.
+;; Otherwise, it will be ignored.
+(define_insn_and_split "*vec_extract<mode>_0"
+ [(set (match_operand:HF 0 "nonimmediate_operand" "=v,m,r")
+ (vec_select:HF
+ (match_operand:VF_AVX512FP16 1 "nonimmediate_operand" "vm,v,m")
+ (parallel [(const_int 0)])))]
+ "TARGET_AVX512FP16 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+ "operands[1] = gen_lowpart (HFmode, operands[1]);")
+
+(define_insn "*vec_extracthf"
+ [(set (match_operand:HF 0 "register_sse4nonimm_operand" "=r,m")
+ (vec_select:HF
+ (match_operand:V8HF 1 "register_operand" "v,v")
+ (parallel
+ [(match_operand:SI 2 "const_0_to_7_operand")])))]
+ "TARGET_AVX512FP16"
+ "@
+ vpextrw\t{%2, %1, %k0|%k0, %1, %2}
+ vpextrw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix" "maybe_evex")
+ (set_attr "mode" "TI")])
+
;; Modes handled by vec_extract patterns.
(define_mode_iterator VEC_EXTRACT_MODE
[(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512FP16") (V16HF "TARGET_AVX512FP16")
+ (V8HF "TARGET_AVX512FP16")
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF
(V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")])
;; Modes handled by pinsr patterns.
(define_mode_iterator PINSR_MODE
- [(V16QI "TARGET_SSE4_1") V8HI
+ [(V16QI "TARGET_SSE4_1") V8HI (V8HF "TARGET_AVX512FP16")
(V4SI "TARGET_SSE4_1")
(V2DI "TARGET_SSE4_1 && TARGET_64BIT")])
(define_mode_attr sse2p4_1
- [(V16QI "sse4_1") (V8HI "sse2")
+ [(V16QI "sse4_1") (V8HI "sse2") (V8HF "sse4_1")
(V4SI "sse4_1") (V2DI "sse4_1")])
(define_mode_attr pinsr_evex_isa
- [(V16QI "avx512bw") (V8HI "avx512bw")
+ [(V16QI "avx512bw") (V8HI "avx512bw") (V8HF "avx512bw")
(V4SI "avx512dq") (V2DI "avx512dq")])
;; sse4_1_pinsrd must come before sse2_loadld since it is preferred.
case 2:
case 4:
if (GET_MODE_SIZE (<ssescalarmode>mode) < GET_MODE_SIZE (SImode))
- return "vpinsr<ssemodesuffix>\t{%3, %k2, %1, %0|%0, %1, %k2, %3}";
+ {
+ if (<MODE>mode == V8HFmode)
+ return "vpinsrw\t{%3, %k2, %1, %0|%0, %1, %k2, %3}";
+ else
+ return "vpinsr<ssemodesuffix>\t{%3, %k2, %1, %0|%0, %1, %k2, %3}";
+ }
/* FALLTHRU */
case 3:
case 5:
- return "vpinsr<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
+ if (<MODE>mode == V8HFmode)
+ return "vpinsrw\t{%3, %2, %1, %0|%0, %1, %2, %3}";
+ else
+ return "vpinsr<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
default:
gcc_unreachable ();
}
[(V64QI "avx512bw") (V32QI "avx512bw") (V16QI "avx512bw")
(V32HI "avx512bw") (V16HI "avx512bw") (V8HI "avx512bw")
(V16SI "avx512f") (V8SI "avx512f") (V4SI "avx512f")
- (V8DI "avx512f") (V4DI "avx512f") (V2DI "avx512f")])
+ (V8DI "avx512f") (V4DI "avx512f") (V2DI "avx512f")
+ (V32HF "avx512bw") (V16HF "avx512bw") (V8HF "avx512bw")])
(define_insn "avx2_pbroadcast<mode>"
- [(set (match_operand:VI 0 "register_operand" "=x,v")
- (vec_duplicate:VI
+ [(set (match_operand:VIHF 0 "register_operand" "=x,v")
+ (vec_duplicate:VIHF
(vec_select:<ssescalarmode>
(match_operand:<ssexmmmode> 1 "nonimmediate_operand" "xm,vm")
(parallel [(const_int 0)]))))]
"TARGET_AVX2"
- "vpbroadcast<ssemodesuffix>\t{%1, %0|%0, %<iptr>1}"
+ "vpbroadcast<sseintmodesuffix>\t{%1, %0|%0, %<iptr>1}"
[(set_attr "isa" "*,<pbroadcast_evex_isa>")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx2_pbroadcast<mode>_1"
- [(set (match_operand:VI_256 0 "register_operand" "=x,x,v,v")
- (vec_duplicate:VI_256
+ [(set (match_operand:VIHF_256 0 "register_operand" "=x,x,v,v")
+ (vec_duplicate:VIHF_256
(vec_select:<ssescalarmode>
- (match_operand:VI_256 1 "nonimmediate_operand" "m,x,m,v")
+ (match_operand:VIHF_256 1 "nonimmediate_operand" "m,x,m,v")
(parallel [(const_int 0)]))))]
"TARGET_AVX2"
"@
- vpbroadcast<ssemodesuffix>\t{%1, %0|%0, %<iptr>1}
- vpbroadcast<ssemodesuffix>\t{%x1, %0|%0, %x1}
- vpbroadcast<ssemodesuffix>\t{%1, %0|%0, %<iptr>1}
- vpbroadcast<ssemodesuffix>\t{%x1, %0|%0, %x1}"
+ vpbroadcast<sseintmodesuffix>\t{%1, %0|%0, %<iptr>1}
+ vpbroadcast<sseintmodesuffix>\t{%x1, %0|%0, %x1}
+ vpbroadcast<sseintmodesuffix>\t{%1, %0|%0, %<iptr>1}
+ vpbroadcast<sseintmodesuffix>\t{%x1, %0|%0, %x1}"
[(set_attr "isa" "*,*,<pbroadcast_evex_isa>,<pbroadcast_evex_isa>")
(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "mode" "V4DF")])
(define_insn "<avx512>_vec_dup<mode>_1"
- [(set (match_operand:VI_AVX512BW 0 "register_operand" "=v,v")
- (vec_duplicate:VI_AVX512BW
+ [(set (match_operand:VIHF_AVX512BW 0 "register_operand" "=v,v")
+ (vec_duplicate:VIHF_AVX512BW
(vec_select:<ssescalarmode>
- (match_operand:VI_AVX512BW 1 "nonimmediate_operand" "v,m")
+ (match_operand:VIHF_AVX512BW 1 "nonimmediate_operand" "v,m")
(parallel [(const_int 0)]))))]
"TARGET_AVX512F"
"@
- vpbroadcast<ssemodesuffix>\t{%x1, %0|%0, %x1}
- vpbroadcast<ssemodesuffix>\t{%x1, %0|%0, %<iptr>1}"
+ vpbroadcast<sseintmodesuffix>\t{%x1, %0|%0, %x1}
+ vpbroadcast<sseintmodesuffix>\t{%x1, %0|%0, %<iptr>1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "mode" "<sseinsnmode>")])
(set_attr "mode" "<sseinsnmode>")])
(define_insn "<avx512>_vec_dup<mode><mask_name>"
- [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v")
- (vec_duplicate:VI12_AVX512VL
+ [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v")
+ (vec_duplicate:VI12HF_AVX512VL
(vec_select:<ssescalarmode>
(match_operand:<ssexmmmode> 1 "nonimmediate_operand" "vm")
(parallel [(const_int 0)]))))]
(set_attr "mode" "<sseinsnmode>")])
(define_insn "<mask_codefor><avx512>_vec_dup_gpr<mode><mask_name>"
- [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v,v")
- (vec_duplicate:VI12_AVX512VL
+ [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v,v")
+ (vec_duplicate:VI12HF_AVX512VL
(match_operand:<ssescalarmode> 1 "nonimmediate_operand" "vm,r")))]
"TARGET_AVX512BW"
"@
[(V8SF "ss") (V4DF "sd") (V8SI "ss") (V4DI "sd")])
;; Modes handled by AVX2 vec_dup patterns.
(define_mode_iterator AVX2_VEC_DUP_MODE
- [V32QI V16QI V16HI V8HI V8SI V4SI])
+ [V32QI V16QI V16HI V8HI V8SI V4SI V16HF V8HF])
(define_insn "*vec_dup<mode>"
[(set (match_operand:AVX2_VEC_DUP_MODE 0 "register_operand" "=x,x,v")
(set_attr "prefix" "vex")
(set_attr "mode" "<sseinsnmode>")])
-(define_insn "vec_set_lo_v16hi"
- [(set (match_operand:V16HI 0 "register_operand" "=x,v")
- (vec_concat:V16HI
- (match_operand:V8HI 2 "nonimmediate_operand" "xm,vm")
- (vec_select:V8HI
- (match_operand:V16HI 1 "register_operand" "x,v")
+(define_insn "vec_set_lo_<mode>"
+ [(set (match_operand:V16_256 0 "register_operand" "=x,v")
+ (vec_concat:V16_256
+ (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xm,vm")
+ (vec_select:<ssehalfvecmode>
+ (match_operand:V16_256 1 "register_operand" "x,v")
(parallel [(const_int 8) (const_int 9)
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(set_attr "prefix" "vex,evex")
(set_attr "mode" "OI")])
-(define_insn "vec_set_hi_v16hi"
- [(set (match_operand:V16HI 0 "register_operand" "=x,v")
- (vec_concat:V16HI
- (vec_select:V8HI
- (match_operand:V16HI 1 "register_operand" "x,v")
+(define_insn "vec_set_hi_<mode>"
+ [(set (match_operand:V16_256 0 "register_operand" "=x,v")
+ (vec_concat:V16_256
+ (vec_select:<ssehalfvecmode>
+ (match_operand:V16_256 1 "register_operand" "x,v")
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))
- (match_operand:V8HI 2 "nonimmediate_operand" "xm,vm")))]
+ (match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xm,vm")))]
"TARGET_AVX"
"@
vinsert%~128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}
(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512FP16") (V16HF "TARGET_AVX512FP16")
+ (V8HF "TARGET_AVX512FP16")
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")
(V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")])
(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX")
+ (V32HF "TARGET_AVX512FP16") (V16HF "TARGET_AVX512FP16")
+ (V8HF "TARGET_AVX512FP16")
(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX")
(V4TI "TARGET_AVX512F")])