(A), (D)))
#endif /* __OPTIMIZE__ */
+/* Intrinsics for v[add,sub,mul,div]sh.  */
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_sh (__m128h __A, __m128h __B)
+{
+ __A[0] += __B[0];
+ return __A;
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vaddsh_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vaddsh_v8hf_mask (__B, __C, _mm_setzero_ph (),
+ __A);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_sh (__m128h __A, __m128h __B)
+{
+ __A[0] -= __B[0];
+ return __A;
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vsubsh_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vsubsh_v8hf_mask (__B, __C, _mm_setzero_ph (),
+ __A);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_sh (__m128h __A, __m128h __B)
+{
+ __A[0] *= __B[0];
+ return __A;
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vmulsh_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vmulsh_v8hf_mask (__B, __C, _mm_setzero_ph (), __A);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_sh (__m128h __A, __m128h __B)
+{
+ __A[0] /= __B[0];
+ return __A;
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_sh (__m128h __A, __mmask8 __B, __m128h __C, __m128h __D)
+{
+ return __builtin_ia32_vdivsh_v8hf_mask (__C, __D, __A, __B);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_sh (__mmask8 __A, __m128h __B, __m128h __C)
+{
+ return __builtin_ia32_vdivsh_v8hf_mask (__B, __C, _mm_setzero_ph (),
+ __A);
+}
+
+#ifdef __OPTIMIZE__
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_add_round_sh (__m128h __A, __m128h __B, const int __C)
+{
+ return __builtin_ia32_vaddsh_v8hf_mask_round (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __C);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_add_round_sh (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return __builtin_ia32_vaddsh_v8hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_add_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return __builtin_ia32_vaddsh_v8hf_mask_round (__B, __C,
+ _mm_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_sub_round_sh (__m128h __A, __m128h __B, const int __C)
+{
+ return __builtin_ia32_vsubsh_v8hf_mask_round (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __C);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_sub_round_sh (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return __builtin_ia32_vsubsh_v8hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_sub_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return __builtin_ia32_vsubsh_v8hf_mask_round (__B, __C,
+ _mm_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mul_round_sh (__m128h __A, __m128h __B, const int __C)
+{
+ return __builtin_ia32_vmulsh_v8hf_mask_round (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __C);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_mul_round_sh (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return __builtin_ia32_vmulsh_v8hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_mul_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return __builtin_ia32_vmulsh_v8hf_mask_round (__B, __C,
+ _mm_setzero_ph (),
+ __A, __D);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_div_round_sh (__m128h __A, __m128h __B, const int __C)
+{
+ return __builtin_ia32_vdivsh_v8hf_mask_round (__A, __B,
+ _mm_setzero_ph (),
+ (__mmask8) -1, __C);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_mask_div_round_sh (__m128h __A, __mmask8 __B, __m128h __C,
+ __m128h __D, const int __E)
+{
+ return __builtin_ia32_vdivsh_v8hf_mask_round (__C, __D, __A, __B, __E);
+}
+
+extern __inline __m128h
+__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+_mm_maskz_div_round_sh (__mmask8 __A, __m128h __B, __m128h __C,
+ const int __D)
+{
+ return __builtin_ia32_vdivsh_v8hf_mask_round (__B, __C,
+ _mm_setzero_ph (),
+ __A, __D);
+}
+#else
+#define _mm_add_round_sh(A, B, C) \
+ ((__m128h)__builtin_ia32_vaddsh_v8hf_mask_round ((A), (B), \
+ _mm_setzero_ph (), \
+ (__mmask8)-1, (C)))
+
+#define _mm_mask_add_round_sh(A, B, C, D, E) \
+ ((__m128h)__builtin_ia32_vaddsh_v8hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm_maskz_add_round_sh(A, B, C, D) \
+ ((__m128h)__builtin_ia32_vaddsh_v8hf_mask_round ((B), (C), \
+ _mm_setzero_ph (), \
+ (A), (D)))
+
+#define _mm_sub_round_sh(A, B, C) \
+ ((__m128h)__builtin_ia32_vsubsh_v8hf_mask_round ((A), (B), \
+ _mm_setzero_ph (), \
+ (__mmask8)-1, (C)))
+
+#define _mm_mask_sub_round_sh(A, B, C, D, E) \
+ ((__m128h)__builtin_ia32_vsubsh_v8hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm_maskz_sub_round_sh(A, B, C, D) \
+ ((__m128h)__builtin_ia32_vsubsh_v8hf_mask_round ((B), (C), \
+ _mm_setzero_ph (), \
+ (A), (D)))
+
+#define _mm_mul_round_sh(A, B, C) \
+ ((__m128h)__builtin_ia32_vmulsh_v8hf_mask_round ((A), (B), \
+ _mm_setzero_ph (), \
+ (__mmask8)-1, (C)))
+
+#define _mm_mask_mul_round_sh(A, B, C, D, E) \
+ ((__m128h)__builtin_ia32_vmulsh_v8hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm_maskz_mul_round_sh(A, B, C, D) \
+ ((__m128h)__builtin_ia32_vmulsh_v8hf_mask_round ((B), (C), \
+ _mm_setzero_ph (), \
+ (A), (D)))
+
+#define _mm_div_round_sh(A, B, C) \
+ ((__m128h)__builtin_ia32_vdivsh_v8hf_mask_round ((A), (B), \
+ _mm_setzero_ph (), \
+ (__mmask8)-1, (C)))
+
+#define _mm_mask_div_round_sh(A, B, C, D, E) \
+ ((__m128h)__builtin_ia32_vdivsh_v8hf_mask_round ((C), (D), (A), (B), (E)))
+
+#define _mm_maskz_div_round_sh(A, B, C, D) \
+ ((__m128h)__builtin_ia32_vdivsh_v8hf_mask_round ((B), (C), \
+ _mm_setzero_ph (), \
+ (A), (D)))
+#endif /* __OPTIMIZE__ */
+
#ifdef __DISABLE_AVX512FP16__
#undef __DISABLE_AVX512FP16__
#pragma GCC pop_options
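
For reference, a minimal usage sketch of the new scalar intrinsics
(hypothetical example, not part of the patch; assumes a compiler with
this series applied and -mavx512fp16):

#include <immintrin.h>

/* Hypothetical demo, not part of the patch.  */
__m128h
demo_add_sh (__m128h a, __m128h b, __m128h src, __mmask8 m)
{
  /* r[0] = a[0] + b[0]; r[1..7] = a[1..7].  */
  __m128h r = _mm_add_sh (a, b);

  /* Merge-masked: r2[0] = (m & 1) ? a[0] + b[0] : src[0];
     r2[1..7] = a[1..7].  */
  __m128h r2 = _mm_mask_add_sh (src, m, a, b);

  /* Zero-masked: r3[0] = (m & 1) ? a[0] + b[0] : 0;
     r3[1..7] = a[1..7].  */
  __m128h r3 = _mm_maskz_add_sh (m, a, b);

  return _mm_add_sh (r, _mm_add_sh (r2, r3));
}
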
# FP16 builtins
DEF_FUNCTION_TYPE (V8HF, V8HI)
+DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, INT)
DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI)
+DEF_FUNCTION_TYPE (V8HF, V8HF, V8HF, V8HF, UQI, INT)
DEF_FUNCTION_TYPE (V16HF, V16HF, V16HF, V16HF, UHI)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, INT)
DEF_FUNCTION_TYPE (V32HF, V32HF, V32HF, V32HF, USI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv8hf3_mask, "__builtin_ia32_vdivph_v8hf_mask", IX86_BUILTIN_VDIVPH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv16hf3_mask, "__builtin_ia32_vdivph_v16hf_mask", IX86_BUILTIN_VDIVPH_V16HF_MASK, UNKNOWN, (int) V16HF_FTYPE_V16HF_V16HF_V16HF_UHI)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv32hf3_mask, "__builtin_ia32_vdivph_v32hf_mask", IX86_BUILTIN_VDIVPH_V32HF_MASK, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmaddv8hf3_mask, "__builtin_ia32_vaddsh_v8hf_mask", IX86_BUILTIN_VADDSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsubv8hf3_mask, "__builtin_ia32_vsubsh_v8hf_mask", IX86_BUILTIN_VSUBSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmmulv8hf3_mask, "__builtin_ia32_vmulsh_v8hf_mask", IX86_BUILTIN_VMULSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmdivv8hf3_mask, "__builtin_ia32_vdivsh_v8hf_mask", IX86_BUILTIN_VDIVSH_V8HF_MASK, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI)
/* Builtins with rounding support. */
BDESC_END (ARGS, ROUND_ARGS)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_subv32hf3_mask_round, "__builtin_ia32_vsubph_v32hf_mask_round", IX86_BUILTIN_VSUBPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_mulv32hf3_mask_round, "__builtin_ia32_vmulph_v32hf_mask_round", IX86_BUILTIN_VMULPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_divv32hf3_mask_round, "__builtin_ia32_vdivph_v32hf_mask_round", IX86_BUILTIN_VDIVPH_V32HF_MASK_ROUND, UNKNOWN, (int) V32HF_FTYPE_V32HF_V32HF_V32HF_USI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmaddv8hf3_mask_round, "__builtin_ia32_vaddsh_v8hf_mask_round", IX86_BUILTIN_VADDSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmsubv8hf3_mask_round, "__builtin_ia32_vsubsh_v8hf_mask_round", IX86_BUILTIN_VSUBSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmmulv8hf3_mask_round, "__builtin_ia32_vmulsh_v8hf_mask_round", IX86_BUILTIN_VMULSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
+BDESC (0, OPTION_MASK_ISA2_AVX512FP16, CODE_FOR_avx512fp16_vmdivv8hf3_mask_round, "__builtin_ia32_vdivsh_v8hf_mask_round", IX86_BUILTIN_VDIVSH_V8HF_MASK_ROUND, UNKNOWN, (int) V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT)
BDESC_END (ROUND_ARGS, MULTI_ARG)
nargs = 2;
break;
case V32HF_FTYPE_V32HF_V32HF_INT:
+ case V8HF_FTYPE_V8HF_V8HF_INT:
case V4SF_FTYPE_V4SF_UINT_INT:
case V4SF_FTYPE_V4SF_UINT64_INT:
case V2DF_FTYPE_V2DF_UINT64_INT:
case V4SF_FTYPE_V4SF_V4SF_V4SF_QI_INT:
case V4SF_FTYPE_V4SF_V2DF_V4SF_QI_INT:
case V4SF_FTYPE_V4SF_V2DF_V4SF_UQI_INT:
+ case V8HF_FTYPE_V8HF_V8HF_V8HF_UQI_INT:
nargs = 5;
break;
case V16SF_FTYPE_V16SF_INT_V16SF_HI_INT:
(define_mode_iterator VF2_512_256VL
[V8DF (V4DF "TARGET_AVX512VL")])
-;; All 128bit vector float modes
+;; All 128bit vector SF/DF modes
(define_mode_iterator VF_128
[V4SF (V2DF "TARGET_SSE2")])
(set_attr "mode" "<ssescalarmode>")])
(define_insn "<sse>_vm<insn><mode>3<mask_scalar_name><round_scalar_name>"
- [(set (match_operand:VF_128 0 "register_operand" "=x,v")
- (vec_merge:VF_128
- (plusminus:VF_128
- (match_operand:VF_128 1 "register_operand" "0,v")
- (match_operand:VF_128 2 "nonimmediate_operand" "xm,<round_scalar_constraint>"))
+ [(set (match_operand:VFH_128 0 "register_operand" "=x,v")
+ (vec_merge:VFH_128
+ (plusminus:VFH_128
+ (match_operand:VFH_128 1 "register_operand" "0,v")
+ (match_operand:VFH_128 2 "nonimmediate_operand" "xm,<round_scalar_constraint>"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
(set_attr "mode" "<ssescalarmode>")])
(define_insn "<sse>_vm<multdiv_mnemonic><mode>3<mask_scalar_name><round_scalar_name>"
- [(set (match_operand:VF_128 0 "register_operand" "=x,v")
- (vec_merge:VF_128
- (multdiv:VF_128
- (match_operand:VF_128 1 "register_operand" "0,v")
- (match_operand:VF_128 2 "nonimmediate_operand" "xm,<round_scalar_constraint>"))
+ [(set (match_operand:VFH_128 0 "register_operand" "=x,v")
+ (vec_merge:VFH_128
+ (multdiv:VFH_128
+ (match_operand:VFH_128 1 "register_operand" "0,v")
+ (match_operand:VFH_128 2 "nonimmediate_operand" "xm,<round_scalar_constraint>"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
#define __builtin_ia32_vsubph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmulph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vdivph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vaddsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vaddsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, 8)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)
#define __builtin_ia32_vsubph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmulph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vdivph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vaddsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vaddsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, 8)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)
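
The fixed immediate 8 in these wrappers is _MM_FROUND_TO_NEAREST_INT
| _MM_FROUND_NO_EXC (0 | 8), a valid embedded-rounding operand, so the
folded calls still pass the builtins' immediate checks.  A minimal
caller sketch (hypothetical, not part of the patch):

#include <immintrin.h>

/* Hypothetical caller: round-to-nearest with exceptions suppressed.  */
__m128h
add_rn_sae (__m128h a, __m128h b)
{
  return _mm_add_round_sh (a, b,
			   _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
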
test_2 (_mm512_sub_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_mul_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_div_round_ph, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_add_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_sub_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_mul_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_div_round_sh, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_add_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_sub_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_mul_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_div_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
+test_3 (_mm_maskz_add_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_sub_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_mul_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_div_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_sub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_mul_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_div_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
+test_4 (_mm_mask_add_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_sub_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_mul_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_div_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
/* shaintrin.h */
test_2 (_mm_sha1rnds4_epu32, __m128i, __m128i, __m128i, 1)
test_2 (_mm512_sub_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_mul_round_ph, __m512h, __m512h, __m512h, 8)
test_2 (_mm512_div_round_ph, __m512h, __m512h, __m512h, 8)
+test_2 (_mm_add_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_sub_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_mul_round_sh, __m128h, __m128h, __m128h, 8)
+test_2 (_mm_div_round_sh, __m128h, __m128h, __m128h, 8)
test_3 (_mm512_maskz_add_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_sub_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_mul_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
test_3 (_mm512_maskz_div_round_ph, __m512h, __mmask32, __m512h, __m512h, 8)
+test_3 (_mm_maskz_add_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_sub_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_mul_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
+test_3 (_mm_maskz_div_round_sh, __m128h, __mmask8, __m128h, __m128h, 8)
test_4 (_mm512_mask_add_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_sub_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_mul_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
test_4 (_mm512_mask_div_round_ph, __m512h, __m512h, __mmask32, __m512h, __m512h, 8)
+test_4 (_mm_mask_add_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_sub_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_mul_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
+test_4 (_mm_mask_div_round_sh, __m128h, __m128h, __mmask8, __m128h, __m128h, 8)
/* shaintrin.h */
test_2 (_mm_sha1rnds4_epu32, __m128i, __m128i, __m128i, 1)
#define __builtin_ia32_vsubph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vmulph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulph_v32hf_mask_round(A, B, C, D, 8)
#define __builtin_ia32_vdivph_v32hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivph_v32hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vaddsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vaddsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vsubsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vmulsh_v8hf_mask_round(A, B, C, D, 8)
+#define __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, E) __builtin_ia32_vdivsh_v8hf_mask_round(A, B, C, D, 8)
/* vpclmulqdqintrin.h */
#define __builtin_ia32_vpclmulqdq_v4di(A, B, C) __builtin_ia32_vpclmulqdq_v4di(A, B, 1)