x86-64: Remove assembler AVX512DQ check
author     H.J. Lu <hjl.tools@gmail.com>
           Fri, 20 Aug 2021 12:47:52 +0000 (05:47 -0700)
committer  H.J. Lu <hjl.tools@gmail.com>
           Tue, 24 Aug 2021 14:05:35 +0000 (07:05 -0700)
The minimum GNU binutils requirement is 2.25, which supports AVX512DQ.
Remove the assembler AVX512DQ check.
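
For reference, the removed check probed the assembler by compiling a single AVX512DQ instruction. A minimal standalone sketch of that probe, outside the configure machinery, is shown below; the conftest contents come from the deleted configure fragment in this diff, while driving it through ${CC-cc} -c, the -o conftest.o output name, and the 2>/dev/null redirection are illustrative assumptions about the local environment.

# Sketch: assemble one AVX512DQ instruction with the system compiler driver;
# success means the assembler understands AVX512DQ (binutils >= 2.25).
cat > conftest.s <<\EOF
        vandpd (%rax), %zmm6, %zmm1
EOF
if ${CC-cc} -c conftest.s -o conftest.o 2>/dev/null; then
  echo "assembler supports AVX512DQ"
else
  echo "assembler lacks AVX512DQ (binutils older than 2.25?)"
fi
rm -f conftest.s conftest.o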

15 files changed:
config.h.in
sysdeps/x86_64/configure
sysdeps/x86_64/configure.ac
sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S

diff --git a/config.h.in b/config.h.in
index 3752f9a..964873f 100644 (file)
@@ -62,9 +62,6 @@
 /* Define if _rtld_local structure should be forced into .sdata section.  */
 #undef HAVE_SDATA_SECTION
 
-/* Define if assembler supports AVX512DQ.  */
-#undef  HAVE_AVX512DQ_ASM_SUPPORT
-
 /* Define if assembler supports z10 zarch instructions as default on S390.  */
 #undef  HAVE_S390_MIN_Z10_ZARCH_ASM_SUPPORT
 
diff --git a/sysdeps/x86_64/configure b/sysdeps/x86_64/configure
index d81accd..585279f 100755 (executable)
@@ -1,33 +1,6 @@
 # This file is generated from configure.ac by Autoconf.  DO NOT EDIT!
  # Local configure fragment for sysdeps/x86_64.
 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for AVX512DQ support in assembler" >&5
-$as_echo_n "checking for AVX512DQ support in assembler... " >&6; }
-if ${libc_cv_asm_avx512dq+:} false; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat > conftest.s <<\EOF
-        vandpd (%rax), %zmm6, %zmm1
-EOF
-if { ac_try='${CC-cc} -c $ASFLAGS conftest.s 1>&5'
-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
-  (eval $ac_try) 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then
-  libc_cv_asm_avx512dq=yes
-else
-  libc_cv_asm_avx512dq=no
-fi
-rm -f conftest*
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_asm_avx512dq" >&5
-$as_echo "$libc_cv_asm_avx512dq" >&6; }
-if test $libc_cv_asm_avx512dq = yes; then
-  $as_echo "#define HAVE_AVX512DQ_ASM_SUPPORT 1" >>confdefs.h
-
-fi
-
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking -mprefer-vector-width=128" >&5
 $as_echo_n "checking -mprefer-vector-width=128... " >&6; }
 if ${libc_cv_cc_mprefer_vector_width+:} false; then :
diff --git a/sysdeps/x86_64/configure.ac b/sysdeps/x86_64/configure.ac
index 41baed6..29e1403 100644 (file)
@@ -1,21 +1,6 @@
 GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
 # Local configure fragment for sysdeps/x86_64.
 
-dnl Check if asm supports AVX512DQ.
-AC_CACHE_CHECK(for AVX512DQ support in assembler, libc_cv_asm_avx512dq, [dnl
-cat > conftest.s <<\EOF
-        vandpd (%rax), %zmm6, %zmm1
-EOF
-if AC_TRY_COMMAND(${CC-cc} -c $ASFLAGS conftest.s 1>&AS_MESSAGE_LOG_FD); then
-  libc_cv_asm_avx512dq=yes
-else
-  libc_cv_asm_avx512dq=no
-fi
-rm -f conftest*])
-if test $libc_cv_asm_avx512dq = yes; then
-  AC_DEFINE(HAVE_AVX512DQ_ASM_SUPPORT)
-fi
-
 dnl Check if -mprefer-vector-width=128 works.
 AC_CACHE_CHECK(-mprefer-vector-width=128, libc_cv_cc_mprefer_vector_width, [dnl
 LIBC_TRY_CC_OPTION([-mprefer-vector-width=128],
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_cos8_core_avx512.S
index 58e588a..0fcb912 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY (_ZGVeN8v_cos_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
-#else
 /*
   ALGORITHM DESCRIPTION:
 
@@ -232,13 +229,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
         call      JUMPTARGET(cos)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_cos_knl)
 
 ENTRY (_ZGVeN8v_cos_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -454,5 +447,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_cos
 
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN8v_cos_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_exp8_core_avx512.S
index 5181b12..c40d82b 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY (_ZGVeN8v_exp_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -234,13 +231,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
         call      JUMPTARGET(exp)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_exp_knl)
 
 ENTRY (_ZGVeN8v_exp_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -452,5 +445,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_exp
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN8v_exp_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_log8_core_avx512.S
index f5f117d..5596c95 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY (_ZGVeN8v_log_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_log
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -233,13 +230,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
         call      JUMPTARGET(log)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_log_knl)
 
 ENTRY (_ZGVeN8v_log_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_log
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -459,5 +452,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_log
 
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN8v_log_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_pow8_core_avx512.S
index d70b4d6..6062ec8 100644 (file)
@@ -82,9 +82,6 @@
 
        .text
 ENTRY (_ZGVeN8vv_pow_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -405,13 +402,9 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_1_7
 
-#endif
 END (_ZGVeN8vv_pow_knl)
 
 ENTRY (_ZGVeN8vv_pow_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -737,5 +730,4 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN4vv_pow
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN8vv_pow_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sin8_core_avx512.S
index 48d251d..2981f15 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY (_ZGVeN8v_sin_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -233,13 +230,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
         call      JUMPTARGET(sin)
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN8v_sin_knl)
 
 ENTRY (_ZGVeN8v_sin_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -456,5 +449,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN4v_sin
 
         vmovsd    %xmm0, 1216(%rsp,%r15)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN8v_sin_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_d_sincos8_core_avx512.S
index a4944a4..4ad3663 100644 (file)
@@ -37,9 +37,6 @@
 
        .text
 ENTRY (_ZGVeN8vl8l8_sincos_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -303,14 +300,10 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_1_7
 
-#endif
 END (_ZGVeN8vl8l8_sincos_knl)
 libmvec_hidden_def(_ZGVeN8vl8l8_sincos_knl)
 
 ENTRY (_ZGVeN8vl8l8_sincos_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -585,7 +578,6 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN4vl8l8_sincos
         vmovsd    %xmm0, 1280(%rsp,%r15)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN8vl8l8_sincos_skx)
 libmvec_hidden_def(_ZGVeN8vl8l8_sincos_skx)
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_cosf16_core_avx512.S
index fe8474f..b7d79ef 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY (_ZGVeN16v_cosf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
-#else
 /*
   ALGORITHM DESCRIPTION:
 
@@ -235,13 +232,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
         call      JUMPTARGET(cosf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16v_cosf_knl)
 
 ENTRY (_ZGVeN16v_cosf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
-#else
 /*
   ALGORITHM DESCRIPTION:
 
@@ -451,5 +444,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_cosf
         call      JUMPTARGET(cosf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16v_cosf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_expf16_core_avx512.S
index 229b782..9f03b9b 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY (_ZGVeN16v_expf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -223,13 +220,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
 
-#endif
 END (_ZGVeN16v_expf_knl)
 
 ENTRY (_ZGVeN16v_expf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -438,5 +431,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_expf
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN16v_expf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_logf16_core_avx512.S
index fa2aae9..2ba38b0 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY (_ZGVeN16v_logf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -207,13 +204,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
         call      JUMPTARGET(logf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16v_logf_knl)
 
 ENTRY (_ZGVeN16v_logf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -407,5 +400,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_logf
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
 
-#endif
 END (_ZGVeN16v_logf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_powf16_core_avx512.S
index 6aea2a4..7f0272c 100644 (file)
@@ -82,9 +82,6 @@
 
        .text
 ENTRY (_ZGVeN16vv_powf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -355,13 +352,9 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
         call      JUMPTARGET(powf)
         vmovss    %xmm0, 1280(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16vv_powf_knl)
 
 ENTRY (_ZGVeN16vv_powf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -641,5 +634,4 @@ WRAPPER_IMPL_AVX512_ff _ZGVdN8vv_powf
         call      JUMPTARGET(powf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16vv_powf_skx)
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
index a446c50..e1d0154 100644 (file)
@@ -50,9 +50,6 @@
 
        .text
 ENTRY (_ZGVeN16vl4l4_sincosf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN8vl4l4_sincosf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -266,14 +263,10 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vl4l4_sincosf
 
         vmovss    %xmm0, 1280(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END (_ZGVeN16vl4l4_sincosf_knl)
 libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_knl)
 
 ENTRY (_ZGVeN16vl4l4_sincosf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
-#else
         pushq     %rbp
         cfi_adjust_cfa_offset (8)
         cfi_rel_offset (%rbp, 0)
@@ -496,7 +489,6 @@ WRAPPER_IMPL_AVX512_fFF _ZGVdN8vvv_sincosf
 
         vmovss    %xmm0, 1280(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16vl4l4_sincosf_skx)
 libmvec_hidden_def(_ZGVeN16vl4l4_sincosf_skx)
 
diff --git a/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S b/sysdeps/x86_64/fpu/multiarch/svml_s_sinf16_core_avx512.S
index c1b352d..bcb76ff 100644 (file)
@@ -22,9 +22,6 @@
 
        .text
 ENTRY(_ZGVeN16v_sinf_knl)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -239,13 +236,9 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
         call      JUMPTARGET(sinf)
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_1_7
-#endif
 END(_ZGVeN16v_sinf_knl)
 
 ENTRY (_ZGVeN16v_sinf_skx)
-#ifndef HAVE_AVX512DQ_ASM_SUPPORT
-WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
-#else
 /*
    ALGORITHM DESCRIPTION:
 
@@ -470,5 +463,4 @@ WRAPPER_IMPL_AVX512 _ZGVdN8v_sinf
 
         vmovss    %xmm0, 1216(%rsp,%r15,8)
         jmp       .LBL_2_7
-#endif
 END (_ZGVeN16v_sinf_skx)