x86: Add support for building {w}memset{_chk} with explicit ISA level
author Noah Goldstein <goldstein.w.n@gmail.com>
Wed, 29 Jun 2022 23:07:06 +0000 (16:07 -0700)
committer Noah Goldstein <goldstein.w.n@gmail.com>
Tue, 5 Jul 2022 23:42:42 +0000 (16:42 -0700)
1. Refactor files so that all implementations are in the multiarch
   directory.
    - Moved the implementation portion of the SSE2 memset from
      memset.S to multiarch/memset-sse2-unaligned-erms.S.

    - The non-multiarch file now only includes one of the
      implementations in the multiarch directory, based on the
      compiled ISA level.  (This path is only used for non-multiarch
      builds; otherwise we go through the ifunc selector.  See the
      sketch after this list.)

2. Add ISA level build guards to the different implementations.
    - E.g. memset-avx2-unaligned-erms.S, which is ISA level 3, will
      only be built if the compiled ISA level is <= 3.  Otherwise
      there is no reason to include it, as we would always use one of
      the ISA level 4 implementations
      (memset-evex-unaligned-erms.S).  The guard semantics are
      sketched after this list.

3. Add a new multiarch/rtld-memset.S that just includes the
   non-multiarch memset.S, which in turn selects the best
   implementation based on the compiled ISA level.

4. Refactor the ifunc selector and ifunc implementation list to use
   the ISA-level-aware wrapper macros, which allow functions below the
   compiled ISA level (with a guaranteed replacement) to be skipped; a
   simplified expansion is sketched after the test notes.
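
As an illustration of points 1 and 3, the compile-time selection in
<isa-default-impl.h> works along these lines (a simplified sketch,
not the verbatim header; DEFAULT_IMPL_V{1,3,4} are the file names
defined in memset.S below):

    /* Pick the best implementation that the compiled ISA level
       guarantees is usable.  SSE2 (V1) also covers V2 builds, since
       there is no separate V2 memset.  */
    #if MINIMUM_X86_ISA_LEVEL >= 4
    # include DEFAULT_IMPL_V4
    #elif MINIMUM_X86_ISA_LEVEL >= 3
    # include DEFAULT_IMPL_V3
    #else
    # include DEFAULT_IMPL_V1
    #endif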

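The ISA_SHOULD_BUILD guard from point 2 has roughly the following
meaning (a sketch of the semantics, not the exact macro from
<isa-level.h>):

    /* An ISA level N implementation is only worth building when the
       compiled ISA level does not already guarantee a higher-level
       replacement, i.e. when N >= MINIMUM_X86_ISA_LEVEL (and the
       file is being built into libc).  */
    #if (3 >= MINIMUM_X86_ISA_LEVEL) && IS_IN (libc)
    /* ... the AVX2 (V3) implementation is built ...  */
    #endif
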
Tested with and without multiarch on x86_64 for ISA levels:
{generic, x86-64-v2, x86-64-v3, x86-64-v4}

Also tested m32 with and without multiarch.
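
The ISA-level-aware wrappers from point 4 can be pictured as follows
(a hypothetical, simplified definition; the real macros are
analogous for the other levels):

    /* At compiled ISA level 4 every V3-or-lower function has a
       guaranteed V4 replacement, so the V3 wrapper can drop the
       entry from the implementation list entirely.  */
    #if MINIMUM_X86_ISA_LEVEL <= 3
    # define X86_IFUNC_IMPL_ADD_V3(...) IFUNC_IMPL_ADD (__VA_ARGS__)
    #else
    # define X86_IFUNC_IMPL_ADD_V3(...)
    #endif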

sysdeps/x86_64/memset.S
sysdeps/x86_64/multiarch/ifunc-impl-list.c
sysdeps/x86_64/multiarch/ifunc-memset.h
sysdeps/x86_64/multiarch/ifunc-wmemset.h
sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
sysdeps/x86_64/multiarch/rtld-memset.S [new file with mode: 0644]

diff --git a/sysdeps/x86_64/memset.S b/sysdeps/x86_64/memset.S
index a6eea61..f4e1bab 100644
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
-#define USE_WITH_SSE2  1
 
-#define VEC_SIZE       16
-#define MOV_SIZE       3
-#define RET_SIZE       1
+#define MEMSET_SYMBOL(p,s)     memset
+#define MEMSET_CHK_SYMBOL(p,s) p
 
-#define VEC(i)         xmm##i
-#define VMOVU     movups
-#define VMOVA     movaps
+#define WMEMSET_SYMBOL(p,s)    __wmemset
+#define WMEMSET_CHK_SYMBOL(p,s) p
 
-# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  movd d, %xmm0; \
-  movq r, %rax; \
-  punpcklbw %xmm0, %xmm0; \
-  punpcklwd %xmm0, %xmm0; \
-  pshufd $0, %xmm0, %xmm0
+#define DEFAULT_IMPL_V1        "multiarch/memset-sse2-unaligned-erms.S"
+#define DEFAULT_IMPL_V3        "multiarch/memset-avx2-unaligned-erms.S"
+#define DEFAULT_IMPL_V4        "multiarch/memset-evex-unaligned-erms.S"
 
-# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
-  movd d, %xmm0; \
-  pshufd $0, %xmm0, %xmm0; \
-  movq r, %rax
-
-# define MEMSET_VDUP_TO_VEC0_HIGH()
-# define MEMSET_VDUP_TO_VEC0_LOW()
-
-# define WMEMSET_VDUP_TO_VEC0_HIGH()
-# define WMEMSET_VDUP_TO_VEC0_LOW()
-
-#define SECTION(p)             p
-
-#ifndef MEMSET_SYMBOL
-# define MEMSET_CHK_SYMBOL(p,s)        p
-# define MEMSET_SYMBOL(p,s)    memset
-#endif
-
-#ifndef WMEMSET_SYMBOL
-# define WMEMSET_CHK_SYMBOL(p,s) p
-# define WMEMSET_SYMBOL(p,s)   __wmemset
-#endif
-
-#include "multiarch/memset-vec-unaligned-erms.S"
+#include "isa-default-impl.h"
 
 libc_hidden_builtin_def (memset)
 
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index 7858aa3..21008c7 100644
@@ -213,94 +213,99 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
   IFUNC_IMPL (i, name, __memset_chk,
              IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
                              __memset_chk_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
-                             __memset_chk_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk, 1,
-                             __memset_chk_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_chk_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_chk_avx2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_chk_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_chk_avx2_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_chk_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, __memset_chk,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memset_chk_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memset_chk_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_chk_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_chk_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_chk_avx2_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_chk_avx2_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __memset_chk,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_chk_avx2_unaligned_erms_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
+                                    __memset_chk_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __memset_chk, 1,
+                                    __memset_chk_sse2_unaligned_erms)
              )
 #endif
 
   /* Support sysdeps/x86_64/multiarch/memset.c.  */
   IFUNC_IMPL (i, name, memset,
              IFUNC_IMPL_ADD (array, i, memset, 1,
-                             __memset_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset, 1,
-                             __memset_sse2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset, 1, __memset_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __memset_avx2_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __memset_avx2_unaligned_erms_rtm)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_evex_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_avx512_unaligned_erms)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __memset_avx512_unaligned)
-             IFUNC_IMPL_ADD (array, i, memset,
-                             CPU_FEATURE_USABLE (AVX512F),
-                             __memset_avx512_no_vzeroupper)
+                             __memset_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_avx512_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    CPU_FEATURE_USABLE (AVX512F),
+                                    __memset_avx512_no_vzeroupper)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __memset_evex_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __memset_avx2_unaligned_erms)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_avx2_unaligned_rtm)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, memset,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __memset_avx2_unaligned_erms_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
+                                    __memset_sse2_unaligned)
+             X86_IFUNC_IMPL_ADD_V2 (array, i, memset, 1,
+                                    __memset_sse2_unaligned_erms)
             )
 
   /* Support sysdeps/x86_64/multiarch/rawmemchr.c.  */
@@ -821,25 +826,27 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 
   /* Support sysdeps/x86_64/multiarch/wmemset.c.  */
   IFUNC_IMPL (i, name, wmemset,
-             IFUNC_IMPL_ADD (array, i, wmemset, 1,
-                             __wmemset_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __wmemset_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __wmemset_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, wmemset,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_avx512_unaligned))
+             X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, wmemset,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __wmemset_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, wmemset,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __wmemset_avx2_unaligned_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, wmemset, 1,
+                                    __wmemset_sse2_unaligned))
 
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/memcpy_chk.c.  */
@@ -1049,25 +1056,27 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
 #ifdef SHARED
   /* Support sysdeps/x86_64/multiarch/wmemset_chk.c.  */
   IFUNC_IMPL (i, name, __wmemset_chk,
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk, 1,
-                             __wmemset_chk_sse2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             CPU_FEATURE_USABLE (AVX2),
-                             __wmemset_chk_avx2_unaligned)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             (CPU_FEATURE_USABLE (AVX2)
-                              && CPU_FEATURE_USABLE (RTM)),
-                             __wmemset_chk_avx2_unaligned_rtm)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_chk_evex_unaligned)
-             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
-                             (CPU_FEATURE_USABLE (AVX512VL)
-                              && CPU_FEATURE_USABLE (AVX512BW)
-                              && CPU_FEATURE_USABLE (BMI2)),
-                             __wmemset_chk_avx512_unaligned))
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_chk_evex_unaligned)
+             X86_IFUNC_IMPL_ADD_V4 (array, i, __wmemset_chk,
+                                    (CPU_FEATURE_USABLE (AVX512VL)
+                                     && CPU_FEATURE_USABLE (AVX512BW)
+                                     && CPU_FEATURE_USABLE (BMI2)),
+                                    __wmemset_chk_avx512_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
+                                    CPU_FEATURE_USABLE (AVX2),
+                                    __wmemset_chk_avx2_unaligned)
+             X86_IFUNC_IMPL_ADD_V3 (array, i, __wmemset_chk,
+                                    (CPU_FEATURE_USABLE (AVX2)
+                                     && CPU_FEATURE_USABLE (RTM)),
+                                    __wmemset_chk_avx2_unaligned_rtm)
+             /* ISA V2 wrapper for SSE2 implementation because the SSE2
+                implementation is also used at ISA level 2.  */
+             X86_IFUNC_IMPL_ADD_V2 (array, i, __wmemset_chk, 1,
+                                    __wmemset_chk_sse2_unaligned))
 #endif
 
   return 0;
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index 64d1799..ed51497 100644
 #include <init-arch.h>
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (erms) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+  attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
   attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms)
   attribute_hidden;
@@ -31,31 +40,26 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
   attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms_rtm)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
-  attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_no_vzeroupper)
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
   attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
-  const struct cpu_features* cpu_features = __get_cpu_features ();
+  const struct cpu_features *cpu_features = __get_cpu_features ();
 
   if (CPU_FEATURES_ARCH_P (cpu_features, Prefer_ERMS))
     return OPTIMIZE (erms);
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512F)
       && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
-          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (avx512_unaligned_erms);
@@ -66,11 +70,11 @@ IFUNC_SELECTOR (void)
       return OPTIMIZE (avx512_no_vzeroupper);
     }
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2))
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
-          && CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
-          && CPU_FEATURE_USABLE_P (cpu_features, BMI2))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512BW)
+         && X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, BMI2))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (evex_unaligned_erms);
@@ -86,7 +90,8 @@ IFUNC_SELECTOR (void)
          return OPTIMIZE (avx2_unaligned_rtm);
        }
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                      Prefer_No_VZEROUPPER, !))
        {
          if (CPU_FEATURE_USABLE_P (cpu_features, ERMS))
            return OPTIMIZE (avx2_unaligned_erms);
diff --git a/sysdeps/x86_64/multiarch/ifunc-wmemset.h b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
index 87c48e2..3810c71 100644
 
 #include <init-arch.h>
 
-extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
+
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_rtm)
   attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
-extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
+
+extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
 
 static inline void *
 IFUNC_SELECTOR (void)
 {
-  const struct cpu_features* cpu_features = __get_cpu_features ();
+  const struct cpu_features *cpu_features = __get_cpu_features ();
 
-  if (CPU_FEATURE_USABLE_P (cpu_features, AVX2)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
+  if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX2)
+      && X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                     AVX_Fast_Unaligned_Load, !))
     {
-      if (CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
+      if (X86_ISA_CPU_FEATURE_USABLE_P (cpu_features, AVX512VL))
        {
          if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
            return OPTIMIZE (avx512_unaligned);
@@ -44,7 +48,8 @@ IFUNC_SELECTOR (void)
       if (CPU_FEATURE_USABLE_P (cpu_features, RTM))
        return OPTIMIZE (avx2_unaligned_rtm);
 
-      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+      if (X86_ISA_CPU_FEATURES_ARCH_P (cpu_features,
+                                      Prefer_No_VZEROUPPER, !))
        return OPTIMIZE (avx2_unaligned);
     }
 
diff --git a/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx2-unaligned-erms.S
index c0bf287..a9054a9 100644
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (3)
+
 # define USE_WITH_AVX2 1
 
 # define VEC_SIZE      32
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memset-avx512-no-vzeroupper.S
index c5be8f5..8cc9c16 100644
    <https://www.gnu.org/licenses/>.  */
 
 #include <sysdep.h>
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
 
-#if IS_IN (libc)
 
 #include "asm-syntax.h"
 #ifndef MEMSET
diff --git a/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-avx512-unaligned-erms.S
index 5241216..47623b8 100644
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define USE_WITH_AVX512       1
 
 # define VEC_SIZE      64
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
 # define SECTION(p)            p##.evex512
+
+#ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)    p##_avx512_##s
+#endif
+#ifndef WMEMSET_SYMBOL
 # define WMEMSET_SYMBOL(p,s)   p##_avx512_##s
+#endif
+
+
 # define USE_LESS_VEC_MASK_STORE       1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
index 6370021..ac4b2d2 100644
@@ -1,4 +1,7 @@
-#if IS_IN (libc)
+#include <isa-level.h>
+
+#if ISA_SHOULD_BUILD (4)
+
 # define USE_WITH_EVEX 1
 
 # define VEC_SIZE      32
 # define WMEMSET_VDUP_TO_VEC0_LOW()
 
 # define SECTION(p)            p##.evex
+
+#ifndef MEMSET_SYMBOL
 # define MEMSET_SYMBOL(p,s)    p##_evex_##s
+#endif
+#ifndef WMEMSET_SYMBOL
 # define WMEMSET_SYMBOL(p,s)   p##_evex_##s
+#endif
+
+
 # define USE_LESS_VEC_MASK_STORE       1
 # include "memset-vec-unaligned-erms.S"
 #endif
diff --git a/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S
index 3d92f69..44f9b88 100644
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
-#include <sysdep.h>
-#include <shlib-compat.h>
+#include <isa-level.h>
 
-#if IS_IN (libc)
-# define MEMSET_SYMBOL(p,s)    p##_sse2_##s
-# define WMEMSET_SYMBOL(p,s)   p##_sse2_##s
+/* MINIMUM_X86_ISA_LEVEL <= 2 because there is no V2 implementation
+   so we need this to build for ISA V2 builds. */
+#if ISA_SHOULD_BUILD (2)
 
-# ifdef SHARED
-#  undef libc_hidden_builtin_def
-#  define libc_hidden_builtin_def(name)
+# include <sysdep.h>
+# define USE_WITH_SSE2 1
+
+# define VEC_SIZE      16
+# define MOV_SIZE      3
+# define RET_SIZE      1
+
+# define VEC(i)                xmm##i
+# define VMOVU     movups
+# define VMOVA     movaps
+
+# define MEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
+  movd d, %xmm0; \
+  movq r, %rax; \
+  punpcklbw %xmm0, %xmm0; \
+  punpcklwd %xmm0, %xmm0; \
+  pshufd $0, %xmm0, %xmm0
+
+# define WMEMSET_SET_VEC0_AND_SET_RETURN(d, r) \
+  movd d, %xmm0; \
+  pshufd $0, %xmm0, %xmm0; \
+  movq r, %rax
+
+# define MEMSET_VDUP_TO_VEC0_HIGH()
+# define MEMSET_VDUP_TO_VEC0_LOW()
+
+# define WMEMSET_VDUP_TO_VEC0_HIGH()
+# define WMEMSET_VDUP_TO_VEC0_LOW()
+
+# define SECTION(p)            p
+
+# ifndef MEMSET_SYMBOL
+#  define MEMSET_SYMBOL(p,s)   p##_sse2_##s
 # endif
 
-# undef weak_alias
-# define weak_alias(original, alias)
-# undef strong_alias
-# define strong_alias(ignored1, ignored2)
-#endif
+# ifndef WMEMSET_SYMBOL
+#  define WMEMSET_SYMBOL(p,s)  p##_sse2_##s
+# endif
+
+# include "memset-vec-unaligned-erms.S"
 
-#include <sysdeps/x86_64/memset.S>
+#endif
diff --git a/sysdeps/x86_64/multiarch/rtld-memset.S b/sysdeps/x86_64/multiarch/rtld-memset.S
new file mode 100644
index 0000000..d912bfa
--- /dev/null
@@ -0,0 +1,18 @@
+/* Copyright (C) 2022 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#include "../memset.S"