2007-05-21  Ulrich Drepper  <drepper@redhat.com>

+ * sysdeps/x86_64/cacheinfo.c (init_cacheinfo): Pass correct value
+ as second parameter to handle_intel.
+
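The fix above is an argument mix-up: handle_intel's second parameter is
the highest supported CPUID level, but init_cacheinfo had been passing
the eax output left over from a later cpuid instruction.  A standalone
sketch of the distinction (not glibc code; handle_intel itself is
omitted):

  #include <stdio.h>

  int
  main (void)
  {
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 0 reports the maximum supported standard leaf in eax.  */
    asm volatile ("cpuid"
                  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                  : "0" (0));
    unsigned int max_cpuid = eax;   /* Keep this for later queries.  */

    /* Any later cpuid clobbers eax; this stale value is what the
       pre-patch code passed to handle_intel by mistake.  */
    asm volatile ("cpuid"
                  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                  : "0" (1));

    printf ("max_cpuid = %u, clobbered eax = 0x%x\n", max_cpuid, eax);
    return 0;
  }

Built with gcc on x86_64, max_cpuid stays fixed while eax now holds the
leaf-1 data, which is why the calls in the hunks below switch to
max_cpuid.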
* nscd/aicache.c (addhstaiX): If reported TTL is zero don't cache
the entry.

Define for kernel >= 2.6.22.

2007-05-18  Ulrich Drepper  <drepper@redhat.com>
+
* elf/dl-close.c (_dl_close_worker): When removing object from
global scope, wait for all lookups to finish afterwards.
* elf/dl-open.c (add_to_global): When global scope array must

+2007-05-21  Ulrich Drepper  <drepper@redhat.com>
+
+ * sysdeps/pthread/pthread-functions.h (struct pthread_functions):
+ Remove ptr_wait_lookup_done again.
+ * init.c (pthread_functions): Don't add .ptr_wait_lookup_done here.
+ (__pthread_initialize_minimal_internal): Initialize
+ _dl_wait_lookup_done pointer in _rtld_global directly.
+ * sysdeps/unix/sysv/linux/libc_pthread_init.c (__libc_pthread_init):
+ Remove code to set _dl_wait_lookup_done.
+ * sysdeps/x86_64/tls.h (THREAD_GSCOPE_WAIT): The pointer is not
+ encrypted for now.
+
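The entry above moves a callback out of struct pthread_functions and
stores it in the dynamic linker's global state during initialization
instead.  A minimal standalone sketch of that pattern; every name here
is a hypothetical stand-in for the glibc internals listed above:

  #include <stdio.h>

  /* Stand-in for the _rtld_global slot the patch assigns directly.  */
  struct rtld_global_sketch
  {
    void (*dl_wait_lookup_done) (void);
  };

  static struct rtld_global_sketch rtld_global_sketch;

  static void
  wait_lookup_done_impl (void)
  {
    puts ("wait for lookups using the global scope to finish");
  }

  /* Stand-in for __pthread_initialize_minimal_internal: set the
     pointer directly instead of publishing it through a function
     table that another component has to copy over.  */
  static void
  initialize_minimal_sketch (void)
  {
    rtld_global_sketch.dl_wait_lookup_done = wait_lookup_done_impl;
  }

  int
  main (void)
  {
    initialize_minimal_sketch ();
    rtld_global_sketch.dl_wait_lookup_done ();
    return 0;
  }

This is also what the tls.h note refers to: pointers copied through
struct pthread_functions are normally mangled, while the one stored in
_rtld_global stays plain, hence "not encrypted for now".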
2007-05-21  Jakub Jelinek  <jakub@redhat.com>

* tst-robust9.c (do_test): Don't fail if ENABLE_PI and
-/*
- x86_64 cache info.
-
+/* x86_64 cache info.
Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
This file is part of the GNU C Library.
long int result = 0;
bool no_level_2_or_3 = false;
bool has_level_2 = false;
-
+
while (cnt++ < max)
{
unsigned int eax;
/* Half the core cache size for use in memory and string routines, typically
L1 size. */
long int __x86_64_core_cache_size_half attribute_hidden = 32 * 1024 / 2;
-/* Shared cache size for use in memory and string routines, typically L2 or L3 size. */
+/* Shared cache size for use in memory and string routines, typically
+ L2 or L3 size. */
long int __x86_64_shared_cache_size_half attribute_hidden = 1024 * 1024 / 2;
/* PREFETCHW support flag for use in memory and string routines. */
int __x86_64_prefetchw attribute_hidden;
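
These three variables are what the rest of this file computes.  The
same halved figures can be observed from user code through sysconf,
which glibc derives from the same CPUID data; a standalone sketch that
simply mirrors the "/ 2" defaults above:

  #include <stdio.h>
  #include <unistd.h>

  int
  main (void)
  {
    /* glibc-specific sysconf names for the cache sizes probed here.  */
    long l1d = sysconf (_SC_LEVEL1_DCACHE_SIZE);
    long l2 = sysconf (_SC_LEVEL2_CACHE_SIZE);
    long l3 = sysconf (_SC_LEVEL3_CACHE_SIZE);

    /* L3 first, then L2, matching init_cacheinfo below.  */
    long shared = l3 > 0 ? l3 : l2;

    printf ("core half: %ld, shared half: %ld\n",
            l1d > 0 ? l1d / 2 : -1, shared > 0 ? shared / 2 : -1);
    return 0;
  }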
/* This spells out "GenuineIntel". */
if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
{
- core = handle_intel (_SC_LEVEL1_DCACHE_SIZE, eax);
-
+ core = handle_intel (_SC_LEVEL1_DCACHE_SIZE, max_cpuid);
+
/* Try L3 first. */
level = 3;
- shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, eax);
-
+ shared = handle_intel (_SC_LEVEL3_CACHE_SIZE, max_cpuid);
+
if (shared <= 0)
{
/* Try L2 otherwise. */
level = 2;
- shared = handle_intel (_SC_LEVEL2_CACHE_SIZE, eax);
+ shared = handle_intel (_SC_LEVEL2_CACHE_SIZE, max_cpuid);
}
-
- /* Figure out the number of logical threads that share the highest cache level. */
+
+ /* Figure out the number of logical threads that share the
+ highest cache level. */
if (max_cpuid >= 4)
{
int i = 0;
-
+
/* Query until desired cache level is enumerated. */
do
{
: "0" (4), "2" (i++));
}
while (((eax >> 5) & 0x7) != level);
-
+
threads = ((eax >> 14) & 0x3ff) + 1;
}
else
asm volatile ("cpuid"
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "0" (1));
-
+
threads = (ebx >> 16) & 0xff;
}
-
- /* Cap usage of highest cache level to the number of supported threads. */
+
+ /* Cap usage of highest cache level to the number of supported
+ threads. */
if (shared > 0 && threads > 0)
shared /= threads;
}
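
The leaf-4 walk and the thread capping above can be exercised outside
glibc.  A standalone sketch that enumerates CPUID leaf 4 with the same
bit fields as the patch (cache type in eax bits 4:0, level in bits 7:5,
sharing count above bit 14); it only reports useful data on CPUs that
implement leaf 4:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned int eax, ebx, ecx, edx;
    unsigned int best_level = 0, best_threads = 0;

    /* The subleaf index goes in ecx; a cache type of 0 ends the list.  */
    for (int i = 0; ; ++i)
      {
        asm volatile ("cpuid"
                      : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
                      : "0" (4), "2" (i));
        if ((eax & 0x1f) == 0)
          break;

        unsigned int level = (eax >> 5) & 0x7;
        /* Logical threads sharing this cache, minus one, masked the
           same way as in the patch.  */
        unsigned int threads = ((eax >> 14) & 0x3ff) + 1;

        if (level >= best_level)
          {
            best_level = level;
            best_threads = threads;
          }
      }

    if (best_level > 0)
      printf ("highest cache: L%u, shared by %u threads\n",
              best_level, best_threads);
    return 0;
  }

Dividing `shared' by that thread count, as the code above does, keeps
one thread's memory routines from assuming they own the whole
last-level cache.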
{
core = handle_amd (_SC_LEVEL1_DCACHE_SIZE);
shared = handle_amd (_SC_LEVEL2_CACHE_SIZE);
-
+
asm volatile ("cpuid"
: "=a" (max_cpuid_ex), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "0" (0x80000000));