X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=glib%2Fgbitlock.c;h=572c2d1873d81fb01ccddca19c866cd29977b7cf;hb=d217429729aad360f372633f2ec99778c0fc08d5;hp=c62ec3055a8aacb37c22f545c292863dee6f49f3;hpb=522dafe126ed814a1699238142fbc12b7f55b0b6;p=platform%2Fupstream%2Fglib.git diff --git a/glib/gbitlock.c b/glib/gbitlock.c index c62ec30..572c2d1 100644 --- a/glib/gbitlock.c +++ b/glib/gbitlock.c @@ -13,39 +13,31 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * License along with this library; if not, see . * * Author: Ryan Lortie */ +#include "config.h" + #include "gbitlock.h" +#include #include #include #include +#include #include "gthreadprivate.h" -#include "config.h" - -#include "galias.h" #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION #undef HAVE_FUTEX #endif #ifndef HAVE_FUTEX +static GMutex g_futex_mutex; static GSList *g_futex_address_list = NULL; -static GMutex *g_futex_mutex = NULL; -#endif - -void -_g_futex_thread_init (void) { -#ifndef HAVE_FUTEX - g_futex_mutex = g_mutex_new (); #endif -} #ifdef HAVE_FUTEX /* @@ -57,7 +49,7 @@ _g_futex_thread_init (void) { * If anyone actually gets bit by this, please file a bug. :) */ #include -#include +#include #include /* < private > @@ -81,7 +73,7 @@ static void g_futex_wait (const volatile gint *address, gint value) { - syscall (SYS_futex, address, (gsize) FUTEX_WAIT, (gsize) value, NULL); + syscall (__NR_futex, address, (gsize) FUTEX_WAIT, (gsize) value, NULL); } /* < private > @@ -98,7 +90,7 @@ g_futex_wait (const volatile gint *address, static void g_futex_wake (const volatile gint *address) { - syscall (SYS_futex, address, (gsize) FUTEX_WAKE, (gsize) 1, NULL); + syscall (__NR_futex, address, (gsize) FUTEX_WAKE, (gsize) 1, NULL); } #else @@ -108,12 +100,9 @@ typedef struct { const volatile gint *address; gint ref_count; - GCond *wait_queue; + GCond wait_queue; } WaitAddress; -static GSList *g_futex_address_list; -static GMutex *g_futex_mutex; - static WaitAddress * g_futex_find_address (const volatile gint *address) { @@ -134,7 +123,7 @@ static void g_futex_wait (const volatile gint *address, gint value) { - g_mutex_lock (g_futex_mutex); + g_mutex_lock (&g_futex_mutex); if G_LIKELY (g_atomic_int_get (address) == value) { WaitAddress *waiter; @@ -143,24 +132,24 @@ g_futex_wait (const volatile gint *address, { waiter = g_slice_new (WaitAddress); waiter->address = address; - waiter->wait_queue = g_cond_new (); + g_cond_init (&waiter->wait_queue); waiter->ref_count = 0; g_futex_address_list = g_slist_prepend (g_futex_address_list, waiter); } waiter->ref_count++; - g_cond_wait (waiter->wait_queue, g_futex_mutex); + g_cond_wait (&waiter->wait_queue, &g_futex_mutex); if (!--waiter->ref_count) { g_futex_address_list = g_slist_remove (g_futex_address_list, waiter); - g_cond_free (waiter->wait_queue); + g_cond_clear (&waiter->wait_queue); g_slice_free (WaitAddress, waiter); } } - g_mutex_unlock (g_futex_mutex); + g_mutex_unlock (&g_futex_mutex); } static void @@ -174,16 +163,22 @@ g_futex_wake (const volatile gint *address) * 2) need to -stay- locked until the end to ensure a wake() * in another thread doesn't cause 'waiter' to stop existing */ - g_mutex_lock (g_futex_mutex); + g_mutex_lock (&g_futex_mutex); if ((waiter = g_futex_find_address (address))) - g_cond_signal (waiter->wait_queue); - g_mutex_unlock 
(g_futex_mutex); + g_cond_signal (&waiter->wait_queue); + g_mutex_unlock (&g_futex_mutex); } #endif #define CONTENTION_CLASSES 11 static volatile gint g_bit_lock_contended[CONTENTION_CLASSES]; +#if (defined (i386) || defined (__amd64__)) + #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + #define USE_ASM_GOTO 1 + #endif +#endif + /** * g_bit_lock: * @address: a pointer to an integer @@ -209,11 +204,39 @@ void g_bit_lock (volatile gint *address, gint lock_bit) { +#ifdef USE_ASM_GOTO + retry: + __asm__ volatile goto ("lock bts %1, (%0)\n" + "jc %l[contended]" + : /* no output */ + : "r" (address), "r" (lock_bit) + : "cc", "memory" + : contended); + return; + + contended: + { + guint mask = 1u << lock_bit; + guint v; + + v = g_atomic_int_get (address); + if (v & mask) + { + guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); + + g_atomic_int_add (&g_bit_lock_contended[class], +1); + g_futex_wait (address, v); + g_atomic_int_add (&g_bit_lock_contended[class], -1); + } + } + goto retry; +#else + guint mask = 1u << lock_bit; guint v; retry: - v = g_atomic_int_get (address); - if (v & (1u << lock_bit)) + v = g_atomic_int_or (address, mask); + if (v & mask) /* already locked */ { guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); @@ -224,16 +247,13 @@ g_bit_lock (volatile gint *address, goto retry; } - - if (!g_atomic_int_compare_and_exchange (address, v, v | (1u << lock_bit))) - goto retry; +#endif } /** * g_bit_trylock: * @address: a pointer to an integer * @lock_bit: a bit value between 0 and 31 - * @returns: %TRUE if the lock was acquired * * Sets the indicated @lock_bit in @address, returning %TRUE if * successful. If the bit is already set, returns %FALSE immediately. @@ -248,24 +268,33 @@ g_bit_lock (volatile gint *address, * @address must be atomic in order for this function to work * reliably. * + * Returns: %TRUE if the lock was acquired + * * Since: 2.24 **/ gboolean g_bit_trylock (volatile gint *address, gint lock_bit) { - guint v; +#ifdef USE_ASM_GOTO + gboolean result; - retry: - v = g_atomic_int_get (address); - if (v & (1u << lock_bit)) - /* already locked */ - return FALSE; + __asm__ volatile ("lock bts %2, (%1)\n" + "setnc %%al\n" + "movzx %%al, %0" + : "=r" (result) + : "r" (address), "r" (lock_bit) + : "cc", "memory"); - if (!g_atomic_int_compare_and_exchange (address, v, v | (1u << lock_bit))) - goto retry; + return result; +#else + guint mask = 1u << lock_bit; + guint v; + + v = g_atomic_int_or (address, mask); - return TRUE; + return ~v & mask; +#endif } /** @@ -287,17 +316,220 @@ void g_bit_unlock (volatile gint *address, gint lock_bit) { - guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); - guint v; +#ifdef USE_ASM_GOTO + asm volatile ("lock btr %1, (%0)" + : /* no output */ + : "r" (address), "r" (lock_bit) + : "cc", "memory"); +#else + guint mask = 1u << lock_bit; + g_atomic_int_and (address, ~mask); +#endif + + { + guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); + + if (g_atomic_int_get (&g_bit_lock_contended[class])) + g_futex_wake (address); + } +} + + +/* We emulate pointer-sized futex(2) because the kernel API only + * supports integers. + * + * We assume that the 'interesting' part is always the lower order bits. + * This assumption holds because pointer bitlocks are restricted to + * using the low order bits of the pointer as the lock. + * + * On 32 bits, there is nothing to do since the pointer size is equal to + * the integer size. 
On little endian the lower-order bits don't move, + * so do nothing. Only on 64bit big endian do we need to do a bit of + * pointer arithmetic: the low order bits are shifted by 4 bytes. We + * have a helper function that always does the right thing here. + * + * Since we always consider the low-order bits of the integer value, a + * simple cast from (gsize) to (guint) always takes care of that. + * + * After that, pointer-sized futex becomes as simple as: + * + * g_futex_wait (g_futex_int_address (address), (guint) value); + * + * and + * + * g_futex_wake (g_futex_int_address (int_address)); + */ +static const volatile gint * +g_futex_int_address (const volatile void *address) +{ + const volatile gint *int_address = address; + + /* this implementation makes these (reasonable) assumptions: */ + G_STATIC_ASSERT (G_BYTE_ORDER == G_LITTLE_ENDIAN || + (G_BYTE_ORDER == G_BIG_ENDIAN && + sizeof (int) == 4 && + (sizeof (gpointer) == 4 || sizeof (gpointer) == 8))); + +#if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8 + int_address++; +#endif + + return int_address; +} + +/** + * g_pointer_bit_lock: + * @address: a pointer to a #gpointer-sized value + * @lock_bit: a bit value between 0 and 31 + * + * This is equivalent to g_bit_lock, but working on pointers (or other + * pointer-sized values). + * + * For portability reasons, you may only lock on the bottom 32 bits of + * the pointer. + * + * Since: 2.30 + **/ +void +(g_pointer_bit_lock) (volatile void *address, + gint lock_bit) +{ + g_return_if_fail (lock_bit < 32); + + { +#ifdef USE_ASM_GOTO retry: - v = g_atomic_int_get (address); - if (!g_atomic_int_compare_and_exchange (address, v, v & ~(1u << lock_bit))) + asm volatile goto ("lock bts %1, (%0)\n" + "jc %l[contended]" + : /* no output */ + : "r" (address), "r" ((gsize) lock_bit) + : "cc", "memory" + : contended); + return; + + contended: + { + volatile gsize *pointer_address = address; + gsize mask = 1u << lock_bit; + gsize v; + + v = (gsize) g_atomic_pointer_get (pointer_address); + if (v & mask) + { + guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); + + g_atomic_int_add (&g_bit_lock_contended[class], +1); + g_futex_wait (g_futex_int_address (address), v); + g_atomic_int_add (&g_bit_lock_contended[class], -1); + } + } goto retry; +#else + volatile gsize *pointer_address = address; + gsize mask = 1u << lock_bit; + gsize v; + + retry: + v = g_atomic_pointer_or (pointer_address, mask); + if (v & mask) + /* already locked */ + { + guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); - if (g_atomic_int_get (&g_bit_lock_contended[class])) - g_futex_wake (address); + g_atomic_int_add (&g_bit_lock_contended[class], +1); + g_futex_wait (g_futex_int_address (address), (guint) v); + g_atomic_int_add (&g_bit_lock_contended[class], -1); + + goto retry; + } +#endif + } } -#define __G_BITLOCK_C__ -#include "galiasdef.c" +/** + * g_pointer_bit_trylock: + * @address: a pointer to a #gpointer-sized value + * @lock_bit: a bit value between 0 and 31 + * + * This is equivalent to g_bit_trylock, but working on pointers (or + * other pointer-sized values). + * + * For portability reasons, you may only lock on the bottom 32 bits of + * the pointer. 
+ * + * Returns: %TRUE if the lock was acquired + * + * Since: 2.30 + **/ +gboolean +(g_pointer_bit_trylock) (volatile void *address, + gint lock_bit) +{ + g_return_val_if_fail (lock_bit < 32, FALSE); + + { +#ifdef USE_ASM_GOTO + gboolean result; + + asm volatile ("lock bts %2, (%1)\n" + "setnc %%al\n" + "movzx %%al, %0" + : "=r" (result) + : "r" (address), "r" ((gsize) lock_bit) + : "cc", "memory"); + + return result; +#else + volatile gsize *pointer_address = address; + gsize mask = 1u << lock_bit; + gsize v; + + g_return_val_if_fail (lock_bit < 32, FALSE); + + v = g_atomic_pointer_or (pointer_address, mask); + + return ~v & mask; +#endif + } +} + +/** + * g_pointer_bit_unlock: + * @address: a pointer to a #gpointer-sized value + * @lock_bit: a bit value between 0 and 31 + * + * This is equivalent to g_bit_unlock, but working on pointers (or other + * pointer-sized values). + * + * For portability reasons, you may only lock on the bottom 32 bits of + * the pointer. + * + * Since: 2.30 + **/ +void +(g_pointer_bit_unlock) (volatile void *address, + gint lock_bit) +{ + g_return_if_fail (lock_bit < 32); + + { +#ifdef USE_ASM_GOTO + asm volatile ("lock btr %1, (%0)" + : /* no output */ + : "r" (address), "r" ((gsize) lock_bit) + : "cc", "memory"); +#else + volatile gsize *pointer_address = address; + gsize mask = 1u << lock_bit; + + g_atomic_pointer_and (pointer_address, ~mask); +#endif + + { + guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended); + if (g_atomic_int_get (&g_bit_lock_contended[class])) + g_futex_wake (g_futex_int_address (address)); + } + } +}
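
For context, a minimal usage sketch of the API this patch implements (not part of the change itself; the Node type, the helper names increment_counter and node_set_value, and the build command are purely illustrative assumptions):

/* Illustrative sketch only -- not part of this patch.  Shows how the
 * g_bit_lock and g_pointer_bit_lock families implemented above are
 * typically consumed.  The Node type and helper names are hypothetical.
 *
 * Build (assumed): gcc demo.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>

static gint lock_word = 0;   /* only bit 0 is used, as a lock          */
static gint counter   = 0;   /* plain data guarded by that bit         */

static void
increment_counter (void)
{
  g_bit_lock (&lock_word, 0);   /* futex-waits if another thread holds bit 0   */
  counter++;                    /* plain access is fine while the bit is held  */
  g_bit_unlock (&lock_word, 0);
}

typedef struct _Node Node;
struct _Node
{
  Node *next;    /* bit 0 of this pointer doubles as a per-node lock   */
  gint  value;   /* payload guarded by that lock                       */
};

static void
node_set_value (Node *node,
                gint  value)
{
  /* the lock lives in the low bit of 'next', so no separate mutex
   * field is needed; only bits 0..31 of the pointer may carry a lock */
  g_pointer_bit_lock (&node->next, 0);
  node->value = value;
  g_pointer_bit_unlock (&node->next, 0);
}

int
main (void)
{
  Node node = { NULL, 0 };

  increment_counter ();
  node_set_value (&node, 42);

  g_assert_cmpint (counter, ==, 1);
  g_assert_cmpint (node.value, ==, 42);

  if (g_bit_trylock (&lock_word, 0))   /* non-blocking attempt on the same bit */
    g_bit_unlock (&lock_word, 0);

  return 0;
}

Design note: the 32-bit restriction on the pointer variants follows directly from the code above -- g_futex_wait() can only watch a gint, so the pointer functions futex-wait on the low-order word selected by g_futex_int_address(), and the lock bit must live in those bottom 32 bits for the wait/wake pairing to work on both endiannesses.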