+2000-05-23 Jakub Jelinek <jakub@redhat.com>
+
+ * sysdeps/i386/fpu/bits/mathinline.h (__sincos, __sincosf,
+ __sincosl): Guard with __USE_GNU.
+
2000-05-24 Ulrich Drepper <drepper@redhat.com>

* csu/Makefile (routines): Add check_fds.
libpthread-routines := attr cancel condvar join manager mutex ptfork \
ptlongjmp pthread signals specific errno lockfile \
semaphore spinlock wrapsyscall rwlock pt-machine \
- oldsemaphore events getcpuclockid
+ oldsemaphore events getcpuclockid pspinlock
vpath %.c Examples
tests = ex1 ex2 ex3 ex4 ex5 ex6 ex7
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (th->p_canceled) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return 0;
}
th->p_woken_by_cancel = dorestart;
}
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
/* If the thread has suspended or is about to, then we unblock it by
issuing a restart, instead of a cancel signal. Otherwise we send
__pthread_lock(&cond->__c_lock, self);
did_remove = remove_from_queue(&cond->__c_waiting, th);
- __pthread_spin_unlock(&cond->__c_lock);
+ __pthread_unlock(&cond->__c_lock);
return did_remove;
}
enqueue(&cond->__c_waiting, self);
else
already_canceled = 1;
- __pthread_spin_unlock(&cond->__c_lock);
+ __pthread_unlock(&cond->__c_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
enqueue(&cond->__c_waiting, self);
else
already_canceled = 1;
- __pthread_spin_unlock(&cond->__c_lock);
+ __pthread_unlock(&cond->__c_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
__pthread_lock(&cond->__c_lock, self);
was_on_queue = remove_from_queue(&cond->__c_waiting, self);
- __pthread_spin_unlock(&cond->__c_lock);
+ __pthread_unlock(&cond->__c_lock);
if (was_on_queue) {
__pthread_set_own_extricate_if(self, 0);
__pthread_lock(&cond->__c_lock, NULL);
th = dequeue(&cond->__c_waiting);
- __pthread_spin_unlock(&cond->__c_lock);
+ __pthread_unlock(&cond->__c_lock);
if (th != NULL) restart(th);
return 0;
}
/* Copy the current state of the waiting queue and empty it */
tosignal = cond->__c_waiting;
cond->__c_waiting = NULL;
- __pthread_spin_unlock(&cond->__c_lock);
+ __pthread_unlock(&cond->__c_lock);
/* Now signal each process in the queue */
while ((th = dequeue(&tosignal)) != NULL) restart(th);
return 0;
pthread_t p_tid; /* Thread identifier */
int p_pid; /* PID of Unix process */
int p_priority; /* Thread priority (== 0 if not realtime) */
- pthread_spinlock_t * p_lock; /* Spinlock for synchronized accesses */
+ struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */
int p_signal; /* last signal received */
sigjmp_buf * p_signal_jmp; /* where to siglongjmp on a signal or NULL */
sigjmp_buf * p_cancel_jmp; /* where to siglongjmp on a cancel or NULL */
typedef struct pthread_handle_struct * pthread_handle;
struct pthread_handle_struct {
- pthread_spinlock_t h_lock; /* Fast lock for sychronized access */
+ struct _pthread_fastlock h_lock; /* Fast lock for synchronized access */
pthread_descr h_descr; /* Thread descriptor or NULL if invalid */
char * h_bottom; /* Lowest address in the thread's stack */
};
THREAD_SETMEM(self, p_terminated, 1);
/* See if someone is joining on us */
joining = THREAD_GETMEM(self, p_joining);
- __pthread_spin_unlock(THREAD_GETMEM(self, p_lock));
+ __pthread_unlock(THREAD_GETMEM(self, p_lock));
/* Restart joining thread if any */
if (joining != NULL) restart(joining);
/* If this is the initial thread, block until all threads have terminated.
/* Main thread flushes stdio streams and runs atexit functions.
It also calls a handler within LinuxThreads which sends a process exit
request to the thread manager. */
- exit(0);
+ exit(0);
}
/* Threads other than the main one terminate without flushing stdio streams
or running atexit functions. */
jo = handle->h_descr;
did_remove = jo->p_joining != NULL;
jo->p_joining = NULL;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return did_remove;
}
__pthread_lock(&handle->h_lock, self);
if (invalid_handle(handle, thread_id)) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (th == self) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return EDEADLK;
}
/* If detached or already joined, error */
if (th->p_detached || th->p_joining != NULL) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return EINVAL;
}
/* If not terminated yet, suspend ourselves. */
th->p_joining = self;
else
already_canceled = 1;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
}
/* Get return value */
if (thread_return != NULL) *thread_return = th->p_retval;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
/* Send notification to thread manager */
if (__pthread_manager_request >= 0) {
request.req_thread = self;
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread_id)) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
/* If already detached, error */
if (th->p_detached) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return EINVAL;
}
/* If already joining, don't do anything. */
if (th->p_joining != NULL) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return 0;
}
/* Mark as detached */
th->p_detached = 1;
terminated = th->p_terminated;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
/* If already terminated, notify thread manager to reclaim resources */
if (terminated && __pthread_manager_request >= 0) {
request.req_thread = thread_self();
__on_exit handler, which in turn will send REQ_PROCESS_EXIT
to the thread manager. In case you are wondering how the
manager terminates from its loop here. */
- }
+ }
break;
case REQ_POST:
__new_sem_post(request.req_args.post);
/* Get the lock the manager will free once all is correctly set up. */
__pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
/* Free it immediately. */
- __pthread_spin_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));
+ __pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));
return __pthread_manager(arg);
}
/* Get the lock the manager will free once all is correctly set up. */
__pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
/* Free it immediately. */
- __pthread_spin_unlock (THREAD_GETMEM(self, p_lock));
+ __pthread_unlock (THREAD_GETMEM(self, p_lock));
/* Continue with the real function. */
return pthread_start_thread (arg);
__linuxthreads_create_event ();
/* Now restart the thread. */
- __pthread_spin_unlock(new_thread->p_lock);
+ __pthread_unlock(new_thread->p_lock);
}
}
}
__pthread_lock(&handle->h_lock, NULL);
handle->h_descr = NULL;
handle->h_bottom = (char *)(-1L);
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
#ifdef FREE_THREAD
FREE_THREAD(th, th->p_nr);
#endif
}
}
detached = th->p_detached;
- __pthread_spin_unlock(th->p_lock);
+ __pthread_unlock(th->p_lock);
if (detached)
pthread_free(th);
break;
if (invalid_handle(handle, th_id)) {
/* pthread_reap_children has deallocated the thread already,
nothing needs to be done */
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return;
}
th = handle->h_descr;
if (th->p_exited) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
pthread_free(th);
} else {
/* The Unix process of the thread is still running.
Mark the thread as detached so that the thread manager will
deallocate its resources when the Unix process exits. */
th->p_detached = 1;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
}
}
{
switch (mutex->__m_kind) {
case PTHREAD_MUTEX_FAST_NP:
- __pthread_spin_unlock(&mutex->__m_lock);
+ __pthread_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_RECURSIVE_NP:
if (mutex->__m_count > 0) {
return 0;
}
mutex->__m_owner = NULL;
- __pthread_spin_unlock(&mutex->__m_lock);
+ __pthread_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_ERRORCHECK_NP:
if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
return EPERM;
mutex->__m_owner = NULL;
- __pthread_spin_unlock(&mutex->__m_lock);
+ __pthread_unlock(&mutex->__m_lock);
return 0;
default:
return EINVAL;
__linuxthreads_create_event ();
/* Now restart the thread. */
- __pthread_spin_unlock(__pthread_manager_thread.p_lock);
+ __pthread_unlock(__pthread_manager_thread.p_lock);
}
}
}
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (__sched_setscheduler(th->p_pid, policy, param) == -1) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return errno;
}
th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
if (__pthread_manager_request >= 0)
__pthread_manager_adjust_prio(th->p_priority);
return 0;
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return ESRCH;
}
pid = handle->h_descr->p_pid;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
pol = __sched_getscheduler(pid);
if (pol == -1) return errno;
if (__sched_getparam(pid, param) == -1) return errno;
{
__pthread_lock(self->p_lock, self);
THREAD_SETMEM(self, p_extricate, peif);
- __pthread_spin_unlock(self->p_lock);
+ __pthread_unlock(self->p_lock);
}
/* Primitives for controlling thread execution */
__pthread_lock (&rwlock->__rw_lock, NULL);
readers = rwlock->__rw_readers;
writer = rwlock->__rw_writer;
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
if (readers > 0 || writer != NULL)
return EBUSY;
break;
enqueue (&rwlock->__rw_read_waiting, self);
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
++rwlock->__rw_readers;
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
if (have_lock_already || out_of_mem)
{
retval = 0;
}
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
if (retval == 0)
{
if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
{
rwlock->__rw_writer = self;
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
return 0;
}
/* Suspend ourselves, then try again */
enqueue (&rwlock->__rw_write_waiting, self);
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
}
rwlock->__rw_writer = thread_self ();
result = 0;
}
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
return result;
}
/* Unlocking a write lock. */
if (rwlock->__rw_writer != thread_self ())
{
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
return EPERM;
}
rwlock->__rw_writer = NULL;
/* Restart all waiting readers. */
torestart = rwlock->__rw_read_waiting;
rwlock->__rw_read_waiting = NULL;
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
while ((th = dequeue (&torestart)) != NULL)
restart (th);
}
else
{
/* Restart one waiting writer. */
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
restart (th);
}
}
/* Unlocking a read lock. */
if (rwlock->__rw_readers == 0)
{
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
return EPERM;
}
else
th = NULL;
- __pthread_spin_unlock (&rwlock->__rw_lock);
+ __pthread_unlock (&rwlock->__rw_lock);
if (th != NULL)
restart (th);
errno = ENOSYS;
return -1;
}
- __pthread_init_lock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
sem->__sem_value = value;
sem->__sem_waiting = NULL;
return 0;
sem_t *sem = obj;
int did_remove = 0;
- __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
+ __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
did_remove = remove_from_queue(&sem->__sem_waiting, th);
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return did_remove;
}
extr.pu_object = sem;
extr.pu_extricate_func = new_sem_extricate_func;
- __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
+ __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
if (sem->__sem_value > 0) {
sem->__sem_value--;
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return 0;
}
/* Register extrication interface */
enqueue(&sem->__sem_waiting, self);
else
already_canceled = 1;
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
{
int retval;
- __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, NULL);
+ __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
if (sem->__sem_value == 0) {
errno = EAGAIN;
retval = -1;
sem->__sem_value--;
retval = 0;
}
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return retval;
}
struct pthread_request request;
if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
- __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
+ __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
if (sem->__sem_waiting == NULL) {
if (sem->__sem_value >= SEM_VALUE_MAX) {
/* Overflow */
errno = ERANGE;
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return -1;
}
sem->__sem_value++;
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
} else {
th = dequeue(&sem->__sem_waiting);
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
restart(th);
}
} else {
pthread_extricate_if extr;
int already_canceled = 0;
- __pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
+ __pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
if (sem->__sem_value > 0) {
--sem->__sem_value;
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return 0;
}
if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
/* The standard requires that if the function would block and the
time value is illegal, the function returns with an error. */
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
return EINVAL;
}
enqueue(&sem->__sem_waiting, self);
else
already_canceled = 1;
- __pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);
/* __pthread_lock will queue back any spurious restarts that
may happen to it. */
- __pthread_lock((pthread_spinlock_t *)&sem->__sem_lock, self);
+ __pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
- __pthread_spin_unlock((pthread_spinlock_t *)&sem->__sem_lock);
+ __pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
if (was_on_queue) {
__pthread_set_own_extricate_if(self, 0);
__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
return ESRCH;
}
pid = handle->h_descr->p_pid;
- __pthread_spin_unlock(&handle->h_lock);
+ __pthread_unlock(&handle->h_lock);
if (kill(pid, signo) == -1)
return errno;
else
This is safe because there are no concurrent __pthread_unlock
operations -- only the thread that locked the mutex can unlock it. */
-void internal_function __pthread_lock(pthread_spinlock_t * lock,
+void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self)
{
long oldstatus, newstatus;
while (spurious_wakeup_count--)
restart(self);
}
-int __pthread_spin_lock(pthread_spinlock_t * lock)
-{
- __pthread_lock (lock, NULL);
- return 0;
-}
-weak_alias (__pthread_spin_lock, pthread_spin_lock)
-int __pthread_spin_unlock(pthread_spinlock_t * lock)
+int __pthread_unlock(struct _pthread_fastlock * lock)
{
long oldstatus;
pthread_descr thr, * ptr, * maxptr;
return 0;
}
-weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
-
-
-int __pthread_spin_trylock (pthread_spinlock_t *lock)
-{
- return __pthread_trylock (lock);
-}
-weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
-int __pthread_spin_init(pthread_spinlock_t *lock, int pshared)
-{
- if (pshared != 0)
- return ENOSYS;
-
- __pthread_init_lock (lock);
- return 0;
-}
-weak_alias (__pthread_spin_init, pthread_spin_init)
-
-int __pthread_spin_destroy(pthread_spinlock_t *lock)
-{
- /* Nothing to do. */
- return 0;
-}
-weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
/* Compare-and-swap emulation with a spinlock */
/* Internal locks */
-extern void internal_function __pthread_lock(pthread_spinlock_t * lock,
+extern void internal_function __pthread_lock(struct _pthread_fastlock * lock,
pthread_descr self);
-extern int __pthread_spin_unlock(pthread_spinlock_t *lock);
+extern int __pthread_unlock(struct _pthread_fastlock *lock);
-static inline void __pthread_init_lock(pthread_spinlock_t * lock)
+static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
{
lock->__status = 0;
lock->__spinlock = 0;
}
-static inline int __pthread_trylock (pthread_spinlock_t * lock)
+static inline int __pthread_trylock (struct _pthread_fastlock * lock)
{
long oldstatus;
--- /dev/null
+/* POSIX spinlock implementation. Alpha version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+/* This implementation is similar to the one used in the Linux kernel.
+   But the kernel uses byte instructions for the memory access, which
+   is faster but unusable here: at most 128 threads/processes could
+   use the spinlock at the same time.  If (by a design error in the
+   program) a thread/process held the spinlock long enough for 128
+   waiting processes to accumulate, the next one would find a positive
+   value in the spinlock and assume it is unlocked.  We cannot accept
+   that.  */
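+
+/* Illustrative sketch (added for exposition, not from the original
+   sources): with a byte-wide counter that is 0 right after
+   acquisition and decremented once per waiter, the 129th decrement
+   wraps from negative to positive on a two's-complement machine:
+
+     signed char counter = 0;      byte value just after acquisition
+     int i;
+     for (i = 0; i < 128; ++i)
+       --counter;                  128 waiters: counter == -128
+     --counter;                    wraps to +127 and "looks free"
+
+   The word-sized implementations below keep that overflow out of
+   reach in practice.  */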
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ unsigned int tmp;
+ asm volatile
+ ("1: ldl_l %0,%1\n"
+ " blbs %0,2f\n"
+ " or %0,1,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: ldl %0,%1\n"
+ " blbs %0,2b\n"
+ " br 1b\n"
+ ".previous"
+ : "=r" (tmp), "=m" (lock)
+ : "m" (lock));
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ unsigned long int oldval;
+ unsigned long int temp;
+
+ asm volatile
+ ("1: ldl_l %0,%1\n"
+ " and %0,%3,%2\n"
+ " bne %2,2f\n"
+ " xor %0,%3,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ " mb\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (temp), "=m" (*lock), "=&r" (oldval)
+ : "Ir" (1UL), "m" (*lock));
+
+ return oldval == 0 ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ asm volatile ("mb");
+ return *lock = 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ *lock = 0;
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
--- /dev/null
+/* POSIX spinlock implementation.  ARM version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ unsigned int val;
+
+ do
+ asm volatile ("swp %0, %1, [%2]"
+ : "=r" (val)
+ : "0" (1), "r" (lock)
+ : "memory");
+ while (val != 0);
+
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ unsigned int val;
+
+ asm volatile ("swp %0, %1, [%2]"
+ : "=r" (val)
+ : "0" (1), "r" (lock)
+ : "memory");
+
+ return val ? EBUSY : 0;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ return *lock = 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ return *lock = 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
--- /dev/null
+/* POSIX spinlock implementation. x86 version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+/* This implementation is similar to the one used in the Linux kernel.
+   But the kernel uses byte instructions for the memory access, which
+   is faster but unusable here: at most 128 threads/processes could
+   use the spinlock at the same time.  If (by a design error in the
+   program) a thread/process held the spinlock long enough for 128
+   waiting processes to accumulate, the next one would find a positive
+   value in the spinlock and assume it is unlocked.  We cannot accept
+   that.  */
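+
+/* Reading aid (added for exposition, not from the original sources):
+   the word-sized convention used below is
+
+     *lock == 1    free
+     *lock == 0    held, no waiters
+     *lock <  0    held, with spinning waiters
+
+   so "lock; decl" acquires exactly when the result is 0, the wait
+   loop spins while the value stays <= 0, and unlock simply stores
+   1 again.  */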
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ asm volatile
+ ("\n"
+ "1:\n\t"
+ "lock; decl %0\n\t"
+ "js 2f\n\t"
+ ".section .text.spinlock,\"ax\"\n"
+ "2:\n\t"
+ "cmpl $0,%0\n\t"
+ "rep; nop\n\t"
+ "jle 2b\n\t"
+ "jmp 1b\n\t"
+ ".previous"
+ : "=m" (*lock));
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ int oldval;
+
+ asm volatile
+ ("xchgl %0,%1"
+ : "=r" (oldval), "=m" (*lock)
+ : "0" (0));
+ return oldval > 0 ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ asm volatile
+ ("movl $1,%0"
+ : "=m" (*lock));
+ return 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ *lock = 1;
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
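
A minimal usage sketch of the new pthread_spin_* API (illustrative
only; it passes 0 for the process-shared argument, which all of the
implementations above ignore):

    #include <pthread.h>

    static pthread_spinlock_t lock;
    static int counter;

    static void
    bump (void)
    {
      pthread_spin_lock (&lock);	/* busy-waits until acquired */
      ++counter;			/* keep the critical section short */
      pthread_spin_unlock (&lock);
    }

    int
    main (void)
    {
      pthread_spin_init (&lock, 0);	/* 0: not shared between processes */
      bump ();
      pthread_spin_destroy (&lock);
      return 0;
    }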
--- /dev/null
+/* POSIX spinlock implementation. M68k version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ unsigned int val;
+
+ do
+ asm volatile ("tas %1; sne %0"
+ : "=dm" (val), "=m" (*lock)
+ : "m" (*lock)
+ : "cc");
+ while (val);
+
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ unsigned int val;
+
+ asm volatile ("tas %1; sne %0"
+ : "=dm" (val), "=m" (*lock)
+ : "m" (*lock)
+ : "cc");
+
+ return val ? EBUSY : 0;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ return *lock = 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ return *lock = 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
--- /dev/null
+/* POSIX spinlock implementation. MIPS version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ XXX
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
--- /dev/null
+/* POSIX spinlock implementation. PowerPC version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ XXX
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
#include <bits/sched.h>
/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
-typedef struct
+struct _pthread_fastlock
{
long int __status; /* "Free" or "taken" or head of waiting list */
int __spinlock; /* For compare-and-swap emulation */
-} pthread_spinlock_t;
+};
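
A reading aid for the fast-lock encoding (reconstructed from the field
comments; the exact code lives in spinlock.c and is only partially
shown here): __status is 0 when the lock is free, 1 when it is taken
with no waiters, and otherwise holds the descriptor of the most
recently queued waiter; __spinlock backs the compare-and-swap
emulation on machines without a native primitive.  The acquire path
is roughly:

    do {
      oldstatus = lock->__status;
      newstatus = (oldstatus == 0) ? 1 : (long) self;
    } while (!compare_and_swap (&lock->__status, oldstatus, newstatus,
                                &lock->__spinlock));
    if (oldstatus != 0)
      suspend (self);	/* the unlocker will restart us */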
#ifndef _PTHREAD_DESCR_DEFINED
/* Thread descriptors */
/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER) */
typedef struct
{
- pthread_spinlock_t __c_lock; /* Protect against concurrent access */
+ struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
_pthread_descr __c_waiting; /* Threads waiting on this condition */
} pthread_cond_t;
int __m_count; /* Depth of recursive locking */
_pthread_descr __m_owner; /* Owner thread (if recursive or errcheck) */
int __m_kind; /* Mutex kind: fast, recursive or errcheck */
- pthread_spinlock_t __m_lock; /* Underlying fast lock */
+ struct _pthread_fastlock __m_lock; /* Underlying fast lock */
} pthread_mutex_t;
/* Read-write locks. */
typedef struct _pthread_rwlock_t
{
- pthread_spinlock_t __rw_lock; /* Lock to guarantee mutual exclusion */
+ struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
int __rw_readers; /* Number of readers */
_pthread_descr __rw_writer; /* Identity of writer, or NULL if none */
_pthread_descr __rw_read_waiting; /* Threads waiting for reading */
} pthread_rwlockattr_t;
#endif
+#ifdef __USE_XOPEN2K
+/* POSIX spinlock data type. */
+typedef volatile int pthread_spinlock_t;
+#endif
+
/* Thread identifiers */
typedef unsigned long int pthread_t;
--- /dev/null
+/* POSIX spinlock implementation. SPARC32 version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ XXX
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
--- /dev/null
+/* POSIX spinlock implementation. SPARC64 version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ XXX
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+  /* We can ignore the `pshared' parameter.  Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ XXX
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
: "=t" (__value) : "0" (__value), "u" (__exponent)); \
return __value)
-#define __sincos_code \
+#ifdef __USE_GNU
+# define __sincos_code \
register long double __cosr; \
register long double __sinr; \
__asm __volatile__ \
{
__sincos_code;
}
+#endif
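
A usage note (added for exposition): with this change the inline
__sincos family is visible only when _GNU_SOURCE is defined before
<math.h> is included (that is what turns on __USE_GNU), matching the
fact that sincos itself is a GNU extension:

    #define _GNU_SOURCE
    #include <math.h>

    void
    polar (double phi, double *s, double *c)
    {
      sincos (phi, s, c);	/* sine and cosine in one call */
    }

Without _GNU_SOURCE these functions are not declared at all, which is
why the inline versions must be guarded as well.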
/* Optimized inline implementation, sometimes with reduced precision