1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* ***** BEGIN LICENSE BLOCK *****
3 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 * The contents of this file are subject to the Mozilla Public License Version
6 * 1.1 (the "License"); you may not use this file except in compliance with
7 * the License. You may obtain a copy of the License at
8 * http://www.mozilla.org/MPL/
10 * Software distributed under the License is distributed on an "AS IS" basis,
11 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
12 * for the specific language governing rights and limitations under the
15 * The Original Code is the Netscape Portable Runtime (NSPR).
17 * The Initial Developer of the Original Code is
18 * Netscape Communications Corporation.
19 * Portions created by the Initial Developer are Copyright (C) 1998-2000
20 * the Initial Developer. All Rights Reserved.
24 * Alternatively, the contents of this file may be used under the terms of
25 * either the GNU General Public License Version 2 or later (the "GPL"), or
26 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
27 * in which case the provisions of the GPL or the LGPL are applicable instead
28 * of those above. If you wish to allow use of your version of this file only
29 * under the terms of either the GPL or the LGPL, and not to allow others to
30 * use your version of this file under the terms of the MPL, indicate your
31 * decision by deleting the provisions above and replace them with the notice
32 * and other provisions required by the GPL or the LGPL. If you do not delete
33 * the provisions above, a recipient may use your version of this file under
34 * the terms of any one of the MPL, the GPL or the LGPL.
36 * ***** END LICENSE BLOCK ***** */
44 ** Some local variables report warnings on Win95 because the code paths
45 ** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
46 ** The pragma suppresses the warning.
49 #pragma warning(disable : 4101)
52 /* _pr_activeLock protects the following global variables */
53 PRLock *_pr_activeLock;
54 PRInt32 _pr_primordialExitCount; /* In PR_Cleanup(), the primordial thread
55 * waits until all other user (non-system)
56 * threads have terminated before it exits.
57 * So whenever we decrement _pr_userActive,
59 * _pr_primordialExitCount.
60 * If the primordial thread is a system
61 * thread, then _pr_primordialExitCount
62 * is 0. If the primordial thread is
63 * itself a user thread, then
64 * _pr_primordialExitCount is 1.
66 PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
67 * _pr_primordialExitCount, this condition
68 * variable is notified.
71 PRLock *_pr_deadQLock;
72 PRUint32 _pr_numNativeDead;
73 PRUint32 _pr_numUserDead;
74 PRCList _pr_deadNativeQ;
75 PRCList _pr_deadUserQ;
77 PRUint32 _pr_join_counter;
79 PRUint32 _pr_local_threads;
80 PRUint32 _pr_global_threads;
82 PRBool suspendAllOn = PR_FALSE;
83 PRThread *suspendAllThread = NULL;
85 extern PRCList _pr_active_global_threadQ;
86 extern PRCList _pr_active_local_threadQ;
88 static void _PR_DecrActiveThreadCount(PRThread *thread);
89 static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
90 static void _PR_InitializeNativeStack(PRThreadStack *ts);
91 static void _PR_InitializeRecycledThread(PRThread *thread);
92 static void _PR_UserRunThread(void);
/*
** NOTE(review): this is a line-numbered listing with interior lines elided
** (the embedded numbering skips); the code below is left byte-identical.
**
** _PR_InitThreads: one-time initialization of the NSPR threading layer for
** the primordial thread. Creates the global locks (_pr_terminationCVLock,
** _pr_activeLock), builds a stack descriptor for the primordial thread,
** attaches a PRThread object for it, records whether the primordial thread
** is a system or user thread (via _pr_primordialExitCount), places it on
** the appropriate active-thread queue, and initializes the dead-thread
** queues used for thread recycling.
*/
94 void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
100 _pr_terminationCVLock = PR_NewLock();
101 _pr_activeLock = PR_NewLock();
103 #ifndef HAVE_CUSTOM_USER_THREADS
104 stack = PR_NEWZAP(PRThreadStack);
105 #ifdef HAVE_STACK_GROWING_UP
/* stackTop is computed from the address of a local rounded to a page
 * boundary -- presumably the base of the primordial stack; TODO confirm. */
106 stack->stackTop = (char*) ((((long)&type) >> _pr_pageShift)
109 #if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
110 stack->stackTop = (char*) &thread;
112 stack->stackTop = (char*) ((((long)&type + _pr_pageSize - 1)
113 >> _pr_pageShift) << _pr_pageShift);
117 /* If stack is NULL, we're using custom user threads like NT fibers. */
118 stack = PR_NEWZAP(PRThreadStack);
/* stackSize 0 tells _PR_InitializeNativeStack to derive values itself. */
120 stack->stackSize = 0;
121 _PR_InitializeNativeStack(stack);
123 #endif /* HAVE_CUSTOM_USER_THREADS */
125 thread = _PR_AttachThread(type, priority, stack);
127 _PR_MD_SET_CURRENT_THREAD(thread);
129 if (type == PR_SYSTEM_THREAD) {
130 thread->flags = _PR_SYSTEM;
/* System primordial thread: PR_Cleanup() need not wait for it. */
132 _pr_primordialExitCount = 0;
/* User primordial thread: it counts itself among the user threads that
 * must remain when PR_Cleanup() waits (see _pr_primordialExitCount docs). */
135 _pr_primordialExitCount = 1;
137 thread->no_sched = 1;
138 _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
141 if (!thread) PR_Abort();
142 #ifdef _PR_LOCAL_THREADS_ONLY
143 thread->flags |= _PR_PRIMORDIAL;
145 thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
149 * Needs _PR_PRIMORDIAL flag set before calling
150 * _PR_MD_INIT_THREAD()
152 if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
158 if (_PR_IS_NATIVE_THREAD(thread)) {
159 PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
160 _pr_global_threads++;
162 PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
166 _pr_recycleThreads = 0;
167 _pr_deadQLock = PR_NewLock();
168 _pr_numNativeDead = 0;
170 PR_INIT_CLIST(&_pr_deadNativeQ);
171 PR_INIT_CLIST(&_pr_deadUserQ);
/*
** _PR_CleanupThreads: tear down the global threading state created by
** _PR_InitThreads. Destroys the termination-CV lock, the active lock, the
** primordial-exit condition variable, and the dead-queue lock, nulling each
** global pointer so a double cleanup is a no-op.
** (Listing has elided lines; code left byte-identical.)
*/
174 void _PR_CleanupThreads(void)
176 if (_pr_terminationCVLock) {
177 PR_DestroyLock(_pr_terminationCVLock);
178 _pr_terminationCVLock = NULL;
180 if (_pr_activeLock) {
181 PR_DestroyLock(_pr_activeLock);
182 _pr_activeLock = NULL;
184 if (_pr_primordialExitCVar) {
185 PR_DestroyCondVar(_pr_primordialExitCVar);
186 _pr_primordialExitCVar = NULL;
/* Dead-thread queues are leaked here -- acknowledged by the TODO below. */
188 /* TODO _pr_dead{Native,User}Q need to be deleted */
190 PR_DestroyLock(_pr_deadQLock);
191 _pr_deadQLock = NULL;
196 ** Initialize a stack for a native thread
/*
** Lazily fills in the PRThreadStack fields (allocBase, allocSize, stackTop,
** stackBottom) for a native thread, using the address of a local variable
** as an approximation of the current stack position rounded to a page
** boundary. Only runs when stackTop is still 0, so it is idempotent.
** (Listing has elided lines; code left byte-identical.)
*/
198 static void _PR_InitializeNativeStack(PRThreadStack *ts)
200 if( ts && (ts->stackTop == 0) ) {
201 ts->allocSize = ts->stackSize;
204 ** Setup stackTop and stackBottom values.
206 #ifdef HAVE_STACK_GROWING_UP
/* Stack grows upward: base (lowest address) is the rounded-down address
 * of this frame; bottom lies stackSize bytes above it. */
207 ts->allocBase = (char*) ((((long)&ts) >> _pr_pageShift)
209 ts->stackBottom = ts->allocBase + ts->stackSize;
210 ts->stackTop = ts->allocBase;
/* Stack grows downward: base is rounded up to the next page; bottom lies
 * stackSize bytes below it. */
212 ts->allocBase = (char*) ((((long)&ts + _pr_pageSize - 1)
213 >> _pr_pageShift) << _pr_pageShift);
214 ts->stackTop = ts->allocBase;
215 ts->stackBottom = ts->allocBase - ts->stackSize;
/*
** _PR_NotifyJoinWaiters: called by a terminating joinable thread (it must
** be the current thread) to move itself to _PR_JOIN_WAIT state, queue
** itself on the join queue (local threads only), and notify its "term"
** condition variable so that a joiner blocked in PR_JoinThread wakes up.
** The thread then blocks in _PR_MD_WAIT until the joiner revives it.
** Threads created unjoinable (term == NULL) return immediately.
** (Listing has elided lines; code left byte-identical.)
*/
220 void _PR_NotifyJoinWaiters(PRThread *thread)
223 ** Handle joinable threads. Change the state to waiting for join.
224 ** Remove from our run Q and put it on global waiting to join Q.
225 ** Notify on our "termination" condition variable so that joining
226 ** thread will know about our termination. Switch our context and
227 ** come back later on to continue the cleanup.
229 PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
230 if (thread->term != NULL) {
231 PR_Lock(_pr_terminationCVLock);
232 _PR_THREAD_LOCK(thread);
233 thread->state = _PR_JOIN_WAIT;
234 if ( !_PR_IS_NATIVE_THREAD(thread) ) {
235 _PR_MISCQ_LOCK(thread->cpu);
236 _PR_ADD_JOINQ(thread, thread->cpu);
237 _PR_MISCQ_UNLOCK(thread->cpu);
239 _PR_THREAD_UNLOCK(thread);
240 PR_NotifyCondVar(thread->term);
241 PR_Unlock(_pr_terminationCVLock);
/* Block until the joining thread wakes us to finish cleanup. */
242 _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
243 PR_ASSERT(thread->state != _PR_JOIN_WAIT);
249 * Zero some of the data members of a recycled thread.
251 * Note that we can do this either when a dead thread is added to
252 * the dead thread queue or when it is reused. Here, we are doing
253 * this lazily, when the thread is reused in _PR_CreateThread().
/*
** Asserts (in debug builds) that per-thread data, dump hooks, and error
** strings were already cleared by _PR_CleanupThread, then resets the
** error codes, I/O flags, environment pointer, and lock list so the
** recycled PRThread starts from a clean slate.
** (Listing has elided lines; code left byte-identical.)
*/
255 static void _PR_InitializeRecycledThread(PRThread *thread)
258 * Assert that the following data members are already zeroed
259 * by _PR_CleanupThread().
262 if (thread->privateData) {
264 for (i = 0; i < thread->tpdLength; i++) {
265 PR_ASSERT(thread->privateData[i] == NULL);
269 PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
270 PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
271 PR_ASSERT(thread->errorStringLength == 0);
273 /* Reset data members in thread structure */
274 thread->errorCode = thread->osErrorCode = 0;
275 thread->io_pending = thread->io_suspended = PR_FALSE;
276 thread->environment = 0;
277 PR_INIT_CLIST(&thread->lockList);
/*
** _PR_RecycleThread: if the recycle cache is not full, place the dead
** thread on the appropriate dead queue (native vs. user) for later reuse
** by _PR_CreateThread. Success/failure return lines are elided in this
** listing -- presumably PR_SUCCESS when queued, PR_FAILURE otherwise;
** TODO confirm against the full source.
*/
280 PRStatus _PR_RecycleThread(PRThread *thread)
282 if ( _PR_IS_NATIVE_THREAD(thread) &&
283 _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
285 PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
289 } else if ( !_PR_IS_NATIVE_THREAD(thread) &&
290 _PR_NUM_DEADUSER < _pr_recycleThreads) {
292 PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
301 * Decrement the active thread count, either _pr_systemActive or
302 * _pr_userActive, depending on whether the thread is a system thread
303 * or a user thread. If all the user threads, except possibly
304 * the primordial thread, have terminated, we notify the primordial
305 * thread of this condition.
307 * Since this function will lock _pr_activeLock, do not call this
308 * function while holding the _pr_activeLock lock, as this will result
/* (continuation elided: presumably "...in a deadlock".) */
313 _PR_DecrActiveThreadCount(PRThread *thread)
315 PR_Lock(_pr_activeLock);
316 if (thread->flags & _PR_SYSTEM) {
/* When only the primordial thread (if it is a user thread) remains,
 * wake PR_Cleanup() waiting on _pr_primordialExitCVar. */
320 if (_pr_userActive == _pr_primordialExitCount) {
321 PR_NotifyCondVar(_pr_primordialExitCVar);
324 PR_Unlock(_pr_activeLock);
328 ** Detach thread structure
/*
** Frees the thread's MD lock; remaining teardown lines (presumably
** freeing the PRThread itself) are elided in this listing.
*/
331 _PR_DestroyThread(PRThread *thread)
333 _PR_MD_FREE_LOCK(&thread->threadLock);
/*
** _PR_NativeDestroyThread: release the resources of a dead native thread:
** its termination condvar (if joinable), its thread-private-data array,
** its heap-allocated stack descriptor, and finally the thread object
** itself via _PR_DestroyThread.
** (Listing has elided lines; code left byte-identical.)
*/
338 _PR_NativeDestroyThread(PRThread *thread)
341 PR_DestroyCondVar(thread->term);
344 if (NULL != thread->privateData) {
345 PR_ASSERT(0 != thread->tpdLength);
346 PR_DELETE(thread->privateData);
347 thread->tpdLength = 0;
349 PR_DELETE(thread->stack);
350 _PR_DestroyThread(thread);
/*
** _PR_UserDestroyThread: release the resources of a dead user-level
** thread. Frees the termination condvar and private data, then disposes of
** the stack: when the PRThread lives on its own allocated stack
** (threadAllocatedOnStack == 1) the whole stack is returned via
** _PR_FreeStack; otherwise only the primordial thread is expected here
** (per the assertion) and its separately-allocated object is handled in
** the elided branch.
** (Listing has elided lines; code left byte-identical.)
*/
354 _PR_UserDestroyThread(PRThread *thread)
357 PR_DestroyCondVar(thread->term);
360 if (NULL != thread->privateData) {
361 PR_ASSERT(0 != thread->tpdLength);
362 PR_DELETE(thread->privateData);
363 thread->tpdLength = 0;
365 _PR_MD_FREE_LOCK(&thread->threadLock);
366 if (thread->threadAllocatedOnStack == 1) {
367 _PR_MD_CLEAN_THREAD(thread);
369 * Because the no_sched field is set, this thread/stack will
370 * will not be re-used until the flag is cleared by the thread
371 * we will context switch to.
373 _PR_FreeStack(thread->stack);
376 _PR_MD_CLEAN_THREAD(thread);
379 * This assertion does not apply to NT. On NT, every fiber
380 * has its threadAllocatedOnStack equal to 0. Elsewhere,
381 * only the primordial thread has its threadAllocatedOnStack
384 PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
391 ** Run a thread's start function. When the start function returns the
392 ** thread is done executing and no longer needs the CPU. If there are no
393 ** more user threads running then we can exit the program.
/*
** _PR_NativeRunThread: trampoline for newly created native (global-scope)
** threads. Sets up the MD current-thread/cpu state and the stack record,
** runs MD per-thread init, registers the thread on the active global
** queue, invokes the user start function, then performs exit processing:
** deregistration, cleanup, join notification, active-count decrement, and
** either recycling (parking in _PR_MD_WAIT for reuse) or full destruction.
** (Listing has elided lines; code left byte-identical.)
*/
395 void _PR_NativeRunThread(void *arg)
397 PRThread *thread = (PRThread *)arg;
399 _PR_MD_SET_CURRENT_THREAD(thread);
401 _PR_MD_SET_CURRENT_CPU(NULL);
403 /* Set up the thread stack information */
404 _PR_InitializeNativeStack(thread->stack);
406 /* Set up the thread md information */
407 if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
409 * thread failed to initialize itself, possibly due to
410 * failure to allocate per-thread resources
416 thread->state = _PR_RUNNING;
419 * Add to list of active threads
421 PR_Lock(_pr_activeLock);
422 PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
423 _pr_global_threads++;
424 PR_Unlock(_pr_activeLock);
/* Run the user-supplied thread body. */
426 (*thread->startFunc)(thread->arg);
429 * The following two assertions are meant for NT asynch io.
431 * The thread should have no asynch io in progress when it
432 * exits, otherwise the overlapped buffer, which is part of
433 * the thread structure, would become invalid.
435 PR_ASSERT(thread->io_pending == PR_FALSE);
437 * This assertion enforces the programming guideline that
438 * if an io function times out or is interrupted, the thread
439 * should close the fd to force the asynch io to abort
440 * before it exits. Right now, closing the fd is the only
441 * way to clear the io_suspended flag.
443 PR_ASSERT(thread->io_suspended == PR_FALSE);
446 * remove thread from list of active threads
448 PR_Lock(_pr_activeLock);
449 PR_REMOVE_LINK(&thread->active);
450 _pr_global_threads--;
451 PR_Unlock(_pr_activeLock);
453 PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));
455 /* All done, time to go away */
456 _PR_CleanupThread(thread);
458 _PR_NotifyJoinWaiters(thread);
460 _PR_DecrActiveThreadCount(thread);
462 thread->state = _PR_DEAD_STATE;
464 if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
467 * thread not recycled
468 * platform-specific thread exit processing
469 * - for stuff like releasing native-thread resources, etc.
471 _PR_MD_EXIT_THREAD(thread);
473 * Free memory allocated for the thread
475 _PR_NativeDestroyThread(thread);
477 * thread gone, cannot de-reference thread now
482 /* Now wait for someone to activate us again... */
483 _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
/*
** _PR_UserRunThread: trampoline for user-level (local-scope) threads.
** Clears the previous thread's no_sched flag, lazily builds a stack
** record for custom user threads (e.g. NT fibers), re-enables soft
** interrupts, registers the thread as active (idle threads excepted),
** runs the user start function, and then performs the same exit sequence
** as the native path -- ending in either destruction or recycling via a
** context switch back to the scheduler.
** (Listing has elided lines; code left byte-identical.)
*/
487 static void _PR_UserRunThread(void)
489 PRThread *thread = _PR_MD_CURRENT_THREAD();
/* The thread we switched away from may still be marked unschedulable;
 * clear that so its stack can be reused. */
492 if (_MD_LAST_THREAD())
493 _MD_LAST_THREAD()->no_sched = 0;
495 #ifdef HAVE_CUSTOM_USER_THREADS
496 if (thread->stack == NULL) {
497 thread->stack = PR_NEWZAP(PRThreadStack);
498 _PR_InitializeNativeStack(thread->stack);
500 #endif /* HAVE_CUSTOM_USER_THREADS */
503 /* Run thread main */
504 if ( !_PR_IS_NATIVE_THREAD(thread)) _PR_MD_SET_INTSOFF(0);
507 * Add to list of active threads
509 if (!(thread->flags & _PR_IDLE_THREAD)) {
510 PR_Lock(_pr_activeLock);
511 PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
513 PR_Unlock(_pr_activeLock);
516 (*thread->startFunc)(thread->arg);
519 * The following two assertions are meant for NT asynch io.
521 * The thread should have no asynch io in progress when it
522 * exits, otherwise the overlapped buffer, which is part of
523 * the thread structure, would become invalid.
525 PR_ASSERT(thread->io_pending == PR_FALSE);
527 * This assertion enforces the programming guideline that
528 * if an io function times out or is interrupted, the thread
529 * should close the fd to force the asynch io to abort
530 * before it exits. Right now, closing the fd is the only
531 * way to clear the io_suspended flag.
533 PR_ASSERT(thread->io_suspended == PR_FALSE);
535 PR_Lock(_pr_activeLock);
537 * remove thread from list of active threads
539 if (!(thread->flags & _PR_IDLE_THREAD)) {
540 PR_REMOVE_LINK(&thread->active);
543 PR_Unlock(_pr_activeLock);
544 PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));
546 /* All done, time to go away */
547 _PR_CleanupThread(thread);
551 _PR_NotifyJoinWaiters(thread);
553 _PR_DecrActiveThreadCount(thread);
555 thread->state = _PR_DEAD_STATE;
557 if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
560 ** Destroy the thread resources
562 _PR_UserDestroyThread(thread);
566 ** Find another user thread to run. This cpu has finished the
567 ** previous threads main and is now ready to run another thread.
572 _PR_MD_SWITCH_CONTEXT(thread);
575 /* Will land here when we get scheduled again if we are recycling... */
/*
** _PR_SetThreadPriority: change a thread's scheduling priority. Native
** threads delegate to the MD layer. Local threads are handled under the
** thread lock according to their state: a running thread just updates its
** priority (setting the resched flag if a higher-priority thread is now
** runnable); a runnable thread is moved between run queues; other states
** (elided here) simply record the new priority.
** (Listing has elided lines, including some case labels; code left
** byte-identical.)
*/
579 void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri)
581 PRThread *me = _PR_MD_CURRENT_THREAD();
584 if ( _PR_IS_NATIVE_THREAD(thread) ) {
585 _PR_MD_SET_PRIORITY(&(thread->md), newPri);
/* Local thread: disable interrupts (elided call) and lock the thread. */
589 if (!_PR_IS_NATIVE_THREAD(me))
591 _PR_THREAD_LOCK(thread);
592 if (newPri != thread->priority) {
593 _PRCPU *cpu = thread->cpu;
595 switch (thread->state) {
597 /* Change my priority */
600 thread->priority = newPri;
/* If any ready thread now outranks the new priority, ask for a
 * reschedule. */
601 if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
602 if (!_PR_IS_NATIVE_THREAD(me))
603 _PR_SET_RESCHED_FLAG();
605 _PR_RUNQ_UNLOCK(cpu);
611 /* Move to different runQ */
612 _PR_DEL_RUNQ(thread);
613 thread->priority = newPri;
614 PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
615 _PR_ADD_RUNQ(thread, cpu, newPri);
616 _PR_RUNQ_UNLOCK(cpu);
618 if (newPri > me->priority) {
619 if (!_PR_IS_NATIVE_THREAD(me))
620 _PR_SET_RESCHED_FLAG();
630 thread->priority = newPri;
634 _PR_THREAD_UNLOCK(thread);
635 if (!_PR_IS_NATIVE_THREAD(me))
640 ** Suspend the named thread and copy its gc registers into regBuf
/*
** _PR_Suspend: suspend another thread (never the caller, per the
** assertion). A runnable local thread is pulled off its run queue and
** moved to the suspend queue with state _PR_SUSPENDED; a native thread is
** suspended through the MD layer; otherwise the _PR_SUSPENDING flag is
** set so the thread suspends itself when it next blocks/resumes.
** (Listing has elided lines, including case labels; code left
** byte-identical.)
*/
642 static void _PR_Suspend(PRThread *thread)
645 PRThread *me = _PR_MD_CURRENT_THREAD();
647 PR_ASSERT(thread != me);
648 PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));
650 if (!_PR_IS_NATIVE_THREAD(me))
652 _PR_THREAD_LOCK(thread);
653 switch (thread->state) {
655 if (!_PR_IS_NATIVE_THREAD(thread)) {
656 _PR_RUNQ_LOCK(thread->cpu);
657 _PR_DEL_RUNQ(thread);
658 _PR_RUNQ_UNLOCK(thread->cpu);
660 _PR_MISCQ_LOCK(thread->cpu);
661 _PR_ADD_SUSPENDQ(thread, thread->cpu);
662 _PR_MISCQ_UNLOCK(thread->cpu);
665 * Only LOCAL threads are suspended by _PR_Suspend
669 thread->state = _PR_SUSPENDED;
674 * The thread being suspended should be a LOCAL thread with
675 * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
683 if (_PR_IS_NATIVE_THREAD(thread)) {
684 _PR_MD_SUSPEND_THREAD(thread);
686 thread->flags |= _PR_SUSPENDING;
692 _PR_THREAD_UNLOCK(thread);
693 if (!_PR_IS_NATIVE_THREAD(me))
/*
** _PR_Resume: undo a suspension. A thread in _PR_SUSPENDED state is made
** runnable again (moved from the suspend queue back to a run queue, with a
** reschedule request if it now outranks the caller); threads that were
** merely flagged _PR_SUSPENDING while blocked have the flag cleared, and a
** lock waiter is re-unblocked if its lock has since become free.
** (Listing has elided lines, including case labels; code left
** byte-identical.)
*/
697 static void _PR_Resume(PRThread *thread)
699 PRThreadPriority pri;
701 PRThread *me = _PR_MD_CURRENT_THREAD();
703 if (!_PR_IS_NATIVE_THREAD(me))
705 _PR_THREAD_LOCK(thread);
706 switch (thread->state) {
708 thread->state = _PR_RUNNABLE;
709 thread->flags &= ~_PR_SUSPENDING;
710 if (!_PR_IS_NATIVE_THREAD(thread)) {
711 _PR_MISCQ_LOCK(thread->cpu);
712 _PR_DEL_SUSPENDQ(thread);
713 _PR_MISCQ_UNLOCK(thread->cpu);
715 pri = thread->priority;
717 _PR_RUNQ_LOCK(thread->cpu);
718 _PR_ADD_RUNQ(thread, thread->cpu, pri);
719 _PR_RUNQ_UNLOCK(thread->cpu);
721 if (pri > _PR_MD_CURRENT_THREAD()->priority) {
722 if (!_PR_IS_NATIVE_THREAD(me))
723 _PR_SET_RESCHED_FLAG();
732 thread->flags &= ~_PR_SUSPENDING;
733 /* PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
738 PRLock *wLock = thread->wait.lock;
740 thread->flags &= ~_PR_SUSPENDING;
/* If the lock the thread was waiting on is now free, wake a waiter so
 * the resumed thread can make progress. */
742 _PR_LOCK_LOCK(wLock);
743 if (thread->wait.lock->owner == 0) {
744 _PR_UnblockLockWaiter(thread->wait.lock);
746 _PR_LOCK_UNLOCK(wLock);
753 * The thread being suspended should be a LOCAL thread with
754 * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
761 * thread should have been in one of the above-listed blocked states
762 * (_PR_JOIN_WAIT, _PR_IO_WAIT, _PR_UNBORN, _PR_DEAD_STATE)
766 _PR_THREAD_UNLOCK(thread);
767 if (!_PR_IS_NATIVE_THREAD(me))
772 #if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
/*
** get_thread: work-stealing helper for _PR_Schedule on MxN Unix builds.
** Scans another CPU's run queues from highest to lowest ready priority and
** returns the first stealable thread, skipping threads that are
** unschedulable (no_sched -- in which case all CPUs may need waking so the
** thread is not missed), bound to a specific CPU, or blocked with pending
** I/O (which must resume on the CPU that owns its ioq). The chosen thread
** is removed from the run queue before return.
** (Listing has elided lines; code left byte-identical.)
*/
773 static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus)
779 PRIntn priMin, priMax;
/* Narrow the priority scan using the ready mask: nothing ready, only
 * NORMAL ready, or the full range. */
782 r = _PR_RUNQREADYMASK(cpu);
784 priMin = priMax = PR_PRIORITY_FIRST;
785 } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
786 priMin = priMax = PR_PRIORITY_NORMAL;
788 priMin = PR_PRIORITY_FIRST;
789 priMax = PR_PRIORITY_LAST;
792 for (pri = priMax; pri >= priMin ; pri-- ) {
793 if (r & (1 << pri)) {
794 for (qp = _PR_RUNQ(cpu)[pri].next;
795 qp != &_PR_RUNQ(cpu)[pri];
797 thread = _PR_THREAD_PTR(qp);
799 * skip non-schedulable threads
801 PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
802 if (thread->no_sched) {
805 * Need to wakeup cpus to avoid missing a
807 * Waking up all CPU's need happen only once.
810 *wakeup_cpus = PR_TRUE;
812 } else if (thread->flags & _PR_BOUND_THREAD) {
814 * Thread bound to cpu 0
819 _PR_MD_WAKEUP_PRIMORDIAL_CPU();
822 } else if (thread->io_pending == PR_TRUE) {
824 * A thread that is blocked for I/O needs to run
825 * on the same cpu on which it was blocked. This is because
826 * the cpu's ioq is accessed without lock protection and scheduling
827 * the thread on a different cpu would preclude this optimization.
832 /* Pull thread off of its run queue */
833 _PR_DEL_RUNQ(thread);
834 _PR_RUNQ_UNLOCK(cpu);
841 _PR_RUNQ_UNLOCK(cpu);
844 #endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */
847 ** Schedule this native thread by finding the highest priority nspr
848 ** thread that is ready to run.
850 ** Note- everyone really needs to call _PR_MD_SWITCH_CONTEXT (which calls
851 ** PR_Schedule() rather than calling PR_Schedule. Otherwise if there
852 ** is initialization required for switching from SWITCH_CONTEXT,
853 ** it will not get done!
/*
** _PR_Schedule: core user-level scheduler. With interrupts disabled, it
** (1) honors PR_SuspendAll mode by scheduling only suspendAllThread;
** (2) picks the highest-priority runnable thread from this CPU's run
** queues; (3) on MxN Unix builds, steals work from other CPUs via
** get_thread(); (4) otherwise falls back to the CPU's idle thread.
** The chosen thread is marked _PR_RUNNING and resumed with
** _PR_MD_RESTORE_CONTEXT, which does not return here for setjmp/longjmp
** ports (see trailing comment re fibers).
** (Listing has elided lines; code left byte-identical.)
*/
855 void _PR_Schedule(void)
857 PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
858 _PRCPU *cpu = _PR_MD_CURRENT_CPU();
862 PRIntn priMin, priMax;
863 #if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
867 /* Interrupts must be disabled */
868 PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);
870 /* Since we are rescheduling, we no longer want to */
871 _PR_CLEAR_RESCHED_FLAG();
874 ** Find highest priority thread to run. Bigger priority numbers are
875 ** higher priority threads
879 * if we are in SuspendAll mode, can schedule only the thread
880 * that called PR_SuspendAll
882 * The thread may be ready to run now, after completing an I/O
883 * operation, for example
885 if ((thread = suspendAllThread) != 0) {
886 if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
887 /* Pull thread off of its run queue */
888 _PR_DEL_RUNQ(thread);
889 _PR_RUNQ_UNLOCK(cpu);
893 _PR_RUNQ_UNLOCK(cpu);
/* Same ready-mask narrowing as get_thread(): restrict the priority scan
 * to the levels that actually have runnable threads. */
897 r = _PR_RUNQREADYMASK(cpu);
899 priMin = priMax = PR_PRIORITY_FIRST;
900 } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
901 priMin = priMax = PR_PRIORITY_NORMAL;
903 priMin = PR_PRIORITY_FIRST;
904 priMax = PR_PRIORITY_LAST;
907 for (pri = priMax; pri >= priMin ; pri-- ) {
908 if (r & (1 << pri)) {
909 for (qp = _PR_RUNQ(cpu)[pri].next;
910 qp != &_PR_RUNQ(cpu)[pri];
912 thread = _PR_THREAD_PTR(qp);
914 * skip non-schedulable threads
916 PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
917 if ((thread->no_sched) && (me != thread)){
921 /* Pull thread off of its run queue */
922 _PR_DEL_RUNQ(thread);
923 _PR_RUNQ_UNLOCK(cpu);
930 _PR_RUNQ_UNLOCK(cpu);
932 #if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
934 wakeup_cpus = PR_FALSE;
936 for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
937 if (cpu != _PR_CPU_PTR(qp)) {
938 if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus))
941 _PR_CPU_LIST_UNLOCK();
942 if (wakeup_cpus == PR_TRUE)
943 _PR_MD_WAKEUP_CPUS();
948 _PR_CPU_LIST_UNLOCK();
949 if (wakeup_cpus == PR_TRUE)
950 _PR_MD_WAKEUP_CPUS();
952 #endif /* _PR_LOCAL_THREADS_ONLY */
956 ** There are no threads to run. Switch to the idle thread
958 PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
959 thread = _PR_MD_CURRENT_CPU()->idle_thread;
962 PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
963 (!(thread->no_sched))));
965 /* Resume the thread */
966 PR_LOG(_pr_sched_lm, PR_LOG_MAX,
967 ("switching to %d[%p]", thread->id, thread));
968 PR_ASSERT(thread->state != _PR_RUNNING);
969 thread->state = _PR_RUNNING;
971 /* If we are on the runq, it just means that we went to sleep on some
972 * resource, and by the time we got here another real native thread had
973 * already given us the resource and put us back on the runqueue
975 PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
977 _PR_MD_RESTORE_CONTEXT(thread);
979 /* XXXMB; with setjmp/longjmp it is impossible to land here, but
980 * it is not with fibers... Is this a bad thing? I believe it is
983 PR_NOT_REACHED("impossible return from schedule");
988 ** Attaches a thread.
989 ** Does not set the _PR_MD_CURRENT_THREAD.
990 ** Does not specify the scope of the thread.
/*
** Allocates and minimally initializes a PRThread object: clamps the
** priority into [PR_PRIORITY_FIRST, PR_PRIORITY_LAST], zero-allocates the
** struct, records priority/stack/state, and creates the per-thread MD
** lock. Failure-path lines are elided in this listing.
*/
993 _PR_AttachThread(PRThreadType type, PRThreadPriority priority,
994 PRThreadStack *stack)
999 if (priority > PR_PRIORITY_LAST) {
1000 priority = PR_PRIORITY_LAST;
1001 } else if (priority < PR_PRIORITY_FIRST) {
1002 priority = PR_PRIORITY_FIRST;
1005 mem = (char*) PR_CALLOC(sizeof(PRThread));
1007 thread = (PRThread*) mem;
1008 thread->priority = priority;
1009 thread->stack = stack;
1010 thread->state = _PR_RUNNING;
1011 PR_INIT_CLIST(&thread->lockList);
1012 if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
/*
** _PR_NativeCreateThread: create a thread backed by an OS native thread.
** Attaches a PRThread, marks it global-scope (and system if requested),
** allocates a stack record sized by stackSize (or the MD default), sets
** the start function, allocates a termination condvar for joinable
** threads, and asks the MD layer to create the native thread. On MD
** failure the condvar/stack are released and the thread is destroyed.
** (Listing has elided lines; code left byte-identical.)
*/
1024 PR_IMPLEMENT(PRThread*)
1025 _PR_NativeCreateThread(PRThreadType type,
1026 void (*start)(void *arg),
1028 PRThreadPriority priority,
1029 PRThreadScope scope,
1030 PRThreadState state,
1036 thread = _PR_AttachThread(type, priority, NULL);
1039 PR_Lock(_pr_activeLock);
1040 thread->flags = (flags | _PR_GLOBAL_SCOPE);
1041 thread->id = ++_pr_utid;
1042 if (type == PR_SYSTEM_THREAD) {
1043 thread->flags |= _PR_SYSTEM;
1048 PR_Unlock(_pr_activeLock);
1050 thread->stack = PR_NEWZAP(PRThreadStack);
1051 if (!thread->stack) {
1052 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1055 thread->stack->stackSize = stackSize?stackSize:_MD_DEFAULT_STACK_SIZE;
1056 thread->stack->thr = thread;
1057 thread->startFunc = start;
1061 Set thread flags related to scope and joinable state. If joinable
1062 thread, allocate a "termination" condition variable.
1064 if (state == PR_JOINABLE_THREAD) {
1065 thread->term = PR_NewCondVar(_pr_terminationCVLock);
1066 if (thread->term == NULL) {
1067 PR_DELETE(thread->stack);
1072 thread->state = _PR_RUNNING;
1073 if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority,
1074 scope,state,stackSize) == PR_SUCCESS) {
/* MD creation failed: release joinable-thread resources and undo the
 * active-count increment before destroying the thread object. */
1078 PR_DestroyCondVar(thread->term);
1079 thread->term = NULL;
1081 PR_DELETE(thread->stack);
1086 _PR_DecrActiveThreadCount(thread);
1087 _PR_DestroyThread(thread);
1092 /************************************************************************/
/*
** _PR_CreateThread: the general thread-creation entry point behind
** PR_CreateThread. Clamps the priority, decides native vs. user scope
** (forced global under _PR_GLOBAL_THREADS_ONLY or _native_threads_only),
** and then either: reuses a recycled dead native/user thread, delegates to
** _PR_NativeCreateThread, or builds a user thread from scratch -- carving
** the PRThread out of the top of a freshly allocated stack (or via
** _PR_MD_CREATE_USER_THREAD for custom user threads), initializing MD
** state, lock, and context, and finally placing the thread on a run queue
** and waking a waiter as needed.
** (Listing has elided lines throughout; code left byte-identical.)
*/
1094 PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type,
1095 void (*start)(void *arg),
1097 PRThreadPriority priority,
1098 PRThreadScope scope,
1099 PRThreadState state,
1104 PRThread *thread = NULL;
1105 PRThreadStack *stack;
1109 PRIntn useRecycled = 0;
1113 First, pin down the priority. Not all compilers catch passing out of
1114 range enum here. If we let bad values thru, priority queues won't work.
1116 if (priority > PR_PRIORITY_LAST) {
1117 priority = PR_PRIORITY_LAST;
1118 } else if (priority < PR_PRIORITY_FIRST) {
1119 priority = PR_PRIORITY_FIRST;
1122 if (!_pr_initialized) _PR_ImplicitInitialization();
1124 if (! (flags & _PR_IDLE_THREAD))
1125 me = _PR_MD_CURRENT_THREAD();
1127 #if defined(_PR_GLOBAL_THREADS_ONLY)
1129 * can create global threads only
1131 if (scope == PR_LOCAL_THREAD)
1132 scope = PR_GLOBAL_THREAD;
1135 if (_native_threads_only)
1136 scope = PR_GLOBAL_THREAD;
1138 native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD))
1139 && _PR_IS_NATIVE_THREAD_SUPPORTED());
1141 _PR_ADJUST_STACKSIZE(stackSize);
1145 * clear the IDLE_THREAD flag which applies to LOCAL
1148 flags &= ~_PR_IDLE_THREAD;
1149 flags |= _PR_GLOBAL_SCOPE;
/* Fast path: reuse a recycled dead native thread if one is cached. */
1150 if (_PR_NUM_DEADNATIVE > 0) {
1153 if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
1156 thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
1157 PR_REMOVE_LINK(&thread->links);
1161 _PR_InitializeRecycledThread(thread);
1162 thread->startFunc = start;
1164 thread->flags = (flags | _PR_GLOBAL_SCOPE);
1165 if (type == PR_SYSTEM_THREAD)
1167 thread->flags |= _PR_SYSTEM;
1168 PR_ATOMIC_INCREMENT(&_pr_systemActive);
1170 else PR_ATOMIC_INCREMENT(&_pr_userActive);
1172 if (state == PR_JOINABLE_THREAD) {
1174 thread->term = PR_NewCondVar(_pr_terminationCVLock);
1178 PR_DestroyCondVar(thread->term);
1183 thread->priority = priority;
1184 _PR_MD_SET_PRIORITY(&(thread->md), priority);
1185 /* XXX what about stackSize? */
1186 thread->state = _PR_RUNNING;
/* Recycled native threads park in _PR_MD_WAIT; wake one up to run the
 * new start function. */
1187 _PR_MD_WAKEUP_WAITER(thread);
1191 thread = _PR_NativeCreateThread(type, start, arg, priority,
1192 scope, state, stackSize, flags);
/* User-thread path: first try the recycled dead-user queue. */
1194 if (_PR_NUM_DEADUSER > 0) {
1197 if (_PR_NUM_DEADUSER == 0) { /* thread safe check */
1202 /* Go down list checking for a recycled thread with a
1203 * large enough stack. XXXMB - this has a bad degenerate case.
1205 ptr = _PR_DEADUSERQ.next;
1206 while( ptr != &_PR_DEADUSERQ ) {
1207 thread = _PR_THREAD_PTR(ptr);
1208 if ((thread->stack->stackSize >= stackSize) &&
1209 (!thread->no_sched)) {
1210 PR_REMOVE_LINK(&thread->links);
1222 _PR_InitializeRecycledThread(thread);
1223 thread->startFunc = start;
1225 thread->priority = priority;
1226 if (state == PR_JOINABLE_THREAD) {
1228 thread->term = PR_NewCondVar(_pr_terminationCVLock);
1231 PR_DestroyCondVar(thread->term);
/* No recyclable thread found: build a brand-new user thread. */
1239 if (thread == NULL) {
1240 #ifndef HAVE_CUSTOM_USER_THREADS
1241 stack = _PR_NewStack(stackSize);
1243 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1247 /* Allocate thread object and per-thread data off the top of the stack*/
1248 top = stack->stackTop;
1249 #ifdef HAVE_STACK_GROWING_UP
1250 thread = (PRThread*) top;
1251 top = top + sizeof(PRThread);
1253 * Make stack 64-byte aligned
1255 if ((PRUptrdiff)top & 0x3f) {
1256 top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
1259 top = top - sizeof(PRThread);
1260 thread = (PRThread*) top;
1262 * Make stack 64-byte aligned
1264 if ((PRUptrdiff)top & 0x3f) {
1265 top = (char*)((PRUptrdiff)top & ~0x3f);
1268 stack->thr = thread;
1269 memset(thread, 0, sizeof(PRThread));
1270 thread->threadAllocatedOnStack = 1;
1272 thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
1274 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1277 thread->threadAllocatedOnStack = 0;
1282 /* Initialize thread */
1283 thread->tpdLength = 0;
1284 thread->privateData = NULL;
1285 thread->stack = stack;
1286 thread->priority = priority;
1287 thread->startFunc = start;
1289 PR_INIT_CLIST(&thread->lockList);
1291 if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
1292 if (thread->threadAllocatedOnStack == 1)
1293 _PR_FreeStack(thread->stack);
1297 PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
1301 if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
1302 if (thread->threadAllocatedOnStack == 1)
1303 _PR_FreeStack(thread->stack);
1305 PR_DELETE(thread->privateData);
1308 PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
/* Set up the initial machine context so the thread starts executing in
 * _PR_UserRunThread when first scheduled. */
1312 _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);
1314 if (status == PR_FALSE) {
1315 _PR_MD_FREE_LOCK(&thread->threadLock);
1316 if (thread->threadAllocatedOnStack == 1)
1317 _PR_FreeStack(thread->stack);
1319 PR_DELETE(thread->privateData);
1326 Set thread flags related to scope and joinable state. If joinable
1327 thread, allocate a "termination" condition variable.
1329 if (state == PR_JOINABLE_THREAD) {
1330 thread->term = PR_NewCondVar(_pr_terminationCVLock);
1331 if (thread->term == NULL) {
1332 _PR_MD_FREE_LOCK(&thread->threadLock);
1333 if (thread->threadAllocatedOnStack == 1)
1334 _PR_FreeStack(thread->stack);
1336 PR_DELETE(thread->privateData);
1345 /* Update thread type counter */
1346 PR_Lock(_pr_activeLock);
1347 thread->flags = flags;
1348 thread->id = ++_pr_utid;
1349 if (type == PR_SYSTEM_THREAD) {
1350 thread->flags |= _PR_SYSTEM;
1356 /* Make thread runnable */
1357 thread->state = _PR_RUNNABLE;
1359 * Add to list of active threads
1361 PR_Unlock(_pr_activeLock);
1363 if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) )
1364 thread->cpu = _PR_GetPrimordialCPU();
1366 thread->cpu = _PR_MD_CURRENT_CPU();
1368 PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));
1370 if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
1372 _PR_RUNQ_LOCK(thread->cpu);
1373 _PR_ADD_RUNQ(thread, thread->cpu, priority);
1374 _PR_RUNQ_UNLOCK(thread->cpu);
1377 if (thread->flags & _PR_IDLE_THREAD) {
1379 ** If the creating thread is a kernel thread, we need to
1380 ** awaken the user thread idle thread somehow; potentially
1381 ** it could be sleeping in its idle loop, and we need to poke
1382 ** it. To do so, wake the idle thread...
1384 _PR_MD_WAKEUP_WAITER(NULL);
1385 } else if (_PR_IS_NATIVE_THREAD(me)) {
1386 _PR_MD_WAKEUP_WAITER(thread);
1388 if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) )
/*
** PR_CreateThread: public API wrapper; forwards all arguments to
** _PR_CreateThread (with default flags on the elided final argument line).
*/
1395 PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type,
1396 void (*start)(void *arg),
1398 PRThreadPriority priority,
1399 PRThreadScope scope,
1400 PRThreadState state,
1403 return _PR_CreateThread(type, start, arg, priority, scope, state,
1408 ** Associate a thread object with an existing native thread.
1409 ** "type" is the type of thread object to attach
1410 ** "priority" is the priority to assign to the thread
1411 ** "stack" defines the shape of the threads stack
1413 ** This can return NULL if some kind of error occurs, or if memory is
1416 ** This call is not normally needed unless you create your own native
1417 ** thread. PR_Init does this automatically for the primordial thread.
/*
** If the native thread already has an attached PRThread, it is reused
** (after clearing the stale MD current-thread/cpu state). Otherwise a new
** PRThread is attached, flagged _PR_GLOBAL_SCOPE | _PR_ATTACHED, given a
** default-size stack record, MD-initialized, and counted as active.
** (Listing has elided lines; code left byte-identical.)
*/
1419 PRThread* _PRI_AttachThread(PRThreadType type,
1420 PRThreadPriority priority, PRThreadStack *stack, PRUint32 flags)
1424 if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) {
1427 _PR_MD_SET_CURRENT_THREAD(NULL);
1429 /* Clear out any state if this thread was attached before */
1430 _PR_MD_SET_CURRENT_CPU(NULL);
1432 thread = _PR_AttachThread(type, priority, stack);
1436 _PR_MD_SET_CURRENT_THREAD(thread);
1438 thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED;
1441 thread->stack = PR_NEWZAP(PRThreadStack);
1442 if (!thread->stack) {
1443 _PR_DestroyThread(thread);
1446 thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE;
1448 PR_INIT_CLIST(&thread->links);
1450 if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) {
1451 PR_DELETE(thread->stack);
1452 _PR_DestroyThread(thread);
1456 _PR_MD_SET_CURRENT_CPU(NULL);
/* _PR_MD_CURRENT_CPU() guards the lock here -- presumably because the
 * active lock is unusable before CPU infrastructure exists; TODO confirm. */
1458 if (_PR_MD_CURRENT_CPU()) {
1460 PR_Lock(_pr_activeLock);
1462 if (type == PR_SYSTEM_THREAD) {
1463 thread->flags |= _PR_SYSTEM;
1468 if (_PR_MD_CURRENT_CPU()) {
1469 PR_Unlock(_pr_activeLock);
/*
** PR_AttachThread: deprecated public attach API.  The parameters are
** ignored; it simply returns the calling thread's PRThread (PR_Init /
** implicit initialization attach foreign threads automatically).
*/
1476 PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type,
1477     PRThreadPriority priority, PRThreadStack *stack)
1479     return PR_GetCurrentThread();
/*
** PR_DetachThread: public detach API.  On platforms where foreign threads
** are NOT detached automatically (i.e. everything except IRIX, Win32, and
** Solaris with _PR_GLOBAL_THREADS_ONLY), detach the calling thread if it
** was attached via _PRI_AttachThread.  A no-op before NSPR is initialized.
*/
1482 PR_IMPLEMENT(void) PR_DetachThread(void)
1485     * On IRIX, Solaris, and Windows, foreign threads are detached when
1488 #if !defined(IRIX) && !defined(WIN32) \
1489     && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY))
1491     if (_pr_initialized) {
     /* Only threads explicitly attached (flagged _PR_ATTACHED) are torn
      * down; primordial/NSPR-created threads are left alone. */
1492     me = _PR_MD_GET_ATTACHED_THREAD();
1493     if ((me != NULL) && (me->flags & _PR_ATTACHED))
1494     _PRI_DetachThread();
/*
** _PRI_DetachThread: tear down the PRThread object of the calling
** (previously attached, native) thread: run per-thread cleanup, free
** private data, drop it from the active-thread count, release MD state,
** and free the stack record and thread lock.  Silently ignores the
** primordial thread.
*/
1499 void _PRI_DetachThread(void)
1501     PRThread *me = _PR_MD_CURRENT_THREAD();
1503     if (me->flags & _PR_PRIMORDIAL) {
1505     * ignore, if primordial thread
     /* Must be an attached, native thread past this point. */
1509     PR_ASSERT(me->flags & _PR_ATTACHED);
1510     PR_ASSERT(_PR_IS_NATIVE_THREAD(me));
1511     _PR_CleanupThread(me);
1512     PR_DELETE(me->privateData);
1514     _PR_DecrActiveThreadCount(me);
1516     _PR_MD_CLEAN_THREAD(me);
     /* Clear the current-thread TLS so later lookups don't see a freed
      * thread object. */
1517     _PR_MD_SET_CURRENT_THREAD(NULL);
     /* Stack record is only heap-allocated when the thread object was not
      * carved out of the stack itself. */
1518     if (!me->threadAllocatedOnStack)
1519     PR_DELETE(me->stack);
1520     _PR_MD_FREE_LOCK(&me->threadLock);
1525 ** Wait for thread termination:
1526 ** "thread" is the target thread
1528 ** This can return PR_FAILURE if no joinable thread could be found
1529 ** corresponding to the specified target thread.
1531 ** The calling thread is suspended until the target thread completes.
1532 ** Several threads cannot wait for the same thread to complete; one thread
1533 ** will complete successfully and others will terminate with an error PR_FAILURE.
1534 ** The calling thread will not be blocked if the target thread has already
/*
** PR_JoinThread: block until "thread" terminates (see contract above).
** Fails with PR_INVALID_ARGUMENT_ERROR for non-joinable targets, and
** fails if another joiner is already queued on the termination CV.
** NOTE(review): listing is elided — declarations (term, is), several
** returns/braces, and the _PR_INTSOFF pairing are missing from this view.
*/
1537 PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread)
1541     PRThread *me = _PR_MD_CURRENT_THREAD();
1543     if (!_PR_IS_NATIVE_THREAD(me))
     /* term is the target's termination condition variable. */
1545     term = thread->term;
1546     /* can't join a non-joinable thread */
1548     PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
1552     /* multiple threads can't wait on the same joinable thread */
     /* A non-empty condQ means someone is already waiting on term. */
1553     if (term->condQ.next != &term->condQ) {
1556     if (!_PR_IS_NATIVE_THREAD(me))
1559     /* wait for the target thread's termination cv invariant */
1560     PR_Lock (_pr_terminationCVLock);
     /* The terminating thread parks in _PR_JOIN_WAIT and notifies term;
      * loop guards against spurious wakeups. */
1561     while (thread->state != _PR_JOIN_WAIT) {
1562     (void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT);
1564     (void) PR_Unlock (_pr_terminationCVLock);
1567     Remove target thread from global waiting to join Q; make it runnable
1568     again and put it back on its run Q. When it gets scheduled later in
1569     _PR_RunThread code, it will clean up its stack.
1571     if (!_PR_IS_NATIVE_THREAD(me))
1573     thread->state = _PR_RUNNABLE;
     /* Local (user-level) targets: move from the per-CPU join queue back
      * onto a run queue under the thread lock. */
1574     if ( !_PR_IS_NATIVE_THREAD(thread) ) {
1575     _PR_THREAD_LOCK(thread);
1577     _PR_MISCQ_LOCK(thread->cpu);
1578     _PR_DEL_JOINQ(thread);
1579     _PR_MISCQ_UNLOCK(thread->cpu);
1581     _PR_AddThreadToRunQ(me, thread);
1582     _PR_THREAD_UNLOCK(thread);
1584     if (!_PR_IS_NATIVE_THREAD(me))
     /* Native targets are simply poked awake so they can finish dying. */
1587     _PR_MD_WAKEUP_WAITER(thread);
1592     if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);
/*
** PR_SetThreadPriority: clamp newPri into [PR_PRIORITY_FIRST,
** PR_PRIORITY_LAST], then apply it.  Native threads get the priority set
** directly via the MD layer; local (user-level) threads go through the
** scheduler's _PR_SetThreadPriority so run queues stay consistent.
*/
1596 PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread,
1597     PRThreadPriority newPri)
1601     First, pin down the priority. Not all compilers catch passing out of
1602     range enum here. If we let bad values thru, priority queues won't work.
1604     if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) {
1605     newPri = PR_PRIORITY_LAST;
1606     } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) {
1607     newPri = PR_PRIORITY_FIRST;
1610     if ( _PR_IS_NATIVE_THREAD(thread) ) {
1611     thread->priority = newPri;
1612     _PR_MD_SET_PRIORITY(&(thread->md), newPri);
1613     } else _PR_SetThreadPriority(thread, newPri);
1618 ** This routine prevents all other threads from running. This call is needed by
1619 ** the garbage collector.
/*
** PR_SuspendAll: stop every other GC-able thread (local and global) for
** the garbage collector.  Takes _pr_activeLock and leaves it held —
** PR_ResumeAll() releases it.  suspendAllOn/suspendAllThread record that
** a suspension is in progress and who initiated it.
*/
1621 PR_IMPLEMENT(void) PR_SuspendAll(void)
1623     PRThread *me = _PR_MD_CURRENT_THREAD();
1627     * Stop all user and native threads which are marked GC able.
1629     PR_Lock(_pr_activeLock);
1630     suspendAllOn = PR_TRUE;
1631     suspendAllThread = _PR_MD_CURRENT_THREAD();
1632     _PR_MD_BEGIN_SUSPEND_ALL();
     /* Local (user-level) threads: suspend via the scheduler. */
1633     for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1634     qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
1635     if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1636     _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
1637     _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp));
1638     PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING);
     /* Global (native) threads: suspend directly at the MD layer. */
1641     for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1642     qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
1643     if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1644     _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1645     /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */
1646     _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
1648     _PR_MD_END_SUSPEND_ALL();
1652 ** This routine unblocks all other threads that were suspended from running by
1653 ** PR_SuspendAll(). This call is needed by the garbage collector.
/*
** PR_ResumeAll: undo PR_SuspendAll() — resume every other GC-able thread,
** clear the suspendAll bookkeeping, and release _pr_activeLock that
** PR_SuspendAll() left held.
*/
1655 PR_IMPLEMENT(PRThread*) PR_ResumeAll(void)
1657     PRThread *me = _PR_MD_CURRENT_THREAD();
1661     * Resume all user and native threads which are marked GC able.
1663     _PR_MD_BEGIN_RESUME_ALL();
     /* Local (user-level) threads resume through the scheduler... */
1664     for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1665     qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
1666     if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1667     _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1668     _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp));
     /* ...global (native) threads resume at the MD layer, mirroring the
      * suspend paths in PR_SuspendAll(). */
1670     for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1671     qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
1672     if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1673     _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1674     _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
1676     _PR_MD_END_RESUME_ALL();
1677     suspendAllThread = NULL;
1678     suspendAllOn = PR_FALSE;
1679     PR_Unlock(_pr_activeLock);
/*
** PR_EnumerateThreads: apply "func" to every GC-able thread on the local
** and global active queues.  Must be called while PR_SuspendAll() is in
** effect (asserted).  Stops early and returns the callback's status if
** any invocation returns something other than PR_SUCCESS.
** NOTE(review): listing is elided — qp_next assignments, braces, and the
** early-exit statements between the numbered lines are missing here.
*/
1682 PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg)
1684     PRCList *qp, *qp_next;
1686     PRStatus rv = PR_SUCCESS;
1690     ** Currently Enumerate threads happen only with suspension and
1691     ** pr_activeLock held
1693     PR_ASSERT(suspendAllOn);
1695     /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking
1696     * qp->next after applying the function "func". In particular, "func"
1697     * might remove the thread from the queue and put it into another one in
1698     * which case qp->next no longer points to the next entry in the original
1701     * To get around this problem, we save qp->next in qp_next before applying
1702     * "func" and use that saved value as the next value after applying "func".
1706     * Traverse the list of local and global threads
1708     for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1709     qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next)
1712     t = _PR_ACTIVE_THREAD_PTR(qp);
1713     if (_PR_IS_GCABLE_THREAD(t))
1715     rv = (*func)(t, i, arg);
1716     if (rv != PR_SUCCESS)
     /* Second pass: global (native) threads, same saved-next discipline. */
1721     for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1722     qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next)
1725     t = _PR_ACTIVE_THREAD_PTR(qp);
1726     if (_PR_IS_GCABLE_THREAD(t))
1728     rv = (*func)(t, i, arg);
1729     if (rv != PR_SUCCESS)
1737 /* FUNCTION: _PR_AddSleepQ
1739 ** Adds a thread to the sleep/pauseQ.
1741 ** Caller must have the RUNQ lock.
1742 ** Caller must be a user level thread
/*
** _PR_AddSleepQ: put "thread" on its CPU's pause queue (no timeout) or
** insert it into the sleep queue sorted by wake-up time.  Sleep times on
** the queue are stored as deltas relative to the preceding sleeper, and
** _PR_SLEEPQMAX(cpu) tracks the queue's largest absolute timeout.
** See the header comment above: caller holds the RUNQ lock and is a
** user-level thread.
*/
1745 _PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout)
1747     _PRCPU *cpu = thread->cpu;
1749     if (timeout == PR_INTERVAL_NO_TIMEOUT) {
1750     /* append the thread to the global pause Q */
1751     PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu));
1752     thread->flags |= _PR_ON_PAUSEQ;
1754     PRIntervalTime sleep;
1758     /* sort onto global sleepQ */
1761     /* Check if we are longest timeout */
     /* Longest sleeper goes at the tail; store only the delta past the
      * previous maximum and raise the maximum. */
1762     if (timeout >= _PR_SLEEPQMAX(cpu)) {
1763     PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu));
1764     thread->sleep = timeout - _PR_SLEEPQMAX(cpu);
1765     _PR_SLEEPQMAX(cpu) = timeout;
1767     /* Sort thread into global sleepQ at appropriate point */
1768     q = _PR_SLEEPQ(cpu).next;
1770     /* Now scan the list for where to insert this entry */
1771     while (q != &_PR_SLEEPQ(cpu)) {
1772     t = _PR_THREAD_PTR(q);
1773     if (sleep < t->sleep) {
1774     /* Found sleeper to insert in front of */
1780     thread->sleep = sleep;
1781     PR_INSERT_BEFORE(&thread->links, q);
1784     ** Subtract our sleep time from the sleeper that follows us (there
1785     ** must be one) so that they remain relative to us.
1787     PR_ASSERT (thread->links.next != &_PR_SLEEPQ(cpu));
1789     t = _PR_THREAD_PTR(thread->links.next);
1790     PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
1794     thread->flags |= _PR_ON_SLEEPQ;
1798 /* FUNCTION: _PR_DelSleepQ
1800 ** Removes a thread from the sleep/pauseQ.
1802 ** If propogate_time is true, then the thread following the deleted
1803 ** thread will get the time from the deleted thread. This is used
1804 ** when deleting a sleeper that has not timed out.
1806 ** Caller must have the RUNQ lock.
1807 ** Caller must be a user level thread
/*
** _PR_DelSleepQ: remove "thread" from its CPU's pause or sleep queue,
** keeping the delta-encoded sleep times and _PR_SLEEPQMAX consistent.
** If propogate_time is true (sleeper removed before it timed out), the
** thread's remaining delta is credited to the sleeper that follows it.
** See the header comment above: caller holds the RUNQ lock and is a
** user-level thread.
*/
1810 _PR_DelSleepQ(PRThread *thread, PRBool propogate_time)
1812     _PRCPU *cpu = thread->cpu;
1814     /* Remove from pauseQ/sleepQ */
1815     if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
1816     if (thread->flags & _PR_ON_SLEEPQ) {
1817     PRCList *q = thread->links.next;
     /* Not the last sleeper: either push our remaining delta onto the
      * successor (early removal) or shrink the queue maximum. */
1818     if (q != &_PR_SLEEPQ(cpu)) {
1819     if (propogate_time == PR_TRUE) {
1820     PRThread *after = _PR_THREAD_PTR(q);
1821     after->sleep += thread->sleep;
1823     _PR_SLEEPQMAX(cpu) -= thread->sleep;
1825     /* Check if prev is the beginning of the list; if so,
1826     * we are the only element on the list.
1828     if (thread->links.prev != &_PR_SLEEPQ(cpu))
1829     _PR_SLEEPQMAX(cpu) -= thread->sleep;
1831     _PR_SLEEPQMAX(cpu) = 0;
1833     thread->flags &= ~_PR_ON_SLEEPQ;
1835     thread->flags &= ~_PR_ON_PAUSEQ;
1837     PR_REMOVE_LINK(&thread->links);
1843 _PR_AddThreadToRunQ(
1844 PRThread *me, /* the current thread */
1845 PRThread *thread) /* the local thread to be added to a run queue */
1847 PRThreadPriority pri = thread->priority;
1848 _PRCPU *cpu = thread->cpu;
1850 PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));
1854 * On NT, we can only reliably know that the current CPU
1855 * is not idle. We add the awakened thread to the run
1856 * queue of its CPU if its CPU is the current CPU.
1857 * For any other CPU, we don't really know whether it
1858 * is busy or idle. So in all other cases, we just
1859 * "post" the awakened thread to the IO completion port
1860 * for the next idle CPU to execute (this is done in
1861 * _PR_MD_WAKEUP_WAITER).
1862 * Threads with a suspended I/O operation remain bound to
1863 * the same cpu until I/O is cancelled
1865 * NOTE: the boolean expression below must be the exact
1866 * opposite of the corresponding boolean expression in
1867 * _PR_MD_WAKEUP_WAITER.
1869 if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
1870 (thread->md.thr_bound_cpu)) {
1871 PR_ASSERT(!thread->md.thr_bound_cpu ||
1872 (thread->md.thr_bound_cpu == cpu));
1874 _PR_ADD_RUNQ(thread, cpu, pri);
1875 _PR_RUNQ_UNLOCK(cpu);
1879 _PR_ADD_RUNQ(thread, cpu, pri);
1880 _PR_RUNQ_UNLOCK(cpu);
1881 if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
1882 if (pri > me->priority) {
1883 _PR_SET_RESCHED_FLAG();