toolchains/nspr.git: mozilla/nsprpub/pr/src/threads/combined/pruthr.c
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* ***** BEGIN LICENSE BLOCK *****
3  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
4  *
5  * The contents of this file are subject to the Mozilla Public License Version
6  * 1.1 (the "License"); you may not use this file except in compliance with
7  * the License. You may obtain a copy of the License at
8  * http://www.mozilla.org/MPL/
9  *
10  * Software distributed under the License is distributed on an "AS IS" basis,
11  * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
12  * for the specific language governing rights and limitations under the
13  * License.
14  *
15  * The Original Code is the Netscape Portable Runtime (NSPR).
16  *
17  * The Initial Developer of the Original Code is
18  * Netscape Communications Corporation.
19  * Portions created by the Initial Developer are Copyright (C) 1998-2000
20  * the Initial Developer. All Rights Reserved.
21  *
22  * Contributor(s):
23  *
24  * Alternatively, the contents of this file may be used under the terms of
25  * either the GNU General Public License Version 2 or later (the "GPL"), or
26  * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
27  * in which case the provisions of the GPL or the LGPL are applicable instead
28  * of those above. If you wish to allow use of your version of this file only
29  * under the terms of either the GPL or the LGPL, and not to allow others to
30  * use your version of this file under the terms of the MPL, indicate your
31  * decision by deleting the provisions above and replace them with the notice
32  * and other provisions required by the GPL or the LGPL. If you do not delete
33  * the provisions above, a recipient may use your version of this file under
34  * the terms of any one of the MPL, the GPL or the LGPL.
35  *
36  * ***** END LICENSE BLOCK ***** */
37
38 #include "primpl.h"
39 #include <signal.h>
40 #include <string.h>
41
42 #if defined(WIN95)
43 /*
44 ** Some local variables report warnings on Win95 because the code paths
45 ** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
46 ** The pragma suppresses the warning.
47 **
48 */
49 #pragma warning(disable : 4101)
50 #endif
51
52 /* _pr_activeLock protects the following global variables */
53 PRLock *_pr_activeLock;
54 PRInt32 _pr_primordialExitCount;   /* In PR_Cleanup(), the primordial thread
55                     * waits until all other user (non-system)
56                     * threads have terminated before it exits.
57                     * So whenever we decrement _pr_userActive,
58                     * it is compared with
59                     * _pr_primordialExitCount.
60                     * If the primordial thread is a system
61                     * thread, then _pr_primordialExitCount
62                     * is 0.  If the primordial thread is
63                     * itself a user thread, then
64                     * _pr_primordialExitCount is 1.
65                     */
66 PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
67                     * _pr_primordialExitCount, this condition
68                     * variable is notified.
69                     */
70
71 PRLock *_pr_deadQLock;
72 PRUint32 _pr_numNativeDead;
73 PRUint32 _pr_numUserDead;
74 PRCList _pr_deadNativeQ;
75 PRCList _pr_deadUserQ;
76
77 PRUint32 _pr_join_counter;
78
79 PRUint32 _pr_local_threads;
80 PRUint32 _pr_global_threads;
81
82 PRBool suspendAllOn = PR_FALSE;
83 PRThread *suspendAllThread = NULL;
84
85 extern PRCList _pr_active_global_threadQ;
86 extern PRCList _pr_active_local_threadQ;
87
88 static void _PR_DecrActiveThreadCount(PRThread *thread);
89 static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
90 static void _PR_InitializeNativeStack(PRThreadStack *ts);
91 static void _PR_InitializeRecycledThread(PRThread *thread);
92 static void _PR_UserRunThread(void);
93
94 void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
95     PRUintn maxPTDs)
96 {
97     PRThread *thread;
98     PRThreadStack *stack;
99
100     _pr_terminationCVLock = PR_NewLock();
101     _pr_activeLock = PR_NewLock();
102
103 #ifndef HAVE_CUSTOM_USER_THREADS
104     stack = PR_NEWZAP(PRThreadStack);
105 #ifdef HAVE_STACK_GROWING_UP
106     stack->stackTop = (char*) ((((long)&type) >> _pr_pageShift)
107                   << _pr_pageShift);
108 #else
109 #if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
110     stack->stackTop = (char*) &thread;
111 #else
112     stack->stackTop = (char*) ((((long)&type + _pr_pageSize - 1)
113                 >> _pr_pageShift) << _pr_pageShift);
114 #endif
115 #endif
116 #else
117     /* If stack is NULL, we're using custom user threads like NT fibers. */
118     stack = PR_NEWZAP(PRThreadStack);
119     if (stack) {
120         stack->stackSize = 0;
121         _PR_InitializeNativeStack(stack);
122     }
123 #endif /* HAVE_CUSTOM_USER_THREADS */
124
125     thread = _PR_AttachThread(type, priority, stack);
126     if (thread) {
127         _PR_MD_SET_CURRENT_THREAD(thread);
128
129         if (type == PR_SYSTEM_THREAD) {
130             thread->flags = _PR_SYSTEM;
131             _pr_systemActive++;
132             _pr_primordialExitCount = 0;
133         } else {
134             _pr_userActive++;
135             _pr_primordialExitCount = 1;
136         }
137     thread->no_sched = 1;
138     _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
139     }
140
141     if (!thread) PR_Abort();
142 #ifdef _PR_LOCAL_THREADS_ONLY
143     thread->flags |= _PR_PRIMORDIAL;
144 #else
145     thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
146 #endif
147
148     /*
149      * Needs _PR_PRIMORDIAL flag set before calling
150      * _PR_MD_INIT_THREAD()
151      */
152     if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
153         /*
154          * XXX do what?
155          */
156     }
157
158     if (_PR_IS_NATIVE_THREAD(thread)) {
159         PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
160         _pr_global_threads++;
161     } else {
162         PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
163         _pr_local_threads++;
164     }
165
166     _pr_recycleThreads = 0;
167     _pr_deadQLock = PR_NewLock();
168     _pr_numNativeDead = 0;
169     _pr_numUserDead = 0;
170     PR_INIT_CLIST(&_pr_deadNativeQ);
171     PR_INIT_CLIST(&_pr_deadUserQ);
172 }
173
174 void _PR_CleanupThreads(void)
175 {
176     if (_pr_terminationCVLock) {
177         PR_DestroyLock(_pr_terminationCVLock);
178         _pr_terminationCVLock = NULL;
179     }
180     if (_pr_activeLock) {
181         PR_DestroyLock(_pr_activeLock);
182         _pr_activeLock = NULL;
183     }
184     if (_pr_primordialExitCVar) {
185         PR_DestroyCondVar(_pr_primordialExitCVar);
186         _pr_primordialExitCVar = NULL;
187     }
188     /* TODO _pr_dead{Native,User}Q need to be deleted */
189     if (_pr_deadQLock) {
190         PR_DestroyLock(_pr_deadQLock);
191         _pr_deadQLock = NULL;
192     }
193 }
194
195 /*
196 ** Initialize a stack for a native thread
197 */
198 static void _PR_InitializeNativeStack(PRThreadStack *ts)
199 {
200     if( ts && (ts->stackTop == 0) ) {
201         ts->allocSize = ts->stackSize;
202
203         /*
204         ** Setup stackTop and stackBottom values.
205         */
206 #ifdef HAVE_STACK_GROWING_UP
207     ts->allocBase = (char*) ((((long)&ts) >> _pr_pageShift)
208                   << _pr_pageShift);
209         ts->stackBottom = ts->allocBase + ts->stackSize;
210         ts->stackTop = ts->allocBase;
211 #else
212         ts->allocBase = (char*) ((((long)&ts + _pr_pageSize - 1)
213                 >> _pr_pageShift) << _pr_pageShift);
214         ts->stackTop    = ts->allocBase;
215         ts->stackBottom = ts->allocBase - ts->stackSize;
216 #endif
217     }
218 }
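/*
** Illustrative sketch (not part of the original source): the page rounding
** used above, written out with concrete stand-ins for the _pr_pageSize and
** _pr_pageShift globals (a 4K page is assumed here).
*/
#if 0
static char *pr_example_round_down_to_page(char *addr)
{
    long pageShift = 12;                          /* stands in for _pr_pageShift */
    return (char *) (((long)addr >> pageShift) << pageShift);
}

static char *pr_example_round_up_to_page(char *addr)
{
    long pageSize = 4096, pageShift = 12;         /* stand-ins for the globals   */
    return (char *) ((((long)addr + pageSize - 1) >> pageShift) << pageShift);
}
#endif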
219
220 void _PR_NotifyJoinWaiters(PRThread *thread)
221 {
222     /*
223     ** Handle joinable threads.  Change the state to waiting for join.
224     ** Remove from our run Q and put it on global waiting to join Q.
225     ** Notify on our "termination" condition variable so that joining
226     ** thread will know about our termination.  Switch our context and
227     ** come back later on to continue the cleanup.
228     */    
229     PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
230     if (thread->term != NULL) {
231         PR_Lock(_pr_terminationCVLock);
232         _PR_THREAD_LOCK(thread);
233         thread->state = _PR_JOIN_WAIT;
234         if ( !_PR_IS_NATIVE_THREAD(thread) ) {
235             _PR_MISCQ_LOCK(thread->cpu);
236             _PR_ADD_JOINQ(thread, thread->cpu);
237             _PR_MISCQ_UNLOCK(thread->cpu);
238         }
239         _PR_THREAD_UNLOCK(thread);
240         PR_NotifyCondVar(thread->term);
241         PR_Unlock(_pr_terminationCVLock);
242         _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
243         PR_ASSERT(thread->state != _PR_JOIN_WAIT);
244     }
245
246 }
247
248 /*
249  * Zero some of the data members of a recycled thread.
250  *
251  * Note that we can do this either when a dead thread is added to
252  * the dead thread queue or when it is reused.  Here, we are doing
253  * this lazily, when the thread is reused in _PR_CreateThread().
254  */
255 static void _PR_InitializeRecycledThread(PRThread *thread)
256 {
257     /*
258      * Assert that the following data members are already zeroed
259      * by _PR_CleanupThread().
260      */
261 #ifdef DEBUG
262     if (thread->privateData) {
263         unsigned int i;
264         for (i = 0; i < thread->tpdLength; i++) {
265             PR_ASSERT(thread->privateData[i] == NULL);
266         }
267     }
268 #endif
269     PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
270     PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
271     PR_ASSERT(thread->errorStringLength == 0);
272
273     /* Reset data members in thread structure */
274     thread->errorCode = thread->osErrorCode = 0;
275     thread->io_pending = thread->io_suspended = PR_FALSE;
276     thread->environment = 0;
277     PR_INIT_CLIST(&thread->lockList);
278 }
279
280 PRStatus _PR_RecycleThread(PRThread *thread)
281 {
282     if ( _PR_IS_NATIVE_THREAD(thread) &&
283             _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
284         _PR_DEADQ_LOCK;
285         PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
286         _PR_INC_DEADNATIVE;
287         _PR_DEADQ_UNLOCK;
288     return (PR_SUCCESS);
289     } else if ( !_PR_IS_NATIVE_THREAD(thread) &&
290                 _PR_NUM_DEADUSER < _pr_recycleThreads) {
291         _PR_DEADQ_LOCK;
292         PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
293         _PR_INC_DEADUSER;
294         _PR_DEADQ_UNLOCK;
295     return (PR_SUCCESS);
296     }
297     return (PR_FAILURE);
298 }
299
300 /*
301  * Decrement the active thread count, either _pr_systemActive or
302  * _pr_userActive, depending on whether the thread is a system thread
303  * or a user thread.  If all the user threads, except possibly
304  * the primordial thread, have terminated, we notify the primordial
305  * thread of this condition.
306  *
307  * Since this function will lock _pr_activeLock, do not call this
308  * function while holding the _pr_activeLock lock, as this will result
309  * in a deadlock.
310  */
311
312 static void
313 _PR_DecrActiveThreadCount(PRThread *thread)
314 {
315     PR_Lock(_pr_activeLock);
316     if (thread->flags & _PR_SYSTEM) {
317         _pr_systemActive--;
318     } else {
319         _pr_userActive--;
320         if (_pr_userActive == _pr_primordialExitCount) {
321             PR_NotifyCondVar(_pr_primordialExitCVar);
322         }
323     }
324     PR_Unlock(_pr_activeLock);
325 }
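/*
** Illustrative sketch (not part of the original source): because the routine
** above acquires _pr_activeLock itself, a caller that holds the lock must
** release it first or it will deadlock on the re-acquisition.
*/
#if 0
    PR_Lock(_pr_activeLock);
    /* ... bookkeeping that genuinely needs the lock ... */
    PR_Unlock(_pr_activeLock);              /* release first ...               */
    _PR_DecrActiveThreadCount(thread);      /* ... then decrement the count    */
#endif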
326
327 /*
328 ** Detach thread structure
329 */
330 static void
331 _PR_DestroyThread(PRThread *thread)
332 {
333     _PR_MD_FREE_LOCK(&thread->threadLock);
334     PR_DELETE(thread);
335 }
336
337 void
338 _PR_NativeDestroyThread(PRThread *thread)
339 {
340     if(thread->term) {
341         PR_DestroyCondVar(thread->term);
342         thread->term = 0;
343     }
344     if (NULL != thread->privateData) {
345         PR_ASSERT(0 != thread->tpdLength);
346         PR_DELETE(thread->privateData);
347         thread->tpdLength = 0;
348     }
349     PR_DELETE(thread->stack);
350     _PR_DestroyThread(thread);
351 }
352
353 void
354 _PR_UserDestroyThread(PRThread *thread)
355 {
356     if(thread->term) {
357         PR_DestroyCondVar(thread->term);
358         thread->term = 0;
359     }
360     if (NULL != thread->privateData) {
361         PR_ASSERT(0 != thread->tpdLength);
362         PR_DELETE(thread->privateData);
363         thread->tpdLength = 0;
364     }
365     _PR_MD_FREE_LOCK(&thread->threadLock);
366     if (thread->threadAllocatedOnStack == 1) {
367         _PR_MD_CLEAN_THREAD(thread);
368         /*
369          *  Because the no_sched field is set, this thread/stack
370          *  will not be re-used until the flag is cleared by the thread
371          *  we will context switch to.
372          */
373         _PR_FreeStack(thread->stack);
374     } else {
375 #ifdef WINNT
376         _PR_MD_CLEAN_THREAD(thread);
377 #else
378         /*
379          * This assertion does not apply to NT.  On NT, every fiber
380          * has its threadAllocatedOnStack equal to 0.  Elsewhere,
381          * only the primordial thread has its threadAllocatedOnStack
382          * equal to 0.
383          */
384         PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
385 #endif
386     }
387 }
388
389
390 /*
391 ** Run a thread's start function. When the start function returns, the
392 ** thread is done executing and no longer needs the CPU. If there are no
393 ** more user threads running, then we can exit the program.
394 */
395 void _PR_NativeRunThread(void *arg)
396 {
397     PRThread *thread = (PRThread *)arg;
398
399     _PR_MD_SET_CURRENT_THREAD(thread);
400
401     _PR_MD_SET_CURRENT_CPU(NULL);
402
403     /* Set up the thread stack information */
404     _PR_InitializeNativeStack(thread->stack);
405
406     /* Set up the thread md information */
407     if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
408         /*
409          * thread failed to initialize itself, possibly due to
410          * failure to allocate per-thread resources
411          */
412         return;
413     }
414
415     while(1) {
416         thread->state = _PR_RUNNING;
417
418         /*
419          * Add to list of active threads
420          */
421         PR_Lock(_pr_activeLock);
422         PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
423         _pr_global_threads++;
424         PR_Unlock(_pr_activeLock);
425
426         (*thread->startFunc)(thread->arg);
427
428         /*
429          * The following two assertions are meant for NT asynch io.
430          *
431          * The thread should have no asynch io in progress when it
432          * exits, otherwise the overlapped buffer, which is part of
433          * the thread structure, would become invalid.
434          */
435         PR_ASSERT(thread->io_pending == PR_FALSE);
436         /*
437          * This assertion enforces the programming guideline that
438          * if an io function times out or is interrupted, the thread
439          * should close the fd to force the asynch io to abort
440          * before it exits.  Right now, closing the fd is the only
441          * way to clear the io_suspended flag.
442          */
443         PR_ASSERT(thread->io_suspended == PR_FALSE);
444
445         /*
446          * remove thread from list of active threads
447          */
448         PR_Lock(_pr_activeLock);
449         PR_REMOVE_LINK(&thread->active);
450         _pr_global_threads--;
451         PR_Unlock(_pr_activeLock);
452
453         PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));
454
455         /* All done, time to go away */
456         _PR_CleanupThread(thread);
457
458         _PR_NotifyJoinWaiters(thread);
459
460         _PR_DecrActiveThreadCount(thread);
461
462         thread->state = _PR_DEAD_STATE;
463
464         if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
465                         PR_FAILURE)) {
466             /*
467              * thread not recycled
468              * platform-specific thread exit processing
469              *        - for stuff like releasing native-thread resources, etc.
470              */
471             _PR_MD_EXIT_THREAD(thread);
472             /*
473              * Free memory allocated for the thread
474              */
475             _PR_NativeDestroyThread(thread);
476             /*
477              * thread gone, cannot de-reference thread now
478              */
479             return;
480         }
481
482         /* Now wait for someone to activate us again... */
483         _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
484     }
485 }
486
487 static void _PR_UserRunThread(void)
488 {
489     PRThread *thread = _PR_MD_CURRENT_THREAD();
490     PRIntn is;
491
492     if (_MD_LAST_THREAD())
493     _MD_LAST_THREAD()->no_sched = 0;
494
495 #ifdef HAVE_CUSTOM_USER_THREADS
496     if (thread->stack == NULL) {
497         thread->stack = PR_NEWZAP(PRThreadStack);
498         _PR_InitializeNativeStack(thread->stack);
499     }
500 #endif /* HAVE_CUSTOM_USER_THREADS */
501
502     while(1) {
503         /* Run thread main */
504         if ( !_PR_IS_NATIVE_THREAD(thread)) _PR_MD_SET_INTSOFF(0);
505
506     /*
507      * Add to list of active threads
508      */
509     if (!(thread->flags & _PR_IDLE_THREAD)) {
510         PR_Lock(_pr_activeLock);
511         PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
512         _pr_local_threads++;
513         PR_Unlock(_pr_activeLock);
514     }
515
516         (*thread->startFunc)(thread->arg);
517
518         /*
519          * The following two assertions are meant for NT asynch io.
520          *
521          * The thread should have no asynch io in progress when it
522          * exits, otherwise the overlapped buffer, which is part of
523          * the thread structure, would become invalid.
524          */
525         PR_ASSERT(thread->io_pending == PR_FALSE);
526         /*
527          * This assertion enforces the programming guideline that
528          * if an io function times out or is interrupted, the thread
529          * should close the fd to force the asynch io to abort
530          * before it exits.  Right now, closing the fd is the only
531          * way to clear the io_suspended flag.
532          */
533         PR_ASSERT(thread->io_suspended == PR_FALSE);
534
535         PR_Lock(_pr_activeLock);
536     /*
537      * remove thread from list of active threads
538      */
539     if (!(thread->flags & _PR_IDLE_THREAD)) {
540            PR_REMOVE_LINK(&thread->active);
541         _pr_local_threads--;
542     }
543     PR_Unlock(_pr_activeLock);
544         PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));
545
546         /* All done, time to go away */
547         _PR_CleanupThread(thread);
548
549         _PR_INTSOFF(is);    
550
551         _PR_NotifyJoinWaiters(thread);
552
553     _PR_DecrActiveThreadCount(thread);
554
555         thread->state = _PR_DEAD_STATE;
556
557         if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
558                         PR_FAILURE)) {
559             /*
560             ** Destroy the thread resources
561             */
562         _PR_UserDestroyThread(thread);
563         }
564
565         /*
566         ** Find another user thread to run. This cpu has finished the
567         ** previous thread's main and is now ready to run another thread.
568         */
569         {
570             PRInt32 is;
571             _PR_INTSOFF(is);
572             _PR_MD_SWITCH_CONTEXT(thread);
573         }
574
575         /* Will land here when we get scheduled again if we are recycling... */
576     }
577 }
578
579 void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri)
580 {
581     PRThread *me = _PR_MD_CURRENT_THREAD();
582     PRIntn is;
583
584     if ( _PR_IS_NATIVE_THREAD(thread) ) {
585         _PR_MD_SET_PRIORITY(&(thread->md), newPri);
586         return;
587     }
588
589     if (!_PR_IS_NATIVE_THREAD(me))
590     _PR_INTSOFF(is);
591     _PR_THREAD_LOCK(thread);
592     if (newPri != thread->priority) {
593     _PRCPU *cpu = thread->cpu;
594
595     switch (thread->state) {
596       case _PR_RUNNING:
597         /* Change my priority */
598
599             _PR_RUNQ_LOCK(cpu);
600         thread->priority = newPri;
601         if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
602             if (!_PR_IS_NATIVE_THREAD(me))
603                     _PR_SET_RESCHED_FLAG();
604         }
605             _PR_RUNQ_UNLOCK(cpu);
606         break;
607
608       case _PR_RUNNABLE:
609
610         _PR_RUNQ_LOCK(cpu);
611             /* Move to different runQ */
612             _PR_DEL_RUNQ(thread);
613             thread->priority = newPri;
614             PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
615             _PR_ADD_RUNQ(thread, cpu, newPri);
616         _PR_RUNQ_UNLOCK(cpu);
617
618             if (newPri > me->priority) {
619             if (!_PR_IS_NATIVE_THREAD(me))
620                     _PR_SET_RESCHED_FLAG();
621             }
622
623         break;
624
625       case _PR_LOCK_WAIT:
626       case _PR_COND_WAIT:
627       case _PR_IO_WAIT:
628       case _PR_SUSPENDED:
629
630         thread->priority = newPri;
631         break;
632     }
633     }
634     _PR_THREAD_UNLOCK(thread);
635     if (!_PR_IS_NATIVE_THREAD(me))
636     _PR_INTSON(is);
637 }
638
639 /*
640 ** Suspend the named thread.
641 */
642 static void _PR_Suspend(PRThread *thread)
643 {
644     PRIntn is;
645     PRThread *me = _PR_MD_CURRENT_THREAD();
646
647     PR_ASSERT(thread != me);
648     PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));
649
650     if (!_PR_IS_NATIVE_THREAD(me))
651         _PR_INTSOFF(is);
652     _PR_THREAD_LOCK(thread);
653     switch (thread->state) {
654       case _PR_RUNNABLE:
655         if (!_PR_IS_NATIVE_THREAD(thread)) {
656             _PR_RUNQ_LOCK(thread->cpu);
657             _PR_DEL_RUNQ(thread);
658             _PR_RUNQ_UNLOCK(thread->cpu);
659
660             _PR_MISCQ_LOCK(thread->cpu);
661             _PR_ADD_SUSPENDQ(thread, thread->cpu);
662             _PR_MISCQ_UNLOCK(thread->cpu);
663         } else {
664             /*
665              * Only LOCAL threads are suspended by _PR_Suspend
666              */
667              PR_ASSERT(0);
668         }
669         thread->state = _PR_SUSPENDED;
670         break;
671
672       case _PR_RUNNING:
673         /*
674          * The thread being suspended should be a LOCAL thread with
675          * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
676          */
677         PR_ASSERT(0);
678         break;
679
680       case _PR_LOCK_WAIT:
681       case _PR_IO_WAIT:
682       case _PR_COND_WAIT:
683         if (_PR_IS_NATIVE_THREAD(thread)) {
684             _PR_MD_SUSPEND_THREAD(thread);
685     }
686         thread->flags |= _PR_SUSPENDING;
687         break;
688
689       default:
690         PR_Abort();
691     }
692     _PR_THREAD_UNLOCK(thread);
693     if (!_PR_IS_NATIVE_THREAD(me))
694     _PR_INTSON(is);
695 }
696
697 static void _PR_Resume(PRThread *thread)
698 {
699     PRThreadPriority pri;
700     PRIntn is;
701     PRThread *me = _PR_MD_CURRENT_THREAD();
702
703     if (!_PR_IS_NATIVE_THREAD(me))
704     _PR_INTSOFF(is);
705     _PR_THREAD_LOCK(thread);
706     switch (thread->state) {
707       case _PR_SUSPENDED:
708         thread->state = _PR_RUNNABLE;
709         thread->flags &= ~_PR_SUSPENDING;
710         if (!_PR_IS_NATIVE_THREAD(thread)) {
711             _PR_MISCQ_LOCK(thread->cpu);
712             _PR_DEL_SUSPENDQ(thread);
713             _PR_MISCQ_UNLOCK(thread->cpu);
714
715             pri = thread->priority;
716
717             _PR_RUNQ_LOCK(thread->cpu);
718             _PR_ADD_RUNQ(thread, thread->cpu, pri);
719             _PR_RUNQ_UNLOCK(thread->cpu);
720
721             if (pri > _PR_MD_CURRENT_THREAD()->priority) {
722                 if (!_PR_IS_NATIVE_THREAD(me))
723                     _PR_SET_RESCHED_FLAG();
724             }
725         } else {
726             PR_ASSERT(0);
727         }
728         break;
729
730       case _PR_IO_WAIT:
731       case _PR_COND_WAIT:
732         thread->flags &= ~_PR_SUSPENDING;
733 /*      PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
734         break;
735
736       case _PR_LOCK_WAIT: 
737       {
738         PRLock *wLock = thread->wait.lock;
739
740         thread->flags &= ~_PR_SUSPENDING;
741  
742         _PR_LOCK_LOCK(wLock);
743         if (thread->wait.lock->owner == 0) {
744             _PR_UnblockLockWaiter(thread->wait.lock);
745         }
746         _PR_LOCK_UNLOCK(wLock);
747         break;
748       }
749       case _PR_RUNNABLE:
750         break;
751       case _PR_RUNNING:
752         /*
753          * The thread being suspended should be a LOCAL thread with
754          * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
755          */
756         PR_ASSERT(0);
757         break;
758
759       default:
760     /*
761      * We should never get here: a resumed thread must be in one of the
762      * states handled above, not _PR_JOIN_WAIT, _PR_UNBORN, or _PR_DEAD_STATE.
763      */
764         PR_Abort();
765     }
766     _PR_THREAD_UNLOCK(thread);
767     if (!_PR_IS_NATIVE_THREAD(me))
768         _PR_INTSON(is);
769
770 }
771
772 #if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
773 static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus)
774 {
775     PRThread *thread;
776     PRIntn pri;
777     PRUint32 r;
778     PRCList *qp;
779     PRIntn priMin, priMax;
780
781     _PR_RUNQ_LOCK(cpu);
782     r = _PR_RUNQREADYMASK(cpu);
783     if (r==0) {
784         priMin = priMax = PR_PRIORITY_FIRST;
785     } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
786         priMin = priMax = PR_PRIORITY_NORMAL;
787     } else {
788         priMin = PR_PRIORITY_FIRST;
789         priMax = PR_PRIORITY_LAST;
790     }
791     thread = NULL;
792     for (pri = priMax; pri >= priMin ; pri-- ) {
793     if (r & (1 << pri)) {
794             for (qp = _PR_RUNQ(cpu)[pri].next; 
795                  qp != &_PR_RUNQ(cpu)[pri];
796                  qp = qp->next) {
797                 thread = _PR_THREAD_PTR(qp);
798                 /*
799                 * skip non-schedulable threads
800                 */
801                 PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
802                 if (thread->no_sched) {
803                     thread = NULL;
804                     /*
805                      * Need to wake up the cpus to avoid missing a
806                      * runnable thread.
807                      * Waking up all the cpus needs to happen only once.
808                      */
809
810                     *wakeup_cpus = PR_TRUE;
811                     continue;
812                 } else if (thread->flags & _PR_BOUND_THREAD) {
813                     /*
814                      * Thread bound to cpu 0
815                      */
816
817                     thread = NULL;
818 #ifdef IRIX
819                                         _PR_MD_WAKEUP_PRIMORDIAL_CPU();
820 #endif
821                     continue;
822                 } else if (thread->io_pending == PR_TRUE) {
823                     /*
824                      * A thread that is blocked for I/O needs to run
825                      * on the same cpu on which it was blocked. This is because
826                      * the cpu's ioq is accessed without lock protection and scheduling
827                      * the thread on a different cpu would preclude this optimization.
828                      */
829                     thread = NULL;
830                     continue;
831                 } else {
832                     /* Pull thread off of its run queue */
833                     _PR_DEL_RUNQ(thread);
834                     _PR_RUNQ_UNLOCK(cpu);
835                     return(thread);
836                 }
837             }
838         }
839         thread = NULL;
840     }
841     _PR_RUNQ_UNLOCK(cpu);
842     return(thread);
843 }
844 #endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */
845
846 /*
847 ** Schedule this native thread by finding the highest priority nspr
848 ** thread that is ready to run.
849 **
850 ** Note- everyone really needs to call _PR_MD_SWITCH_CONTEXT (which calls
851 **       _PR_Schedule()) rather than calling _PR_Schedule() directly.
852 **       Otherwise, any initialization required for switching contexts
853 **       will not get done!
854 */
855 void _PR_Schedule(void)
856 {
857     PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
858     _PRCPU *cpu = _PR_MD_CURRENT_CPU();
859     PRIntn pri;
860     PRUint32 r;
861     PRCList *qp;
862     PRIntn priMin, priMax;
863 #if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
864     PRBool wakeup_cpus;
865 #endif
866
867     /* Interrupts must be disabled */
868     PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);
869
870     /* Since we are rescheduling, we no longer want to */
871     _PR_CLEAR_RESCHED_FLAG();
872
873     /*
874     ** Find highest priority thread to run. Bigger priority numbers are
875     ** higher priority threads
876     */
877     _PR_RUNQ_LOCK(cpu);
878     /*
879      *  if we are in SuspendAll mode, can schedule only the thread
880      *    that called PR_SuspendAll
881      *
882      *  The thread may be ready to run now, after completing an I/O
883      *  operation, for example
884      */
885     if ((thread = suspendAllThread) != 0) {
886     if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
887             /* Pull thread off of its run queue */
888             _PR_DEL_RUNQ(thread);
889             _PR_RUNQ_UNLOCK(cpu);
890             goto found_thread;
891     } else {
892             thread = NULL;
893             _PR_RUNQ_UNLOCK(cpu);
894             goto idle_thread;
895     }
896     }
897     r = _PR_RUNQREADYMASK(cpu);
898     if (r==0) {
899         priMin = priMax = PR_PRIORITY_FIRST;
900     } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
901         priMin = priMax = PR_PRIORITY_NORMAL;
902     } else {
903         priMin = PR_PRIORITY_FIRST;
904         priMax = PR_PRIORITY_LAST;
905     }
906     thread = NULL;
907     for (pri = priMax; pri >= priMin ; pri-- ) {
908     if (r & (1 << pri)) {
909             for (qp = _PR_RUNQ(cpu)[pri].next; 
910                  qp != &_PR_RUNQ(cpu)[pri];
911                  qp = qp->next) {
912                 thread = _PR_THREAD_PTR(qp);
913                 /*
914                 * skip non-schedulable threads
915                 */
916                 PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
917                 if ((thread->no_sched) && (me != thread)){
918                     thread = NULL;
919                     continue;
920                 } else {
921                     /* Pull thread off of its run queue */
922                     _PR_DEL_RUNQ(thread);
923                     _PR_RUNQ_UNLOCK(cpu);
924                     goto found_thread;
925                 }
926             }
927         }
928         thread = NULL;
929     }
930     _PR_RUNQ_UNLOCK(cpu);
931
932 #if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
933
934     wakeup_cpus = PR_FALSE;
935     _PR_CPU_LIST_LOCK();
936     for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
937         if (cpu != _PR_CPU_PTR(qp)) {
938             if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus))
939                                         != NULL) {
940                 thread->cpu = cpu;
941                 _PR_CPU_LIST_UNLOCK();
942                 if (wakeup_cpus == PR_TRUE)
943                     _PR_MD_WAKEUP_CPUS();
944                 goto found_thread;
945             }
946         }
947     }
948     _PR_CPU_LIST_UNLOCK();
949     if (wakeup_cpus == PR_TRUE)
950         _PR_MD_WAKEUP_CPUS();
951
952 #endif        /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */
953
954 idle_thread:
955    /*
956     ** There are no threads to run. Switch to the idle thread
957     */
958     PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
959     thread = _PR_MD_CURRENT_CPU()->idle_thread;
960
961 found_thread:
962     PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
963                     (!(thread->no_sched))));
964
965     /* Resume the thread */
966     PR_LOG(_pr_sched_lm, PR_LOG_MAX,
967        ("switching to %d[%p]", thread->id, thread));
968     PR_ASSERT(thread->state != _PR_RUNNING);
969     thread->state = _PR_RUNNING;
970  
971     /* If we are on the runq, it just means that we went to sleep on some
972      * resource, and by the time we got here another real native thread had
973      * already given us the resource and put us back on the runqueue 
974      */
975         PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
976     if (thread != me) 
977         _PR_MD_RESTORE_CONTEXT(thread);
978 #if 0
979     /* XXXMB; with setjmp/longjmp it is impossible to land here, but 
980      * it is not with fibers... Is this a bad thing?  I believe it is 
981      * still safe.
982      */
983     PR_NOT_REACHED("impossible return from schedule");
984 #endif
985 }
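/*
** Illustrative sketch (not part of the original source): how the ready mask
** consulted above maps priorities to run queues.  Bit "pri" is set when the
** run queue for that priority is non-empty, so scanning from PR_PRIORITY_LAST
** down to PR_PRIORITY_FIRST finds the highest-priority ready queue first.
*/
#if 0
static PRIntn pr_example_highest_ready_priority(PRUint32 readyMask)
{
    PRIntn pri;
    for (pri = PR_PRIORITY_LAST; pri >= PR_PRIORITY_FIRST; pri--) {
        if (readyMask & (1 << pri))
            return pri;                 /* first hit is the highest priority */
    }
    return -1;                          /* no runnable thread on any queue   */
}
#endif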
986
987 /*
988 ** Attaches a thread.  
989 ** Does not set the _PR_MD_CURRENT_THREAD.  
990 ** Does not specify the scope of the thread.
991 */
992 static PRThread *
993 _PR_AttachThread(PRThreadType type, PRThreadPriority priority,
994     PRThreadStack *stack)
995 {
996     PRThread *thread;
997     char *mem;
998
999     if (priority > PR_PRIORITY_LAST) {
1000         priority = PR_PRIORITY_LAST;
1001     } else if (priority < PR_PRIORITY_FIRST) {
1002         priority = PR_PRIORITY_FIRST;
1003     }
1004
1005     mem = (char*) PR_CALLOC(sizeof(PRThread));
1006     if (mem) {
1007         thread = (PRThread*) mem;
1008         thread->priority = priority;
1009         thread->stack = stack;
1010         thread->state = _PR_RUNNING;
1011         PR_INIT_CLIST(&thread->lockList);
1012         if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
1013         PR_DELETE(thread);
1014         return 0;
1015     }
1016
1017         return thread;
1018     }
1019     return 0;
1020 }
1021
1022
1023
1024 PR_IMPLEMENT(PRThread*) 
1025 _PR_NativeCreateThread(PRThreadType type,
1026                      void (*start)(void *arg),
1027                      void *arg,
1028                      PRThreadPriority priority,
1029                      PRThreadScope scope,
1030                      PRThreadState state,
1031                      PRUint32 stackSize,
1032                      PRUint32 flags)
1033 {
1034     PRThread *thread;
1035
1036     thread = _PR_AttachThread(type, priority, NULL);
1037
1038     if (thread) {
1039         PR_Lock(_pr_activeLock);
1040         thread->flags = (flags | _PR_GLOBAL_SCOPE);
1041         thread->id = ++_pr_utid;
1042         if (type == PR_SYSTEM_THREAD) {
1043             thread->flags |= _PR_SYSTEM;
1044             _pr_systemActive++;
1045         } else {
1046             _pr_userActive++;
1047         }
1048         PR_Unlock(_pr_activeLock);
1049
1050         thread->stack = PR_NEWZAP(PRThreadStack);
1051         if (!thread->stack) {
1052             PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1053             goto done;
1054         }
1055         thread->stack->stackSize = stackSize?stackSize:_MD_DEFAULT_STACK_SIZE;
1056         thread->stack->thr = thread;
1057         thread->startFunc = start;
1058         thread->arg = arg;
1059
1060         /* 
1061           Set thread flags related to scope and joinable state. If joinable
1062           thread, allocate a "termination" condition variable.
1063          */
1064         if (state == PR_JOINABLE_THREAD) {
1065             thread->term = PR_NewCondVar(_pr_terminationCVLock);
1066         if (thread->term == NULL) {
1067         PR_DELETE(thread->stack);
1068         goto done;
1069         }
1070         }
1071
1072     thread->state = _PR_RUNNING;
1073         if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority,
1074             scope,state,stackSize) == PR_SUCCESS) {
1075             return thread;
1076         }
1077         if (thread->term) {
1078             PR_DestroyCondVar(thread->term);
1079             thread->term = NULL;
1080         }
1081     PR_DELETE(thread->stack);
1082     }
1083
1084 done:
1085     if (thread) {
1086     _PR_DecrActiveThreadCount(thread);
1087         _PR_DestroyThread(thread);
1088     }
1089     return NULL;
1090 }
1091
1092 /************************************************************************/
1093
1094 PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type,
1095                      void (*start)(void *arg),
1096                      void *arg,
1097                      PRThreadPriority priority,
1098                      PRThreadScope scope,
1099                      PRThreadState state,
1100                      PRUint32 stackSize,
1101                      PRUint32 flags)
1102 {
1103     PRThread *me;
1104     PRThread *thread = NULL;
1105     PRThreadStack *stack;
1106     char *top;
1107     PRIntn is;
1108     PRIntn native = 0;
1109     PRIntn useRecycled = 0;
1110     PRBool status;
1111
1112     /* 
1113     First, pin down the priority.  Not all compilers catch passing out of
1114     range enum here.  If we let bad values thru, priority queues won't work.
1115     */
1116     if (priority > PR_PRIORITY_LAST) {
1117         priority = PR_PRIORITY_LAST;
1118     } else if (priority < PR_PRIORITY_FIRST) {
1119         priority = PR_PRIORITY_FIRST;
1120     }
1121         
1122     if (!_pr_initialized) _PR_ImplicitInitialization();
1123
1124     if (! (flags & _PR_IDLE_THREAD))
1125         me = _PR_MD_CURRENT_THREAD();
1126
1127 #if    defined(_PR_GLOBAL_THREADS_ONLY)
1128         /*
1129          * can create global threads only
1130          */
1131     if (scope == PR_LOCAL_THREAD)
1132         scope = PR_GLOBAL_THREAD;
1133 #endif
1134
1135         if (_native_threads_only)
1136                 scope = PR_GLOBAL_THREAD;
1137
1138     native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD))
1139                                                         && _PR_IS_NATIVE_THREAD_SUPPORTED());
1140
1141     _PR_ADJUST_STACKSIZE(stackSize);
1142
1143     if (native) {
1144     /*
1145      * clear the IDLE_THREAD flag which applies to LOCAL
1146      * threads only
1147      */
1148     flags &= ~_PR_IDLE_THREAD;
1149         flags |= _PR_GLOBAL_SCOPE;
1150         if (_PR_NUM_DEADNATIVE > 0) {
1151             _PR_DEADQ_LOCK;
1152
1153             if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
1154                 _PR_DEADQ_UNLOCK;
1155             } else {
1156                 thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
1157                 PR_REMOVE_LINK(&thread->links);
1158                 _PR_DEC_DEADNATIVE;
1159                 _PR_DEADQ_UNLOCK;
1160
1161                 _PR_InitializeRecycledThread(thread);
1162                 thread->startFunc = start;
1163                 thread->arg = arg;
1164             thread->flags = (flags | _PR_GLOBAL_SCOPE);
1165             if (type == PR_SYSTEM_THREAD)
1166             {
1167                 thread->flags |= _PR_SYSTEM;
1168                 PR_ATOMIC_INCREMENT(&_pr_systemActive);
1169             }
1170             else PR_ATOMIC_INCREMENT(&_pr_userActive);
1171
1172             if (state == PR_JOINABLE_THREAD) {
1173                 if (!thread->term) 
1174                        thread->term = PR_NewCondVar(_pr_terminationCVLock);
1175             }
1176         else {
1177                 if(thread->term) {
1178                     PR_DestroyCondVar(thread->term);
1179                         thread->term = 0;
1180             }
1181             }
1182
1183                 thread->priority = priority;
1184         _PR_MD_SET_PRIORITY(&(thread->md), priority);
1185         /* XXX what about stackSize? */
1186         thread->state = _PR_RUNNING;
1187                 _PR_MD_WAKEUP_WAITER(thread);
1188         return thread;
1189             }
1190         }
1191         thread = _PR_NativeCreateThread(type, start, arg, priority, 
1192                                             scope, state, stackSize, flags);
1193     } else {
1194         if (_PR_NUM_DEADUSER > 0) {
1195             _PR_DEADQ_LOCK;
1196
1197             if (_PR_NUM_DEADUSER == 0) {  /* thread safe check */
1198                 _PR_DEADQ_UNLOCK;
1199             } else {
1200                 PRCList *ptr;
1201
1202                 /* Go down list checking for a recycled thread with a 
1203                  * large enough stack.  XXXMB - this has a bad degenerate case.
1204                  */
1205                 ptr = _PR_DEADUSERQ.next;
1206                 while( ptr != &_PR_DEADUSERQ ) {
1207                     thread = _PR_THREAD_PTR(ptr);
1208                     if ((thread->stack->stackSize >= stackSize) &&
1209                 (!thread->no_sched)) {
1210                         PR_REMOVE_LINK(&thread->links);
1211                         _PR_DEC_DEADUSER;
1212                         break;
1213                     } else {
1214                         ptr = ptr->next;
1215                         thread = NULL;
1216                     }
1217                 } 
1218
1219                 _PR_DEADQ_UNLOCK;
1220
1221                if (thread) {
1222                     _PR_InitializeRecycledThread(thread);
1223                     thread->startFunc = start;
1224                     thread->arg = arg;
1225                     thread->priority = priority;
1226             if (state == PR_JOINABLE_THREAD) {
1227             if (!thread->term) 
1228                thread->term = PR_NewCondVar(_pr_terminationCVLock);
1229             } else {
1230             if(thread->term) {
1231                PR_DestroyCondVar(thread->term);
1232                 thread->term = 0;
1233             }
1234             }
1235                     useRecycled++;
1236                 }
1237             }
1238         } 
1239         if (thread == NULL) {
1240 #ifndef HAVE_CUSTOM_USER_THREADS
1241             stack = _PR_NewStack(stackSize);
1242             if (!stack) {
1243                 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1244                 return NULL;
1245             }
1246
1247             /* Allocate thread object and per-thread data off the top of the stack*/
1248             top = stack->stackTop;
1249 #ifdef HAVE_STACK_GROWING_UP
1250             thread = (PRThread*) top;
1251             top = top + sizeof(PRThread);
1252             /*
1253              * Make stack 64-byte aligned
1254              */
1255             if ((PRUptrdiff)top & 0x3f) {
1256                 top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
1257             }
1258 #else
1259             top = top - sizeof(PRThread);
1260             thread = (PRThread*) top;
1261             /*
1262              * Make stack 64-byte aligned
1263              */
1264             if ((PRUptrdiff)top & 0x3f) {
1265                 top = (char*)((PRUptrdiff)top & ~0x3f);
1266             }
1267 #endif
1268             stack->thr = thread;
1269             memset(thread, 0, sizeof(PRThread));
1270             thread->threadAllocatedOnStack = 1;
1271 #else
1272             thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
1273             if (!thread) {
1274                 PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
1275                 return NULL;
1276             }
1277             thread->threadAllocatedOnStack = 0;
1278             stack = NULL;
1279             top = NULL;
1280 #endif
1281
1282             /* Initialize thread */
1283             thread->tpdLength = 0;
1284             thread->privateData = NULL;
1285             thread->stack = stack;
1286             thread->priority = priority;
1287             thread->startFunc = start;
1288             thread->arg = arg;
1289             PR_INIT_CLIST(&thread->lockList);
1290
1291             if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
1292                 if (thread->threadAllocatedOnStack == 1)
1293                     _PR_FreeStack(thread->stack);
1294                 else {
1295                     PR_DELETE(thread);
1296                 }
1297                 PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
1298                 return NULL;
1299             }
1300
1301             if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
1302                 if (thread->threadAllocatedOnStack == 1)
1303                     _PR_FreeStack(thread->stack);
1304                 else {
1305                     PR_DELETE(thread->privateData);
1306                     PR_DELETE(thread);
1307                 }
1308                 PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
1309                 return NULL;
1310             }
1311
1312             _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);
1313
1314             if (status == PR_FALSE) {
1315                 _PR_MD_FREE_LOCK(&thread->threadLock);
1316                 if (thread->threadAllocatedOnStack == 1)
1317                     _PR_FreeStack(thread->stack);
1318                 else {
1319                     PR_DELETE(thread->privateData);
1320                     PR_DELETE(thread);
1321                 }
1322                 return NULL;
1323             }
1324
1325             /* 
1326               Set thread flags related to scope and joinable state. If joinable
1327               thread, allocate a "termination" condition variable.
1328             */
1329             if (state == PR_JOINABLE_THREAD) {
1330                 thread->term = PR_NewCondVar(_pr_terminationCVLock);
1331                 if (thread->term == NULL) {
1332                     _PR_MD_FREE_LOCK(&thread->threadLock);
1333                     if (thread->threadAllocatedOnStack == 1)
1334                         _PR_FreeStack(thread->stack);
1335                     else {
1336                         PR_DELETE(thread->privateData);
1337                         PR_DELETE(thread);
1338                     }
1339                     return NULL;
1340                 }
1341             }
1342   
1343         }
1344   
1345         /* Update thread type counter */
1346         PR_Lock(_pr_activeLock);
1347         thread->flags = flags;
1348         thread->id = ++_pr_utid;
1349         if (type == PR_SYSTEM_THREAD) {
1350             thread->flags |= _PR_SYSTEM;
1351             _pr_systemActive++;
1352         } else {
1353             _pr_userActive++;
1354         }
1355
1356         /* Make thread runnable */
1357         thread->state = _PR_RUNNABLE;
1358     /*
1359      * Add to list of active threads
1360      */
1361         PR_Unlock(_pr_activeLock);
1362
1363         if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) )
1364             thread->cpu = _PR_GetPrimordialCPU();
1365         else
1366             thread->cpu = _PR_MD_CURRENT_CPU();
1367
1368         PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));
1369
1370         if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
1371             _PR_INTSOFF(is);
1372             _PR_RUNQ_LOCK(thread->cpu);
1373             _PR_ADD_RUNQ(thread, thread->cpu, priority);
1374             _PR_RUNQ_UNLOCK(thread->cpu);
1375         }
1376
1377         if (thread->flags & _PR_IDLE_THREAD) {
1378             /*
1379             ** If the creating thread is a kernel thread, we need to
1380             ** awaken the user thread idle thread somehow; potentially
1381             ** it could be sleeping in its idle loop, and we need to poke
1382             ** it.  To do so, wake the idle thread...  
1383             */
1384             _PR_MD_WAKEUP_WAITER(NULL);
1385         } else if (_PR_IS_NATIVE_THREAD(me)) {
1386             _PR_MD_WAKEUP_WAITER(thread);
1387         }
1388         if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) )
1389             _PR_INTSON(is);
1390     }
1391
1392     return thread;
1393 }
1394
1395 PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type,
1396                      void (*start)(void *arg),
1397                      void *arg,
1398                      PRThreadPriority priority,
1399                      PRThreadScope scope,
1400                      PRThreadState state,
1401                      PRUint32 stackSize)
1402 {
1403     return _PR_CreateThread(type, start, arg, priority, scope, state, 
1404                             stackSize, 0);
1405 }
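/*
** Illustrative usage sketch (not part of the original source): creating a
** joinable, global-scope thread through the public API above and waiting for
** it with PR_JoinThread().  A stackSize of 0 selects the default stack size.
*/
#if 0
#include "prthread.h"
#include "prinit.h"

static void worker(void *arg)
{
    int *answer = (int *) arg;
    *answer = 42;                                   /* do some work */
}

int main(void)
{
    int result = 0;
    PRThread *t;

    PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
    t = PR_CreateThread(PR_USER_THREAD, worker, &result,
                        PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
                        PR_JOINABLE_THREAD, 0);
    if (t != NULL && PR_JoinThread(t) == PR_SUCCESS) {
        /* result is now 42 */
    }
    return (PR_Cleanup() == PR_SUCCESS) ? 0 : 1;
}
#endif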
1406
1407 /*
1408 ** Associate a thread object with an existing native thread.
1409 **     "type" is the type of thread object to attach
1410 **     "priority" is the priority to assign to the thread
1411 **     "stack" defines the shape of the threads stack
1412 **
1413 ** This can return NULL if some kind of error occurs, or if memory is
1414 ** tight.
1415 **
1416 ** This call is not normally needed unless you create your own native
1417 ** thread. PR_Init does this automatically for the primordial thread.
1418 */
1419 PRThread* _PRI_AttachThread(PRThreadType type,
1420     PRThreadPriority priority, PRThreadStack *stack, PRUint32 flags)
1421 {
1422     PRThread *thread;
1423
1424     if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) {
1425         return thread;
1426     }
1427     _PR_MD_SET_CURRENT_THREAD(NULL);
1428
1429     /* Clear out any state if this thread was attached before */
1430     _PR_MD_SET_CURRENT_CPU(NULL);
1431
1432     thread = _PR_AttachThread(type, priority, stack);
1433     if (thread) {
1434         PRIntn is;
1435
1436         _PR_MD_SET_CURRENT_THREAD(thread);
1437
1438         thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED;
1439
1440         if (!stack) {
1441             thread->stack = PR_NEWZAP(PRThreadStack);
1442             if (!thread->stack) {
1443                 _PR_DestroyThread(thread);
1444                 return NULL;
1445             }
1446             thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE;
1447         }
1448         PR_INIT_CLIST(&thread->links);
1449
1450         if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) {
1451                 PR_DELETE(thread->stack);
1452                 _PR_DestroyThread(thread);
1453                 return NULL;
1454         }
1455
1456         _PR_MD_SET_CURRENT_CPU(NULL);
1457
1458         if (_PR_MD_CURRENT_CPU()) {
1459             _PR_INTSOFF(is);
1460             PR_Lock(_pr_activeLock);
1461         }
1462         if (type == PR_SYSTEM_THREAD) {
1463             thread->flags |= _PR_SYSTEM;
1464             _pr_systemActive++;
1465         } else {
1466             _pr_userActive++;
1467         }
1468         if (_PR_MD_CURRENT_CPU()) {
1469             PR_Unlock(_pr_activeLock);
1470             _PR_INTSON(is);
1471         }
1472     }
1473     return thread;
1474 }
1475
1476 PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type,
1477     PRThreadPriority priority, PRThreadStack *stack)
1478 {
1479     return PR_GetCurrentThread();
1480 }
1481
1482 PR_IMPLEMENT(void) PR_DetachThread(void)
1483 {
1484     /*
1485      * On IRIX, Solaris, and Windows, foreign threads are detached when
1486      * they terminate.
1487      */
1488 #if !defined(IRIX) && !defined(WIN32) \
1489         && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY))
1490     PRThread *me;
1491     if (_pr_initialized) {
1492         me = _PR_MD_GET_ATTACHED_THREAD();
1493         if ((me != NULL) && (me->flags & _PR_ATTACHED))
1494             _PRI_DetachThread();
1495     }
1496 #endif
1497 }
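/*
** Illustrative sketch (not part of the original source): a thread created
** outside NSPR (a "foreign" native thread) is attached implicitly the first
** time it calls into NSPR; on platforms that do not auto-detach, it should
** call PR_DetachThread() before it exits so its PRThread wrapper is freed.
*/
#if 0
static void *foreign_entry(void *arg)     /* e.g. started with pthread_create */
{
    PRThread *self = PR_GetCurrentThread();  /* implicit attach happens here   */
    (void) self;
    /* ... use NSPR locks, condition variables, I/O, etc. ... */
    PR_DetachThread();                       /* release the PRThread wrapper   */
    return arg;
}
#endif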
1498
1499 void _PRI_DetachThread(void)
1500 {
1501     PRThread *me = _PR_MD_CURRENT_THREAD();
1502
1503         if (me->flags & _PR_PRIMORDIAL) {
1504                 /*
1505                  * ignore, if primordial thread
1506                  */
1507                 return;
1508         }
1509     PR_ASSERT(me->flags & _PR_ATTACHED);
1510     PR_ASSERT(_PR_IS_NATIVE_THREAD(me));
1511     _PR_CleanupThread(me);
1512     PR_DELETE(me->privateData);
1513
1514     _PR_DecrActiveThreadCount(me);
1515
1516     _PR_MD_CLEAN_THREAD(me);
1517     _PR_MD_SET_CURRENT_THREAD(NULL);
1518     if (!me->threadAllocatedOnStack) 
1519         PR_DELETE(me->stack);
1520     _PR_MD_FREE_LOCK(&me->threadLock);
1521     PR_DELETE(me);
1522 }
1523
1524 /*
1525 ** Wait for thread termination:
1526 **     "thread" is the target thread 
1527 **
1528 ** This can return PR_FAILURE if no joinable thread could be found 
1529 ** corresponding to the specified target thread.
1530 **
1531 ** The calling thread is suspended until the target thread completes.
1532 ** Several threads cannot wait for the same thread to complete; one join
1533 ** will complete successfully and the others will fail with PR_FAILURE.
1534 ** The calling thread will not be blocked if the target thread has already
1535 ** terminated.
1536 */
1537 PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread)
1538 {
1539     PRIntn is;
1540     PRCondVar *term;
1541     PRThread *me = _PR_MD_CURRENT_THREAD();
1542
1543     if (!_PR_IS_NATIVE_THREAD(me))
1544         _PR_INTSOFF(is);
1545     term = thread->term;
1546     /* can't join a non-joinable thread */
1547     if (term == NULL) {
1548         PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
1549         goto ErrorExit;
1550     }
1551
1552     /* multiple threads can't wait on the same joinable thread */
1553     if (term->condQ.next != &term->condQ) {
1554         goto ErrorExit;
1555     }
1556     if (!_PR_IS_NATIVE_THREAD(me))
1557         _PR_INTSON(is);
1558
1559     /* wait for the target thread's termination cv invariant */
1560     PR_Lock (_pr_terminationCVLock);
1561     while (thread->state != _PR_JOIN_WAIT) {
1562         (void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT);
1563     }
1564     (void) PR_Unlock (_pr_terminationCVLock);
1565     
1566     /* 
1567      Remove target thread from global waiting to join Q; make it runnable
1568      again and put it back on its run Q.  When it gets scheduled later in
1569      _PR_RunThread code, it will clean up its stack.
1570     */    
1571     if (!_PR_IS_NATIVE_THREAD(me))
1572         _PR_INTSOFF(is);
1573     thread->state = _PR_RUNNABLE;
1574     if ( !_PR_IS_NATIVE_THREAD(thread) ) {
1575         _PR_THREAD_LOCK(thread);
1576
1577         _PR_MISCQ_LOCK(thread->cpu);
1578         _PR_DEL_JOINQ(thread);
1579         _PR_MISCQ_UNLOCK(thread->cpu);
1580
1581         _PR_AddThreadToRunQ(me, thread);
1582         _PR_THREAD_UNLOCK(thread);
1583     }
1584     if (!_PR_IS_NATIVE_THREAD(me))
1585         _PR_INTSON(is);
1586
1587     _PR_MD_WAKEUP_WAITER(thread);
1588
1589     return PR_SUCCESS;
1590
1591 ErrorExit:
1592     if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);
1593     return PR_FAILURE;   
1594 }
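/*
** Illustrative sketch (not part of the original source): only one thread may
** join a given target, and only joinable (PR_JOINABLE_THREAD) targets may be
** joined; in the other cases PR_JoinThread() returns PR_FAILURE.
*/
#if 0
    PRStatus first  = PR_JoinThread(target);      /* sole joiner: PR_SUCCESS   */
    PRStatus second = PR_JoinThread(unjoinable);  /* no term cv:  PR_FAILURE   */
#endif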
1595
1596 PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread,
1597     PRThreadPriority newPri)
1598 {
1599
1600     /* 
1601     First, pin down the priority.  Not all compilers catch passing out of
1602     range enum here.  If we let bad values thru, priority queues won't work.
1603     */
1604     if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) {
1605         newPri = PR_PRIORITY_LAST;
1606     } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) {
1607         newPri = PR_PRIORITY_FIRST;
1608     }
1609         
1610     if ( _PR_IS_NATIVE_THREAD(thread) ) {
1611         thread->priority = newPri;
1612         _PR_MD_SET_PRIORITY(&(thread->md), newPri);
1613     } else _PR_SetThreadPriority(thread, newPri);
1614 }
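/*
** Illustrative sketch (not part of the original source): out-of-range values
** are clamped by the function above, so a computed priority may be passed
** without pre-checking it.
*/
#if 0
    PR_SetThreadPriority(PR_GetCurrentThread(), PR_PRIORITY_HIGH);
    /* (PRThreadPriority) 99 would be clamped to PR_PRIORITY_LAST */
#endif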
1615
1616
1617 /*
1618 ** This routine prevents all other threads from running. This call is needed by 
1619 ** the garbage collector.
1620 */
1621 PR_IMPLEMENT(void) PR_SuspendAll(void)
1622 {
1623     PRThread *me = _PR_MD_CURRENT_THREAD();
1624     PRCList *qp;
1625
1626     /*
1627      * Stop all user and native threads which are marked GC able.
1628      */
1629     PR_Lock(_pr_activeLock);
1630     suspendAllOn = PR_TRUE;
1631     suspendAllThread = _PR_MD_CURRENT_THREAD();
1632     _PR_MD_BEGIN_SUSPEND_ALL();
1633     for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1634         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
1635         if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && 
1636             _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
1637             _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp));
1638             PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING);
1639         }
1640     }
1641     for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1642         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
1643         if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1644             _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1645             /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */
1646             _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
1647     }
1648     _PR_MD_END_SUSPEND_ALL();
1649 }
1650
1651 /*
1652 ** This routine unblocks all other threads that were suspended from running by 
1653 ** PR_SuspendAll(). This call is needed by the garbage collector.
1654 */
1655 PR_IMPLEMENT(void) PR_ResumeAll(void)
1656 {
1657     PRThread *me = _PR_MD_CURRENT_THREAD();
1658     PRCList *qp;
1659
1660     /*
1661      * Resume all user and native threads that are marked as GCable.
1662      */
1663     _PR_MD_BEGIN_RESUME_ALL();
1664     for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1665         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
1666         if ((me != _PR_ACTIVE_THREAD_PTR(qp)) && 
1667             _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1668             _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp));
1669     }
1670     for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1671         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
1672         if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
1673             _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
1674             _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
1675     }
1676     _PR_MD_END_RESUME_ALL();
1677     suspendAllThread = NULL;
1678     suspendAllOn = PR_FALSE;
1679     PR_Unlock(_pr_activeLock);
1680 }
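
/*
** Illustrative sketch (not part of the original source): the stop-the-world
** pattern a garbage collector might build on PR_SuspendAll(), PR_ResumeAll()
** and PR_EnumerateThreads().  These are private GC-support entry points; the
** pprthred.h include below is an assumption about where they are declared.
** Guarded with #if 0.
*/
#if 0
#include "nspr.h"
#include "pprthred.h"

static PRStatus PR_CALLBACK scan_one_thread(PRThread *t, int i, void *arg)
{
    /* A real collector would scan t's stack and registers here. */
    (void)t; (void)i; (void)arg;
    return PR_SUCCESS;
}

static void gc_stop_the_world_scan(void)
{
    PR_SuspendAll();                            /* stop all GCable threads */
    PR_EnumerateThreads(scan_one_thread, NULL); /* visit each stopped thread */
    PR_ResumeAll();                             /* let the world run again */
}
#endif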
1681
1682 PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg)
1683 {
1684     PRCList *qp, *qp_next;
1685     PRIntn i = 0;
1686     PRStatus rv = PR_SUCCESS;
1687     PRThread* t;
1688
1689     /*
1690     ** Currently, thread enumeration happens only while all other threads
1691     ** are suspended and _pr_activeLock is held.
1692     */
1693     PR_ASSERT(suspendAllOn);
1694
1695     /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking
1696      * qp->next after applying the function "func".  In particular, "func"
1697      * might remove the thread from the queue and put it into another one in
1698      * which case qp->next no longer points to the next entry in the original
1699      * queue.
1700      *
1701      * To get around this problem, we save qp->next in qp_next before applying
1702      * "func" and use that saved value as the next value after applying "func".
1703      */
1704
1705     /*
1706      * Traverse the list of local and global threads
1707      */
1708     for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1709          qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next)
1710     {
1711         qp_next = qp->next;
1712         t = _PR_ACTIVE_THREAD_PTR(qp);
1713         if (_PR_IS_GCABLE_THREAD(t))
1714         {
1715             rv = (*func)(t, i, arg);
1716             if (rv != PR_SUCCESS)
1717                 return rv;
1718             i++;
1719         }
1720     }
1721     for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1722          qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next)
1723     {
1724         qp_next = qp->next;
1725         t = _PR_ACTIVE_THREAD_PTR(qp);
1726         if (_PR_IS_GCABLE_THREAD(t))
1727         {
1728             rv = (*func)(t, i, arg);
1729             if (rv != PR_SUCCESS)
1730                 return rv;
1731             i++;
1732         }
1733     }
1734     return rv;
1735 }
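
/*
** Illustrative sketch (not part of the original source): the save-the-next-
** pointer idiom used by PR_EnumerateThreads above, shown on a plain PRCList.
** Because the visitor may unlink the current node and move it to another
** queue, "next" must be captured before the visitor runs.  Guarded with #if 0.
*/
#if 0
#include "prclist.h"

typedef void (*node_visitor)(PRCList *node, void *arg);

static void visit_all(PRCList *head, node_visitor visit, void *arg)
{
    PRCList *qp, *qp_next;

    for (qp = head->next; qp != head; qp = qp_next) {
        qp_next = qp->next;     /* capture before visit() can relink qp */
        visit(qp, arg);
    }
}
#endif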
1736
1737 /* FUNCTION: _PR_AddSleepQ
1738 ** DESCRIPTION:
1739 **    Adds a thread to the sleep/pauseQ.
1740 ** RESTRICTIONS:
1741 **    Caller must have the RUNQ lock.
1742 **    Caller must be a user level thread
1743 */
1744 PR_IMPLEMENT(void)
1745 _PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout)
1746 {
1747     _PRCPU *cpu = thread->cpu;
1748
1749     if (timeout == PR_INTERVAL_NO_TIMEOUT) {
1750         /* no timeout: append the thread to the pause queue of its CPU */
1751         PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu));
1752         thread->flags |= _PR_ON_PAUSEQ;
1753     } else {
1754         PRIntervalTime sleep;
1755         PRCList *q;
1756         PRThread *t;
1757
1758         /* sort onto the CPU's sleepQ, which stores timeouts as deltas */
1759         sleep = timeout;
1760
1761         /* Check if we have the longest timeout */
1762         if (timeout >= _PR_SLEEPQMAX(cpu)) {
1763             PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu));
1764             thread->sleep = timeout - _PR_SLEEPQMAX(cpu);
1765             _PR_SLEEPQMAX(cpu) = timeout;
1766         } else {
1767             /* Sort the thread into the sleepQ at the appropriate point */
1768             q = _PR_SLEEPQ(cpu).next;
1769
1770             /* Now scan the list for where to insert this entry */
1771             while (q != &_PR_SLEEPQ(cpu)) {
1772                 t = _PR_THREAD_PTR(q);
1773                 if (sleep < t->sleep) {
1774                     /* Found sleeper to insert in front of */
1775                     break;
1776                 }
1777                 sleep -= t->sleep;
1778                 q = q->next;
1779             }
1780             thread->sleep = sleep;
1781             PR_INSERT_BEFORE(&thread->links, q);
1782
1783             /*
1784             ** Subtract our sleep time from the sleeper that follows us (there
1785             ** must be one) so that they remain relative to us.
1786             */
1787             PR_ASSERT (thread->links.next != &_PR_SLEEPQ(cpu));
1788           
1789             t = _PR_THREAD_PTR(thread->links.next);
1790             PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
1791             t->sleep -= sleep;
1792         }
1793
1794         thread->flags |= _PR_ON_SLEEPQ;
1795     }
1796 }
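
/*
** Illustrative sketch (not part of the original source): the delta-list
** insertion performed by _PR_AddSleepQ above, reduced to a standalone
** singly linked structure.  Each node stores its timeout relative to the
** node before it, so only the first entry ever needs to be ticked down.
** This omits the _PR_SLEEPQMAX bookkeeping; it is a simplification for
** illustration only.  Guarded with #if 0.
*/
#if 0
#include "prtypes.h"
#include "prinrval.h"

typedef struct DeltaNode {
    struct DeltaNode *next;
    PRIntervalTime delta;       /* time relative to the previous node */
} DeltaNode;

/*
** Insert "node" with the given absolute timeout so that the running sum of
** deltas along the list stays sorted.  "head" points to the first sleeper,
** or is NULL if the list is empty.
*/
static void delta_insert(DeltaNode **head, DeltaNode *node,
                         PRIntervalTime timeout)
{
    DeltaNode **pp = head;
    PRIntervalTime remaining = timeout;

    while (*pp != NULL && remaining >= (*pp)->delta) {
        remaining -= (*pp)->delta;          /* walk past earlier sleepers */
        pp = &(*pp)->next;
    }
    node->delta = remaining;
    node->next = *pp;
    *pp = node;
    if (node->next != NULL)
        node->next->delta -= remaining;     /* keep follower relative to us */
}
#endif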
1797
1798 /* FUNCTION: _PR_DelSleepQ
1799 ** DESCRIPTION:
1800 **    Removes a thread from the sleep/pauseQ.
1801 ** INPUTS:
1802 **    If propogate_time is true, then the thread following the deleted
1803 **    thread inherits the deleted thread's remaining sleep time.  This is
1804 **    used when deleting a sleeper that has not timed out.
1805 ** RESTRICTIONS:
1806 **    Caller must have the RUNQ lock.
1807 **    Caller must be a user level thread
1808 */
1809 PR_IMPLEMENT(void)
1810 _PR_DelSleepQ(PRThread *thread, PRBool propogate_time)
1811 {
1812     _PRCPU *cpu = thread->cpu;
1813
1814     /* Remove from pauseQ/sleepQ */
1815     if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
1816         if (thread->flags & _PR_ON_SLEEPQ) {
1817             PRCList *q = thread->links.next;
1818             if (q != &_PR_SLEEPQ(cpu)) {
1819                 if (propogate_time == PR_TRUE) {
1820                     PRThread *after = _PR_THREAD_PTR(q);
1821                     after->sleep += thread->sleep;
1822                 } else 
1823                     _PR_SLEEPQMAX(cpu) -= thread->sleep;
1824             } else {
1825                 /* Check if prev is the beginning of the list; if so,
1826                  * we are the only element on the list.  
1827                  */
1828                 if (thread->links.prev != &_PR_SLEEPQ(cpu))
1829                     _PR_SLEEPQMAX(cpu) -= thread->sleep;
1830                 else
1831                     _PR_SLEEPQMAX(cpu) = 0;
1832             }
1833             thread->flags &= ~_PR_ON_SLEEPQ;
1834         } else {
1835             thread->flags &= ~_PR_ON_PAUSEQ;
1836         }
1837         PR_REMOVE_LINK(&thread->links);
1838     } else 
1839         PR_ASSERT(0);
1840 }
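
/*
** Illustrative sketch (not part of the original source): the counterpart of
** the removal above, for the simplified delta list sketched after
** _PR_AddSleepQ.  When a sleeper is removed before it times out, its unused
** delta is handed to the node behind it, just as _PR_DelSleepQ does when
** propogate_time is true.  Guarded with #if 0.
*/
#if 0
/* Assumes the DeltaNode type from the previous sketch. */
static void delta_remove(DeltaNode **head, DeltaNode *node)
{
    DeltaNode **pp = head;

    while (*pp != NULL && *pp != node)
        pp = &(*pp)->next;
    if (*pp == NULL)
        return;                             /* not on the list */
    *pp = node->next;
    if (node->next != NULL)
        node->next->delta += node->delta;   /* propagate remaining time */
}
#endif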
1841
1842 void
1843 _PR_AddThreadToRunQ(
1844     PRThread *me,     /* the current thread */
1845     PRThread *thread) /* the local thread to be added to a run queue */
1846 {
1847     PRThreadPriority pri = thread->priority;
1848     _PRCPU *cpu = thread->cpu;
1849
1850     PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));
1851
1852 #if defined(WINNT)
1853     /*
1854      * On NT, we can only reliably know that the current CPU
1855      * is not idle.  We add the awakened thread to the run
1856      * queue of its CPU if its CPU is the current CPU.
1857      * For any other CPU, we don't really know whether it
1858      * is busy or idle.  So in all other cases, we just
1859      * "post" the awakened thread to the IO completion port
1860      * for the next idle CPU to execute (this is done in
1861      * _PR_MD_WAKEUP_WAITER).
1862      * Threads with a suspended I/O operation remain bound to
1863      * the same cpu until I/O is cancelled.
1864      *
1865      * NOTE: the boolean expression below must be the exact
1866      * opposite of the corresponding boolean expression in
1867      * _PR_MD_WAKEUP_WAITER.
1868      */
1869     if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
1870             (thread->md.thr_bound_cpu)) {
1871         PR_ASSERT(!thread->md.thr_bound_cpu ||
1872                   (thread->md.thr_bound_cpu == cpu));
1873         _PR_RUNQ_LOCK(cpu);
1874         _PR_ADD_RUNQ(thread, cpu, pri);
1875         _PR_RUNQ_UNLOCK(cpu);
1876     }
1877 #else
1878     _PR_RUNQ_LOCK(cpu);
1879     _PR_ADD_RUNQ(thread, cpu, pri);
1880     _PR_RUNQ_UNLOCK(cpu);
1881     if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
1882         if (pri > me->priority) {
1883             _PR_SET_RESCHED_FLAG();
1884         }
1885     }
1886 #endif
1887 }
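
/*
** Illustrative sketch (not part of the original source): a per-priority run
** queue of the kind _PR_ADD_RUNQ maintains, reduced to an array of circular
** lists indexed by priority.  The real macros also keep per-CPU bookkeeping
** (e.g. which priority levels are non-empty); this is a simplification for
** illustration only.  Guarded with #if 0.
*/
#if 0
#include "prclist.h"
#include "prthread.h"

typedef struct MiniRunQ {
    PRCList runQ[PR_PRIORITY_LAST + 1];     /* one list per priority level */
} MiniRunQ;

static void mini_runq_init(MiniRunQ *rq)
{
    PRIntn pri;

    for (pri = 0; pri <= (PRIntn)PR_PRIORITY_LAST; pri++)
        PR_INIT_CLIST(&rq->runQ[pri]);
}

static void mini_runq_add(MiniRunQ *rq, PRCList *links, PRThreadPriority pri)
{
    /* Append at the tail of its level: round-robin within a priority. */
    PR_APPEND_LINK(links, &rq->runQ[pri]);
}
#endif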