libgo/runtime/lock_sema.c (GCC 4.8.2 upstream import, platform/upstream/gcc48.git)
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin netbsd openbsd plan9 windows

#include "runtime.h"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime_semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime_semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime_semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//

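The three primitives above are supplied by platform-specific code elsewhere in the runtime. Purely as an illustration of the semantics described in the comment, and not of any actual port, a POSIX-semaphore version could look like the sketch below; the example_ names, the use of malloc, and the exact signatures are assumptions of the sketch, and it relies on the runtime's usual typedefs from runtime.h.

#include <errno.h>
#include <semaphore.h>
#include <stdlib.h>
#include <time.h>

// Hypothetical: create the per-M semaphore; must return a non-zero value.
uintptr
example_semacreate(void)
{
	sem_t *sem;

	sem = malloc(sizeof *sem);
	if(sem == nil || sem_init(sem, 0, 0) != 0)
		runtime_throw("example_semacreate");
	return (uintptr)sem;	// never zero: malloc succeeded
}

// Hypothetical: sleep on m->waitsema, optionally bounded by ns nanoseconds.
int32
example_semasleep(int64 ns)
{
	sem_t *sem;
	struct timespec ts;

	sem = (sem_t*)runtime_m()->waitsema;
	if(ns < 0) {
		while(sem_wait(sem) != 0)
			if(errno != EINTR)
				runtime_throw("example_semasleep: sem_wait");
		return 0;	// acquired
	}
	// sem_timedwait takes an absolute CLOCK_REALTIME deadline.
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ns/1000000000LL;
	ts.tv_nsec += ns%1000000000LL;
	if(ts.tv_nsec >= 1000000000LL) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000LL;
	}
	if(sem_timedwait(sem, &ts) != 0)
		return -1;	// interrupted or timed out
	return 0;	// acquired
}

// Hypothetical: wake up mp, which is or will soon be sleeping on mp->waitsema.
void
example_semawakeup(M *mp)
{
	sem_post((sem_t*)mp->waitsema);
}
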
enum
{
	LOCKED = 1,

	ACTIVE_SPIN = 4,
	ACTIVE_SPIN_CNT = 30,
	PASSIVE_SPIN = 1,
};

void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->key points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued.  Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}

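runtime_lock and runtime_unlock pack two things into the single word l->key: bit 0 is the LOCKED flag, and the remaining bits hold the head of a LIFO list of waiting M's (M pointers are word-aligned, so bit 0 is always free). The sketch below spells out the push/pop steps that the two functions perform inline; the example_ helpers are hypothetical and exist only for illustration.

// Illustration only: make mp the new list head while keeping the lock held.
static uintptr
example_push_waiter(uintptr key, M *mp)
{
	mp->nextwaitm = (void*)(key & ~(uintptr)LOCKED);	// old head (or nil)
	return (uintptr)mp | LOCKED;				// mp is the new head
}

// Illustration only: remove the head waiter.  The returned word has the lock
// bit clear, so the woken M must still CAS the LOCKED bit back in before it
// owns the lock.
static uintptr
example_pop_waiter(uintptr key, M **wake)
{
	*wake = (M*)(key & ~(uintptr)LOCKED);
	return (uintptr)(*wake)->nextwaitm;
}
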
void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	for(;;) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M.  Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}
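
Callers treat Lock as an ordinary blocking mutex; its zero value is unlocked, which is what the nil-to-LOCKED CAS in runtime_lock relies on. A minimal, hypothetical caller:

// Hypothetical caller, for illustration only.
static Lock examplelock;	// zero value == unlocked
static int64 examplecount;

static void
example_increment(void)
{
	runtime_lock(&examplelock);
	examplecount++;		// protected by examplelock
	runtime_unlock(&examplelock);
}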

// One-time notifications.
void
runtime_noteclear(Note *n)
{
	n->key = 0;
}

void
runtime_notewakeup(Note *n)
{
	M *mp;

	do
		mp = runtime_atomicloadp((void**)&n->key);
	while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));

	// Successfully set n->key to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting.  Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups!  Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m.  Wake it up.
		runtime_semawakeup(mp);
	}
}

void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	if(m->profilehz > 0)
		runtime_setprof(false);
	runtime_semasleep(-1);
	if(m->profilehz > 0)
		runtime_setprof(true);
}
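
Together these form a one-shot event: one thread clears the Note and sleeps on it, another wakes it exactly once, and a wakeup that arrives before the sleep simply makes runtime_notesleep return immediately. A hypothetical sketch of the protocol, for illustration only:

// Hypothetical illustration of the one-time notification protocol.
static Note exampledone;

// Runs on the signalling m.
static void
example_finish(void)
{
	// ... produce the result the waiter needs ...
	runtime_notewakeup(&exampledone);	// at most one wakeup per noteclear
}

// Runs on the waiting m.
static void
example_wait(void)
{
	runtime_noteclear(&exampledone);	// must precede any possible wakeup
	// ... hand example_finish to another m ...
	runtime_notesleep(&exampledone);	// returns once example_finish has run
}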

void
runtime_notetsleep(Note *n, int64 ns)
{
	M *m;
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// Register for wakeup on n->key.
	if(!runtime_casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return;
	}

	if(m->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered.  Sleep.
		if(runtime_semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime_setprof(true);
			return;
		}

		// Interrupted or timed out.  Still registered.  Semaphore not acquired.
		now = runtime_nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived.  Keep sleeping.
		ns = deadline - now;
	}

	if(m->profilehz > 0)
		runtime_setprof(true);

	// Deadline arrived.  Still registered.  Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp((void**)&n->key, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}
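
runtime_notetsleep bounds the wait: it returns once the Note is signalled or once ns nanoseconds have elapsed, and in this version it does not report which happened, so callers re-check the state the wakeup was announcing. A hypothetical bounded wait, assuming the runtime's runtime_atomicload helper for uint32 loads:

// Hypothetical caller, for illustration only.
static Note exampleevent;
static uint32 exampleready;	// stored by the waker before its notewakeup

static bool
example_wait_at_most_10ms(void)
{
	runtime_noteclear(&exampleevent);
	// ... another m eventually sets exampleready and then calls
	// runtime_notewakeup(&exampleevent) ...
	runtime_notetsleep(&exampleevent, 10*1000*1000);	// at most 10ms
	// notetsleep does not say whether the note fired, so check the flag.
	return runtime_atomicload(&exampleready) != 0;
}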