// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// spinlock.cpp
//

#include "common.h"

#include "slist.h"
#include "spinlock.h"
#include "threads.h"
#include "corhost.h"

enum
{
    BACKOFF_LIMIT = 1000            // used in spin to acquire
};
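
// Illustrative usage (a minimal sketch, not code from this file): a SpinLock
// must be Init'ed before first use, and every acquire must be paired with a
// release on the same thread. The member name m_MySpinLock is hypothetical.
//
//     SpinLock m_MySpinLock;
//     m_MySpinLock.Init(LOCK_TYPE_DEFAULT, /* RequireCoopGC */ false);
//
//     Thread* pThread = GetThread();
//     SpinLock::AcquireLock(&m_MySpinLock, pThread);
//     // ... critical section: must not trigger a GC ...
//     SpinLock::ReleaseLock(&m_MySpinLock, pThread);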

#ifdef _DEBUG

// profile information
ULONG   SpinLockProfiler::s_ulBackOffs = 0;
ULONG   SpinLockProfiler::s_ulCollisons [LOCK_TYPE_DEFAULT + 1] = { 0 };
ULONG   SpinLockProfiler::s_ulSpins [LOCK_TYPE_DEFAULT + 1] = { 0 };

#endif

SpinLock::SpinLock()
{
    // Global SpinLock variables will cause the constructor to be
    // called during DllInit, which means we cannot use full contracts
    // because we have not called InitUtilCode yet.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    m_Initialized = UnInitialized;
}


SpinLock::~SpinLock()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;
}

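// Initialization races through three states: UnInitialized ->
// BeingInitialized -> Initialized. The first caller to CAS the state from
// UnInitialized to BeingInitialized performs the setup; concurrent callers
// either observe Initialized and return, or yield until the winner is done.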
void SpinLock::Init(LOCK_TYPE type, bool RequireCoopGC)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    if (m_Initialized == Initialized)
    {
        _ASSERTE (type == m_LockType);
        _ASSERTE (RequireCoopGC == m_requireCoopGCMode);

        // We have already initialized this spinlock.
        return;
    }

    while (TRUE)
    {
        LONG curValue = FastInterlockCompareExchange((LONG*)&m_Initialized, BeingInitialized, UnInitialized);
        if (curValue == Initialized)
        {
            return;
        }
        else if (curValue == UnInitialized)
        {
            // We are the first to initialize the lock.
            break;
        }
        else
        {
            // Another thread is initializing the lock; yield until it finishes.
            __SwitchToThread(10, CALLER_LIMITS_SPINNING);
        }
    }

    m_lock = 0;

#ifdef _DEBUG
    m_LockType = type;
    m_requireCoopGCMode = RequireCoopGC;
#endif

    _ASSERTE (m_Initialized == BeingInitialized);
    m_Initialized = Initialized;
}

#ifdef _DEBUG
BOOL SpinLock::OwnedByCurrentThread()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    return m_holdingThreadId.IsCurrentThread();
}
#endif

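// AcquireLock and ReleaseLock are static entry points that take the lock as
// an explicit argument; this shape suits holder-style (RAII) wrappers, though
// that usage is an assumption and is not shown in this file.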
DEBUG_NOINLINE void SpinLock::AcquireLock(SpinLock *s, Thread * pThread)
{
    SCAN_SCOPE_BEGIN;
    STATIC_CONTRACT_GC_NOTRIGGER;

    s->GetLock(pThread);
}

DEBUG_NOINLINE void SpinLock::ReleaseLock(SpinLock *s, Thread * pThread)
{
    SCAN_SCOPE_END;

    s->FreeLock(pThread);
}


void SpinLock::GetLock(Thread* pThread)
{
    CONTRACTL
    {
        DISABLED(THROWS);  // need to rewrite spin locks to no-throw.
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    _ASSERTE(m_Initialized == Initialized);

#ifdef _DEBUG
    dbg_PreEnterLock();
#endif

    // We use interlocked operations on m_lock to acquire the lock.  This
    // will automatically cause us to call EE_LOCK_TAKEN(this).
    if (!GetLockNoWait())
    {
        SpinToAcquire();
    }

    INCTHREADLOCKCOUNTTHREAD(pThread);
#ifdef _DEBUG
    m_holdingThreadId.SetToCurrentThread();
    dbg_EnterLock();
#endif
}

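// GetLockNoWait below is a test-and-test-and-set: a plain VolatileLoad
// screens out the contended case before the FastInterlockExchange, so
// waiting threads mostly spin on a read rather than repeatedly writing
// (and bouncing) the lock's cache line.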
//----------------------------------------------------------------------------
// SpinLock::GetLockNoWait
//   Uses an interlocked exchange for a fast, non-blocking lock acquire.
//
BOOL SpinLock::GetLockNoWait()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    if (VolatileLoad(&m_lock) == 0 && FastInterlockExchange (&m_lock, 1) == 0)
    {
        EE_LOCK_TAKEN(this);
        return TRUE;
    }
    return FALSE;
}

//----------------------------------------------------------------------------
// SpinLock::FreeLock
//   Release the spinlock.
//
void SpinLock::FreeLock(Thread* pThread)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    _ASSERTE(m_Initialized == Initialized);

#ifdef _DEBUG
    _ASSERTE(OwnedByCurrentThread());
    m_holdingThreadId.Clear();
    dbg_LeaveLock();
#endif

    VolatileStore(&m_lock, (LONG)0);

    DECTHREADLOCKCOUNTTHREAD(pThread);
    EE_LOCK_RELEASED(this);

} // SpinLock::FreeLock ()

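// The acquire loop below alternates a bounded user-mode spin (pausing with
// YieldProcessorNormalized each iteration) with an OS-level yield via
// __SwitchToThread, passing an increasing backoff count so that under
// sustained contention the thread defers progressively to the scheduler.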
//----------------------------------------------------------------------------
// SpinLock::SpinToAcquire, non-inline function, called from inline Acquire
//
//   Spin waiting for a spinlock to become free.
//
void
SpinLock::SpinToAcquire()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    DWORD backoffs = 0;
    ULONG ulSpins = 0;
    YieldProcessorNormalizationInfo normalizationInfo;

    while (true)
    {
        for (ULONG i = ulSpins + 10000; ulSpins < i; ulSpins++)
        {
            // Indicate to the processor that we are spinning.
            YieldProcessorNormalized(normalizationInfo);

            // Note: Must use VolatileLoad to ensure the lock is
            // refetched from memory.
            if (VolatileLoad(&m_lock) == 0)
            {
                break;
            }
        }

        // Try the inline atomic test again.
        if (GetLockNoWait())
        {
            // EE_LOCK_TAKEN(this) has already been called by GetLockNoWait.
            break;
        }

        // Back off.
        __SwitchToThread(0, backoffs++);
    }

#ifdef _DEBUG
    // Profile info.
    SpinLockProfiler::IncrementCollisions (m_LockType);
    SpinLockProfiler::IncrementSpins (m_LockType, ulSpins);
    SpinLockProfiler::IncrementBackoffs (backoffs);
#endif

} // SpinLock::SpinToAcquire ()
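
// The debug-only hooks below enforce two invariants: spinlocks never nest
// (tracked via TSNC_OwnsSpinLock), and a lock that requires cooperative GC
// mode is never entered from preemptive mode.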

#ifdef _DEBUG
// If a GC is not allowed when we enter the lock, we'd better not do anything
// inside the lock that could provoke a GC.  Otherwise other threads attempting
// to acquire the lock (which are presumably in the same GC mode as this one)
// will block.  This will cause a deadlock if we do attempt a GC, because we
// can't suspend the blocking threads and we can't release the spin lock.
void SpinLock::dbg_PreEnterLock()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    Thread* pThread = GetThread();
    if (pThread)
    {
        // SpinLocks cannot be nested.
        _ASSERTE ((pThread->m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);

        pThread->SetThreadStateNC(Thread::TSNC_OwnsSpinLock);

        if (!pThread->PreemptiveGCDisabled())
            _ASSERTE(!m_requireCoopGCMode);
    }
}

void SpinLock::dbg_EnterLock()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    Thread* pThread = GetThread();
    if (pThread)
    {
        INCONTRACT(pThread->BeginNoTriggerGC(__FILE__, __LINE__));
    }
}

void SpinLock::dbg_LeaveLock()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    Thread* pThread = GetThread();
    if (pThread)
    {
        _ASSERTE ((pThread->m_StateNC & Thread::TSNC_OwnsSpinLock) != 0);
        pThread->ResetThreadStateNC(Thread::TSNC_OwnsSpinLock);
        INCONTRACT(pThread->EndNoTriggerGC());
    }
}

void SpinLockProfiler::InitStatics ()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    s_ulBackOffs = 0;
    memset (s_ulCollisons, 0, sizeof (s_ulCollisons));
    memset (s_ulSpins, 0, sizeof (s_ulSpins));
}

void SpinLockProfiler::IncrementSpins (LOCK_TYPE type, ULONG value)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    _ASSERTE(type <= LOCK_TYPE_DEFAULT);
    s_ulSpins [type] += value;
}

void SpinLockProfiler::IncrementCollisions (LOCK_TYPE type)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    ++s_ulCollisons [type];
}

void SpinLockProfiler::IncrementBackoffs (ULONG value)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    s_ulBackOffs += value;
}

void SpinLockProfiler::DumpStatics()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        DEBUG_ONLY;
    }
    CONTRACTL_END;

    //<TODO>todo </TODO>
}

#endif  // _DEBUG

// End of file: spinlock.cpp