2 * See the file LICENSE for redistribution information.
4 * Copyright (c) 2002-2009 Oracle. All rights reserved.
11 #define LOAD_ACTUAL_MUTEX_CODE
14 #include "dbinc/atomic.h"
16 * This is where we load in the actual test-and-set mutex code.
18 #include "dbinc/mutex_int.h"
20 /* We don't want to run this code even in "ordinary" diagnostic mode. */
24 * Common code to get an event handle. This is executed whenever a mutex
25 * blocks, or when unlocking a mutex that a thread is waiting on. We can't
26 * keep these handles around, since the mutex structure is in shared memory,
27 * and each process gets its own handle value.
29 * We pass security attributes so that the created event is accessible by all
30 * users, in case a Windows service is sharing an environment with a local
31 * process run as a different user.
/* Hex digit lookup used by get_handle() to render the mutex id. */
33 static _TCHAR hex_digits[] = _T("0123456789abcdef");
/*
 * Security descriptor with a NULL DACL, and the attributes wrapping it,
 * built once in get_handle() so the named event is accessible to all
 * users (see the comment above about services sharing an environment).
 */
34 static SECURITY_DESCRIPTOR null_sd;
35 static SECURITY_ATTRIBUTES all_sa;
/*
 * Guards one-time setup of null_sd/all_sa.  NOTE(review): plain int with
 * no locking -- presumably benign because the initialization is
 * idempotent; confirm against the full file.
 */
36 static int security_initialized = 0;
/*
 * get_handle --
 *	Build the per-mutex event name ("db.m" followed by the mutex id in
 *	hex) and create/open the named Win32 event through *eventp.  The
 *	handle is created on demand every time because the mutex lives in
 *	shared memory and each process gets its own handle value (see the
 *	comment above).  NOTE(review): several original lines (parameter
 *	declarations, returns, braces) are elided from this chunk.
 */
38 static __inline int get_handle(env, mutexp, eventp)
43 _TCHAR idbuf[] = _T("db.m00000000");
44 _TCHAR *p = idbuf + 12;
/* Overwrite idbuf's trailing zeros, right to left, with the id in hex. */
48 for (id = (mutexp)->id; id != 0; id >>= 4)
49 *--p = hex_digits[id & 0xf];
/*
 * One-time build of the all-access security attributes: a NULL DACL
 * grants access to everyone, so processes running as different users
 * can open the same event.
 */
52 if (!security_initialized) {
53 InitializeSecurityDescriptor(&null_sd,
54 SECURITY_DESCRIPTOR_REVISION);
55 SetSecurityDescriptorDacl(&null_sd, TRUE, 0, FALSE);
56 all_sa.nLength = sizeof(SECURITY_ATTRIBUTES);
57 all_sa.bInheritHandle = FALSE;
58 all_sa.lpSecurityDescriptor = &null_sd;
59 security_initialized = 1;
/*
 * CreateEvent creates the named event or opens the existing one;
 * failure is reported as a system error (the error-return line is not
 * visible in this chunk).
 */
63 if ((*eventp = CreateEvent(&all_sa, FALSE, FALSE, idbuf)) == NULL) {
64 ret = __os_get_syserr();
65 __db_syserr(env, ret, "Win32 create event failed");
72 * __db_win32_mutex_lock_int
73 * Internal function to lock a win32 mutex
75 * If the wait parameter is 0, this function will return DB_LOCK_NOTGRANTED
/*
 * NOTE(review): this chunk elides many original lines (declarations,
 * braces, #endif's); the comments below describe only the visible logic.
 */
80 __db_win32_mutex_lock_int(env, mutex, wait)
88 DB_MUTEXREGION *mtxregion;
/* Locking can be disabled environment-wide; then acquiring is a no-op. */
98 if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
101 mtxmgr = env->mutex_handle;
102 mtxregion = mtxmgr->reginfo.primary;
/* Map the mutex id to its shared-memory DB_MUTEX structure. */
103 mutexp = MUTEXP_SET(mtxmgr, mutex);
105 CHECK_MTX_THREAD(env, mutexp);
108 * See WINCE_ATOMIC_MAGIC definition for details.
109 * Use sharecount, because the value just needs to be a db_atomic_t
110 * memory mapped onto the same page as those being Interlocked*.
112 WINCE_ATOMIC_MAGIC(&mutexp->sharecount);
119 * Only check the thread state once, by initializing the thread
120 * control block pointer to null. If it is not the failchk
121 * thread, then ip will have a valid value subsequent times
126 loop: /* Attempt to acquire the mutex mutex_tas_spins times, if waiting. */
128 mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) {
130 * We can avoid the (expensive) interlocked instructions if
131 * the mutex is already busy.
/* Busy path: test cheaply first, only then try the interlocked acquire. */
133 if (MUTEXP_IS_BUSY(mutexp) || !MUTEXP_ACQUIRE(mutexp)) {
/*
 * Under DB_ENV_FAILCHK, if the recorded holder thread is dead,
 * verify our own thread state; a dead holder means the
 * environment must be recovered.
 */
134 if (F_ISSET(dbenv, DB_ENV_FAILCHK) &&
135 ip == NULL && dbenv->is_alive(dbenv,
136 mutexp->pid, mutexp->tid, 0) == 0) {
137 ret = __env_set_state(env, &ip, THREAD_VERIFY);
139 ip->dbth_state == THREAD_FAILCHK)
140 return (DB_RUNRECOVERY);
/* Non-blocking callers (wait == 0) give up immediately when busy. */
143 return (DB_LOCK_NOTGRANTED);
145 * Some systems (notably those with newer Intel CPUs)
146 * need a small pause before retrying. [#6975]
/* Acquired.  A holder already recorded here is a fatal consistency error. */
153 if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
154 char buf[DB_THREADID_STRLEN];
156 "Win32 lock failed: mutex already locked by %s",
157 dbenv->thread_id_string(dbenv,
158 mutexp->pid, mutexp->tid, buf));
159 return (__env_panic(env, EACCES));
/* Record ownership (pid/tid) for failchk and diagnostics. */
162 F_SET(mutexp, DB_MUTEX_LOCKED);
163 dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);
165 #ifdef HAVE_STATISTICS
167 ++mutexp->mutex_set_nowait;
169 ++mutexp->mutex_set_wait;
/* We were counted as a waiter before blocking; drop out of the count. */
173 InterlockedDecrement(&mutexp->nwaiters);
/* Diagnostic trace of a wait that timed out (the "lost signal" race). */
175 if (ret != WAIT_OBJECT_0) {
176 QueryPerformanceCounter(&now);
177 printf("[%I64d]: Lost signal on mutex %p, "
179 now.QuadPart, mutexp, mutexp->id, ms);
186 * We want to switch threads as often as possible. Yield
187 * every time we get a mutex to ensure contention.
189 if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
190 __os_yield(env, 0, 0);
197 * Yield the processor; wait 50 ms initially, up to 1 second. This
198 * loop is needed to work around a race where the signal from the
199 * unlocking thread gets lost. We start at 50 ms because it's unlikely
200 * to happen often and we want to avoid wasting CPU.
204 QueryPerformanceCounter(&now);
205 printf("[%I64d]: Waiting on mutex %p, id %d\n",
206 now.QuadPart, mutexp, mutexp->id);
/* Register as a waiter, then block on the shared named event. */
208 InterlockedIncrement(&mutexp->nwaiters);
209 if ((ret = get_handle(env, mutexp, &event)) != 0)
212 if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) {
213 ret = __os_get_syserr();
/* Exponential backoff on the wait timeout, capped at one second. */
216 if ((ms <<= 1) > MS_PER_SEC)
/* Unrecoverable system error: report it and panic the environment. */
222 err: __db_syserr(env, ret, "Win32 lock failed");
223 return (__env_panic(env, __os_posix_err(ret)));
227 * __db_win32_mutex_init --
228 * Initialize a Win32 mutex.
230 * PUBLIC: int __db_win32_mutex_init __P((ENV *, db_mutex_t, u_int32_t));
233 __db_win32_mutex_init(env, mutex, flags)
240 mutexp = MUTEXP_SET(env->mutex_handle, mutex);
/*
 * Derive a (mostly) unique id used to name the wakeup event: the low 16
 * bits of the pid mixed with the structure's address.
 */
241 mutexp->id = ((getpid() & 0xffff) << 16) ^ P_TO_UINT32(mutexp);
242 F_SET(mutexp, flags);
248 * __db_win32_mutex_lock
249 * Lock on a mutex, blocking if necessary.
251 * PUBLIC: int __db_win32_mutex_lock __P((ENV *, db_mutex_t));
/* Blocking acquire: defer to the common path with wait == 1. */
254 __db_win32_mutex_lock(env, mutex)
258 return (__db_win32_mutex_lock_int(env, mutex, 1));
262 * __db_win32_mutex_trylock
263 * Try to lock a mutex, returning without waiting if it is busy
265 * PUBLIC: int __db_win32_mutex_trylock __P((ENV *, db_mutex_t));
/* Non-blocking acquire: common path with wait == 0, so a busy mutex
 * yields DB_LOCK_NOTGRANTED instead of blocking. */
268 __db_win32_mutex_trylock(env, mutex)
272 return (__db_win32_mutex_lock_int(env, mutex, 0));
275 #if defined(HAVE_SHARED_LATCHES)
277 * __db_win32_mutex_readlock_int
278 * Try to lock a mutex, possibly waiting if requested and necessary.
/*
 * NOTE(review): many original lines are elided in this chunk; the
 * comments below cover only the visible logic.
 */
281 __db_win32_mutex_readlock_int(env, mutex, nowait)
289 DB_MUTEXREGION *mtxregion;
293 long exch_ret, mtx_val;
/* No-op when locking is disabled for the environment. */
299 if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
302 mtxmgr = env->mutex_handle;
303 mtxregion = mtxmgr->reginfo.primary;
/* Map the mutex id to its shared-memory DB_MUTEX structure. */
304 mutexp = MUTEXP_SET(mtxmgr, mutex);
306 CHECK_MTX_THREAD(env, mutexp);
309 * See WINCE_ATOMIC_MAGIC definition for details.
310 * Use sharecount, because the value just needs to be a db_atomic_t
311 * memory mapped onto the same page as those being Interlocked*.
313 WINCE_ATOMIC_MAGIC(&mutexp->sharecount);
319 * This needs to be initialized, since if mutexp->tas
320 * is write locked on the first pass, it needs a value.
324 loop: /* Attempt to acquire the resource for N spins. */
326 mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) {
328 * We can avoid the (expensive) interlocked instructions if
329 * the mutex is already "set".
/*
 * Snapshot the share count.  MUTEX_SHARE_ISEXCLUSIVE means a writer
 * holds the latch; a nowait caller then fails with DB_LOCK_NOTGRANTED.
 */
331 retry: mtx_val = atomic_read(&mutexp->sharecount);
332 if (mtx_val == MUTEX_SHARE_ISEXCLUSIVE) {
334 return (DB_LOCK_NOTGRANTED);
/* CAS the reader count up by one; on CAS failure, pause and retry. */
337 } else if (!atomic_compare_exchange(env, &mutexp->sharecount,
338 mtx_val, mtx_val + 1)) {
340 * Some systems (notably those with newer Intel CPUs)
341 * need a small pause here. [#6975]
347 #ifdef HAVE_STATISTICS
349 ++mutexp->mutex_set_rd_nowait;
351 ++mutexp->mutex_set_rd_wait;
/* We were counted as a waiter before blocking; drop out of the count. */
355 InterlockedDecrement(&mutexp->nwaiters);
/* Diagnostic trace for a timed-out wait (the "lost signal" race). */
357 if (ret != WAIT_OBJECT_0) {
358 QueryPerformanceCounter(&now);
359 printf("[%I64d]: Lost signal on mutex %p, "
361 now.QuadPart, mutexp, mutexp->id, ms);
368 * We want to switch threads as often as possible. Yield
369 * every time we get a mutex to ensure contention.
371 if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
372 __os_yield(env, 0, 0);
379 * Yield the processor; wait 50 ms initially, up to 1 second. This
380 * loop is needed to work around a race where the signal from the
381 * unlocking thread gets lost. We start at 50 ms because it's unlikely
382 * to happen often and we want to avoid wasting CPU.
386 QueryPerformanceCounter(&now);
387 printf("[%I64d]: Waiting on mutex %p, id %d\n",
388 now.QuadPart, mutexp, mutexp->id);
/* Register as a waiter, then block on the shared named event. */
390 InterlockedIncrement(&mutexp->nwaiters);
391 if ((ret = get_handle(env, mutexp, &event)) != 0)
394 if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) {
395 ret = __os_get_syserr();
/* Exponential backoff on the wait timeout, capped at one second. */
398 if ((ms <<= 1) > MS_PER_SEC)
/* Unrecoverable system error: report it and panic the environment. */
404 err: __db_syserr(env, ret, "Win32 read lock failed");
405 return (__env_panic(env, __os_posix_err(ret)));
409 * __db_win32_mutex_readlock
410 * Get a shared lock on a latch
412 * PUBLIC: #if defined(HAVE_SHARED_LATCHES)
413 * PUBLIC: int __db_win32_mutex_readlock __P((ENV *, db_mutex_t));
/* Blocking shared (read) acquire: common path with nowait == 0. */
417 __db_win32_mutex_readlock(env, mutex)
421 return (__db_win32_mutex_readlock_int(env, mutex, 0));
425 * __db_win32_mutex_tryreadlock
426 * Try to get a shared lock on a latch
428 * PUBLIC: #if defined(HAVE_SHARED_LATCHES)
429 * PUBLIC: int __db_win32_mutex_tryreadlock __P((ENV *, db_mutex_t));
/* Non-blocking shared (read) acquire: common path with nowait == 1. */
433 __db_win32_mutex_tryreadlock(env, mutex)
437 return (__db_win32_mutex_readlock_int(env, mutex, 1));
442 * __db_win32_mutex_unlock --
445 * PUBLIC: int __db_win32_mutex_unlock __P((ENV *, db_mutex_t));
/*
 * NOTE(review): lines are elided in this chunk; the comments below cover
 * only the visible logic.
 */
448 __db_win32_mutex_unlock(env, mutex)
462 if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
465 mtxmgr = env->mutex_handle;
466 mutexp = MUTEXP_SET(mtxmgr, mutex);
/* Unlocking a mutex that nobody holds is a fatal consistency error. */
469 if (!MUTEXP_IS_BUSY(mutexp) || !(F_ISSET(mutexp, DB_MUTEX_SHARED) ||
470 F_ISSET(mutexp, DB_MUTEX_LOCKED))) {
472 "Win32 unlock failed: lock already unlocked: mutex %d busy %d",
473 mutex, MUTEXP_BUSY_FIELD(mutexp));
474 return (__env_panic(env, EACCES));
478 * If we have a shared latch, and a read lock (DB_MUTEX_LOCKED is only
479 * set for write locks), then decrement the latch. If the readlock is
480 * still held by other threads, just return. Otherwise go ahead and
481 * notify any waiting threads.
483 #ifdef HAVE_SHARED_LATCHES
484 if (F_ISSET(mutexp, DB_MUTEX_SHARED)) {
/*
 * Write unlock of a shared latch: swap the share count back to 0
 * and verify it really was held exclusively; anything else means
 * the region is corrupt and recovery is required.
 */
485 if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
486 F_CLR(mutexp, DB_MUTEX_LOCKED);
487 if ((ret = InterlockedExchange(
488 (interlocked_val)(&atomic_read(
489 &mutexp->sharecount)), 0)) !=
490 MUTEX_SHARE_ISEXCLUSIVE) {
491 ret = DB_RUNRECOVERY;
/* Read unlock with other readers remaining: nothing to signal. */
494 } else if (InterlockedDecrement(
495 (interlocked_val)(&atomic_read(&mutexp->sharecount))) > 0)
/* Plain mutex: clear ownership and release the test-and-set word. */
500 F_CLR(mutexp, DB_MUTEX_LOCKED);
501 MUTEX_UNSET(&mutexp->tas);
/* Wake any blocked threads via the shared named event. */
504 if (mutexp->nwaiters > 0) {
505 if ((ret = get_handle(env, mutexp, &event)) != 0)
509 QueryPerformanceCounter(&now);
510 printf("[%I64d]: Signalling mutex %p, id %d\n",
511 now.QuadPart, mutexp, mutexp->id);
/*
 * PulseEvent only releases threads already waiting; a locker that
 * has not yet reached WaitForSingleObject can miss the signal --
 * the lockers' timeout/backoff loop exists to cover that race
 * (see the "lost signal" comments in the lock paths).
 */
513 if (!PulseEvent(event)) {
514 ret = __os_get_syserr();
524 err: __db_syserr(env, ret, "Win32 unlock failed");
525 return (__env_panic(env, __os_posix_err(ret)));
529 * __db_win32_mutex_destroy --
532 * PUBLIC: int __db_win32_mutex_destroy __P((ENV *, db_mutex_t));
535 __db_win32_mutex_destroy(env, mutex)