2 * Copyright (c) 2015, Linaro Limited
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
28 #include <kernel/mutex.h>
29 #include <kernel/panic.h>
30 #include <kernel/spinlock.h>
31 #include <kernel/thread.h>
34 void mutex_init(struct mutex *m)
36 *m = (struct mutex)MUTEX_INITIALIZER;
39 static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
41 assert_have_no_spinlock();
42 assert(thread_get_id_may_fail() != -1);
45 uint32_t old_itr_status;
46 enum mutex_value old_value;
47 struct wait_queue_elem wqe;
48 int owner = MUTEX_OWNER_ID_NONE;
51 * If the mutex is locked we need to initialize the wqe
52 * before releasing the spinlock to guarantee that we don't
53 * miss the wakeup from mutex_unlock().
55 * If the mutex is unlocked we don't need to use the wqe at
59 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
60 cpu_spin_lock(&m->spin_lock);
63 if (old_value == MUTEX_VALUE_LOCKED) {
64 wq_wait_init(&m->wq, &wqe);
67 m->value = MUTEX_VALUE_LOCKED;
71 cpu_spin_unlock(&m->spin_lock);
72 thread_unmask_exceptions(old_itr_status);
74 if (old_value == MUTEX_VALUE_LOCKED) {
76 * Someone else is holding the lock, wait in normal
77 * world for the lock to become available.
79 wq_wait_final(&m->wq, &wqe, m, owner, fname, lineno);
85 static void __mutex_unlock(struct mutex *m, const char *fname, int lineno)
87 uint32_t old_itr_status;
89 assert_have_no_spinlock();
90 assert(thread_get_id_may_fail() != -1);
92 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
93 cpu_spin_lock(&m->spin_lock);
95 if (m->value != MUTEX_VALUE_LOCKED)
99 m->value = MUTEX_VALUE_UNLOCKED;
101 cpu_spin_unlock(&m->spin_lock);
102 thread_unmask_exceptions(old_itr_status);
104 wq_wake_one(&m->wq, m, fname, lineno);
107 static bool __mutex_trylock(struct mutex *m, const char *fname __unused,
110 uint32_t old_itr_status;
111 enum mutex_value old_value;
113 assert_have_no_spinlock();
114 assert(thread_get_id_may_fail() != -1);
116 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
117 cpu_spin_lock(&m->spin_lock);
119 old_value = m->value;
120 if (old_value == MUTEX_VALUE_UNLOCKED) {
121 m->value = MUTEX_VALUE_LOCKED;
125 cpu_spin_unlock(&m->spin_lock);
126 thread_unmask_exceptions(old_itr_status);
128 return old_value == MUTEX_VALUE_UNLOCKED;
131 #ifdef CFG_MUTEX_DEBUG
/* Debug build entry point: forwards the caller's location for lock tracing */
void mutex_unlock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_unlock(m, fname, lineno);
}
/* Debug build entry point: forwards the caller's location for lock tracing */
void mutex_lock_debug(struct mutex *m, const char *fname, int lineno)
{
	__mutex_lock(m, fname, lineno);
}
142 bool mutex_trylock_debug(struct mutex *m, const char *fname, int lineno)
144 return __mutex_trylock(m, fname, lineno);
147 void mutex_unlock(struct mutex *m)
149 __mutex_unlock(m, NULL, -1);
152 void mutex_lock(struct mutex *m)
154 __mutex_lock(m, NULL, -1);
157 bool mutex_trylock(struct mutex *m)
159 return __mutex_trylock(m, NULL, -1);
165 void mutex_destroy(struct mutex *m)
168 * Caller guarantees that no one will try to take the mutex so
169 * there's no need to take the spinlock before accessing it.
171 if (m->value != MUTEX_VALUE_UNLOCKED)
173 if (!wq_is_empty(&m->wq))
174 panic("waitqueue not empty");
177 void condvar_init(struct condvar *cv)
179 *cv = (struct condvar)CONDVAR_INITIALIZER;
182 void condvar_destroy(struct condvar *cv)
184 if (cv->m && wq_have_condvar(&cv->m->wq, cv))
190 static void cv_signal(struct condvar *cv, bool only_one, const char *fname,
193 uint32_t old_itr_status;
196 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
197 cpu_spin_lock(&cv->spin_lock);
199 cpu_spin_unlock(&cv->spin_lock);
200 thread_unmask_exceptions(old_itr_status);
203 wq_promote_condvar(&m->wq, cv, only_one, m, fname, lineno);
207 #ifdef CFG_MUTEX_DEBUG
208 void condvar_signal_debug(struct condvar *cv, const char *fname, int lineno)
210 cv_signal(cv, true /* only one */, fname, lineno);
213 void condvar_broadcast_debug(struct condvar *cv, const char *fname, int lineno)
215 cv_signal(cv, false /* all */, fname, lineno);
219 void condvar_signal(struct condvar *cv)
221 cv_signal(cv, true /* only one */, NULL, -1);
224 void condvar_broadcast(struct condvar *cv)
226 cv_signal(cv, false /* all */, NULL, -1);
228 #endif /*CFG_MUTEX_DEBUG*/
230 static void __condvar_wait(struct condvar *cv, struct mutex *m,
231 const char *fname, int lineno)
233 uint32_t old_itr_status;
234 struct wait_queue_elem wqe;
236 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
238 /* Link this condvar to this mutex until reinitialized */
239 cpu_spin_lock(&cv->spin_lock);
240 if (cv->m && cv->m != m)
241 panic("invalid mutex");
244 cpu_spin_unlock(&cv->spin_lock);
246 cpu_spin_lock(&m->spin_lock);
248 /* Add to mutex wait queue as a condvar waiter */
249 wq_wait_init_condvar(&m->wq, &wqe, cv);
251 /* Unlock the mutex */
252 if (m->value != MUTEX_VALUE_LOCKED)
256 m->value = MUTEX_VALUE_UNLOCKED;
258 cpu_spin_unlock(&m->spin_lock);
260 thread_unmask_exceptions(old_itr_status);
262 /* Wake eventual waiters */
263 wq_wake_one(&m->wq, m, fname, lineno);
265 wq_wait_final(&m->wq, &wqe,
266 m, MUTEX_OWNER_ID_CONDVAR_SLEEP, fname, lineno);
271 #ifdef CFG_MUTEX_DEBUG
/* Debug build entry point: wait on @cv, recording caller location */
void condvar_wait_debug(struct condvar *cv, struct mutex *m,
			const char *fname, int lineno)
{
	__condvar_wait(cv, m, fname, lineno);
}
278 void condvar_wait(struct condvar *cv, struct mutex *m)
280 __condvar_wait(cv, m, NULL, -1);