2 * Copyright (c) 2015-2016, Linaro Limited
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
#include <types_ext.h>
#include <tee_api_defines.h>
#include <string.h>
#include <optee_msg.h>
#include <kernel/spinlock.h>
#include <kernel/wait_queue.h>
#include <kernel/thread.h>
#include <trace.h>
/* Single global spinlock serializing access to all wait queues in this file */
static unsigned wq_spin_lock;
40 void wq_init(struct wait_queue *wq)
42 *wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
45 static void wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
46 const char *fname, int lineno __maybe_unused)
49 struct optee_msg_param params;
50 const char *cmd_str __maybe_unused =
51 func == OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";
54 DMSG("%s thread %u %p %s:%d", cmd_str, id,
55 sync_obj, fname, lineno);
57 DMSG("%s thread %u %p", cmd_str, id, sync_obj);
59 memset(¶ms, 0, sizeof(params));
60 params.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
61 params.u.value.a = func;
62 params.u.value.b = id;
64 ret = thread_rpc_cmd(OPTEE_MSG_RPC_CMD_WAIT_QUEUE, 1, ¶ms);
65 if (ret != TEE_SUCCESS)
66 DMSG("%s thread %u ret 0x%x", cmd_str, id, ret);
69 static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
71 struct wait_queue_elem *wqe_iter;
73 /* Add elem to end of wait queue */
74 wqe_iter = SLIST_FIRST(wq);
76 while (SLIST_NEXT(wqe_iter, link))
77 wqe_iter = SLIST_NEXT(wqe_iter, link);
78 SLIST_INSERT_AFTER(wqe_iter, wqe, link);
80 SLIST_INSERT_HEAD(wq, wqe, link);
83 void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
86 uint32_t old_itr_status;
88 wqe->handle = thread_get_id();
92 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
93 cpu_spin_lock(&wq_spin_lock);
95 slist_add_tail(wq, wqe);
97 cpu_spin_unlock(&wq_spin_lock);
98 thread_unmask_exceptions(old_itr_status);
101 void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
102 const void *sync_obj, const char *fname, int lineno)
104 uint32_t old_itr_status;
108 wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
109 sync_obj, fname, lineno);
111 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
112 cpu_spin_lock(&wq_spin_lock);
116 SLIST_REMOVE(wq, wqe, wait_queue_elem, link);
118 cpu_spin_unlock(&wq_spin_lock);
119 thread_unmask_exceptions(old_itr_status);
123 void wq_wake_one(struct wait_queue *wq, const void *sync_obj,
124 const char *fname, int lineno)
126 uint32_t old_itr_status;
127 struct wait_queue_elem *wqe;
129 bool do_wakeup = false;
131 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
132 cpu_spin_lock(&wq_spin_lock);
134 SLIST_FOREACH(wqe, wq, link) {
136 do_wakeup = !wqe->done;
138 handle = wqe->handle;
143 cpu_spin_unlock(&wq_spin_lock);
144 thread_unmask_exceptions(old_itr_status);
147 wq_rpc(OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP, handle,
148 sync_obj, fname, lineno);
151 void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
152 bool only_one, const void *sync_obj __unused,
153 const char *fname, int lineno __maybe_unused)
155 uint32_t old_itr_status;
156 struct wait_queue_elem *wqe;
161 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
162 cpu_spin_lock(&wq_spin_lock);
165 * Find condvar waiter(s) and promote each to an active waiter.
166 * This is a bit unfair to eventual other active waiters as a
167 * condvar waiter is added the the queue when waiting for the
170 SLIST_FOREACH(wqe, wq, link) {
173 FMSG("promote thread %u %p %s:%d",
174 wqe->handle, (void *)cv->m, fname, lineno);
176 FMSG("promote thread %u %p",
177 wqe->handle, (void *)cv->m);
185 cpu_spin_unlock(&wq_spin_lock);
186 thread_unmask_exceptions(old_itr_status);
189 bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
191 uint32_t old_itr_status;
192 struct wait_queue_elem *wqe;
195 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
196 cpu_spin_lock(&wq_spin_lock);
198 SLIST_FOREACH(wqe, wq, link) {
205 cpu_spin_unlock(&wq_spin_lock);
206 thread_unmask_exceptions(old_itr_status);
211 bool wq_is_empty(struct wait_queue *wq)
213 uint32_t old_itr_status;
216 old_itr_status = thread_mask_exceptions(THREAD_EXCP_ALL);
217 cpu_spin_lock(&wq_spin_lock);
219 ret = SLIST_EMPTY(wq);
221 cpu_spin_unlock(&wq_spin_lock);
222 thread_unmask_exceptions(old_itr_status);