/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __sctp_process_lock_h__
#define __sctp_process_lock_h__

/*
 * We still need to define the following atomic operations, or
 * equivalents of them:
 * - atomic_add_int(&foo, val) - atomically add the value.
 * - atomic_fetchadd_int(&foo, val) - does the same as atomic_add_int,
 *   but returns the value foo had before the add.
 * - atomic_subtract_int(&foo, val) - can be built from atomic_add_int().
 * - atomic_cmpset_int(&foo, value, newvalue) - sets foo to newvalue
 *   if and only if foo equals value. Returns 0 on failure,
 *   non-zero on success.
 */
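
/*
 * Illustrative sketch only (not part of this header): on a toolchain
 * providing the GCC/Clang __atomic builtins and statement expressions,
 * the operations above could be supplied like this:
 *
 *   #define atomic_add_int(addr, val) \
 *           (void)__atomic_add_fetch((addr), (val), __ATOMIC_SEQ_CST)
 *   #define atomic_fetchadd_int(addr, val) \
 *           __atomic_fetch_add((addr), (val), __ATOMIC_SEQ_CST)
 *   #define atomic_subtract_int(addr, val) \
 *           (void)__atomic_sub_fetch((addr), (val), __ATOMIC_SEQ_CST)
 *   #define atomic_cmpset_int(addr, oldval, newval) \
 *           __extension__ ({ \
 *                   int __old = (oldval); \
 *                   (int)__atomic_compare_exchange_n((addr), &__old, \
 *                       (newval), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
 *           })
 */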

#ifdef SCTP_PER_SOCKET_LOCKING
/*
 * per socket level locking
 */

#if defined(_WIN32)
/* Lock for INFO stuff */
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#else
#define SCTP_INP_INFO_LOCK_INIT()
#define SCTP_INP_INFO_RLOCK()
#define SCTP_INP_INFO_RUNLOCK()
#define SCTP_INP_INFO_WLOCK()
#define SCTP_INP_INFO_WUNLOCK()
#define SCTP_INP_INFO_LOCK_DESTROY()
#define SCTP_IPI_COUNT_INIT()
#define SCTP_IPI_COUNT_DESTROY()
#endif

#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
#define SCTP_TCB_SEND_LOCK(_tcb)
#define SCTP_TCB_SEND_UNLOCK(_tcb)

#define SCTP_INP_LOCK_INIT(_inp)
#define SCTP_INP_LOCK_DESTROY(_inp)

#define SCTP_INP_RLOCK(_inp)
#define SCTP_INP_RUNLOCK(_inp)
#define SCTP_INP_WLOCK(_inp)
#define SCTP_INP_WUNLOCK(_inp)
#define SCTP_INP_RLOCK_ASSERT(_inp)
#define SCTP_INP_WLOCK_ASSERT(_inp)
#define SCTP_INP_INCR_REF(_inp)
#define SCTP_INP_DECR_REF(_inp)

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
#define SCTP_ASOC_CREATE_LOCK(_inp)
#define SCTP_ASOC_CREATE_UNLOCK(_inp)

#define SCTP_INP_READ_INIT(_inp)
#define SCTP_INP_READ_DESTROY(_inp)
#define SCTP_INP_READ_LOCK(_inp)
#define SCTP_INP_READ_UNLOCK(_inp)

#define SCTP_TCB_LOCK_INIT(_tcb)
#define SCTP_TCB_LOCK_DESTROY(_tcb)
#define SCTP_TCB_LOCK(_tcb)
#define SCTP_TCB_TRYLOCK(_tcb) 1
#define SCTP_TCB_UNLOCK(_tcb)
#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
#define SCTP_TCB_LOCK_ASSERT(_tcb)
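
/*
 * Note: with SCTP_PER_SOCKET_LOCKING all of the macros above are no-ops
 * (and SCTP_TCB_TRYLOCK() always "succeeds"); serialization is presumed
 * to come from a single socket-level lock taken outside of these macros.
 */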

#else
/*
 * per tcb level locking
 */
#define SCTP_IPI_COUNT_INIT()

#if defined(_WIN32)
#define SCTP_WQ_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_LOCK_ASSERT()

#define SCTP_INP_INFO_LOCK_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_TRYLOCK() \
	TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))

#define SCTP_IP_PKTLOG_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_LOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))

/*
 * The INP locks are used for locking an SCTP endpoint. For example, if
 * we want to change something at the endpoint level, such as the
 * random_store or the cookie secrets, we take the INP lock.
 */
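/*
 * Usage sketch (illustrative only; "inp" is a hypothetical
 * struct sctp_inpcb * already looked up by the caller):
 *
 *   SCTP_INP_WLOCK(inp);
 *   ... update endpoint-level state, e.g. regenerate a cookie secret ...
 *   SCTP_INP_WUNLOCK(inp);
 */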
#define SCTP_INP_READ_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)

#define SCTP_INP_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	EnterCriticalSection(&(_inp)->inp_mtx); \
} while (0)
#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	EnterCriticalSection(&(_inp)->inp_mtx); \
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RLOCK_ASSERT(_inp)
#define SCTP_INP_WLOCK_ASSERT(_inp)

#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_send_mtx)

#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
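/*
 * Reference-count sketch (illustrative only): a lookup path can pin the
 * endpoint, drop its lock while working, and unpin afterwards:
 *
 *   SCTP_INP_INCR_REF(inp);
 *   SCTP_INP_WUNLOCK(inp);
 *   ... use the endpoint without it being freed underneath us ...
 *   SCTP_INP_WLOCK(inp);
 *   SCTP_INP_DECR_REF(inp);
 */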

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	InitializeCriticalSection(&(_inp)->inp_create_mtx)
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	DeleteCriticalSection(&(_inp)->inp_create_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
	EnterCriticalSection(&(_inp)->inp_create_mtx); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	EnterCriticalSection(&(_inp)->inp_create_mtx)
#endif

#define SCTP_INP_RUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_mtx)
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	LeaveCriticalSection(&(_inp)->inp_create_mtx)

/*
 * For the majority of things (once we have found the association) we
 * lock the actual association mutex. This protects all the association-
 * level queues and streams and such. We still need to lock the socket
 * layer when we stuff data up into the receiving sb_mb. That is, we need
 * an extra SOCKBUF_LOCK(&so->so_rcv) even though the association is
 * locked.
 */
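/*
 * Lock-ordering sketch (illustrative only; "stcb" and "so" are a
 * hypothetical struct sctp_tcb * and struct socket * held by the caller):
 *
 *   SCTP_TCB_LOCK(stcb);
 *   SOCKBUF_LOCK(&so->so_rcv);
 *   ... append the received data to so->so_rcv ...
 *   SOCKBUF_UNLOCK(&so->so_rcv);
 *   SCTP_TCB_UNLOCK(stcb);
 */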
#define SCTP_TCB_LOCK_INIT(_tcb) \
	InitializeCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	DeleteCriticalSection(&(_tcb)->tcb_mtx)
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	EnterCriticalSection(&(_tcb)->tcb_mtx); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	EnterCriticalSection(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_TRYLOCK(_tcb) (TryEnterCriticalSection(&(_tcb)->tcb_mtx))
#define SCTP_TCB_UNLOCK(_tcb) \
	LeaveCriticalSection(&(_tcb)->tcb_mtx)
#define SCTP_TCB_LOCK_ASSERT(_tcb)

#else /* all Userspaces except Windows */
#define SCTP_WQ_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_WQ_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
#ifdef INVARIANTS
#define SCTP_WQ_ADDR_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx already locked", __func__))
#define SCTP_WQ_ADDR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx)) == 0, ("%s: wq_addr_mtx not locked", __func__))
#else
#define SCTP_WQ_ADDR_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
#define SCTP_WQ_ADDR_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
#endif
#define SCTP_WQ_ADDR_LOCK_ASSERT() \
	KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(wq_addr_mtx)) == EBUSY, ("%s: wq_addr_mtx not locked", __func__))

#define SCTP_INP_INFO_LOCK_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_ep_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_INFO_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
#ifdef INVARIANTS
#define SCTP_INP_INFO_RLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
#define SCTP_INP_INFO_WLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx already locked", __func__))
#define SCTP_INP_INFO_RUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
#define SCTP_INP_INFO_WUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx)) == 0, ("%s: ipi_ep_mtx not locked", __func__))
#else
#define SCTP_INP_INFO_RLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#define SCTP_INP_INFO_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
#endif
#define SCTP_INP_INFO_TRYLOCK() \
	(!(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx))))

#define SCTP_IP_PKTLOG_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IP_PKTLOG_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#ifdef INVARIANTS
#define SCTP_IP_PKTLOG_LOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx already locked", __func__))
#define SCTP_IP_PKTLOG_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx)) == 0, ("%s: ipi_pktlog_mtx not locked", __func__))
#else
#define SCTP_IP_PKTLOG_LOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#define SCTP_IP_PKTLOG_UNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
#endif

/*
 * The INP locks are used for locking an SCTP endpoint. For example, if
 * we want to change something at the endpoint level, such as the
 * random_store or the cookie secrets, we take the INP lock.
 */
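/*
 * Note: pthread mutexes are not reader/writer locks, so in this branch
 * the *_RLOCK and *_WLOCK macros below all take the same exclusive
 * mutex; the read/write distinction survives only in the macro names.
 */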
#define SCTP_INP_READ_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_READ_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
#ifdef INVARIANTS
#define SCTP_INP_READ_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx already locked", __func__))
#define SCTP_INP_READ_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_rdata_mtx) == 0, ("%s: inp_rdata_mtx not locked", __func__))
#else
#define SCTP_INP_READ_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx)
#define SCTP_INP_READ_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
#endif

#define SCTP_INP_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_INP_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__)); \
} while (0)
#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__)); \
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
#define SCTP_INP_WLOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx already locked", __func__))
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
#define SCTP_INP_WUNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_mtx) == 0, ("%s: inp_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_INP_RLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx); \
} while (0)
#define SCTP_INP_WLOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_INP); \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx); \
} while (0)
#else
#define SCTP_INP_RLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#define SCTP_INP_WLOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#define SCTP_INP_WUNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
#endif
#define SCTP_INP_RLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__))
#define SCTP_INP_WLOCK_ASSERT(_inp) \
	KASSERT(pthread_mutex_trylock(&(_inp)->inp_mtx) == EBUSY, ("%s: inp_mtx not locked", __func__))
#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)

#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_send_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_send_mtx)
#ifdef INVARIANTS
#define SCTP_TCB_SEND_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx already locked", __func__))
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_send_mtx) == 0, ("%s: tcb_send_mtx not locked", __func__))
#else
#define SCTP_TCB_SEND_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_send_mtx)
#define SCTP_TCB_SEND_UNLOCK(_tcb) \
	(void)pthread_mutex_unlock(&(_tcb)->tcb_send_mtx)
#endif

#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__)); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	KASSERT(pthread_mutex_lock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx already locked", __func__))
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	KASSERT(pthread_mutex_unlock(&(_inp)->inp_create_mtx) == 0, ("%s: inp_create_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_ASOC_CREATE_LOCK(_inp) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_inp, NULL, SCTP_LOG_LOCK_CREATE); \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx); \
} while (0)
#else
#define SCTP_ASOC_CREATE_LOCK(_inp) \
	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx)
#endif
#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
#endif

/*
 * For the majority of things (once we have found the association) we
 * lock the actual association mutex. This protects all the association-
 * level queues and streams and such. We still need to lock the socket
 * layer when we stuff data up into the receiving sb_mb. That is, we need
 * an extra SOCKBUF_LOCK(&so->so_rcv) even though the association is
 * locked.
 */
#define SCTP_TCB_LOCK_INIT(_tcb) \
	(void)pthread_mutex_init(&(_tcb)->tcb_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_TCB_LOCK_DESTROY(_tcb) \
	(void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
#ifdef INVARIANTS
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__)); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	KASSERT(pthread_mutex_lock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx already locked", __func__))
#endif
#define SCTP_TCB_UNLOCK(_tcb) \
	KASSERT(pthread_mutex_unlock(&(_tcb)->tcb_mtx) == 0, ("%s: tcb_mtx not locked", __func__))
#else
#ifdef SCTP_LOCK_LOGGING
#define SCTP_TCB_LOCK(_tcb) do { \
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) \
		sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB); \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx); \
} while (0)
#else
#define SCTP_TCB_LOCK(_tcb) \
	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_UNLOCK(_tcb) (void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
#endif
#define SCTP_TCB_LOCK_ASSERT(_tcb) \
	KASSERT(pthread_mutex_trylock(&(_tcb)->tcb_mtx) == EBUSY, ("%s: tcb_mtx not locked", __func__))
#define SCTP_TCB_TRYLOCK(_tcb) (!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
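/*
 * Note: pthread_mutex_trylock() returns 0 when the lock was acquired and
 * EBUSY when it is already held, so the negation above makes
 * SCTP_TCB_TRYLOCK() non-zero on success, matching the return convention
 * of TryEnterCriticalSection() in the Windows branch.
 */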

#endif /* !defined(_WIN32) */
#endif /* SCTP_PER_SOCKET_LOCKING */

/* copied over to compile */
#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
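/*
 * Note: these stubs always report "not contended", so any code that
 * adapts its behaviour to lock contention never triggers in this build.
 */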

#if defined(_WIN32)
#define SOCKBUF_LOCK_ASSERT(_so_buf)
#define SOCKBUF_LOCK(_so_buf) \
	EnterCriticalSection(&(_so_buf)->sb_mtx)
#define SOCKBUF_UNLOCK(_so_buf) \
	LeaveCriticalSection(&(_so_buf)->sb_mtx)
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#else
#define SOCKBUF_LOCK_ASSERT(_so_buf) \
	KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s: socket buffer not locked", __func__))
#ifdef INVARIANTS
#define SOCKBUF_LOCK(_so_buf) \
	KASSERT(pthread_mutex_lock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx already locked", __func__))
#define SOCKBUF_UNLOCK(_so_buf) \
	KASSERT(pthread_mutex_unlock(SOCKBUF_MTX(_so_buf)) == 0, ("%s: sockbuf_mtx not locked", __func__))
#else
#define SOCKBUF_LOCK(_so_buf) \
	(void)pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
#define SOCKBUF_UNLOCK(_so_buf) \
	(void)pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
#endif
#define SOCK_LOCK(_so) \
	SOCKBUF_LOCK(&(_so)->so_rcv)
#define SOCK_UNLOCK(_so) \
	SOCKBUF_UNLOCK(&(_so)->so_rcv)
#endif
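/*
 * Note: SOCK_LOCK()/SOCK_UNLOCK() simply reuse the receive-buffer lock;
 * this userspace build keeps no separate per-socket mutex.
 */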

#define SCTP_STATLOG_INIT_LOCK()
#define SCTP_STATLOG_LOCK()
#define SCTP_STATLOG_UNLOCK()
#define SCTP_STATLOG_DESTROY()

#if defined(_WIN32)
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_DESTROY() \
	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()

#define SCTP_ITERATOR_LOCK_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.it_mtx)

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)

#else
/* address list locks */
#define SCTP_IPI_ADDR_INIT() \
	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_addr_mtx), &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ADDR_DESTROY() \
	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
#ifdef INVARIANTS
#define SCTP_IPI_ADDR_RLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
#define SCTP_IPI_ADDR_RUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
#define SCTP_IPI_ADDR_WLOCK() \
	KASSERT(pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx already locked", __func__))
#define SCTP_IPI_ADDR_WUNLOCK() \
	KASSERT(pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx)) == 0, ("%s: ipi_addr_mtx not locked", __func__))
#define SCTP_IPI_ADDR_LOCK_ASSERT() \
	KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_addr_mtx)) == EBUSY, ("%s: ipi_addr_mtx not locked", __func__))
#define SCTP_IPI_ADDR_WLOCK_ASSERT() \
	KASSERT(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_addr_mtx)) == EBUSY, ("%s: ipi_addr_mtx not locked", __func__))
#else
#define SCTP_IPI_ADDR_RLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_RUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WLOCK() \
	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_WUNLOCK() \
	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
#define SCTP_IPI_ADDR_LOCK_ASSERT()
#define SCTP_IPI_ADDR_WLOCK_ASSERT()
#endif

#define SCTP_ITERATOR_LOCK_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_ITERATOR_LOCK_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
#ifdef INVARIANTS
#define SCTP_ITERATOR_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx already locked", __func__))
#define SCTP_ITERATOR_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.it_mtx) == 0, ("%s: it_mtx not locked", __func__))
#else
#define SCTP_ITERATOR_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx)
#define SCTP_ITERATOR_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
#endif

#define SCTP_IPI_ITERATOR_WQ_INIT() \
	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, &SCTP_BASE_VAR(mtx_attr))
#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
#ifdef INVARIANTS
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	KASSERT(pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx already locked", __func__))
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	KASSERT(pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx) == 0, ("%s: ipi_iterator_wq_mtx not locked", __func__))
#else
#define SCTP_IPI_ITERATOR_WQ_LOCK() \
	(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
#endif
#endif /* !defined(_WIN32) */

#define SCTP_INCR_EP_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_DECR_EP_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1)

#define SCTP_INCR_ASOC_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_DECR_ASOC_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1)

#define SCTP_INCR_LADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_DECR_LADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1)

#define SCTP_INCR_RADDR_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_DECR_RADDR_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1)

#define SCTP_INCR_CHK_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_DECR_CHK_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1)

#define SCTP_INCR_READQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_DECR_READQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1)

#define SCTP_INCR_STRMOQ_COUNT() \
	atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)

#define SCTP_DECR_STRMOQ_COUNT() \
	atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1)

#endif /* __sctp_process_lock_h__ */