/*
 * Copyright (c) 2008-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

static void
dummy_function(void)
{
}

static long
dummy_function_r0(void)
{
	return 0;
}

static struct dispatch_semaphore_s _dispatch_thread_mediator[] = {
	{
		.do_vtable = &_dispatch_semaphore_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	},
	{
		.do_vtable = &_dispatch_semaphore_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	},
	{
		.do_vtable = &_dispatch_semaphore_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	},
	{
		.do_vtable = &_dispatch_semaphore_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	},
	{
		.do_vtable = &_dispatch_semaphore_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	},
	{
		.do_vtable = &_dispatch_semaphore_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	},
};

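// The root queues are laid out in overcommit pairs: even indices are the
// plain queues and odd indices their overcommit variants (low = 0/1,
// default = 2/3, high = 4/5), which is the indexing the helper below relies on.
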
static inline dispatch_queue_t
_dispatch_get_root_queue(long priority, bool overcommit)
{
	if (overcommit) switch (priority) {
	case DISPATCH_QUEUE_PRIORITY_LOW:
		return &_dispatch_root_queues[1];
	case DISPATCH_QUEUE_PRIORITY_DEFAULT:
		return &_dispatch_root_queues[3];
	case DISPATCH_QUEUE_PRIORITY_HIGH:
		return &_dispatch_root_queues[5];
	}
	switch (priority) {
	case DISPATCH_QUEUE_PRIORITY_LOW:
		return &_dispatch_root_queues[0];
	default:
	case DISPATCH_QUEUE_PRIORITY_DEFAULT:
		return &_dispatch_root_queues[2];
	case DISPATCH_QUEUE_PRIORITY_HIGH:
		return &_dispatch_root_queues[4];
	}
}

#ifdef __BLOCKS__
dispatch_block_t
_dispatch_Block_copy(dispatch_block_t db)
{
	dispatch_block_t rval;

	while (!(rval = Block_copy(db))) {
		sleep(1);
	}

	return rval;
}
#define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x))

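// Note: the macro above shadows the function so that callers get the result
// cast back to the precise Block type of the argument; the out-of-line
// definition keeps the plain dispatch_block_t signature.
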
void
_dispatch_call_block_and_release(void *block)
{
	void (^b)(void) = block;
	b();
	Block_release(b);
}

void
_dispatch_call_block_and_release2(void *block, void *ctxt)
{
	void (^b)(void*) = block;
	b(ctxt);
	Block_release(b);
}

#endif /* __BLOCKS__ */

struct dispatch_queue_attr_vtable_s {
	DISPATCH_VTABLE_HEADER(dispatch_queue_attr_s);
};

struct dispatch_queue_attr_s {
	DISPATCH_STRUCT_HEADER(dispatch_queue_attr_s, dispatch_queue_attr_vtable_s);

#ifndef DISPATCH_NO_LEGACY
	// Public:
	int qa_priority;
	void* finalizer_ctxt;
	dispatch_queue_finalizer_function_t finalizer_func;

	// Private:
	unsigned long qa_flags;
#endif
};

static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset);

#define _dispatch_queue_trylock(dq) dispatch_atomic_cmpxchg(&(dq)->dq_running, 0, 1)
static inline void _dispatch_queue_unlock(dispatch_queue_t dq);
static void _dispatch_queue_invoke(dispatch_queue_t dq);
static void _dispatch_queue_drain(dispatch_queue_t dq);
static bool _dispatch_queue_wakeup_global(dispatch_queue_t dq);
static struct dispatch_object_s *_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq);

static bool _dispatch_program_is_probably_callback_driven;

#if DISPATCH_COCOA_COMPAT
void (*dispatch_begin_thread_4GC)(void) = dummy_function;
void (*dispatch_end_thread_4GC)(void) = dummy_function;
void *(*_dispatch_begin_NSAutoReleasePool)(void) = (void *)dummy_function;
void (*_dispatch_end_NSAutoReleasePool)(void *) = (void *)dummy_function;
static void _dispatch_queue_wakeup_main(void);

static dispatch_once_t _dispatch_main_q_port_pred;
static bool main_q_is_draining;
static mach_port_t main_q_port;
#endif

static void _dispatch_cache_cleanup2(void *value);

static const struct dispatch_queue_vtable_s _dispatch_queue_vtable = {
	.do_type = DISPATCH_QUEUE_TYPE,
	.do_kind = "queue",
	.do_dispose = _dispatch_queue_dispose,
	.do_invoke = (void *)dummy_function_r0,
	.do_probe = (void *)dummy_function_r0,
	.do_debug = dispatch_queue_debug,
};

static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = {
	.do_type = DISPATCH_QUEUE_GLOBAL_TYPE,
	.do_kind = "global-queue",
	.do_debug = dispatch_queue_debug,
	.do_probe = _dispatch_queue_wakeup_global,
};

#define MAX_THREAD_COUNT 255

struct dispatch_root_queue_context_s {
#if HAVE_PTHREAD_WORKQUEUES
	pthread_workqueue_t dgq_kworkqueue;
#endif
	uint32_t dgq_pending;
	uint32_t dgq_thread_pool_size;
	dispatch_semaphore_t dgq_thread_mediator;
};

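// One context per root queue below: each pairs a mediator semaphore with a
// countdown of thread pool slots; _dispatch_queue_wakeup_global() only spawns
// a worker after winning the cmpxchg that decrements dgq_thread_pool_size.
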
static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = {
	{
		.dgq_thread_mediator = &_dispatch_thread_mediator[0],
		.dgq_thread_pool_size = MAX_THREAD_COUNT,
	},
	{
		.dgq_thread_mediator = &_dispatch_thread_mediator[1],
		.dgq_thread_pool_size = MAX_THREAD_COUNT,
	},
	{
		.dgq_thread_mediator = &_dispatch_thread_mediator[2],
		.dgq_thread_pool_size = MAX_THREAD_COUNT,
	},
	{
		.dgq_thread_mediator = &_dispatch_thread_mediator[3],
		.dgq_thread_pool_size = MAX_THREAD_COUNT,
	},
	{
		.dgq_thread_mediator = &_dispatch_thread_mediator[4],
		.dgq_thread_pool_size = MAX_THREAD_COUNT,
	},
	{
		.dgq_thread_mediator = &_dispatch_thread_mediator[5],
		.dgq_thread_pool_size = MAX_THREAD_COUNT,
	},
};

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
// dq_running is set to 2 so that barrier operations go through the slow path
struct dispatch_queue_s _dispatch_root_queues[] = {
	{
		.do_vtable = &_dispatch_queue_root_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[0],

		.dq_label = "com.apple.root.low-priority",
		.dq_running = 2,
		.dq_width = UINT32_MAX,
		.dq_serialnum = 4,
	},
	{
		.do_vtable = &_dispatch_queue_root_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[1],

		.dq_label = "com.apple.root.low-overcommit-priority",
		.dq_running = 2,
		.dq_width = UINT32_MAX,
		.dq_serialnum = 5,
	},
	{
		.do_vtable = &_dispatch_queue_root_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[2],

		.dq_label = "com.apple.root.default-priority",
		.dq_running = 2,
		.dq_width = UINT32_MAX,
		.dq_serialnum = 6,
	},
	{
		.do_vtable = &_dispatch_queue_root_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[3],

		.dq_label = "com.apple.root.default-overcommit-priority",
		.dq_running = 2,
		.dq_width = UINT32_MAX,
		.dq_serialnum = 7,
	},
	{
		.do_vtable = &_dispatch_queue_root_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[4],

		.dq_label = "com.apple.root.high-priority",
		.dq_running = 2,
		.dq_width = UINT32_MAX,
		.dq_serialnum = 8,
	},
	{
		.do_vtable = &_dispatch_queue_root_vtable,
		.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
		.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
		.do_ctxt = &_dispatch_root_queue_contexts[5],

		.dq_label = "com.apple.root.high-overcommit-priority",
		.dq_running = 2,
		.dq_width = UINT32_MAX,
		.dq_serialnum = 9,
	},
};

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
struct dispatch_queue_s _dispatch_main_q = {
	.do_vtable = &_dispatch_queue_vtable,
	.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
	.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
	.do_targetq = &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_COUNT / 2],

	.dq_label = "com.apple.main-thread",
	.dq_running = 1,
	.dq_width = 1,
	.dq_serialnum = 1,
};

#if DISPATCH_PERF_MON
static OSSpinLock _dispatch_stats_lock;
static size_t _dispatch_bad_ratio;
static struct {
	uint64_t time_total;
	uint64_t count_total;
	uint64_t thread_total;
} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set
static void _dispatch_queue_merge_stats(uint64_t start);
#endif

static void *_dispatch_worker_thread(void *context);
static void _dispatch_worker_thread2(void *context);

malloc_zone_t *_dispatch_ccache_zone;

static inline void
_dispatch_continuation_free(dispatch_continuation_t dc)
{
	dispatch_continuation_t prev_dc = _dispatch_thread_getspecific(dispatch_cache_key);
	dc->do_next = prev_dc;
	_dispatch_thread_setspecific(dispatch_cache_key, dc);
}

static inline void
_dispatch_continuation_pop(dispatch_object_t dou)
{
	dispatch_continuation_t dc = dou._dc;
	dispatch_group_t dg;

	if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
		return _dispatch_queue_invoke(dou._dq);
	}

	// Add the item back to the cache before calling the function. This
	// allows the 'hot' continuation to be used for a quick callback.
	//
	// The ccache version is per-thread.
	// Therefore, the object has not been reused yet.
	// This generates better assembly.
	if ((long)dou._do->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
		_dispatch_continuation_free(dc);
	}
	if ((long)dou._do->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
		dg = dc->dc_group;
	} else {
		dg = NULL;
	}
	dc->dc_func(dc->dc_ctxt);
	if (dg) {
		dispatch_group_leave(dg);
		_dispatch_release(dg);
	}
}

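// The consumer side of the lock-free global queue: the head pointer is
// atomically swapped with a sentinel ("mediator") value, which both claims
// the current list for this thread and signals racing consumers and the
// enqueuer that the head is in transition.
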
struct dispatch_object_s *
_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq)
{
	struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul;

	// The mediator value acts both as a "lock" and a signal
	head = dispatch_atomic_xchg(&dq->dq_items_head, mediator);

	if (slowpath(head == NULL)) {
		// The first xchg on the tail will tell the enqueueing thread that it
		// is safe to blindly write out to the head pointer. A cmpxchg honors
		// the algorithm.
		dispatch_atomic_cmpxchg(&dq->dq_items_head, mediator, NULL);
		_dispatch_debug("no work on global work queue");
		return NULL;
	}

	if (slowpath(head == mediator)) {
		// This thread lost the race for ownership of the queue.
		//
		// The ratio of work to libdispatch overhead must be bad. This
		// scenario implies that there are too many threads in the pool.
		// Create a new pending thread and then exit this thread.
		// The kernel will grant a new thread when the load subsides.
		_dispatch_debug("Contention on queue: %p", dq);
		_dispatch_queue_wakeup_global(dq);
#if DISPATCH_PERF_MON
		dispatch_atomic_inc(&_dispatch_bad_ratio);
#endif
		return NULL;
	}

	// Restore the head pointer to a sane value before returning.
	// If 'next' is NULL, then this item _might_ be the last item.
	next = fastpath(head->do_next);

	if (slowpath(!next)) {
		dq->dq_items_head = NULL;

		if (dispatch_atomic_cmpxchg(&dq->dq_items_tail, head, NULL)) {
			// both head and tail are NULL now
			goto out;
		}

		// There must be a next item now. This thread won't wait long.
		while (!(next = head->do_next)) {
			_dispatch_hardware_pause();
		}
	}

	dq->dq_items_head = next;
	_dispatch_queue_wakeup_global(dq);
out:
	return head;
}

dispatch_queue_t
dispatch_get_current_queue(void)
{
	return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true);
}

#undef dispatch_get_main_queue
__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
dispatch_queue_t dispatch_get_main_queue(void);

dispatch_queue_t
dispatch_get_main_queue(void)
{
	return &_dispatch_main_q;
}
#define dispatch_get_main_queue() (&_dispatch_main_q)

struct _dispatch_hw_config_s _dispatch_hw_config;

static void
_dispatch_queue_set_width_init(void)
{
#if defined(__APPLE__)
	size_t valsz = sizeof(uint32_t);
	int ret;

	ret = sysctlbyname("hw.activecpu", &_dispatch_hw_config.cc_max_active,
	    &valsz, NULL, 0);
	(void)dispatch_assume_zero(ret);
	dispatch_assume(valsz == sizeof(uint32_t));

	ret = sysctlbyname("hw.logicalcpu_max",
	    &_dispatch_hw_config.cc_max_logical, &valsz, NULL, 0);
	(void)dispatch_assume_zero(ret);
	dispatch_assume(valsz == sizeof(uint32_t));

	ret = sysctlbyname("hw.physicalcpu_max",
	    &_dispatch_hw_config.cc_max_physical, &valsz, NULL, 0);
	(void)dispatch_assume_zero(ret);
	dispatch_assume(valsz == sizeof(uint32_t));
#elif defined(__FreeBSD__)
	size_t valsz = sizeof(uint32_t);
	int ret;

	ret = sysctlbyname("kern.smp.cpus", &_dispatch_hw_config.cc_max_active,
	    &valsz, NULL, 0);
	(void)dispatch_assume_zero(ret);
	(void)dispatch_assume(valsz == sizeof(uint32_t));

	_dispatch_hw_config.cc_max_logical =
	    _dispatch_hw_config.cc_max_physical =
	    _dispatch_hw_config.cc_max_active;
#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
	_dispatch_hw_config.cc_max_active = (int)sysconf(_SC_NPROCESSORS_ONLN);
	if (_dispatch_hw_config.cc_max_active < 0)
		_dispatch_hw_config.cc_max_active = 1;
	_dispatch_hw_config.cc_max_logical =
	    _dispatch_hw_config.cc_max_physical =
	    _dispatch_hw_config.cc_max_active;
#else
#warning "_dispatch_queue_set_width_init: no supported way to query CPU count"
	_dispatch_hw_config.cc_max_logical =
	    _dispatch_hw_config.cc_max_physical =
	    _dispatch_hw_config.cc_max_active = 1;
#endif
}

void
dispatch_queue_set_width(dispatch_queue_t dq, long width)
{
	int w = (int)width; // intentional truncation
	uint32_t tmp;

	if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
		return;
	}
	if (w == 1 || w == 0) {
		dq->dq_width = 1;
		return;
	}
	if (w > 0) {
		tmp = w;
	} else switch (w) {
	case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS:
		tmp = _dispatch_hw_config.cc_max_physical;
		break;
	case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS:
		tmp = _dispatch_hw_config.cc_max_active;
		break;
	default:
	case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS:
		tmp = _dispatch_hw_config.cc_max_logical;
		break;
	}
	// multiply by two since the running count is inc/dec by two (the low bit == barrier)
	dq->dq_width = tmp * 2;

	// XXX if the queue has items and the width is increased, we should try to wake the queue
}

// skip zero
// 1 - main_q
// 2 - mgr_q
// 3 - _unused_
// 4,5,6,7,8,9 - global queues
// we use 'xadd' on Intel, so the initial value == next assigned
static unsigned long _dispatch_queue_serial_numbers = 10;

// Note to later developers: ensure that any initialization changes are
// made for statically allocated queues (i.e. _dispatch_main_q).
static inline void
_dispatch_queue_init(dispatch_queue_t dq)
{
	dq->do_vtable = &_dispatch_queue_vtable;
	dq->do_next = DISPATCH_OBJECT_LISTLESS;
	dq->do_ref_cnt = 1;
	dq->do_xref_cnt = 1;
	dq->do_targetq = _dispatch_get_root_queue(0, true);
	dq->dq_running = 0;
	dq->dq_width = 1;
	dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1;
}

dispatch_queue_t
dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
{
	dispatch_queue_t dq;
	size_t label_len;

	if (!label) {
		label = "";
	}

	label_len = strlen(label);
	if (label_len < (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1)) {
		label_len = (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1);
	}

	// XXX switch to malloc()
	dq = calloc(1ul, sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_MIN_LABEL_SIZE + label_len + 1);
	if (slowpath(!dq)) {
		return dq;
	}

	_dispatch_queue_init(dq);
	strcpy(dq->dq_label, label);

#ifndef DISPATCH_NO_LEGACY
	if (slowpath(attr)) {
		dq->do_targetq = _dispatch_get_root_queue(attr->qa_priority, attr->qa_flags & DISPATCH_QUEUE_OVERCOMMIT);
		dq->dq_finalizer_ctxt = attr->finalizer_ctxt;
		dq->dq_finalizer_func = attr->finalizer_func;
#ifdef __BLOCKS__
		if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) {
			// if finalizer_ctxt is a Block, retain it.
			dq->dq_finalizer_ctxt = Block_copy(dq->dq_finalizer_ctxt);
			if (!(dq->dq_finalizer_ctxt)) {
				goto out_bad;
			}
		}
#endif
	}
#endif

	return dq;

out_bad:
	free(dq);
	return NULL;
}

#if !defined(DISPATCH_NO_LEGACY) && defined(__BLOCKS__)

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
void
_dispatch_queue_dispose(dispatch_queue_t dq)
{
	if (slowpath(dq == _dispatch_queue_get_current())) {
		DISPATCH_CRASH("Release of a queue by itself");
	}
	if (slowpath(dq->dq_items_tail)) {
		DISPATCH_CRASH("Release of a queue while items are enqueued");
	}

#ifndef DISPATCH_NO_LEGACY
	if (dq->dq_finalizer_func) {
		dq->dq_finalizer_func(dq->dq_finalizer_ctxt, dq);
	}
#endif

	// trash the tail queue so that use after free will crash
	dq->dq_items_tail = (void *)0x200;

	_dispatch_dispose(dq);
}

DISPATCH_NOINLINE
void
_dispatch_queue_push_list_slow(dispatch_queue_t dq, struct dispatch_object_s *obj)
{
	// The queue must be retained before dq_items_head is written in order
	// to ensure that the reference is still valid when _dispatch_wakeup is
	// called. Otherwise, if preempted between the assignment to
	// dq_items_head and _dispatch_wakeup, the blocks submitted to the
	// queue may release the last reference to the queue when invoked by
	// _dispatch_queue_drain. <rdar://problem/6932776>
	_dispatch_retain(dq);
	dq->dq_items_head = obj;
	_dispatch_wakeup(dq);
	_dispatch_release(dq);
}

DISPATCH_NOINLINE
static void
_dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func)
{
	dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap());

	dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
	dc->dc_func = func;
	dc->dc_ctxt = context;

	_dispatch_queue_push(dq, dc);
}

#ifdef __BLOCKS__
void
dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void))
{
	dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release);
}
#endif

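// The async entry points below try the calling thread's continuation cache
// first and fall back to the *_slow variants (heap allocation) only when the
// cache is empty; popped continuations refill the cache.
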
DISPATCH_NOINLINE
void
dispatch_barrier_async_f(dispatch_queue_t dq, void *context, dispatch_function_t func)
{
	dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly());

	if (!dc) {
		return _dispatch_barrier_async_f_slow(dq, context, func);
	}

	dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
	dc->dc_func = func;
	dc->dc_ctxt = context;

	_dispatch_queue_push(dq, dc);
}

DISPATCH_NOINLINE
static void
_dispatch_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func)
{
	dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap());

	dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
	dc->dc_func = func;
	dc->dc_ctxt = context;

	_dispatch_queue_push(dq, dc);
}

#ifdef __BLOCKS__
void
dispatch_async(dispatch_queue_t dq, void (^work)(void))
{
	dispatch_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release);
}
#endif

DISPATCH_NOINLINE
void
dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
	dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly());

	// unlike dispatch_sync_f(), we do NOT need to check the queue width,
	// the "drain" function will do this test

	if (!dc) {
		return _dispatch_async_f_slow(dq, ctxt, func);
	}

	dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
	dc->dc_func = func;
	dc->dc_ctxt = ctxt;

	_dispatch_queue_push(dq, dc);
}

struct dispatch_barrier_sync_slow2_s {
	dispatch_queue_t dbss2_dq;
	dispatch_function_t dbss2_func;
	void *dbss2_ctxt;
	dispatch_semaphore_t dbss2_sema;
};

static void
_dispatch_barrier_sync_f_slow_invoke(void *ctxt)
{
	struct dispatch_barrier_sync_slow2_s *dbss2 = ctxt;

	dispatch_assert(dbss2->dbss2_dq == dispatch_get_current_queue());
	// ALL blocks on the main queue, must be run on the main thread
	if (dbss2->dbss2_dq == dispatch_get_main_queue()) {
		dbss2->dbss2_func(dbss2->dbss2_ctxt);
	} else {
		dispatch_suspend(dbss2->dbss2_dq);
	}
	dispatch_semaphore_signal(dbss2->dbss2_sema);
}

DISPATCH_NOINLINE
static void
_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
	// It's preferred to execute synchronous blocks on the current thread
	// due to thread-local side effects, garbage collection, etc. However,
	// blocks submitted to the main thread MUST be run on the main thread

	struct dispatch_barrier_sync_slow2_s dbss2 = {
		.dbss2_dq = dq,
		.dbss2_func = func,
		.dbss2_ctxt = ctxt,
		.dbss2_sema = _dispatch_get_thread_semaphore(),
	};
	struct dispatch_barrier_sync_slow_s {
		DISPATCH_CONTINUATION_HEADER(dispatch_barrier_sync_slow_s);
	} dbss = {
		.do_vtable = (void *)DISPATCH_OBJ_BARRIER_BIT,
		.dc_func = _dispatch_barrier_sync_f_slow_invoke,
		.dc_ctxt = &dbss2,
	};

	dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
	_dispatch_queue_push(dq, (void *)&dbss);
	dispatch_semaphore_wait(dbss2.dbss2_sema, DISPATCH_TIME_FOREVER);

	if (dq != dispatch_get_main_queue()) {
		_dispatch_thread_setspecific(dispatch_queue_key, dq);
		func(ctxt);
		_dispatch_workitem_inc();
		_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
		dispatch_resume(dq);
	}
	_dispatch_put_thread_semaphore(dbss2.dbss2_sema);
}

#ifdef __BLOCKS__
void
dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void))
{
	// Blocks submitted to the main queue MUST be run on the main thread,
	// therefore we must Block_copy in order to notify the thread-local
	// garbage collector that the objects are transferring to the main thread
	if (dq == dispatch_get_main_queue()) {
		dispatch_block_t block = Block_copy(work);
		return dispatch_barrier_sync_f(dq, block, _dispatch_call_block_and_release);
	}
	struct Block_basic *bb = (void *)work;

	dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke);
}
#endif

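// Fast path: when nothing is enqueued ahead of the caller, the queue is not
// suspended, and the trylock (dq_running 0 -> 1) succeeds, the barrier below
// runs inline on the calling thread with no enqueue, wakeup, or semaphore wait.
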
DISPATCH_NOINLINE
void
dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
	dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);

	// 1) ensure that this thread hasn't enqueued anything ahead of this call
	// 2) the queue is not suspended
	// 3) the queue is not weird
	if (slowpath(dq->dq_items_tail)
			|| slowpath(DISPATCH_OBJECT_SUSPENDED(dq))
			|| slowpath(!_dispatch_queue_trylock(dq))) {
		return _dispatch_barrier_sync_f_slow(dq, ctxt, func);
	}

	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	func(ctxt);
	_dispatch_workitem_inc();
	_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
	_dispatch_queue_unlock(dq);
}

static void
_dispatch_sync_f_slow2(void *ctxt)
{
	dispatch_queue_t dq = _dispatch_queue_get_current();
	dispatch_atomic_add(&dq->dq_running, 2);
	dispatch_semaphore_signal(ctxt);
}

DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_t dq)
{
	// the global root queues do not need strict ordering
	if (dq->do_targetq == NULL) {
		dispatch_atomic_add(&dq->dq_running, 2);
		return;
	}

	struct dispatch_sync_slow_s {
		DISPATCH_CONTINUATION_HEADER(dispatch_sync_slow_s);
	} dss = {
		.dc_func = _dispatch_sync_f_slow2,
		.dc_ctxt = _dispatch_get_thread_semaphore(),
	};

	// XXX FIXME -- concurrent queues can become serial again
	_dispatch_queue_push(dq, (void *)&dss);

	dispatch_semaphore_wait(dss.dc_ctxt, DISPATCH_TIME_FOREVER);
	_dispatch_put_thread_semaphore(dss.dc_ctxt);
}

#ifdef __BLOCKS__
void
dispatch_sync(dispatch_queue_t dq, void (^work)(void))
{
	struct Block_basic *bb = (void *)work;
	dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke);
}
#endif

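// dq_running is counted in units of two per in-flight item so that the low
// bit can serve as the barrier/serial lock; an odd previous count below means
// a barrier (or trylock) owns the queue and the slow path must be taken.
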
DISPATCH_NOINLINE
void
dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
{
	typeof(dq->dq_running) prev_cnt;
	dispatch_queue_t old_dq;

	if (dq->dq_width == 1) {
		return dispatch_barrier_sync_f(dq, ctxt, func);
	}

	// 1) ensure that this thread hasn't enqueued anything ahead of this call
	// 2) the queue is not suspended
	if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) {
		_dispatch_sync_f_slow(dq);
	} else {
		prev_cnt = dispatch_atomic_add(&dq->dq_running, 2) - 2;

		if (slowpath(prev_cnt & 1)) {
			if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) {
				_dispatch_wakeup(dq);
			}
			_dispatch_sync_f_slow(dq);
		}
	}

	old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	func(ctxt);
	_dispatch_workitem_inc();
	_dispatch_thread_setspecific(dispatch_queue_key, old_dq);

	if (slowpath(dispatch_atomic_sub(&dq->dq_running, 2) == 0)) {
		_dispatch_wakeup(dq);
	}
}

const char *
dispatch_queue_get_label(dispatch_queue_t dq)
{
	return dq->dq_label;
}

#if DISPATCH_COCOA_COMPAT
static void
_dispatch_main_q_port_init(void *ctxt __attribute__((unused)))
{
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &main_q_port);
	DISPATCH_VERIFY_MIG(kr);
	(void)dispatch_assume_zero(kr);
	kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, MACH_MSG_TYPE_MAKE_SEND);
	DISPATCH_VERIFY_MIG(kr);
	(void)dispatch_assume_zero(kr);

	_dispatch_program_is_probably_callback_driven = true;
	_dispatch_safe_fork = false;
}

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
DISPATCH_NOINLINE
static void
_dispatch_queue_set_mainq_drain_state(bool arg)
{
	main_q_is_draining = arg;
}
#endif

/*
 * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not
 * marked noreturn, leading to a build error as dispatch_main() *is* marked
 * noreturn. Mask by marking __builtin_trap() as noreturn locally.
 */
#ifndef HAVE_NORETURN_BUILTIN_TRAP
void __builtin_trap(void) __attribute__((__noreturn__));
#endif

void
dispatch_main(void)
{
#if HAVE_PTHREAD_MAIN_NP
	if (pthread_main_np()) {
#endif
		_dispatch_program_is_probably_callback_driven = true;
		pthread_exit(NULL);
		DISPATCH_CRASH("pthread_exit() returned");
#if HAVE_PTHREAD_MAIN_NP
	}
	DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread");
#endif
}

static void
_dispatch_sigsuspend(void *ctxt __attribute__((unused)))
{
	static const sigset_t mask;

	for (;;) {
		sigsuspend(&mask);
	}
}

static void
_dispatch_queue_cleanup2(void)
{
	dispatch_atomic_dec(&_dispatch_main_q.dq_running);

	if (dispatch_atomic_sub(&_dispatch_main_q.do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) {
		_dispatch_wakeup(&_dispatch_main_q);
	}

	// overload the "probably" variable to mean that dispatch_main() or
	// similar non-POSIX API was called
	// this has to run before the DISPATCH_COCOA_COMPAT below
	if (_dispatch_program_is_probably_callback_driven) {
		dispatch_async_f(_dispatch_get_root_queue(0, 0), NULL, _dispatch_sigsuspend);
		sleep(1); // workaround 6778970
	}

#if DISPATCH_COCOA_COMPAT
	dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);

	mach_port_t mp = main_q_port;
	kern_return_t kr;

	main_q_port = 0;

	if (mp) {
		kr = mach_port_deallocate(mach_task_self(), mp);
		DISPATCH_VERIFY_MIG(kr);
		(void)dispatch_assume_zero(kr);
		kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1);
		DISPATCH_VERIFY_MIG(kr);
		(void)dispatch_assume_zero(kr);
	}
#endif
}

#ifndef DISPATCH_NO_LEGACY
dispatch_queue_t
dispatch_get_concurrent_queue(long pri)
{
	if (pri > 0) {
		pri = DISPATCH_QUEUE_PRIORITY_HIGH;
	} else if (pri < 0) {
		pri = DISPATCH_QUEUE_PRIORITY_LOW;
	}
	return _dispatch_get_root_queue(pri, false);
}
#endif

static void
_dispatch_queue_cleanup(void *ctxt)
{
	if (ctxt == &_dispatch_main_q) {
		return _dispatch_queue_cleanup2();
	}
	// POSIX defines that destructors are only called if 'ctxt' is non-null
	DISPATCH_CRASH("Premature thread exit while a dispatch queue is running");
}

dispatch_queue_t
dispatch_get_global_queue(long priority, unsigned long flags)
{
	if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) {
		return NULL;
	}
	return _dispatch_get_root_queue(priority, flags & DISPATCH_QUEUE_OVERCOMMIT);
}

#define countof(x) (sizeof(x) / sizeof(x[0]))

void
libdispatch_init(void)
{
	dispatch_assert(DISPATCH_QUEUE_PRIORITY_COUNT == 3);
	dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 6);

	dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == -DISPATCH_QUEUE_PRIORITY_HIGH);
	dispatch_assert(countof(_dispatch_root_queues) == DISPATCH_ROOT_QUEUE_COUNT);
	dispatch_assert(countof(_dispatch_thread_mediator) == DISPATCH_ROOT_QUEUE_COUNT);
	dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT);

#if HAVE_PTHREAD_KEY_INIT_NP
	_dispatch_thread_key_init_np(dispatch_queue_key, _dispatch_queue_cleanup);
	_dispatch_thread_key_init_np(dispatch_sema4_key, (void (*)(void *))dispatch_release); // use the extern release
	_dispatch_thread_key_init_np(dispatch_cache_key, _dispatch_cache_cleanup2);
#if DISPATCH_PERF_MON
	_dispatch_thread_key_init_np(dispatch_bcounter_key, NULL);
#endif
#else /* !HAVE_PTHREAD_KEY_INIT_NP */
	_dispatch_thread_key_create(&dispatch_queue_key,
	    _dispatch_queue_cleanup);
	_dispatch_thread_key_create(&dispatch_sema4_key,
	    (void (*)(void *))dispatch_release); // use the extern release
	_dispatch_thread_key_create(&dispatch_cache_key,
	    _dispatch_cache_cleanup2);
#if DISPATCH_PERF_MON
	_dispatch_thread_key_create(&dispatch_bcounter_key, NULL);
#endif
#endif /* HAVE_PTHREAD_KEY_INIT_NP */

	_dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q);

	_dispatch_queue_set_width_init();
}

static inline void
_dispatch_queue_unlock(dispatch_queue_t dq)
{
	if (slowpath(dispatch_atomic_dec(&dq->dq_running))) {
		return;
	}

	_dispatch_wakeup(dq);
}

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
dispatch_queue_t
_dispatch_wakeup(dispatch_object_t dou)
{
	dispatch_queue_t tq;

	if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
		return NULL;
	}
	if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) {
		return NULL;
	}

	if (!_dispatch_trylock(dou._do)) {
#if DISPATCH_COCOA_COMPAT
		if (dou._dq == &_dispatch_main_q) {
			_dispatch_queue_wakeup_main();
		}
#endif
		return NULL;
	}
	_dispatch_retain(dou._do);
	tq = dou._do->do_targetq;
	_dispatch_queue_push(tq, dou._do);
	return tq; // libdispatch doesn't need this, but the Instrument DTrace probe does
}

#if DISPATCH_COCOA_COMPAT
DISPATCH_NOINLINE
static void
_dispatch_queue_wakeup_main(void)
{
	kern_return_t kr;

	dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);

	kr = _dispatch_send_wakeup_main_thread(main_q_port, 0);

	switch (kr) {
	case MACH_SEND_TIMEOUT:
	case MACH_SEND_TIMED_OUT:
	case MACH_SEND_INVALID_DEST:
		break;
	default:
		(void)dispatch_assume_zero(kr);
		break;
	}

	_dispatch_safe_fork = false;
}
#endif

#if HAVE_PTHREAD_WORKQUEUES
static inline int
_dispatch_rootq2wq_pri(long idx)
{
#ifdef WORKQ_DEFAULT_PRIOQUEUE
	switch (idx) {
	case 0:
	case 1:
		return WORKQ_LOW_PRIOQUEUE;
	default:
	case 2:
	case 3:
		return WORKQ_DEFAULT_PRIOQUEUE;
	case 4:
	case 5:
		return WORKQ_HIGH_PRIOQUEUE;
	}
#else
	return (int)idx;
#endif
}
#endif

static void
_dispatch_root_queues_init(void *context __attribute__((unused)))
{
#if HAVE_PTHREAD_WORKQUEUES
	bool disable_wq = getenv("LIBDISPATCH_DISABLE_KWQ");
	pthread_workqueue_attr_t pwq_attr;
	int r;
#endif
#if USE_MACH_SEM
	kern_return_t kr;
#endif
#if USE_POSIX_SEM
	int ret;
#endif
	int i;

#if HAVE_PTHREAD_WORKQUEUES
	r = pthread_workqueue_attr_init_np(&pwq_attr);
	(void)dispatch_assume_zero(r);
#endif

	for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
		// some software hangs if the non-overcommitting queues do not overcommit when threads block
#if 0
		if (!(i & 1)) {
			dispatch_root_queue_contexts[i].dgq_thread_pool_size = _dispatch_hw_config.cc_max_active;
		}
#endif
#if HAVE_PTHREAD_WORKQUEUES
		r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, _dispatch_rootq2wq_pri(i));
		(void)dispatch_assume_zero(r);
		r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, i & 1);
		(void)dispatch_assume_zero(r);

		if (disable_wq || (r = pthread_workqueue_create_np(&_dispatch_root_queue_contexts[i].dgq_kworkqueue, &pwq_attr))) {
			if (!disable_wq) {
				(void)dispatch_assume_zero(r);
			}
#endif /* HAVE_PTHREAD_WORKQUEUES */
#if USE_MACH_SEM
			// override the default FIFO behavior for the pool semaphores
			kr = semaphore_create(mach_task_self(), &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0);
			DISPATCH_VERIFY_MIG(kr);
			(void)dispatch_assume_zero(kr);
			dispatch_assume(_dispatch_thread_mediator[i].dsema_port);
#endif
#if USE_POSIX_SEM
			/* XXXRW: POSIX semaphores don't support LIFO? */
			ret = sem_init(&_dispatch_thread_mediator[i].dsema_sem, 0, 0);
			(void)dispatch_assume_zero(ret);
#endif
#if HAVE_PTHREAD_WORKQUEUES
		} else {
			dispatch_assume(_dispatch_root_queue_contexts[i].dgq_kworkqueue);
		}
#endif
	}

#if HAVE_PTHREAD_WORKQUEUES
	r = pthread_workqueue_attr_destroy_np(&pwq_attr);
	(void)dispatch_assume_zero(r);
#endif
}

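// Wakeup policy for the root queues, in order: hand work to the kernel
// workqueue when one exists, else poke a parked pool thread via the LIFO
// mediator semaphore, and only as a last resort claim a pool slot and spawn
// a new pthread.
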
static bool
_dispatch_queue_wakeup_global(dispatch_queue_t dq)
{
	static dispatch_once_t pred;
	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
#if HAVE_PTHREAD_WORKQUEUES
	pthread_workitem_handle_t wh;
	unsigned int gen_cnt;
#endif
	pthread_t pthr;
	int r;
	uint32_t t_count;

	if (!dq->dq_items_tail) {
		return false;
	}

	_dispatch_safe_fork = false;

	dispatch_debug_queue(dq, __PRETTY_FUNCTION__);

	dispatch_once_f(&pred, NULL, _dispatch_root_queues_init);

#if HAVE_PTHREAD_WORKQUEUES
	if (qc->dgq_kworkqueue) {
		if (dispatch_atomic_cmpxchg(&qc->dgq_pending, 0, 1)) {
			_dispatch_debug("requesting new worker thread");

			r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, _dispatch_worker_thread2, dq, &wh, &gen_cnt);
			(void)dispatch_assume_zero(r);
		} else {
			_dispatch_debug("work thread request still pending on global queue: %p", dq);
		}
		goto out;
	}
#endif

	if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
		goto out;
	}

	do {
		t_count = qc->dgq_thread_pool_size;
		if (!t_count) {
			_dispatch_debug("The thread pool is full: %p", dq);
			goto out;
		}
	} while (!dispatch_atomic_cmpxchg(&qc->dgq_thread_pool_size, t_count, t_count - 1));

	while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) {
		if (r != EAGAIN) {
			(void)dispatch_assume_zero(r);
		}
		sleep(1);
	}
	r = pthread_detach(pthr);
	(void)dispatch_assume_zero(r);

out:
	return false;
}

void
_dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq)
{
#if DISPATCH_PERF_MON
	uint64_t start = _dispatch_absolute_time();
#endif
	_dispatch_queue_drain(dq);
#if DISPATCH_PERF_MON
	_dispatch_queue_merge_stats(start);
#endif
	_dispatch_force_cache_cleanup();
}

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
DISPATCH_NOINLINE
static void
_dispatch_queue_invoke(dispatch_queue_t dq)
{
	dispatch_queue_t tq = dq->do_targetq;

	if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && fastpath(_dispatch_queue_trylock(dq))) {
		_dispatch_queue_drain(dq);
		if (tq == dq->do_targetq) {
			tq = NULL;
		} else {
			tq = dq->do_targetq;
		}
		// We do not need to check the result.
		// When the suspend-count lock is dropped, then the check will happen.
		dispatch_atomic_dec(&dq->dq_running);
		if (tq) {
			return _dispatch_queue_push(tq, dq);
		}
	}

	dq->do_next = DISPATCH_OBJECT_LISTLESS;
	if (dispatch_atomic_sub(&dq->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) {
		if (dq->dq_running == 0) {
			_dispatch_wakeup(dq); // verify that the queue is idle
		}
	}
	_dispatch_release(dq); // added when the queue is put on the list
}

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
static void
_dispatch_set_target_queue2(void *ctxt)
{
	dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current();

	prev_dq = dq->do_targetq;
	dq->do_targetq = ctxt;
	_dispatch_release(prev_dq);
}

void
dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq)
{
	if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
		return;
	}
	// NOTE: we test for NULL target queues internally to detect root queues
	// therefore, if the retain crashes due to a bad input, that is OK
	_dispatch_retain(dq);
	dispatch_barrier_async_f(dou._dq, dq, _dispatch_set_target_queue2);
}

static void
_dispatch_async_f_redirect2(void *_ctxt)
{
	struct dispatch_continuation_s *dc = _ctxt;
	struct dispatch_continuation_s *other_dc = dc->dc_data[1];
	dispatch_queue_t old_dq, dq = dc->dc_data[0];

	old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	_dispatch_continuation_pop(other_dc);
	_dispatch_thread_setspecific(dispatch_queue_key, old_dq);

	if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) {
		_dispatch_wakeup(dq);
	}
	_dispatch_release(dq);
}

static void
_dispatch_async_f_redirect(dispatch_queue_t dq, struct dispatch_object_s *other_dc)
{
	dispatch_continuation_t dc = (void *)other_dc;
	dispatch_queue_t root_dq = dq;

	if (dc->dc_func == _dispatch_sync_f_slow2) {
		return dc->dc_func(dc->dc_ctxt);
	}

	dispatch_atomic_add(&dq->dq_running, 2);
	_dispatch_retain(dq);

	dc = _dispatch_continuation_alloc_cacheonly() ?: _dispatch_continuation_alloc_from_heap();

	dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
	dc->dc_func = _dispatch_async_f_redirect2;
	dc->dc_ctxt = dc;
	dc->dc_data[0] = dq;
	dc->dc_data[1] = other_dc;

	do {
		root_dq = root_dq->do_targetq;
	} while (root_dq->do_targetq);

	_dispatch_queue_push(root_dq, dc);
}

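// A serial drain executes items inline; for a concurrent (wide) queue the
// drain below instead forwards each non-barrier item to the root queue via
// the redirect wrapper above, so items run in parallel on pool threads while
// dq_running tracks them against the queue's width.
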
static void
_dispatch_queue_drain(dispatch_queue_t dq)
{
	dispatch_queue_t orig_tq, old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
	struct dispatch_object_s *dc = NULL, *next_dc = NULL;

	orig_tq = dq->do_targetq;

	_dispatch_thread_setspecific(dispatch_queue_key, dq);

	while (dq->dq_items_tail) {
		while (!fastpath(dq->dq_items_head)) {
			_dispatch_hardware_pause();
		}

		dc = dq->dq_items_head;
		dq->dq_items_head = NULL;

		do {
			// Enqueue is TIGHTLY controlled, we won't wait long.
			do {
				next_dc = fastpath(dc->do_next);
			} while (!next_dc && !dispatch_atomic_cmpxchg(&dq->dq_items_tail, dc, NULL));
			if (DISPATCH_OBJECT_SUSPENDED(dq)) {
				goto out;
			}
			if (dq->dq_running > dq->dq_width) {
				goto out;
			}
			if (orig_tq != dq->do_targetq) {
				goto out;
			}
			if (fastpath(dq->dq_width == 1)) {
				_dispatch_continuation_pop(dc);
				_dispatch_workitem_inc();
			} else if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) {
				if (dq->dq_running > 1) {
					goto out;
				}
				_dispatch_continuation_pop(dc);
				_dispatch_workitem_inc();
			} else {
				_dispatch_async_f_redirect(dq, dc);
			}
		} while ((dc = next_dc));
	}

out:
	// if this is not a complete drain, we must undo some things
	if (slowpath(dc)) {
		// 'dc' must NOT be "popped"
		// 'dc' might be the last item
		if (next_dc || dispatch_atomic_cmpxchg(&dq->dq_items_tail, NULL, dc)) {
			dq->dq_items_head = dc;
		} else {
			while (!(next_dc = dq->dq_items_head)) {
				_dispatch_hardware_pause();
			}
			dq->dq_items_head = dc;
			dc->do_next = next_dc;
		}
	}

	_dispatch_thread_setspecific(dispatch_queue_key, old_dq);
}

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
static void *
_dispatch_worker_thread(void *context)
{
	dispatch_queue_t dq = context;
	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
	sigset_t mask;
	int r;

	// workaround tweaks the kernel workqueue does for us
	r = sigfillset(&mask);
	(void)dispatch_assume_zero(r);
	r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL);
	(void)dispatch_assume_zero(r);

	do {
		_dispatch_worker_thread2(context);
		// we use 65 seconds in case there are any timers that run once a minute
	} while (dispatch_semaphore_wait(qc->dgq_thread_mediator, dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0);

	dispatch_atomic_inc(&qc->dgq_thread_pool_size);
	if (dq->dq_items_tail) {
		_dispatch_queue_wakeup_global(dq);
	}

	return NULL;
}

// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
static void
_dispatch_worker_thread2(void *context)
{
	struct dispatch_object_s *item;
	dispatch_queue_t dq = context;
	struct dispatch_root_queue_context_s *qc = dq->do_ctxt;

	if (_dispatch_thread_getspecific(dispatch_queue_key)) {
		DISPATCH_CRASH("Premature thread recycling");
	}

	_dispatch_thread_setspecific(dispatch_queue_key, dq);
	qc->dgq_pending = 0;

#if DISPATCH_COCOA_COMPAT
	// ensure that high-level memory management techniques do not leak/crash
	dispatch_begin_thread_4GC();
	void *pool = _dispatch_begin_NSAutoReleasePool();
#endif

#if DISPATCH_PERF_MON
	uint64_t start = _dispatch_absolute_time();
#endif
	while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) {
		_dispatch_continuation_pop(item);
	}
#if DISPATCH_PERF_MON
	_dispatch_queue_merge_stats(start);
#endif

#if DISPATCH_COCOA_COMPAT
	_dispatch_end_NSAutoReleasePool(pool);
	dispatch_end_thread_4GC();
#endif

	_dispatch_thread_setspecific(dispatch_queue_key, NULL);

	_dispatch_force_cache_cleanup();
}

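// The histogram below is log2-bucketed: the bucket is the bit index of the
// average time per work item (flsll), with bucket 0 collecting passes that
// retired no items -- hence the 65-entry array.
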
#if DISPATCH_PERF_MON
static void
_dispatch_queue_merge_stats(uint64_t start)
{
	uint64_t avg, delta = _dispatch_absolute_time() - start;
	unsigned long count, bucket;

	count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key);
	_dispatch_thread_setspecific(dispatch_bcounter_key, NULL);

	if (count) {
		avg = delta / count;
		bucket = flsll(avg);
	} else {
		bucket = 0;
	}

	// 64-bit counters on 32-bit require a lock or a queue
	OSSpinLockLock(&_dispatch_stats_lock);

	_dispatch_stats[bucket].time_total += delta;
	_dispatch_stats[bucket].count_total += count;
	_dispatch_stats[bucket].thread_total++;

	OSSpinLockUnlock(&_dispatch_stats_lock);
}
#endif

static size_t
dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz)
{
	return snprintf(buf, bufsiz, "parent = %p ", dq->do_targetq);
}

size_t
dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz)
{
	size_t offset = 0;
	offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dq->dq_label, dq);
	offset += dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset);
	offset += dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset);
	offset += snprintf(&buf[offset], bufsiz - offset, "}");
	return offset;
}

void
dispatch_debug_queue(dispatch_queue_t dq, const char* str) {
	if (fastpath(dq)) {
		dispatch_debug(dq, "%s", str);
	} else {
		_dispatch_log("queue[NULL]: %s", str);
	}
}

#if DISPATCH_COCOA_COMPAT
void
_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg __attribute__((unused)))
{
	if (main_q_is_draining) {
		return;
	}
	_dispatch_queue_set_mainq_drain_state(true);
	_dispatch_queue_serial_drain_till_empty(&_dispatch_main_q);
	_dispatch_queue_set_mainq_drain_state(false);
}

mach_port_t
_dispatch_get_main_queue_port_4CF(void)
{
	dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);
	return main_q_port;
}
#endif

#ifndef DISPATCH_NO_LEGACY
static void
dispatch_queue_attr_dispose(dispatch_queue_attr_t attr)
{
	dispatch_queue_attr_set_finalizer_f(attr, NULL, NULL);
	_dispatch_dispose(attr);
}

static const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable = {
	.do_type = DISPATCH_QUEUE_ATTR_TYPE,
	.do_kind = "queue-attr",
	.do_dispose = dispatch_queue_attr_dispose,
};

dispatch_queue_attr_t
dispatch_queue_attr_create(void)
{
	dispatch_queue_attr_t a = calloc(1, sizeof(struct dispatch_queue_attr_s));

	if (a) {
		a->do_vtable = &dispatch_queue_attr_vtable;
		a->do_next = DISPATCH_OBJECT_LISTLESS;
		a->do_ref_cnt = 1;
		a->do_xref_cnt = 1;
		a->do_targetq = _dispatch_get_root_queue(0, 0);
		a->qa_flags = DISPATCH_QUEUE_OVERCOMMIT;
	}

	return a;
}

void
dispatch_queue_attr_set_flags(dispatch_queue_attr_t attr, uint64_t flags)
{
	dispatch_assert_zero(flags & ~DISPATCH_QUEUE_FLAGS_MASK);
	attr->qa_flags = (unsigned long)flags & DISPATCH_QUEUE_FLAGS_MASK;
}

void
dispatch_queue_attr_set_priority(dispatch_queue_attr_t attr, int priority)
{
	dispatch_debug_assert(attr, "NULL pointer");
	dispatch_debug_assert(priority <= DISPATCH_QUEUE_PRIORITY_HIGH && priority >= DISPATCH_QUEUE_PRIORITY_LOW, "Invalid priority");

	if (priority > 0) {
		priority = DISPATCH_QUEUE_PRIORITY_HIGH;
	} else if (priority < 0) {
		priority = DISPATCH_QUEUE_PRIORITY_LOW;
	}

	attr->qa_priority = priority;
}

void
dispatch_queue_attr_set_finalizer_f(dispatch_queue_attr_t attr,
	void *context, dispatch_queue_finalizer_function_t finalizer)
{
#ifdef __BLOCKS__
	if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) {
		Block_release(attr->finalizer_ctxt);
	}
#endif
	attr->finalizer_ctxt = context;
	attr->finalizer_func = finalizer;
}

#ifdef __BLOCKS__
long
dispatch_queue_attr_set_finalizer(dispatch_queue_attr_t attr,
	dispatch_queue_finalizer_t finalizer)
{
	void *ctxt;
	dispatch_queue_finalizer_function_t func;

	if (finalizer) {
		if (!(ctxt = Block_copy(finalizer))) {
			return 1;
		}
		func = (void *)_dispatch_call_block_and_release2;
	} else {
		ctxt = NULL;
		func = NULL;
	}

	dispatch_queue_attr_set_finalizer_f(attr, ctxt, func);

	return 0;
}
#endif
#endif /* DISPATCH_NO_LEGACY */

static void
_dispatch_ccache_init(void *context __attribute__((unused)))
{
	_dispatch_ccache_zone = malloc_create_zone(0, 0);
	dispatch_assert(_dispatch_ccache_zone);
	malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
}

dispatch_continuation_t
_dispatch_continuation_alloc_from_heap(void)
{
	static dispatch_once_t pred;
	dispatch_continuation_t dc;

	dispatch_once_f(&pred, NULL, _dispatch_ccache_init);

	while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) {
		sleep(1);
	}

	return dc;
}

void
_dispatch_force_cache_cleanup(void)
{
	dispatch_continuation_t dc = _dispatch_thread_getspecific(dispatch_cache_key);

	if (dc) {
		_dispatch_thread_setspecific(dispatch_cache_key, NULL);
		_dispatch_cache_cleanup2(dc);
	}
}

DISPATCH_NOINLINE
static void
_dispatch_cache_cleanup2(void *value)
{
	dispatch_continuation_t dc, next_dc = value;

	while ((dc = next_dc)) {
		next_dc = dc->do_next;
		malloc_zone_free(_dispatch_ccache_zone, dc);
	}
}

static char _dispatch_build[16];

/*
 * XXXRW: What to do here for !Mac OS X?
 */
static void
_dispatch_bug_init(void *context __attribute__((unused)))
{
#ifdef __APPLE__
	int mib[] = { CTL_KERN, KERN_OSVERSION };
	size_t bufsz = sizeof(_dispatch_build);

	sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0);
#else
	memset(_dispatch_build, 0, sizeof(_dispatch_build));
#endif
}

void
_dispatch_bug(size_t line, long val)
{
	static dispatch_once_t pred;
	static void *last_seen;
	void *ra = __builtin_return_address(0);

	dispatch_once_f(&pred, NULL, _dispatch_bug_init);
	if (last_seen != ra) {
		last_seen = ra;
		_dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, (unsigned long)line, val);
	}
}

void
_dispatch_abort(size_t line, long val)
{
	_dispatch_bug(line, val);
	abort();
}

void
_dispatch_log(const char *msg, ...)
{
	va_list ap;

	va_start(ap, msg);
	_dispatch_logv(msg, ap);
	va_end(ap);
}

void
_dispatch_logv(const char *msg, va_list ap)
{
#if DISPATCH_DEBUG
	static FILE *logfile, *tmp;
	char newbuf[strlen(msg) + 2];
	char path[PATH_MAX];

	sprintf(newbuf, "%s\n", msg);

	if (!logfile) {
		snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", getpid());
		tmp = fopen(path, "a");
		if (!dispatch_atomic_cmpxchg(&logfile, NULL, tmp)) {
			fclose(tmp);
		} else {
			struct timeval tv;
			gettimeofday(&tv, NULL);
			fprintf(logfile, "=== log file opened for %s[%u] at %ld.%06u ===\n",
			    getprogname() ?: "", getpid(), tv.tv_sec, tv.tv_usec);
		}
	}
	vfprintf(logfile, newbuf, ap);
	fflush(logfile);
#else
	vsyslog(LOG_NOTICE, msg, ap);
#endif
}

static int
_dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset)
{
	int r;

	/* Workaround: 6269619 Not all signals can be delivered on any thread */

	r = sigdelset(set, SIGILL);
	(void)dispatch_assume_zero(r);
	r = sigdelset(set, SIGTRAP);
	(void)dispatch_assume_zero(r);
#if HAVE_DECL_SIGEMT
	r = sigdelset(set, SIGEMT);
	(void)dispatch_assume_zero(r);
#endif
	r = sigdelset(set, SIGFPE);
	(void)dispatch_assume_zero(r);
	r = sigdelset(set, SIGBUS);
	(void)dispatch_assume_zero(r);
	r = sigdelset(set, SIGSEGV);
	(void)dispatch_assume_zero(r);
	r = sigdelset(set, SIGSYS);
	(void)dispatch_assume_zero(r);
	r = sigdelset(set, SIGPIPE);
	(void)dispatch_assume_zero(r);

	return pthread_sigmask(how, set, oset);
}

bool _dispatch_safe_fork = true;

void
dispatch_atfork_prepare(void)
{
}

void
dispatch_atfork_parent(void)
{
}

void
dispatch_atfork_child(void)
{
	void *crash = (void *)0x100;
	size_t i;

	if (_dispatch_safe_fork) {
		return;
	}

	_dispatch_main_q.dq_items_head = crash;
	_dispatch_main_q.dq_items_tail = crash;

	_dispatch_mgr_q.dq_items_head = crash;
	_dispatch_mgr_q.dq_items_tail = crash;

	for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
		_dispatch_root_queues[i].dq_items_head = crash;
		_dispatch_root_queues[i].dq_items_tail = crash;
	}
}

void
dispatch_init_pthread(pthread_t pthr __attribute__((unused)))
{
}

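// The table below exports the layout of dispatch_queue_s to out-of-process
// inspection tools (e.g. debuggers and sampling profilers), which read queue
// state by offset instead of linking against libdispatch internals.
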
const struct dispatch_queue_offsets_s dispatch_queue_offsets = {
	.dqo_label = offsetof(struct dispatch_queue_s, dq_label),
	.dqo_label_size = sizeof(_dispatch_main_q.dq_label),
	.dqo_flags = 0,
	.dqo_flags_size = 0,
	.dqo_width = offsetof(struct dispatch_queue_s, dq_width),
	.dqo_width_size = sizeof(_dispatch_main_q.dq_width),
	.dqo_serialnum = offsetof(struct dispatch_queue_s, dq_serialnum),
	.dqo_serialnum_size = sizeof(_dispatch_main_q.dq_serialnum),
	.dqo_running = offsetof(struct dispatch_queue_s, dq_running),
	.dqo_running_size = sizeof(_dispatch_main_q.dq_running),
};

#ifdef __BLOCKS__
void
dispatch_after(dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t work)
{
	// test before the copy of the block
	if (when == DISPATCH_TIME_FOREVER) {
#if DISPATCH_DEBUG
		DISPATCH_CLIENT_CRASH("dispatch_after() called with 'when' == infinity");
#endif
		return;
	}
	dispatch_after_f(when, queue, _dispatch_Block_copy(work), _dispatch_call_block_and_release);
}
#endif

struct _dispatch_after_time_s {
	void *datc_ctxt;
	void (*datc_func)(void *);
	dispatch_source_t ds;
};

static void
_dispatch_after_timer_cancel(void *ctxt)
{
	struct _dispatch_after_time_s *datc = ctxt;
	dispatch_source_t ds = datc->ds;

	free(datc);
	dispatch_release(ds); // MUST NOT be _dispatch_release()
}

static void
_dispatch_after_timer_callback(void *ctxt)
{
	struct _dispatch_after_time_s *datc = ctxt;

	dispatch_assert(datc->datc_func);
	datc->datc_func(datc->datc_ctxt);

	dispatch_source_cancel(datc->ds);
}

void
dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, void (*func)(void *))
{
	uint64_t delta;
	struct _dispatch_after_time_s *datc = NULL;
	dispatch_source_t ds = NULL;

	if (when == DISPATCH_TIME_FOREVER) {
#if DISPATCH_DEBUG
		DISPATCH_CLIENT_CRASH("dispatch_after_f() called with 'when' == infinity");
#endif
		return;
	}

	delta = _dispatch_timeout(when);
	if (delta == 0) {
		return dispatch_async_f(queue, ctxt, func);
	}

	// this function should be optimized to not use a dispatch source
	ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue);
	dispatch_assert(ds);

	datc = malloc(sizeof(struct _dispatch_after_time_s));
	dispatch_assert(datc);
	datc->datc_ctxt = ctxt;
	datc->datc_func = func;
	datc->ds = ds;

	dispatch_set_context(ds, datc);
	dispatch_source_set_event_handler_f(ds, _dispatch_after_timer_callback);
	dispatch_source_set_cancel_handler_f(ds, _dispatch_after_timer_cancel);
	dispatch_source_set_timer(ds, when, 0, 0);
	dispatch_resume(ds);
}
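
/*
 * Usage sketch (my_ctxt and my_callback are illustrative names, not part of
 * this file): fire a plain-function callback roughly three seconds from now
 * on the main queue.
 *
 *	dispatch_after_f(dispatch_time(DISPATCH_TIME_NOW, 3ull * NSEC_PER_SEC),
 *	    dispatch_get_main_queue(), my_ctxt, my_callback);
 */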