[platform/upstream/gcd.git] / dispatch-1.0 / src / queue.c
1 /*
2  * Copyright (c) 2008-2009 Apple Inc. All rights reserved.
3  *
4  * @APPLE_APACHE_LICENSE_HEADER_START@
5  * 
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  * 
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  * 
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  * 
18  * @APPLE_APACHE_LICENSE_HEADER_END@
19  */
20
21 #include "internal.h"
22 #if HAVE_MACH
23 #include "protocol.h"
24 #endif
25
26 void
27 dummy_function(void)
28 {
29 }
30
31 long
32 dummy_function_r0(void)
33 {
34         return 0;
35 }
36
37
38 static struct dispatch_semaphore_s _dispatch_thread_mediator[] = {
39         {
40                 .do_vtable = &_dispatch_semaphore_vtable,
41                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
42                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
43         },
44         {
45                 .do_vtable = &_dispatch_semaphore_vtable,
46                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
47                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
48         },
49         {
50                 .do_vtable = &_dispatch_semaphore_vtable,
51                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
52                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
53         },
54         {
55                 .do_vtable = &_dispatch_semaphore_vtable,
56                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
57                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
58         },
59         {
60                 .do_vtable = &_dispatch_semaphore_vtable,
61                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
62                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
63         },
64         {
65                 .do_vtable = &_dispatch_semaphore_vtable,
66                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
67                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
68         },
69 };
70
71 static inline dispatch_queue_t
72 _dispatch_get_root_queue(long priority, bool overcommit)
73 {
74         if (overcommit) switch (priority) {
75         case DISPATCH_QUEUE_PRIORITY_LOW:
76                 return &_dispatch_root_queues[1];
77         case DISPATCH_QUEUE_PRIORITY_DEFAULT:
78                 return &_dispatch_root_queues[3];
79         case DISPATCH_QUEUE_PRIORITY_HIGH:
80                 return &_dispatch_root_queues[5];
81         }
82         switch (priority) {
83         case DISPATCH_QUEUE_PRIORITY_LOW:
84                 return &_dispatch_root_queues[0];
85         case DISPATCH_QUEUE_PRIORITY_DEFAULT:
86                 return &_dispatch_root_queues[2];
87         case DISPATCH_QUEUE_PRIORITY_HIGH:
88                 return &_dispatch_root_queues[4];
89         default:
90                 return NULL;
91         }
92 }
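// The lookup above relies on the layout of _dispatch_root_queues (defined
// below): even indices hold the regular root queues and odd indices their
// overcommit variants -- [0]/[1] low, [2]/[3] default, [4]/[5] high priority.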
93
94 #ifdef __BLOCKS__
95 dispatch_block_t
96 _dispatch_Block_copy(dispatch_block_t db)
97 {
98         dispatch_block_t rval;
99
100         while (!(rval = Block_copy(db))) {
101                 sleep(1);
102         }
103
104         return rval;
105 }
106 #define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x))
107
108 void
109 _dispatch_call_block_and_release(void *block)
110 {
111         void (^b)(void) = block;
112         b();
113         Block_release(b);
114 }
115
116 void
117 _dispatch_call_block_and_release2(void *block, void *ctxt)
118 {
119         void (^b)(void*) = block;
120         b(ctxt);
121         Block_release(b);
122 }
123
124 #endif /* __BLOCKS__ */
125
126 struct dispatch_queue_attr_vtable_s {
127         DISPATCH_VTABLE_HEADER(dispatch_queue_attr_s);
128 };
129
130 struct dispatch_queue_attr_s {
131         DISPATCH_STRUCT_HEADER(dispatch_queue_attr_s, dispatch_queue_attr_vtable_s);
132
133 #ifndef DISPATCH_NO_LEGACY
134         // Public:
135         int qa_priority;
136         void* finalizer_ctxt;
137         dispatch_queue_finalizer_function_t finalizer_func;
138
139         // Private:
140         unsigned long qa_flags;
141 #endif
142 };
143
144 static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset);
145
146 #define _dispatch_queue_trylock(dq) dispatch_atomic_cmpxchg(&(dq)->dq_running, 0, 1)
147 static inline void _dispatch_queue_unlock(dispatch_queue_t dq);
148 static void _dispatch_queue_invoke(dispatch_queue_t dq);
149 static bool _dispatch_queue_wakeup_global(dispatch_queue_t dq);
150 static struct dispatch_object_s *_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq);
151
152 static bool _dispatch_program_is_probably_callback_driven;
153
154 #if DISPATCH_COCOA_COMPAT
155 void (*dispatch_begin_thread_4GC)(void) = dummy_function;
156 void (*dispatch_end_thread_4GC)(void) = dummy_function;
157 void *(*_dispatch_begin_NSAutoReleasePool)(void) = (void *)dummy_function;
158 void (*_dispatch_end_NSAutoReleasePool)(void *) = (void *)dummy_function;
159 static void _dispatch_queue_wakeup_main(void);
160
161 static dispatch_once_t _dispatch_main_q_port_pred;
162 static bool main_q_is_draining;
163 static mach_port_t main_q_port;
164 #endif
165
166 static void _dispatch_cache_cleanup2(void *value);
167
168 static const struct dispatch_queue_vtable_s _dispatch_queue_vtable = {
169         .do_type = DISPATCH_QUEUE_TYPE,
170         .do_kind = "queue",
171         .do_dispose = _dispatch_queue_dispose,
172         .do_invoke = (void *)dummy_function_r0,
173         .do_probe = (void *)dummy_function_r0,
174         .do_debug = dispatch_queue_debug,
175 };
176
177 static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = {
178         .do_type = DISPATCH_QUEUE_GLOBAL_TYPE,
179         .do_kind = "global-queue",
180         .do_debug = dispatch_queue_debug,
181         .do_probe = _dispatch_queue_wakeup_global,
182 };
183
184 #define MAX_THREAD_COUNT 255
185
186 struct dispatch_root_queue_context_s {
187 #if HAVE_PTHREAD_WORKQUEUES
188         pthread_workqueue_t dgq_kworkqueue;
189 #endif
190         uint32_t dgq_pending;
191         uint32_t dgq_thread_pool_size;
192         dispatch_semaphore_t dgq_thread_mediator;
193 };
194
195 static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = {
196         {
197                 .dgq_thread_mediator = &_dispatch_thread_mediator[0],
198                 .dgq_thread_pool_size = MAX_THREAD_COUNT,
199         },
200         {
201                 .dgq_thread_mediator = &_dispatch_thread_mediator[1],
202                 .dgq_thread_pool_size = MAX_THREAD_COUNT,
203         },
204         {
205                 .dgq_thread_mediator = &_dispatch_thread_mediator[2],
206                 .dgq_thread_pool_size = MAX_THREAD_COUNT,
207         },
208         {
209                 .dgq_thread_mediator = &_dispatch_thread_mediator[3],
210                 .dgq_thread_pool_size = MAX_THREAD_COUNT,
211         },
212         {
213                 .dgq_thread_mediator = &_dispatch_thread_mediator[4],
214                 .dgq_thread_pool_size = MAX_THREAD_COUNT,
215         },
216         {
217                 .dgq_thread_mediator = &_dispatch_thread_mediator[5],
218                 .dgq_thread_pool_size = MAX_THREAD_COUNT,
219         },
220 };
221
222 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
223 // dq_running is set to 2 so that barrier operations go through the slow path
224 struct dispatch_queue_s _dispatch_root_queues[] = {
225         {
226                 .do_vtable = &_dispatch_queue_root_vtable,
227                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
228                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
229                 .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
230                 .do_ctxt = &_dispatch_root_queue_contexts[0],
231
232                 .dq_label = "com.apple.root.low-priority",
233                 .dq_running = 2,
234                 .dq_width = UINT32_MAX,
235                 .dq_serialnum = 4,
236         },
237         {
238                 .do_vtable = &_dispatch_queue_root_vtable,
239                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
240                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
241                 .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
242                 .do_ctxt = &_dispatch_root_queue_contexts[1],
243
244                 .dq_label = "com.apple.root.low-overcommit-priority",
245                 .dq_running = 2,
246                 .dq_width = UINT32_MAX,
247                 .dq_serialnum = 5,
248         },
249         {
250                 .do_vtable = &_dispatch_queue_root_vtable,
251                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
252                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
253                 .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
254                 .do_ctxt = &_dispatch_root_queue_contexts[2],
255
256                 .dq_label = "com.apple.root.default-priority",
257                 .dq_running = 2,
258                 .dq_width = UINT32_MAX,
259                 .dq_serialnum = 6,
260         },
261         {
262                 .do_vtable = &_dispatch_queue_root_vtable,
263                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
264                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
265                 .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
266                 .do_ctxt = &_dispatch_root_queue_contexts[3],
267
268                 .dq_label = "com.apple.root.default-overcommit-priority",
269                 .dq_running = 2,
270                 .dq_width = UINT32_MAX,
271                 .dq_serialnum = 7,
272         },
273         {
274                 .do_vtable = &_dispatch_queue_root_vtable,
275                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
276                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
277                 .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
278                 .do_ctxt = &_dispatch_root_queue_contexts[4],
279
280                 .dq_label = "com.apple.root.high-priority",
281                 .dq_running = 2,
282                 .dq_width = UINT32_MAX,
283                 .dq_serialnum = 8,
284         },
285         {
286                 .do_vtable = &_dispatch_queue_root_vtable,
287                 .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
288                 .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
289                 .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
290                 .do_ctxt = &_dispatch_root_queue_contexts[5],
291
292                 .dq_label = "com.apple.root.high-overcommit-priority",
293                 .dq_running = 2,
294                 .dq_width = UINT32_MAX,
295                 .dq_serialnum = 9,
296         },
297 };
298
299 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
300 struct dispatch_queue_s _dispatch_main_q = {
301         .do_vtable = &_dispatch_queue_vtable,
302         .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
303         .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT,
304         .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK,
305         .do_targetq = &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_COUNT / 2],
306
307         .dq_label = "com.apple.main-thread",
308         .dq_running = 1,
309         .dq_width = 1,
310         .dq_serialnum = 1,
311 };
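// Note: DISPATCH_ROOT_QUEUE_COUNT is asserted to be 6 in libdispatch_init(),
// so the main queue targets _dispatch_root_queues[3], the default-priority
// overcommit root queue. It starts out marked running (dq_running = 1) with a
// width of 1, i.e. strictly serial.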
312
313 #if DISPATCH_PERF_MON
314 static OSSpinLock _dispatch_stats_lock;
315 static size_t _dispatch_bad_ratio;
316 static struct {
317         uint64_t time_total;
318         uint64_t count_total;
319         uint64_t thread_total;
320 } _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set
321 static void _dispatch_queue_merge_stats(uint64_t start);
322 #endif
323
324 static void *_dispatch_worker_thread(void *context);
325 static void _dispatch_worker_thread2(void *context);
326
327 malloc_zone_t *_dispatch_ccache_zone;
328
329 static inline void
330 _dispatch_continuation_free(dispatch_continuation_t dc)
331 {
332         dispatch_continuation_t prev_dc = _dispatch_thread_getspecific(dispatch_cache_key);
333         dc->do_next = prev_dc;
334         _dispatch_thread_setspecific(dispatch_cache_key, dc);
335 }
336
337 static inline void
338 _dispatch_continuation_pop(dispatch_object_t dou)
339 {
340         dispatch_continuation_t dc = dou._dc;
341         dispatch_group_t dg;
342
343         if (DISPATCH_OBJ_IS_VTABLE(dou._do)) {
344                 return _dispatch_queue_invoke(dou._dq);
345         }
346
347         // Add the item back to the cache before calling the function. This
348         // allows the 'hot' continuation to be used for a quick callback.
349         //
350         // The ccache version is per-thread.
351         // Therefore, the object has not been reused yet.
352         // This generates better assembly.
353         if ((long)dou._do->do_vtable & DISPATCH_OBJ_ASYNC_BIT) {
354                 _dispatch_continuation_free(dc);
355         }
356         if ((long)dou._do->do_vtable & DISPATCH_OBJ_GROUP_BIT) {
357                 dg = dc->dc_group;
358         } else {
359                 dg = NULL;
360         }
361         dc->dc_func(dc->dc_ctxt);
362         if (dg) {
363                 dispatch_group_leave(dg);
364                 _dispatch_release(dg);
365         }
366 }
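// DISPATCH_OBJ_IS_VTABLE() distinguishes real dispatch objects, whose
// do_vtable is a genuine vtable pointer, from plain continuations, whose
// do_vtable field only carries the DISPATCH_OBJ_*_BIT flags -- hence the
// casts to long and the bit tests above.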
367
368 struct dispatch_object_s *
369 _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq)
370 {
371         struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul;
372
373         // The mediator value acts both as a "lock" and a signal
374         head = dispatch_atomic_xchg(&dq->dq_items_head, mediator);
375
376         if (slowpath(head == NULL)) {
377                 // The first xchg on the tail will tell the enqueueing thread that it
378                 // is safe to blindly write out to the head pointer. A cmpxchg honors
379                 // the algorithm.
380                 dispatch_atomic_cmpxchg(&dq->dq_items_head, mediator, NULL);
381                 _dispatch_debug("no work on global work queue");
382                 return NULL;
383         }
384
385         if (slowpath(head == mediator)) {
386                 // This thread lost the race for ownership of the queue.
387                 //
388                 // The ratio of work to libdispatch overhead must be bad. This
389                 // scenario implies that there are too many threads in the pool.
390                 // Create a new pending thread and then exit this thread.
391                 // The kernel will grant a new thread when the load subsides.
392                 _dispatch_debug("Contention on queue: %p", dq);
393                 _dispatch_queue_wakeup_global(dq);
394 #if DISPATCH_PERF_MON
395                 dispatch_atomic_inc(&_dispatch_bad_ratio);
396 #endif
397                 return NULL;
398         }
399
400         // Restore the head pointer to a sane value before returning.
401         // If 'next' is NULL, then this item _might_ be the last item.
402         next = fastpath(head->do_next);
403
404         if (slowpath(!next)) {
405                 dq->dq_items_head = NULL;
406                 
407                 if (dispatch_atomic_cmpxchg(&dq->dq_items_tail, head, NULL)) {
408                         // both head and tail are NULL now
409                         goto out;
410                 }
411
412                 // There must be a next item now. This thread won't wait long.
413                 while (!(next = head->do_next)) {
414                         _dispatch_hardware_pause();
415                 }
416         }
417
418         dq->dq_items_head = next;
419         _dispatch_queue_wakeup_global(dq);
420 out:
421         return head;
422 }
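// In short: the drain swaps a sentinel (the "mediator") into dq_items_head to
// claim the queue, then either (a) finds the queue empty and undoes the swap,
// (b) sees another thread's mediator and backs off, or (c) takes one item,
// restores the head to the rest of the list and wakes another worker.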
423
424 dispatch_queue_t
425 dispatch_get_current_queue(void)
426 {
427         return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true);
428 }
429
430 #undef dispatch_get_main_queue
431 __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA)
432 dispatch_queue_t dispatch_get_main_queue(void);
433
434 dispatch_queue_t
435 dispatch_get_main_queue(void)
436 {
437         return &_dispatch_main_q;
438 }
439 #define dispatch_get_main_queue() (&_dispatch_main_q)
440
441 struct _dispatch_hw_config_s _dispatch_hw_config;
442
443 static void
444 _dispatch_queue_set_width_init(void)
445 {
446 #ifdef __APPLE__
447         size_t valsz = sizeof(uint32_t);
448         int ret;
449
450         ret = sysctlbyname("hw.activecpu", &_dispatch_hw_config.cc_max_active,
451             &valsz, NULL, 0);
452         (void)dispatch_assume_zero(ret);
453         dispatch_assume(valsz == sizeof(uint32_t));
454
455         ret = sysctlbyname("hw.logicalcpu_max",
456             &_dispatch_hw_config.cc_max_logical, &valsz, NULL, 0);
457         (void)dispatch_assume_zero(ret);
458         dispatch_assume(valsz == sizeof(uint32_t));
459
460         ret = sysctlbyname("hw.physicalcpu_max",
461             &_dispatch_hw_config.cc_max_physical, &valsz, NULL, 0);
462         (void)dispatch_assume_zero(ret);
463         dispatch_assume(valsz == sizeof(uint32_t));
464 #elif defined(__FreeBSD__)
465         size_t valsz = sizeof(uint32_t);
466         int ret;
467
468         ret = sysctlbyname("kern.smp.cpus", &_dispatch_hw_config.cc_max_active,
469             &valsz, NULL, 0);
470         (void)dispatch_assume_zero(ret);
471         (void)dispatch_assume(valsz == sizeof(uint32_t));
472
473         _dispatch_hw_config.cc_max_logical =
474             _dispatch_hw_config.cc_max_physical =
475             _dispatch_hw_config.cc_max_active;
476 #elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN)
477         _dispatch_hw_config.cc_max_active = (int)sysconf(_SC_NPROCESSORS_ONLN);
478         if (_dispatch_hw_config.cc_max_active < 0)
479                 _dispatch_hw_config.cc_max_active = 1;
480         _dispatch_hw_config.cc_max_logical =
481             _dispatch_hw_config.cc_max_physical =
482             _dispatch_hw_config.cc_max_active;
483 #else
484 #warning "_dispatch_queue_set_width_init: no supported way to query CPU count"
485         _dispatch_hw_config.cc_max_logical =
486             _dispatch_hw_config.cc_max_physical =
487             _dispatch_hw_config.cc_max_active = 1;
488 #endif
489 }
490
491 void
492 dispatch_queue_set_width(dispatch_queue_t dq, long width)
493 {
494         int w = (int)width;     // intentional truncation
495         uint32_t tmp;
496
497         if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
498                 return;
499         }
500         if (w == 1 || w == 0) {
501                 dq->dq_width = 1;
502                 return;
503         }
504         if (w > 0) {
505                 tmp = w;
506         } else switch (w) {
507         case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS:
508                 tmp = _dispatch_hw_config.cc_max_physical;
509                 break;
510         case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS:
511                 tmp = _dispatch_hw_config.cc_max_active;
512                 break;
513         default:
514                 // fall through
515         case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS:
516                 tmp = _dispatch_hw_config.cc_max_logical;
517                 break;
518         }
519         // multiply by two since the running count is inc/dec by two (the low bit == barrier)
520         dq->dq_width = tmp * 2;
521
522         // XXX if the queue has items and the width is increased, we should try to wake the queue
523 }
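// For illustration only (this is legacy API, and the label below is just a
// placeholder): a caller could widen a private queue to the number of active
// CPUs with something like
//
//	dispatch_queue_t q = dispatch_queue_create("com.example.pool", NULL);
//	dispatch_queue_set_width(q, DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS);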
524
525 // skip zero
526 // 1 - main_q
527 // 2 - mgr_q
528 // 3 - _unused_
529 // 4,5,6,7,8,9 - global queues
530 // we use 'xadd' on Intel, so the initial value == next assigned
531 static unsigned long _dispatch_queue_serial_numbers = 10;
532
533 // Note to later developers: ensure that any initialization changes are
534 // made for statically allocated queues (i.e. _dispatch_main_q).
535 inline void
536 _dispatch_queue_init(dispatch_queue_t dq)
537 {
538         dq->do_vtable = &_dispatch_queue_vtable;
539         dq->do_next = DISPATCH_OBJECT_LISTLESS;
540         dq->do_ref_cnt = 1;
541         dq->do_xref_cnt = 1;
542         dq->do_targetq = _dispatch_get_root_queue(0, true);
543         dq->dq_running = 0;
544         dq->dq_width = 1;
545         dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1;
546 }
547
548 dispatch_queue_t
549 dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
550 {
551         dispatch_queue_t dq;
552         size_t label_len;
553
554         if (!label) {
555                 label = "";
556         }
557
558         label_len = strlen(label);
559         if (label_len < (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1)) {
560                 label_len = (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1);
561         }
562
563         // XXX switch to malloc()
564         dq = calloc(1ul, sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_MIN_LABEL_SIZE + label_len + 1);
565         if (slowpath(!dq)) {
566                 return dq;
567         }
568
569         _dispatch_queue_init(dq);
570         strcpy(dq->dq_label, label);
571
572 #ifndef DISPATCH_NO_LEGACY
573         if (slowpath(attr)) {
574                 dq->do_targetq = _dispatch_get_root_queue(attr->qa_priority, attr->qa_flags & DISPATCH_QUEUE_OVERCOMMIT);
575                 dq->dq_finalizer_ctxt = attr->finalizer_ctxt;
576                 dq->dq_finalizer_func = attr->finalizer_func;
577 #ifdef __BLOCKS__
578                 if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) {
579                         // if finalizer_ctxt is a Block, retain it.
580                         dq->dq_finalizer_ctxt = Block_copy(dq->dq_finalizer_ctxt);
581                         if (!(dq->dq_finalizer_ctxt)) {
582                                 goto out_bad;
583                         }
584                 }
585 #endif
586         }
587 #else
588         (void)attr;
589 #endif
590
591         return dq;
592
593 #if !defined(DISPATCH_NO_LEGACY) && defined(__BLOCKS__)
594 out_bad:
595 #endif
596         free(dq);
597         return NULL;
598 }
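// Typical usage, for reference (the label is only a placeholder):
//
//	dispatch_queue_t q = dispatch_queue_create("com.example.worker", NULL);
//	if (q) {
//		dispatch_async(q, ^{ /* work */ });
//		// ... later, once the queue is no longer needed:
//		dispatch_release(q);
//	}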
599
600 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
601 void
602 _dispatch_queue_dispose(dispatch_queue_t dq)
603 {
604         if (slowpath(dq == _dispatch_queue_get_current())) {
605                 DISPATCH_CRASH("Release of a queue by itself");
606         }
607         if (slowpath(dq->dq_items_tail)) {
608                 DISPATCH_CRASH("Release of a queue while items are enqueued");
609         }
610
611 #ifndef DISPATCH_NO_LEGACY
612         if (dq->dq_finalizer_func) {
613                 dq->dq_finalizer_func(dq->dq_finalizer_ctxt, dq);
614         }
615 #endif
616
617         // trash the tail queue so that use after free will crash
618         dq->dq_items_tail = (void *)0x200;
619
620         _dispatch_dispose(dq);
621 }
622
623 DISPATCH_NOINLINE 
624 void
625 _dispatch_queue_push_list_slow(dispatch_queue_t dq, struct dispatch_object_s *obj)
626 {
627         // The queue must be retained before dq_items_head is written in order
628         // to ensure that the reference is still valid when _dispatch_wakeup is
629         // called. Otherwise, if preempted between the assignment to
630         // dq_items_head and _dispatch_wakeup, the blocks submitted to the
631         // queue may release the last reference to the queue when invoked by
632         // _dispatch_queue_drain. <rdar://problem/6932776>
633         _dispatch_retain(dq);
634         dq->dq_items_head = obj;
635         _dispatch_wakeup(dq);
636         _dispatch_release(dq);
637 }
638
639 DISPATCH_NOINLINE
640 static void
641 _dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func)
642 {
643         dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap());
644
645         dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
646         dc->dc_func = func;
647         dc->dc_ctxt = context;
648
649         _dispatch_queue_push(dq, dc);
650 }
651
652 #ifdef __BLOCKS__
653 void
654 dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void))
655 {
656         dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release);
657 }
658 #endif
659
660 DISPATCH_NOINLINE
661 void
662 dispatch_barrier_async_f(dispatch_queue_t dq, void *context, dispatch_function_t func)
663 {
664         dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly());
665
666         if (!dc) {
667                 return _dispatch_barrier_async_f_slow(dq, context, func);
668         }
669
670         dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT);
671         dc->dc_func = func;
672         dc->dc_ctxt = context;
673
674         _dispatch_queue_push(dq, dc);
675 }
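// Roughly speaking, a barrier submission behaves like a normal async block on
// a serial queue; on a wide (concurrent) queue it waits for previously
// submitted work to finish and excludes later work while it runs, e.g.
//
//	dispatch_barrier_async_f(q, shared_state, update_shared_state);
//
// where 'shared_state' and 'update_shared_state' are the caller's own.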
676
677 DISPATCH_NOINLINE
678 static void
679 _dispatch_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func)
680 {
681         dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap());
682
683         dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
684         dc->dc_func = func;
685         dc->dc_ctxt = context;
686
687         _dispatch_queue_push(dq, dc);
688 }
689
690 #ifdef __BLOCKS__
691 void
692 dispatch_async(dispatch_queue_t dq, void (^work)(void))
693 {
694         dispatch_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release);
695 }
696 #endif
697
698 DISPATCH_NOINLINE
699 void
700 dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
701 {
702         dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly());
703
704         // unlike dispatch_sync_f(), we do NOT need to check the queue width,
705         // the "drain" function will do this test
706
707         if (!dc) {
708                 return _dispatch_async_f_slow(dq, ctxt, func);
709         }
710
711         dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
712         dc->dc_func = func;
713         dc->dc_ctxt = ctxt;
714
715         _dispatch_queue_push(dq, dc);
716 }
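// Plain-C usage sketch (the callback and context here are illustrative):
//
//	static void print_msg(void *ctxt) { puts(ctxt); }
//	...
//	dispatch_async_f(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0),
//	    (void *)"hello", print_msg);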
717
718 struct dispatch_barrier_sync_slow2_s {
719         dispatch_queue_t dbss2_dq;
720         dispatch_function_t dbss2_func;
721         void *dbss2_ctxt;
722         dispatch_semaphore_t dbss2_sema;
723 };
724
725 static void
726 _dispatch_barrier_sync_f_slow_invoke(void *ctxt)
727 {
728         struct dispatch_barrier_sync_slow2_s *dbss2 = ctxt;
729
730         dispatch_assert(dbss2->dbss2_dq == dispatch_get_current_queue());
731         // ALL blocks on the main queue must be run on the main thread
732         if (dbss2->dbss2_dq == dispatch_get_main_queue()) {
733                 dbss2->dbss2_func(dbss2->dbss2_ctxt);
734         } else {
735                 dispatch_suspend(dbss2->dbss2_dq);
736         }
737         dispatch_semaphore_signal(dbss2->dbss2_sema);
738 }
739
740 DISPATCH_NOINLINE
741 static void
742 _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
743 {
744         
745         // It's preferred to execute synchronous blocks on the current thread
746         // due to thread-local side effects, garbage collection, etc. However,
747         // blocks submitted to the main thread MUST be run on the main thread
748         
749         struct dispatch_barrier_sync_slow2_s dbss2 = {
750                 .dbss2_dq = dq,
751                 .dbss2_func = func,
752                 .dbss2_ctxt = ctxt,             
753                 .dbss2_sema = _dispatch_get_thread_semaphore(),
754         };
755         struct dispatch_barrier_sync_slow_s {
756                 DISPATCH_CONTINUATION_HEADER(dispatch_barrier_sync_slow_s);
757         } dbss = {
758                 .do_vtable = (void *)DISPATCH_OBJ_BARRIER_BIT,
759                 .dc_func = _dispatch_barrier_sync_f_slow_invoke,
760                 .dc_ctxt = &dbss2,
761         };
762         
763         dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
764         _dispatch_queue_push(dq, (void *)&dbss);
765         dispatch_semaphore_wait(dbss2.dbss2_sema, DISPATCH_TIME_FOREVER);
766
767         if (dq != dispatch_get_main_queue()) {
768                 _dispatch_thread_setspecific(dispatch_queue_key, dq);
769                 func(ctxt);
770                 _dispatch_workitem_inc();
771                 _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
772                 dispatch_resume(dq);
773         }
774         _dispatch_put_thread_semaphore(dbss2.dbss2_sema);
775 }
776
777 #ifdef __BLOCKS__
778 void
779 dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void))
780 {
781         // Blocks submitted to the main queue MUST be run on the main thread,
782         // therefore we must Block_copy in order to notify the thread-local
783         // garbage collector that the objects are transferring to the main thread
784         if (dq == dispatch_get_main_queue()) {
785                 dispatch_block_t block = Block_copy(work);
786                 return dispatch_barrier_sync_f(dq, block, _dispatch_call_block_and_release);
787         }       
788         struct Block_basic *bb = (void *)work;
789
790         dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke);
791 }
792 #endif
793
794 DISPATCH_NOINLINE
795 void
796 dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
797 {
798         dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
799
800         // 1) ensure that this thread hasn't enqueued anything ahead of this call
801         // 2) the queue is not suspended
802         // 3) the queue is not weird
803         if (slowpath(dq->dq_items_tail)
804                         || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))
805                         || slowpath(!_dispatch_queue_trylock(dq))) {
806                 return _dispatch_barrier_sync_f_slow(dq, ctxt, func);
807         }
808
809         _dispatch_thread_setspecific(dispatch_queue_key, dq);
810         func(ctxt);
811         _dispatch_workitem_inc();
812         _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
813         _dispatch_queue_unlock(dq);
814 }
815
816 static void
817 _dispatch_sync_f_slow2(void *ctxt)
818 {
819         dispatch_queue_t dq = _dispatch_queue_get_current();
820         dispatch_atomic_add(&dq->dq_running, 2);
821         dispatch_semaphore_signal(ctxt);
822 }
823
824 DISPATCH_NOINLINE
825 static void
826 _dispatch_sync_f_slow(dispatch_queue_t dq)
827 {
828         // the global root queues do not need strict ordering
829         if (dq->do_targetq == NULL) {
830                 dispatch_atomic_add(&dq->dq_running, 2);
831                 return;
832         }
833
834         struct dispatch_sync_slow_s {
835                 DISPATCH_CONTINUATION_HEADER(dispatch_sync_slow_s);
836         } dss = {
837                 .do_vtable = NULL,
838                 .dc_func = _dispatch_sync_f_slow2,
839                 .dc_ctxt = _dispatch_get_thread_semaphore(),
840         };
841
842         // XXX FIXME -- concurrent queues can become serial again
843         _dispatch_queue_push(dq, (void *)&dss);
844
845         dispatch_semaphore_wait(dss.dc_ctxt, DISPATCH_TIME_FOREVER);
846         _dispatch_put_thread_semaphore(dss.dc_ctxt);
847 }
848
849 #ifdef __BLOCKS__
850 void
851 dispatch_sync(dispatch_queue_t dq, void (^work)(void))
852 {
853         struct Block_basic *bb = (void *)work;
854         dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke);
855 }
856 #endif
857
858 DISPATCH_NOINLINE
859 void
860 dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func)
861 {
862         typeof(dq->dq_running) prev_cnt;
863         dispatch_queue_t old_dq;
864
865         if (dq->dq_width == 1) {
866                 return dispatch_barrier_sync_f(dq, ctxt, func);
867         }
868
869         // 1) ensure that this thread hasn't enqueued anything ahead of this call
870         // 2) the queue is not suspended
871         if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) {
872                 _dispatch_sync_f_slow(dq);
873         } else {
874                 prev_cnt = dispatch_atomic_add(&dq->dq_running, 2) - 2;
875
876                 if (slowpath(prev_cnt & 1)) {
877                         if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) {
878                                 _dispatch_wakeup(dq);
879                         }
880                         _dispatch_sync_f_slow(dq);
881                 }
882         }
883
884         old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
885         _dispatch_thread_setspecific(dispatch_queue_key, dq);
886         func(ctxt);
887         _dispatch_workitem_inc();
888         _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
889
890         if (slowpath(dispatch_atomic_sub(&dq->dq_running, 2) == 0)) {
891                 _dispatch_wakeup(dq);
892         }
893 }
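// Note: as with any synchronous submission, calling dispatch_sync_f() against
// the queue the caller is currently running on (e.g. from within one of that
// serial queue's own blocks) will deadlock.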
894
895 const char *
896 dispatch_queue_get_label(dispatch_queue_t dq)
897 {
898         return dq->dq_label;
899 }
900
901 #if DISPATCH_COCOA_COMPAT
902 static void
903 _dispatch_main_q_port_init(void *ctxt __attribute__((unused)))
904 {
905         kern_return_t kr;
906
907         kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &main_q_port);
908         DISPATCH_VERIFY_MIG(kr);
909         (void)dispatch_assume_zero(kr);
910         kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, MACH_MSG_TYPE_MAKE_SEND);
911         DISPATCH_VERIFY_MIG(kr);
912         (void)dispatch_assume_zero(kr);
913
914         _dispatch_program_is_probably_callback_driven = true;
915         _dispatch_safe_fork = false;
916 }
917
918 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
919 DISPATCH_NOINLINE
920 static void
921 _dispatch_queue_set_mainq_drain_state(bool arg)
922 {
923         main_q_is_draining = arg;
924 }
925 #endif
926
927 /*
928  * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not
929  * marked noreturn, leading to a build error as dispatch_main() *is* marked
930  * noreturn.  Mask by marking __builtin_trap() as noreturn locally.
931  */
932 #ifndef HAVE_NORETURN_BUILTIN_TRAP
933 void __builtin_trap(void) __attribute__((__noreturn__));
934 #endif
935
936 void
937 dispatch_main(void)
938 {
939
940 #if HAVE_PTHREAD_MAIN_NP
941         if (pthread_main_np()) {
942 #endif
943                 _dispatch_program_is_probably_callback_driven = true;
944                 pthread_exit(NULL);
945                 DISPATCH_CRASH("pthread_exit() returned");
946 #if HAVE_PTHREAD_MAIN_NP
947         }
948         DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread");
949 #endif
950 }
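// For reference, dispatch_main() never returns. A purely callback-driven
// program typically installs its initial work and then parks the main thread,
// roughly:
//
//	int main(void) {
//		dispatch_async(dispatch_get_main_queue(), ^{ /* first work item */ });
//		dispatch_main();	// does not return
//	}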
951
952 static void
953 _dispatch_sigsuspend(void *ctxt __attribute__((unused)))
954 {
955         static const sigset_t mask;
956
957         for (;;) {
958                 sigsuspend(&mask);
959         }
960 }
961
962 DISPATCH_NOINLINE
963 static void
964 _dispatch_queue_cleanup2(void)
965 {
966         dispatch_atomic_dec(&_dispatch_main_q.dq_running);
967
968         if (dispatch_atomic_sub(&_dispatch_main_q.do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) {
969                 _dispatch_wakeup(&_dispatch_main_q);
970         }
971
972         // overload the "probably" variable to mean that dispatch_main() or
973         // similar non-POSIX API was called
974         // this has to run before the DISPATCH_COCOA_COMPAT below
975         if (_dispatch_program_is_probably_callback_driven) {
976                 dispatch_async_f(_dispatch_get_root_queue(0, 0), NULL, _dispatch_sigsuspend);
977                 sleep(1);       // workaround 6778970
978         }
979
980 #if DISPATCH_COCOA_COMPAT
981         dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);
982
983         mach_port_t mp = main_q_port;
984         kern_return_t kr;
985
986         main_q_port = 0;
987
988         if (mp) {
989                 kr = mach_port_deallocate(mach_task_self(), mp);
990                 DISPATCH_VERIFY_MIG(kr);
991                 (void)dispatch_assume_zero(kr);
992                 kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1);
993                 DISPATCH_VERIFY_MIG(kr);
994                 (void)dispatch_assume_zero(kr);
995         }
996 #endif
997 }
998
999 #ifndef DISPATCH_NO_LEGACY
1000 dispatch_queue_t
1001 dispatch_get_concurrent_queue(long pri)
1002 {
1003         if (pri > 0) {
1004                 pri = DISPATCH_QUEUE_PRIORITY_HIGH;
1005         } else if (pri < 0) {
1006                 pri = DISPATCH_QUEUE_PRIORITY_LOW;
1007         }
1008         return _dispatch_get_root_queue(pri, false);
1009 }
1010 #endif
1011
1012 static void
1013 _dispatch_queue_cleanup(void *ctxt)
1014 {
1015         if (ctxt == &_dispatch_main_q) {
1016                 return _dispatch_queue_cleanup2();
1017         }
1018         // POSIX defines that destructors are only called if 'ctxt' is non-null
1019         DISPATCH_CRASH("Premature thread exit while a dispatch queue is running");
1020 }
1021
1022 dispatch_queue_t
1023 dispatch_get_global_queue(long priority, unsigned long flags)
1024 {
1025         if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) {
1026                 return NULL;
1027         }
1028         return _dispatch_get_root_queue(priority, flags & DISPATCH_QUEUE_OVERCOMMIT);
1029 }
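// For example, the default-priority concurrent queue is obtained with
//
//	dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
//
// Any flag bit other than DISPATCH_QUEUE_OVERCOMMIT makes the call return NULL.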
1030
1031 #define countof(x)      (sizeof(x) / sizeof(x[0]))
1032 void
1033 libdispatch_init(void)
1034 {
1035         dispatch_assert(DISPATCH_QUEUE_PRIORITY_COUNT == 3);
1036         dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 6);
1037
1038         dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == -DISPATCH_QUEUE_PRIORITY_HIGH);
1039         dispatch_assert(countof(_dispatch_root_queues) == DISPATCH_ROOT_QUEUE_COUNT);
1040         dispatch_assert(countof(_dispatch_thread_mediator) == DISPATCH_ROOT_QUEUE_COUNT);
1041         dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT);
1042
1043 #if HAVE_PTHREAD_KEY_INIT_NP
1044         _dispatch_thread_key_init_np(dispatch_queue_key, _dispatch_queue_cleanup);
1045         _dispatch_thread_key_init_np(dispatch_sema4_key, (void (*)(void *))dispatch_release);   // use the extern release
1046         _dispatch_thread_key_init_np(dispatch_cache_key, _dispatch_cache_cleanup2);
1047 #if DISPATCH_PERF_MON
1048         _dispatch_thread_key_init_np(dispatch_bcounter_key, NULL);
1049 #endif
1050 #else /* !HAVE_PTHREAD_KEY_INIT_NP */
1051         _dispatch_thread_key_create(&dispatch_queue_key,
1052             _dispatch_queue_cleanup);
1053         _dispatch_thread_key_create(&dispatch_sema4_key,
1054             (void (*)(void *))dispatch_release); // use the extern release
1055         _dispatch_thread_key_create(&dispatch_cache_key,
1056             _dispatch_cache_cleanup2);
1057 #ifdef DISPATCH_PERF_MON
1058         _dispatch_thread_key_create(&dispatch_bcounter_key, NULL);
1059 #endif
1060 #endif /* HAVE_PTHREAD_KEY_INIT_NP */
1061
1062         _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q);
1063
1064         _dispatch_queue_set_width_init();
1065 }
1066
1067 void
1068 _dispatch_queue_unlock(dispatch_queue_t dq)
1069 {
1070         if (slowpath(dispatch_atomic_dec(&dq->dq_running))) {
1071                 return;
1072         }
1073
1074         _dispatch_wakeup(dq);
1075 }
1076
1077 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
1078 dispatch_queue_t
1079 _dispatch_wakeup(dispatch_object_t dou)
1080 {
1081         dispatch_queue_t tq;
1082
1083         if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) {
1084                 return NULL;
1085         }
1086         if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) {
1087                 return NULL;
1088         }
1089
1090         if (!_dispatch_trylock(dou._do)) {
1091 #if DISPATCH_COCOA_COMPAT
1092                 if (dou._dq == &_dispatch_main_q) {
1093                         _dispatch_queue_wakeup_main();
1094                 }
1095 #endif
1096                 return NULL;
1097         }
1098         _dispatch_retain(dou._do);
1099         tq = dou._do->do_targetq;
1100         _dispatch_queue_push(tq, dou._do);
1101         return tq;      // libdispatch doesn't need this, but the Instrument DTrace probe does
1102 }
1103
1104 #if DISPATCH_COCOA_COMPAT
1105 DISPATCH_NOINLINE
1106 void
1107 _dispatch_queue_wakeup_main(void)
1108 {
1109         kern_return_t kr;
1110
1111         dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);
1112
1113         kr = _dispatch_send_wakeup_main_thread(main_q_port, 0);
1114
1115         switch (kr) {
1116         case MACH_SEND_TIMEOUT:
1117         case MACH_SEND_TIMED_OUT:
1118         case MACH_SEND_INVALID_DEST:
1119                 break;
1120         default:
1121                 (void)dispatch_assume_zero(kr);
1122                 break;
1123         }
1124
1125         _dispatch_safe_fork = false;
1126 }
1127 #endif
1128
1129 #if HAVE_PTHREAD_WORKQUEUES
1130 static inline int
1131 _dispatch_rootq2wq_pri(long idx)
1132 {
1133 #ifdef WORKQ_DEFAULT_PRIOQUEUE
1134         switch (idx) {
1135         case 0:
1136         case 1:
1137                 return WORKQ_LOW_PRIOQUEUE;
1138         case 2:
1139         case 3:
1140         default:
1141                 return WORKQ_DEFAULT_PRIOQUEUE;
1142         case 4:
1143         case 5:
1144                 return WORKQ_HIGH_PRIOQUEUE;
1145         }
1146 #else
1147         return idx;
1148 #endif
1149 }
1150 #endif
1151
1152 static void
1153 _dispatch_root_queues_init(void *context __attribute__((unused)))
1154 {
1155 #if HAVE_PTHREAD_WORKQUEUES
1156         bool disable_wq = getenv("LIBDISPATCH_DISABLE_KWQ");
1157         pthread_workqueue_attr_t pwq_attr;
1158         int r;
1159 #endif
1160 #if USE_MACH_SEM
1161         kern_return_t kr;
1162 #endif
1163 #if USE_POSIX_SEM
1164         int ret;
1165 #endif
1166         int i;
1167
1168 #if HAVE_PTHREAD_WORKQUEUES
1169         r = pthread_workqueue_attr_init_np(&pwq_attr);
1170         (void)dispatch_assume_zero(r);
1171 #endif
1172
1173         for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
1174 // some software hangs if the non-overcommitting queues do not overcommit when threads block
1175 #if 0
1176                 if (!(i & 1)) {
1177                         _dispatch_root_queue_contexts[i].dgq_thread_pool_size = _dispatch_hw_config.cc_max_active;
1178                 }
1179 #endif
1180 #if HAVE_PTHREAD_WORKQUEUES
1181                 r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, _dispatch_rootq2wq_pri(i));
1182                 (void)dispatch_assume_zero(r);
1183                 r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, i & 1);
1184                 (void)dispatch_assume_zero(r);
1185                 r = 0;
1186                 if (disable_wq || (r = pthread_workqueue_create_np(&_dispatch_root_queue_contexts[i].dgq_kworkqueue, &pwq_attr))) {
1187                         if (r != ENOTSUP) {
1188                                 (void)dispatch_assume_zero(r);
1189                         }
1190 #endif /* HAVE_PTHREAD_WORKQUEUES */
1191 #if USE_MACH_SEM
1192                         // override the default FIFO behavior for the pool semaphores
1193                         kr = semaphore_create(mach_task_self(), &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0);
1194                         DISPATCH_VERIFY_MIG(kr);
1195                         (void)dispatch_assume_zero(kr);
1196                         dispatch_assume(_dispatch_thread_mediator[i].dsema_port);
1197 #endif
1198 #if USE_POSIX_SEM
1199                         /* XXXRW: POSIX semaphores don't support LIFO? */
1200                         ret = sem_init(&_dispatch_thread_mediator[i].dsema_sem, 0, 0);
1201                         (void)dispatch_assume_zero(ret);
1202 #endif
1203 #if HAVE_PTHREAD_WORKQUEUES
1204                 } else {
1205                         dispatch_assume(_dispatch_root_queue_contexts[i].dgq_kworkqueue);
1206                 }
1207 #endif
1208         }
1209
1210 #if HAVE_PTHREAD_WORKQUEUES
1211         r = pthread_workqueue_attr_destroy_np(&pwq_attr);
1212         (void)dispatch_assume_zero(r);
1213 #endif
1214 }
1215
1216 bool
1217 _dispatch_queue_wakeup_global(dispatch_queue_t dq)
1218 {
1219         static dispatch_once_t pred;
1220         struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
1221 #if HAVE_PTHREAD_WORKQUEUES
1222         pthread_workitem_handle_t wh;
1223         unsigned int gen_cnt;
1224 #endif
1225         pthread_t pthr;
1226         int r, t_count;
1227
1228         if (!dq->dq_items_tail) {
1229                 return false;
1230         }
1231
1232         _dispatch_safe_fork = false;
1233
1234         dispatch_debug_queue(dq, __PRETTY_FUNCTION__);
1235
1236         dispatch_once_f(&pred, NULL, _dispatch_root_queues_init);
1237
1238 #if HAVE_PTHREAD_WORKQUEUES
1239         if (qc->dgq_kworkqueue) {
1240                 if (dispatch_atomic_cmpxchg(&qc->dgq_pending, 0, 1)) {
1241                         _dispatch_debug("requesting new worker thread");
1242
1243                         r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, _dispatch_worker_thread2, dq, &wh, &gen_cnt);
1244                         (void)dispatch_assume_zero(r);
1245                 } else {
1246                         _dispatch_debug("work thread request still pending on global queue: %p", dq);
1247                 }
1248                 goto out;
1249         }
1250 #endif
1251
1252         if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) {
1253                 goto out;
1254         }
1255
1256         do {
1257                 t_count = qc->dgq_thread_pool_size;
1258                 if (!t_count) {
1259                         _dispatch_debug("The thread pool is full: %p", dq);
1260                         goto out;
1261                 }
1262         } while (!dispatch_atomic_cmpxchg(&qc->dgq_thread_pool_size, t_count, t_count - 1));
1263
1264         while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) {
1265                 if (r != EAGAIN) {
1266                         (void)dispatch_assume_zero(r);
1267                 }
1268                 sleep(1);
1269         }
1270         r = pthread_detach(pthr);
1271         (void)dispatch_assume_zero(r);
1272
1273 out:
1274         return false;
1275 }
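// Summary: hand the request to the kernel workqueue when one was created;
// otherwise poke an idle pooled thread via the per-priority mediator
// semaphore, and only spawn a new detached pthread (bounded by
// dgq_thread_pool_size) when no idle thread answers.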
1276
1277 void
1278 _dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq)
1279 {
1280 #if DISPATCH_PERF_MON
1281         uint64_t start = _dispatch_absolute_time();
1282 #endif
1283         _dispatch_queue_drain(dq);
1284 #if DISPATCH_PERF_MON
1285         _dispatch_queue_merge_stats(start);
1286 #endif
1287         _dispatch_force_cache_cleanup();
1288 }
1289
1290 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
1291 DISPATCH_NOINLINE
1292 void
1293 _dispatch_queue_invoke(dispatch_queue_t dq)
1294 {
1295         dispatch_queue_t tq = dq->do_targetq;
1296
1297         if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && fastpath(_dispatch_queue_trylock(dq))) {
1298                 _dispatch_queue_drain(dq);
1299                 if (tq == dq->do_targetq) {
1300                         tq = dx_invoke(dq);
1301                 } else {
1302                         tq = dq->do_targetq;
1303                 }
1304                 // We do not need to check the result.
1305                 // When the suspend-count lock is dropped, then the check will happen.
1306                 dispatch_atomic_dec(&dq->dq_running);
1307                 if (tq) {
1308                         return _dispatch_queue_push(tq, dq);
1309                 }
1310         }
1311
1312         dq->do_next = DISPATCH_OBJECT_LISTLESS;
1313         if (dispatch_atomic_sub(&dq->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) {
1314                 if (dq->dq_running == 0) {
1315                         _dispatch_wakeup(dq);   // verify that the queue is idle
1316                 }
1317         }
1318         _dispatch_release(dq);  // added when the queue is put on the list
1319 }
1320
1321 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
1322 static void
1323 _dispatch_set_target_queue2(void *ctxt)
1324 {
1325         dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current();
1326                   
1327         prev_dq = dq->do_targetq;
1328         dq->do_targetq = ctxt;
1329         _dispatch_release(prev_dq);
1330 }
1331
1332 void
1333 dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq)
1334 {
1335         if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) {
1336                 return;
1337         }
1338         // NOTE: we test for NULL target queues internally to detect root queues
1339         // therefore, if the retain crashes due to a bad input, that is OK
1340         _dispatch_retain(dq);
1341         dispatch_barrier_async_f(dou._dq, dq, _dispatch_set_target_queue2);
1342 }
1343
1344 static void
1345 _dispatch_async_f_redirect2(void *_ctxt)
1346 {
1347         struct dispatch_continuation_s *dc = _ctxt;
1348         struct dispatch_continuation_s *other_dc = dc->dc_data[1];
1349         dispatch_queue_t old_dq, dq = dc->dc_data[0];
1350
1351         old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
1352         _dispatch_thread_setspecific(dispatch_queue_key, dq);
1353         _dispatch_continuation_pop(other_dc);
1354         _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
1355
1356         if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) {
1357                 _dispatch_wakeup(dq);
1358         }
1359         _dispatch_release(dq);
1360 }
1361
1362 static void
1363 _dispatch_async_f_redirect(dispatch_queue_t dq, struct dispatch_object_s *other_dc)
1364 {
1365         dispatch_continuation_t dc = (void *)other_dc;
1366         dispatch_queue_t root_dq = dq;
1367
1368         if (dc->dc_func == _dispatch_sync_f_slow2) {
1369                 return dc->dc_func(dc->dc_ctxt);
1370         }
1371
1372         dispatch_atomic_add(&dq->dq_running, 2);
1373         _dispatch_retain(dq);
1374
1375         dc = _dispatch_continuation_alloc_cacheonly() ?: _dispatch_continuation_alloc_from_heap();
1376
1377         dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
1378         dc->dc_func = _dispatch_async_f_redirect2;
1379         dc->dc_ctxt = dc;
1380         dc->dc_data[0] = dq;
1381         dc->dc_data[1] = other_dc;
1382
1383         do {
1384                 root_dq = root_dq->do_targetq;
1385         } while (root_dq->do_targetq);
1386
1387         _dispatch_queue_push(root_dq, dc);
1388 }
1389
1390
1391 void
1392 _dispatch_queue_drain(dispatch_queue_t dq)
1393 {
1394         dispatch_queue_t orig_tq, old_dq = _dispatch_thread_getspecific(dispatch_queue_key);
1395         struct dispatch_object_s *dc = NULL, *next_dc = NULL;
1396
1397         orig_tq = dq->do_targetq;
1398
1399         _dispatch_thread_setspecific(dispatch_queue_key, dq);
1400
1401         while (dq->dq_items_tail) {
1402                 while (!fastpath(dq->dq_items_head)) {
1403                         _dispatch_hardware_pause();
1404                 }
1405
1406                 dc = dq->dq_items_head;
1407                 dq->dq_items_head = NULL;
1408
1409                 do {
1410                         // Enqueue is TIGHTLY controlled; we won't wait long.
1411                         do {
1412                                 next_dc = fastpath(dc->do_next);
1413                         } while (!next_dc && !dispatch_atomic_cmpxchg(&dq->dq_items_tail, dc, NULL));
1414                         if (DISPATCH_OBJECT_SUSPENDED(dq)) {
1415                                 goto out;
1416                         }
1417                         if (dq->dq_running > dq->dq_width) {
1418                                 goto out;
1419                         }
1420                         if (orig_tq != dq->do_targetq) {
1421                                 goto out;
1422                         }
1423                         if (fastpath(dq->dq_width == 1)) {
1424                                 _dispatch_continuation_pop(dc);
1425                                 _dispatch_workitem_inc();
1426                         } else if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) {
1427                                 if (dq->dq_running > 1) {
1428                                         goto out;
1429                                 }
1430                                 _dispatch_continuation_pop(dc);
1431                                 _dispatch_workitem_inc();
1432                         } else {
1433                                 _dispatch_async_f_redirect(dq, dc);
1434                         }
1435                 } while ((dc = next_dc));
1436         }
1437
1438 out:
1439         // if this is not a complete drain, we must undo some things
1440         if (slowpath(dc)) {
1441                 // 'dc' must NOT be "popped"
1442                 // 'dc' might be the last item
1443                 if (next_dc || dispatch_atomic_cmpxchg(&dq->dq_items_tail, NULL, dc)) {
1444                         dq->dq_items_head = dc;
1445                 } else {
1446                         while (!(next_dc = dq->dq_items_head)) {
1447                                 _dispatch_hardware_pause();
1448                         }
1449                         dq->dq_items_head = dc;
1450                         dc->do_next = next_dc;
1451                 }
1452         }
1453
1454         _dispatch_thread_setspecific(dispatch_queue_key, old_dq);
1455 }
1456
1457 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
1458 void *
1459 _dispatch_worker_thread(void *context)
1460 {
1461         dispatch_queue_t dq = context;
1462         struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
1463         sigset_t mask;
1464         int r;
1465
1466         // work around the signal-mask tweaks that the kernel workqueue does for us
1467         r = sigfillset(&mask);
1468         (void)dispatch_assume_zero(r);
1469         r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL);
1470         (void)dispatch_assume_zero(r);
1471
1472         do {
1473                 _dispatch_worker_thread2(context);
1474                 // we use 65 seconds in case there are any timers that run once a minute
1475         } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0);
1476
1477         dispatch_atomic_inc(&qc->dgq_thread_pool_size);
1478         if (dq->dq_items_tail) {
1479                 _dispatch_queue_wakeup_global(dq);
1480         }
1481
1482         return NULL;
1483 }
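// When the 65-second idle wait on the mediator semaphore times out, the
// thread leaves the drain loop, returns its slot to dgq_thread_pool_size, and
// pokes _dispatch_queue_wakeup_global() once more in case work arrived while
// it was shutting down.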
1484
1485 // 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol
1486 void
1487 _dispatch_worker_thread2(void *context)
1488 {
1489         struct dispatch_object_s *item;
1490         dispatch_queue_t dq = context;
1491         struct dispatch_root_queue_context_s *qc = dq->do_ctxt;
1492
1493         if (_dispatch_thread_getspecific(dispatch_queue_key)) {
1494                 DISPATCH_CRASH("Premature thread recycling");
1495         }
1496
1497         _dispatch_thread_setspecific(dispatch_queue_key, dq);
1498         qc->dgq_pending = 0;
1499
1500 #if DISPATCH_COCOA_COMPAT
1501         // ensure that high-level memory management techniques do not leak/crash
1502         dispatch_begin_thread_4GC();
1503         void *pool = _dispatch_begin_NSAutoReleasePool();
1504 #endif
1505
1506 #if DISPATCH_PERF_MON
1507         uint64_t start = _dispatch_absolute_time();
1508 #endif
1509         while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) {
1510                 _dispatch_continuation_pop(item);
1511         }
1512 #if DISPATCH_PERF_MON
1513         _dispatch_queue_merge_stats(start);
1514 #endif
1515
1516 #if DISPATCH_COCOA_COMPAT
1517         _dispatch_end_NSAutoReleasePool(pool);
1518         dispatch_end_thread_4GC();
1519 #endif
1520
1521         _dispatch_thread_setspecific(dispatch_queue_key, NULL);
1522
1523         _dispatch_force_cache_cleanup();
1524 }
1525
1526 #if DISPATCH_PERF_MON
1527 void
1528 _dispatch_queue_merge_stats(uint64_t start)
1529 {
1530         uint64_t avg, delta = _dispatch_absolute_time() - start;
1531         unsigned long count, bucket;
1532
1533         count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key);
1534         _dispatch_thread_setspecific(dispatch_bcounter_key, NULL);
1535
1536         if (count) {
1537                 avg = delta / count;
1538                 bucket = flsll(avg);
1539         } else {
1540                 bucket = 0;
1541         }
1542
1543         // updating 64-bit counters atomically on 32-bit hardware requires a lock or a serial queue
1544         OSSpinLockLock(&_dispatch_stats_lock);
1545
1546         _dispatch_stats[bucket].time_total += delta;
1547         _dispatch_stats[bucket].count_total += count;
1548         _dispatch_stats[bucket].thread_total++;
1549
1550         OSSpinLockUnlock(&_dispatch_stats_lock);
1551 }
1552 #endif
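/*
 * Rough worked example of the bucketing above (illustrative only): if a worker
 * spent delta = 1,200,000 ns over count = 400 continuations, avg = 3000 ns and
 * flsll(3000) == 12 (3000 is 0b101110111000), so the sample lands in bucket 12,
 * which collects every thread whose average per-continuation cost fell in
 * [2048, 4096) ns. The buckets therefore form a power-of-two histogram of
 * per-continuation cost, merged under the spinlock.
 */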
1553
1554 size_t
1555 dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz)
1556 {
1557         return snprintf(buf, bufsiz, "parent = %p ", dq->do_targetq);
1558 }
1559
1560 size_t
1561 dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz)
1562 {
1563         size_t offset = 0;
1564         offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dq->dq_label, dq);
1565         offset += dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset);
1566         offset += dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset);
1567         offset += snprintf(&buf[offset], bufsiz - offset, "}");
1568         return offset;
1569 }
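/*
 * The two debug helpers above compose one line with the usual snprintf-offset
 * idiom: each appends at &buf[offset] and returns how many bytes it added (the
 * idiom assumes the buffer is large enough; if offset ever exceeded bufsiz the
 * size_t subtraction would wrap). A direct caller might look like this
 * (illustrative sketch only):
 *
 *	char buf[256];
 *	dispatch_queue_debug(dispatch_get_main_queue(), buf, sizeof(buf));
 *	_dispatch_log("%s", buf);
 */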
1570
1571 #if DISPATCH_DEBUG
1572 void
1573 dispatch_debug_queue(dispatch_queue_t dq, const char* str)
{
1574         if (fastpath(dq)) {
1575                 dispatch_debug(dq, "%s", str);
1576         } else {
1577                 _dispatch_log("queue[NULL]: %s", str);
1578         }
1579 }
1580 #endif
1581
1582 #if DISPATCH_COCOA_COMPAT
1583 void
1584 _dispatch_main_queue_callback_4CF(mach_msg_header_t *msg __attribute__((unused)))
1585 {
1586         if (main_q_is_draining) {
1587                 return;
1588         }
1589         _dispatch_queue_set_mainq_drain_state(true);
1590         _dispatch_queue_serial_drain_till_empty(&_dispatch_main_q);
1591         _dispatch_queue_set_mainq_drain_state(false);
1592 }
1593
1594 mach_port_t
1595 _dispatch_get_main_queue_port_4CF(void)
1596 {
1597         dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init);
1598         return main_q_port;
1599 }
1600 #endif
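/*
 * These two hooks let a run-loop host drive the main queue: it listens on the
 * mach port returned by _dispatch_get_main_queue_port_4CF() and calls
 * _dispatch_main_queue_callback_4CF() whenever the port fires. A rough,
 * hypothetical sketch of such a host using CoreFoundation (wiring approximate,
 * not the actual CoreFoundation implementation):
 *
 *	static void
 *	main_q_fired(CFMachPortRef mp, void *msg, CFIndex size, void *info)
 *	{
 *		_dispatch_main_queue_callback_4CF(msg);
 *	}
 *
 *	CFMachPortRef port = CFMachPortCreateWithPort(NULL,
 *			_dispatch_get_main_queue_port_4CF(), main_q_fired, NULL, NULL);
 *	CFRunLoopSourceRef src = CFMachPortCreateRunLoopSource(NULL, port, 0);
 *	CFRunLoopAddSource(CFRunLoopGetMain(), src, kCFRunLoopCommonModes);
 */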
1601
1602 #ifndef DISPATCH_NO_LEGACY
1603 static void
1604 dispatch_queue_attr_dispose(dispatch_queue_attr_t attr)
1605 {
1606         dispatch_queue_attr_set_finalizer_f(attr, NULL, NULL);
1607         _dispatch_dispose(attr);
1608 }
1609
1610 static const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable = {
1611         .do_type = DISPATCH_QUEUE_ATTR_TYPE,
1612         .do_kind = "queue-attr",
1613         .do_dispose = dispatch_queue_attr_dispose,
1614 };
1615
1616 dispatch_queue_attr_t
1617 dispatch_queue_attr_create(void)
1618 {
1619         dispatch_queue_attr_t a = calloc(1, sizeof(struct dispatch_queue_attr_s));
1620
1621         if (a) {
1622                 a->do_vtable = &dispatch_queue_attr_vtable;
1623                 a->do_next = DISPATCH_OBJECT_LISTLESS;
1624                 a->do_ref_cnt = 1;
1625                 a->do_xref_cnt = 1;
1626                 a->do_targetq = _dispatch_get_root_queue(0, 0);
1627                 a->qa_flags = DISPATCH_QUEUE_OVERCOMMIT;
1628         }
1629         return a;
1630 }
1631
1632 void
1633 dispatch_queue_attr_set_flags(dispatch_queue_attr_t attr, uint64_t flags)
1634 {
1635         dispatch_assert_zero(flags & ~DISPATCH_QUEUE_FLAGS_MASK);
1636         attr->qa_flags = (unsigned long)flags & DISPATCH_QUEUE_FLAGS_MASK;
1637 }
1638         
1639 void
1640 dispatch_queue_attr_set_priority(dispatch_queue_attr_t attr, int priority)
1641 {
1642         dispatch_debug_assert(attr, "NULL pointer");
1643         dispatch_debug_assert(priority <= DISPATCH_QUEUE_PRIORITY_HIGH && priority >= DISPATCH_QUEUE_PRIORITY_LOW, "Invalid priority");
1644
1645         if (priority > 0) {
1646                 priority = DISPATCH_QUEUE_PRIORITY_HIGH;
1647         } else if (priority < 0) {
1648                 priority = DISPATCH_QUEUE_PRIORITY_LOW;
1649         }
1650
1651         attr->qa_priority = priority;
1652 }
1653
1654 void
1655 dispatch_queue_attr_set_finalizer_f(dispatch_queue_attr_t attr,
1656         void *context, dispatch_queue_finalizer_function_t finalizer)
1657 {
1658 #ifdef __BLOCKS__
1659         if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) {
1660                 Block_release(attr->finalizer_ctxt);
1661         }
1662 #endif
1663         attr->finalizer_ctxt = context;
1664         attr->finalizer_func = finalizer;
1665 }
1666
1667 #ifdef __BLOCKS__
1668 long
1669 dispatch_queue_attr_set_finalizer(dispatch_queue_attr_t attr,
1670         dispatch_queue_finalizer_t finalizer)
1671 {
1672         void *ctxt;
1673         dispatch_queue_finalizer_function_t func;
1674
1675         if (finalizer) {
1676                 if (!(ctxt = Block_copy(finalizer))) {
1677                         return 1;
1678                 }
1679                 func = (void *)_dispatch_call_block_and_release2;
1680         } else {
1681                 ctxt = NULL;
1682                 func = NULL;
1683         }
1684
1685         dispatch_queue_attr_set_finalizer_f(attr, ctxt, func);
1686
1687         return 0;
1688 }
1689 #endif
1690 #endif /* DISPATCH_NO_LEGACY */
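/*
 * Putting the legacy attr API above together (illustrative sketch; 'my_ctxt'
 * and 'my_finalizer' are hypothetical, with my_finalizer having the
 * dispatch_queue_finalizer_function_t signature; the attr is consumed by
 * dispatch_queue_create()):
 *
 *	dispatch_queue_attr_t attr = dispatch_queue_attr_create();
 *	dispatch_queue_attr_set_priority(attr, DISPATCH_QUEUE_PRIORITY_HIGH);
 *	dispatch_queue_attr_set_finalizer_f(attr, my_ctxt, my_finalizer);
 *	dispatch_queue_t q = dispatch_queue_create("com.example.queue", attr);
 *	dispatch_release(attr);
 */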
1691
1692 static void
1693 _dispatch_ccache_init(void *context __attribute__((unused)))
1694 {
1695         _dispatch_ccache_zone = malloc_create_zone(0, 0);
1696         dispatch_assert(_dispatch_ccache_zone);
1697         malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
1698 }
1699
1700 dispatch_continuation_t
1701 _dispatch_continuation_alloc_from_heap(void)
1702 {
1703         static dispatch_once_t pred;
1704         dispatch_continuation_t dc;
1705
1706         dispatch_once_f(&pred, NULL, _dispatch_ccache_init);
1707
1708         while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) {
1709                 sleep(1);
1710         }
1711
1712         return dc;
1713 }
1714
1715 void
1716 _dispatch_force_cache_cleanup(void)
1717 {
1718         dispatch_continuation_t dc = _dispatch_thread_getspecific(dispatch_cache_key);
1719         if (dc) {
1720                 _dispatch_thread_setspecific(dispatch_cache_key, NULL);
1721                 _dispatch_cache_cleanup2(dc);
1722         }
1723 }
1724
1725 DISPATCH_NOINLINE
1726 void
1727 _dispatch_cache_cleanup2(void *value)
1728 {
1729         dispatch_continuation_t dc, next_dc = value;
1730
1731         while ((dc = next_dc)) {
1732                 next_dc = dc->do_next;
1733                 malloc_zone_free(_dispatch_ccache_zone, dc);
1734         }
1735 }
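/*
 * The heap allocator above is only the slow path: continuations freed by a
 * thread are pushed onto a per-thread free list (dispatch_cache_key), and
 * _dispatch_force_cache_cleanup()/_dispatch_cache_cleanup2() return that list
 * to the malloc zone when the worker goes idle. The fast-path alloc/free pair
 * such a cache implies looks roughly like this (hypothetical helper names; the
 * real fast path lives elsewhere in libdispatch):
 *
 *	static dispatch_continuation_t
 *	continuation_alloc_cached(void)
 *	{
 *		dispatch_continuation_t dc =
 *				_dispatch_thread_getspecific(dispatch_cache_key);
 *		if (dc) {	// pop the head of the thread-local free list
 *			_dispatch_thread_setspecific(dispatch_cache_key, dc->do_next);
 *			return dc;
 *		}
 *		return _dispatch_continuation_alloc_from_heap();
 *	}
 *
 *	static void
 *	continuation_free_cached(dispatch_continuation_t dc)
 *	{
 *		// push back onto the thread-local free list
 *		dc->do_next = _dispatch_thread_getspecific(dispatch_cache_key);
 *		_dispatch_thread_setspecific(dispatch_cache_key, dc);
 *	}
 */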
1736
1737 static char _dispatch_build[16];
1738
1739 /*
1740  * XXXRW: What to do here for !Mac OS X?
1741  */
1742 static void
1743 _dispatch_bug_init(void *context __attribute__((unused)))
1744 {
1745 #ifdef __APPLE__
1746         int mib[] = { CTL_KERN, KERN_OSVERSION };
1747         size_t bufsz = sizeof(_dispatch_build);
1748
1749         sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0);
1750 #else
1751         memset(_dispatch_build, 0, sizeof(_dispatch_build));
1752 #endif
1753 }
1754
1755 void
1756 _dispatch_bug(size_t line, long val)
1757 {
1758         static dispatch_once_t pred;
1759         static void *last_seen;
1760         void *ra = __builtin_return_address(0);
1761
1762         dispatch_once_f(&pred, NULL, _dispatch_bug_init);
1763         if (last_seen != ra) {
1764                 last_seen = ra;
1765                 _dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, (unsigned long)line, val);
1766         }
1767 }
1768
1769 void
1770 _dispatch_abort(size_t line, long val)
1771 {
1772         _dispatch_bug(line, val);
1773         abort();
1774 }
1775
1776 void
1777 _dispatch_log(const char *msg, ...)
1778 {
1779         va_list ap;
1780
1781         va_start(ap, msg);
1782
1783         _dispatch_logv(msg, ap);
1784
1785         va_end(ap);
1786 }
1787
1788 void
1789 _dispatch_logv(const char *msg, va_list ap)
1790 {
1791 #if DISPATCH_DEBUG
1792         static FILE *logfile, *tmp;
1793         char newbuf[strlen(msg) + 2];
1794         char path[PATH_MAX];
1795
1796         sprintf(newbuf, "%s\n", msg);
1797
1798         if (!logfile) {
1799                 snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", getpid());
1800                 tmp = fopen(path, "a");
1801                 assert(tmp);
1802                 if (!dispatch_atomic_cmpxchg(&logfile, NULL, tmp)) {
1803                         fclose(tmp);
1804                 } else {
1805                         struct timeval tv;
1806                         gettimeofday(&tv, NULL);
1807                         fprintf(logfile, "=== log file opened for %s[%u] at %ld.%06u ===\n",
1808                                         getprogname() ?: "", (unsigned int)getpid(), (long)tv.tv_sec, (unsigned int)tv.tv_usec);
1809                 }
1810         }
1811         vfprintf(logfile, newbuf, ap);
1812         fflush(logfile);
1813 #else
1814         vsyslog(LOG_NOTICE, msg, ap);
1815 #endif
1816 }
1817
1818 int
1819 _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset)
1820 {
1821         int r;
1822
1823         /* Workaround: 6269619 Not all signals can be delivered on any thread */
1824
1825         r = sigdelset(set, SIGILL);
1826         (void)dispatch_assume_zero(r);
1827         r = sigdelset(set, SIGTRAP);
1828         (void)dispatch_assume_zero(r);
1829 #if HAVE_DECL_SIGEMT
1830         r = sigdelset(set, SIGEMT);
1831         (void)dispatch_assume_zero(r);
1832 #endif
1833         r = sigdelset(set, SIGFPE);
1834         (void)dispatch_assume_zero(r);
1835         r = sigdelset(set, SIGBUS);
1836         (void)dispatch_assume_zero(r);
1837         r = sigdelset(set, SIGSEGV);
1838         (void)dispatch_assume_zero(r);
1839         r = sigdelset(set, SIGSYS);
1840         (void)dispatch_assume_zero(r);
1841         r = sigdelset(set, SIGPIPE);
1842         (void)dispatch_assume_zero(r);
1843
1844         return pthread_sigmask(how, set, oset);
1845 }
1846
1847 bool _dispatch_safe_fork = true;
1848
1849 void
1850 dispatch_atfork_prepare(void)
1851 {
1852 }
1853
1854 void
1855 dispatch_atfork_parent(void)
1856 {
1857 }
1858
1859 void
1860 dispatch_atfork_child(void)
1861 {
1862         void *crash = (void *)0x100;
1863         size_t i;
1864
1865         if (_dispatch_safe_fork) {
1866                 return;
1867         }
1868
1869         _dispatch_main_q.dq_items_head = crash;
1870         _dispatch_main_q.dq_items_tail = crash;
1871
1872         _dispatch_mgr_q.dq_items_head = crash;
1873         _dispatch_mgr_q.dq_items_tail = crash;
1874
1875         for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
1876                 _dispatch_root_queues[i].dq_items_head = crash;
1877                 _dispatch_root_queues[i].dq_items_tail = crash;
1878         }
1879 }
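/*
 * dispatch_atfork_child() poisons the queue heads so a child that touches
 * dispatch after fork() without exec'ing crashes deterministically instead of
 * deadlocking on state copied from the parent. The three handlers are
 * presumably registered during library initialization, roughly as follows
 * (illustrative; the actual registration happens elsewhere):
 *
 *	pthread_atfork(dispatch_atfork_prepare,
 *			dispatch_atfork_parent,
 *			dispatch_atfork_child);
 *
 * _dispatch_safe_fork starts out true and is presumably cleared once worker
 * threads have been spawned, at which point forking without exec is unsafe.
 */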
1880
1881 void
1882 dispatch_init_pthread(pthread_t pthr __attribute__((unused)))
1883 {
1884 }
1885
1886 const struct dispatch_queue_offsets_s dispatch_queue_offsets = {
1887         .dqo_version = 3,
1888         .dqo_label = offsetof(struct dispatch_queue_s, dq_label),
1889         .dqo_label_size = sizeof(_dispatch_main_q.dq_label),
1890         .dqo_flags = 0,
1891         .dqo_flags_size = 0,
1892         .dqo_width = offsetof(struct dispatch_queue_s, dq_width),
1893         .dqo_width_size = sizeof(_dispatch_main_q.dq_width),
1894         .dqo_serialnum = offsetof(struct dispatch_queue_s, dq_serialnum),
1895         .dqo_serialnum_size = sizeof(_dispatch_main_q.dq_serialnum),
1896         .dqo_running = offsetof(struct dispatch_queue_s, dq_running),
1897         .dqo_running_size = sizeof(_dispatch_main_q.dq_running),
1898 };
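/*
 * dispatch_queue_offsets is a small, versioned ABI contract that lets
 * out-of-process tools (debuggers, Instruments) find interesting queue fields
 * without the private struct definition. A hypothetical reader over a raw copy
 * of a queue's memory might do (illustrative only; the non-zero dqo_label_size
 * suggests dq_label is an inline buffer in this version):
 *
 *	const struct dispatch_queue_offsets_s *o = &dispatch_queue_offsets;
 *	if (o->dqo_version >= 3 && o->dqo_label_size) {
 *		const char *label = (const char *)raw_queue + o->dqo_label;
 *		printf("label: %.*s\n", (int)o->dqo_label_size, label);
 *	}
 */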
1899
1900 #ifdef __BLOCKS__
1901 void
1902 dispatch_after(dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t work)
1903 {
1904         // check 'when' before copying the block (avoids a pointless Block_copy)
1905         if (when == DISPATCH_TIME_FOREVER) {
1906 #if DISPATCH_DEBUG
1907                 DISPATCH_CLIENT_CRASH("dispatch_after() called with 'when' == infinity");
1908 #endif
1909                 return;
1910         }
1911         dispatch_after_f(when, queue, _dispatch_Block_copy(work), _dispatch_call_block_and_release);
1912 }
1913 #endif
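/*
 * Typical use of the Blocks variant above (illustrative):
 *
 *	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 3 * NSEC_PER_SEC),
 *			dispatch_get_main_queue(), ^{
 *		printf("runs roughly three seconds from now\n");
 *	});
 */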
1914
1915 struct _dispatch_after_time_s {
1916         void *datc_ctxt;
1917         void (*datc_func)(void *);
1918         dispatch_source_t ds;
1919 };
1920
1921 static void
1922 _dispatch_after_timer_cancel(void *ctxt)
1923 {
1924         struct _dispatch_after_time_s *datc = ctxt;
1925         dispatch_source_t ds = datc->ds;
1926
1927         free(datc);
1928         dispatch_release(ds);   // MUST NOT be _dispatch_release()
1929 }
1930
1931 static void
1932 _dispatch_after_timer_callback(void *ctxt)
1933 {
1934         struct _dispatch_after_time_s *datc = ctxt;
1935
1936         dispatch_assert(datc->datc_func);
1937         datc->datc_func(datc->datc_ctxt);
1938
1939         dispatch_source_cancel(datc->ds);
1940 }
1941
1942 DISPATCH_NOINLINE
1943 void
1944 dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, void (*func)(void *))
1945 {
1946         uint64_t delta;
1947         struct _dispatch_after_time_s *datc = NULL;
1948         dispatch_source_t ds = NULL;
1949
1950         if (when == DISPATCH_TIME_FOREVER) {
1951 #if DISPATCH_DEBUG
1952                 DISPATCH_CLIENT_CRASH("dispatch_after_f() called with 'when' == infinity");
1953 #endif
1954                 return;
1955         }
1956
1957         delta = _dispatch_timeout(when);
1958         if (delta == 0) {
1959                 return dispatch_async_f(queue, ctxt, func);
1960         }
1961
1962         // this function should be optimized to not use a dispatch source
1963         ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue);
1964         dispatch_assert(ds);
1965
1966         datc = malloc(sizeof(struct _dispatch_after_time_s));
1967         dispatch_assert(datc);
1968         datc->datc_ctxt = ctxt;
1969         datc->datc_func = func;
1970         datc->ds = ds;
1971
1972         dispatch_set_context(ds, datc);
1973         dispatch_source_set_event_handler_f(ds, _dispatch_after_timer_callback);
1974         dispatch_source_set_cancel_handler_f(ds, _dispatch_after_timer_cancel);
1975         dispatch_source_set_timer(ds, when, 0, 0);
1976         dispatch_resume(ds);
1977 }
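/*
 * The function-pointer variant wraps 'ctxt'/'func' in a one-shot timer source:
 * the event handler runs the callback and cancels the source, and the cancel
 * handler frees the bookkeeping struct and releases the source. Typical use
 * (illustrative; 'expire_cache' and 'cache' are hypothetical):
 *
 *	static void expire_cache(void *ctxt);
 *
 *	dispatch_after_f(dispatch_time(DISPATCH_TIME_NOW, 500 * NSEC_PER_MSEC),
 *			dispatch_get_main_queue(), cache, expire_cache);
 */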