Tizen 2.1 base
[platform/upstream/gcd.git] / dispatch-1.0 / src / semaphore.c
1 /*
2  * Copyright (c) 2008-2009 Apple Inc. All rights reserved.
3  *
4  * @APPLE_APACHE_LICENSE_HEADER_START@
5  * 
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  * 
10  *     http://www.apache.org/licenses/LICENSE-2.0
11  * 
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  * 
18  * @APPLE_APACHE_LICENSE_HEADER_END@
19  */
20
21 #include "internal.h"
22
// semaphores are too fundamental to use the dispatch_assume*() macros
#if USE_MACH_SEM
// Crash the process if a Mach semaphore call returned a non-zero
// kern_return_t: any failure here means the group/semaphore state
// machine itself is broken, not the caller.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do {	\
		if (x) {	\
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		}	\
	} while (0)
#endif
#if USE_POSIX_SEM
// Same idea for POSIX semaphores: sem_* calls signal failure with -1.
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do {				\
		if ((x) == -1) {					\
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		}							\
	} while (0)
#endif
38
// Vtable type for semaphore objects; carries only the common dispatch
// object hooks (dispose/debug/etc.) declared by DISPATCH_VTABLE_HEADER.
struct dispatch_semaphore_vtable_s {
	DISPATCH_VTABLE_HEADER(dispatch_semaphore_s);
};

static void _dispatch_semaphore_dispose(dispatch_semaphore_t dsema);
static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz);
static long _dispatch_group_wake(dispatch_semaphore_t dsema);

// Shared vtable instance for every semaphore -- and for groups, which are
// semaphores underneath (see dispatch_group_create()).
const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable = {
	.do_type = DISPATCH_SEMAPHORE_TYPE,
	.do_kind = "semaphore",
	.do_dispose = _dispatch_semaphore_dispose,
	.do_debug = _dispatch_semaphore_debug,
};
53
54 dispatch_semaphore_t
55 _dispatch_get_thread_semaphore(void)
56 {
57         dispatch_semaphore_t dsema;
58         
59         dsema = fastpath(_dispatch_thread_getspecific(dispatch_sema4_key));
60         if (!dsema) {
61                 while (!(dsema = dispatch_semaphore_create(0))) {
62                         sleep(1);
63                 }
64         }
65         _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
66         return dsema;
67 }
68
69 void
70 _dispatch_put_thread_semaphore(dispatch_semaphore_t dsema)
71 {
72         dispatch_semaphore_t old_sema = _dispatch_thread_getspecific(dispatch_sema4_key);
73         _dispatch_thread_setspecific(dispatch_sema4_key, dsema);
74         if (old_sema) {
75                 dispatch_release(old_sema);
76         }
77 }
78
79 dispatch_group_t
80 dispatch_group_create(void)
81 {
82         return (dispatch_group_t)dispatch_semaphore_create(LONG_MAX);
83 }
84
85 dispatch_semaphore_t
86 dispatch_semaphore_create(long value)
87 {
88         dispatch_semaphore_t dsema;
89 #if USE_POSIX_SEM
90         int ret;
91 #endif
92         
93         // If the internal value is negative, then the absolute of the value is
94         // equal to the number of waiting threads. Therefore it is bogus to
95         // initialize the semaphore with a negative value.
96         if (value < 0) {
97                 return NULL;
98         }
99         
100         dsema = calloc(1, sizeof(struct dispatch_semaphore_s));
101         
102         if (fastpath(dsema)) {
103                 dsema->do_vtable = &_dispatch_semaphore_vtable;
104                 dsema->do_next = DISPATCH_OBJECT_LISTLESS;
105                 dsema->do_ref_cnt = 1;
106                 dsema->do_xref_cnt = 1;
107                 dsema->do_targetq = dispatch_get_global_queue(0, 0);
108                 dsema->dsema_value = value;
109                 dsema->dsema_orig = value;
110 #if USE_POSIX_SEM
111                 ret = sem_init(&dsema->dsema_sem, 0, 0);
112                 (void)dispatch_assume_zero(ret);
113 #endif
114         }
115         
116         return dsema;
117 }
118
#if USE_MACH_SEM
// Lazily allocate the Mach semaphore port backing *s4. Safe under
// concurrency: every racing thread creates its own port, but only the
// cmpxchg winner publishes; losers destroy their port and use the winner's.
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	if (*s4) {
		return;
	}
	
	// lazily allocate the semaphore port
	
	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.
	
	// Creation must eventually succeed; retry forever on failure.
	while (dispatch_assume_zero(kr = semaphore_create(mach_task_self(), &tmp, SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}
	
	// Publish our port only if no other thread beat us to it.
	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}

	// Mach ports do not survive fork(); record that fork is now unsafe.
	_dispatch_safe_fork = false;
}
#endif
150
DISPATCH_NOINLINE
static long
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	// Slow path of dispatch_semaphore_wait(): the fast path has already
	// decremented dsema_value below zero, so this thread must block on the
	// kernel semaphore -- or, if the timeout fires first, undo the fast
	// path's decrement and report a timeout (KERN_OPERATION_TIMED_OUT on
	// Mach, -1/ETIMEDOUT on POSIX). Returns 0 on a successful wait.
#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;
	uint64_t nsec;
#endif
#if USE_POSIX_SEM
	struct timespec _timeout;
	int ret;
#endif
	long orig;
	
again:
	// Mach semaphores appear to sometimes spuriously wake up.  Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	while ((orig = dsema->dsema_sent_ksignals)) {
		if (dispatch_atomic_cmpxchg(&dsema->dsema_sent_ksignals, orig, orig - 1)) {
			return 0;
		}
	}

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
#endif

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1;  /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	// The switch exploits fallthrough: a real timeout value falls into the
	// DISPATCH_TIME_NOW undo logic only after the timed wait expires.
	switch (timeout) {
	default:
#if USE_MACH_SEM
		do {
			// timeout() already calculates relative time left
			nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
#endif
#if USE_POSIX_SEM
		do {
			_timeout = _dispatch_timeout_ts(timeout);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem,
			    &_timeout));
		} while (ret == -1 && errno == EINTR);	// retry if interrupted

		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
#endif
		// Fall through and try to undo what the fast path did to dsema->dsema_value
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_value) < 0) {
			if (dispatch_atomic_cmpxchg(&dsema->dsema_value, orig, orig + 1)) {
#if USE_MACH_SEM
				return KERN_OPERATION_TIMED_OUT;
#endif
#if USE_POSIX_SEM
				errno = ETIMEDOUT;
				return -1;
#endif
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
#if USE_MACH_SEM
		do {
			kr = semaphore_wait(dsema->dsema_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#endif
#if USE_POSIX_SEM
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret != 0);	// loop covers EINTR; sem_wait only returns 0 or -1
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
		break;
	}

	// Re-check sent_ksignals before claiming the wakeup (spurious-wake guard).
	goto again;
}
249
DISPATCH_NOINLINE
void
dispatch_group_enter(dispatch_group_t dg)
{
	// Entering a group is a semaphore wait with an infinite timeout:
	// decrement the group's value and block in the slow path only when it
	// goes negative (i.e. the group was already at capacity).
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
#if USE_APPLE_SEMAPHORE_OPTIMIZATIONS && defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
	// This assumes:
	// 1) Way too much about the optimizer of GCC.
	// 2) There will never be more than LONG_MAX threads.
	//    Therefore: no overflow detection
	// NOTE: the asm embeds a "ret" -- when the decrement stays
	// non-negative, the function returns from inside the asm and the
	// slow-path call below never executes.
	asm(
#ifdef __LP64__ 
		"lock decq      %0\n\t"
#else
		"lock decl      %0\n\t"
#endif
		"js     1f\n\t"
		"ret\n\t"
		"1:"
		: "+m" (dsema->dsema_value)
		:
		: "cc"
	);
	_dispatch_semaphore_wait_slow(dsema, DISPATCH_TIME_FOREVER);
#else
	dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER);
#endif
}
278
DISPATCH_NOINLINE
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	// Decrement the semaphore. A non-negative result means the wait
	// succeeds immediately (returns 0); otherwise block in the slow path
	// until signaled or until timeout expires.
#if USE_APPLE_SEMAPHORE_OPTIMIZATIONS && defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
	// This assumes:
	// 1) Way too much about the optimizer of GCC.
	// 2) There will never be more than LONG_MAX threads.
	//    Therefore: no overflow detection
	// NOTE: on the fast path the asm zeroes eax and executes "ret"
	// directly, so the function returns 0 without reaching the call below.
	asm(
#ifdef __LP64__ 
		"lock decq      %0\n\t"
#else
		"lock decl      %0\n\t"
#endif
		"js     1f\n\t"
		"xor    %%eax, %%eax\n\t"
		"ret\n\t"
		"1:"
		: "+m" (dsema->dsema_value)
		:
		: "cc"
	);
#else
	if (dispatch_atomic_dec(&dsema->dsema_value) >= 0) {
		return 0;
	}
#endif
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}
309
DISPATCH_NOINLINE
static long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Slow path of dispatch_semaphore_signal(): the fast-path increment
	// found a waiter (value was non-positive), so wake one thread via the
	// kernel semaphore. Always returns 1 ("a thread was woken").
#if USE_POSIX_SEM
	int ret;
#endif
#if USE_MACH_SEM
	kern_return_t kr;
	
	_dispatch_semaphore_create_port(&dsema->dsema_port);
#endif

	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);
	
	dispatch_atomic_inc(&dsema->dsema_sent_ksignals);
	
#if USE_MACH_SEM
	kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#endif
#if USE_POSIX_SEM
	ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	
	return 1;
}
345
346 void
347 dispatch_group_leave(dispatch_group_t dg)
348 {
349         dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
350
351         dispatch_semaphore_signal(dsema);
352
353         if (dsema->dsema_value == dsema->dsema_orig) {
354                 _dispatch_group_wake(dsema);
355         }
356 }
357
DISPATCH_NOINLINE
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	// Increment the semaphore. A positive result means nobody was waiting
	// (returns 0); otherwise a waiter must be woken via the slow path.
	// overflow detection
	// this assumes way too much about the optimizer of GCC
	// NOTE: "int $4" raises the x86 overflow exception if the increment
	// overflowed; the "ret" returns 0 directly on the fast path.
#if USE_APPLE_SEMAPHORE_OPTIMIZATIONS && defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) && !defined(__llvm__)
	asm(
#ifdef __LP64__ 
		"lock incq      %0\n\t"
#else
		"lock incl      %0\n\t"
#endif
		"jo     1f\n\t"
		"jle    2f\n\t"
		"xor    %%eax, %%eax\n\t"
		"ret\n\t"
		"1:\n\t"
		"int    $4\n\t"
		"2:"
		: "+m" (dsema->dsema_value)
		:
		: "cc"
	);
#else
	if (dispatch_atomic_inc(&dsema->dsema_value) > 0) {
		return 0;
	}
#endif
	return _dispatch_semaphore_signal_slow(dsema);
}
389
390 DISPATCH_NOINLINE
391 long
392 _dispatch_group_wake(dispatch_semaphore_t dsema)
393 {
394         struct dispatch_sema_notify_s *tmp, *head = dispatch_atomic_xchg(&dsema->dsema_notify_head, NULL);
395         long rval = dispatch_atomic_xchg(&dsema->dsema_group_waiters, 0);
396         bool do_rel = head;
397 #if USE_MACH_SEM
398         long kr;
399 #endif
400 #if USE_POSIX_SEM
401         int ret;
402 #endif
403
404         // wake any "group" waiter or notify blocks
405         
406         if (rval) {
407 #if USE_MACH_SEM
408                 _dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
409                 do {
410                         kr = semaphore_signal(dsema->dsema_waiter_port);
411                         DISPATCH_SEMAPHORE_VERIFY_KR(kr);
412                 } while (--rval);
413 #endif
414 #if USE_POSIX_SEM
415                 do {
416                         ret = sem_post(&dsema->dsema_sem);
417                         DISPATCH_SEMAPHORE_VERIFY_RET(ret);
418                 } while (--rval);
419 #endif
420         }
421         while (head) {
422                 dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
423                 _dispatch_release(head->dsn_queue);
424                 do {
425                         tmp = head->dsn_next;
426                 } while (!tmp && !dispatch_atomic_cmpxchg(&dsema->dsema_notify_tail, head, NULL));
427                 free(head);
428                 head = tmp;
429         }
430         if (do_rel) {
431                 _dispatch_release(dsema);
432         }
433         return 0;
434 }
435
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	// Slow path of dispatch_group_wait(): register this thread as a group
	// waiter and block on the waiter semaphore until the group empties or
	// the timeout fires. Returns 0 when the group emptied, otherwise the
	// platform timed-out code (KERN_OPERATION_TIMED_OUT / -1+ETIMEDOUT).
#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;
	uint64_t nsec;
#endif
#if USE_POSIX_SEM
	struct timespec _timeout;
	int ret;
#endif
	long orig;
	
again:
	// check before we cause another signal to be sent by incrementing dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up.  Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	dispatch_atomic_inc(&dsema->dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
#endif
	
	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1;  /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.
	
	// Same fallthrough structure as _dispatch_semaphore_wait_slow(): a
	// finite timeout falls into the "undo" logic only after it expires.
	switch (timeout) {
	default:
#if USE_MACH_SEM
		do {
			nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port, _timeout));
		} while (kr == KERN_ABORTED);
		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
#endif
#if USE_POSIX_SEM
		do {
			_timeout = _dispatch_timeout_ts(timeout);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem,
			    &_timeout));
		} while (ret == -1 && errno == EINTR);	// retry if interrupted

		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
#endif
		// Fall through and try to undo the earlier change to dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg(&dsema->dsema_group_waiters, orig, orig - 1)) {
#if USE_MACH_SEM
				return KERN_OPERATION_TIMED_OUT;
#endif
#if USE_POSIX_SEM
				errno = ETIMEDOUT;
				return -1;
#endif
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
#if USE_MACH_SEM
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#endif
#if USE_POSIX_SEM
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);	// retry if interrupted
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
		break;
	}

	// Loop: re-check whether the group emptied while we were waking up.
	goto again;
}
536
537 long
538 dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout)
539 {
540         dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
541
542         if (dsema->dsema_value == dsema->dsema_orig) {
543                 return 0;
544         }
545         if (timeout == 0) {
546 #if USE_MACH_SEM
547                 return KERN_OPERATION_TIMED_OUT;
548 #endif
549 #if USE_POSIX_SEM
550                 errno = ETIMEDOUT;
551                 return (-1);
552 #endif
553         }
554         return _dispatch_group_wait_slow(dsema, timeout);
555 }
556
#ifdef __BLOCKS__
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db)
{
	// Block-based wrapper: copy the block to the heap, then delegate to
	// the function variant with a trampoline that runs and releases it.
	dispatch_block_t heap_copy = _dispatch_Block_copy(db);
	dispatch_group_notify_f(dg, dq, heap_copy, _dispatch_call_block_and_release);
}
#endif
564
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *))
{
	// Arrange for func(ctxt) to be submitted to dq once the group empties.
	// The pending notifications form a lock-free singly-linked queue:
	// producers atomically swap the tail; _dispatch_group_wake() drains
	// from the head.
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	struct dispatch_sema_notify_s *dsn, *prev;

	// FIXME -- this should be updated to use the continuation cache
	while (!(dsn = malloc(sizeof(*dsn)))) {
		sleep(1);	// out of memory; back off and retry
	}

	dsn->dsn_next = NULL;
	dsn->dsn_queue = dq;
	dsn->dsn_ctxt = ctxt;
	dsn->dsn_func = func;
	_dispatch_retain(dq);	// released by _dispatch_group_wake()

	prev = dispatch_atomic_xchg(&dsema->dsema_notify_tail, dsn);
	if (fastpath(prev)) {
		// Queue was non-empty: link after the previous tail.
		prev->dsn_next = dsn;
	} else {
		// Queue was empty: this node is also the head, and the group
		// holds a reference while notifications are pending (dropped
		// in _dispatch_group_wake()).
		_dispatch_retain(dg);
		dsema->dsema_notify_head = dsn;
		if (dsema->dsema_value == dsema->dsema_orig) {
			// Group is already empty: fire the notification now.
			_dispatch_group_wake(dsema);
		}
	}
}
593
594 void
595 _dispatch_semaphore_dispose(dispatch_semaphore_t dsema)
596 {
597 #if USE_MACH_SEM
598         kern_return_t kr;
599 #endif
600 #if USE_POSIX_SEM
601         int ret;
602 #endif
603         
604         if (dsema->dsema_value < dsema->dsema_orig) {
605                 DISPATCH_CLIENT_CRASH("Semaphore/group object deallocated while in use");
606         }
607         
608 #if USE_MACH_SEM
609         if (dsema->dsema_port) {
610                 kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
611                 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
612         }
613         if (dsema->dsema_waiter_port) {
614                 kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
615                 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
616         }
617 #endif
618 #if USE_POSIX_SEM
619         ret = sem_destroy(&dsema->dsema_sem);
620         DISPATCH_SEMAPHORE_VERIFY_RET(ret);
621 #endif
622         
623         _dispatch_dispose(dsema);
624 }
625
626 size_t
627 _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz)
628 {
629         size_t offset = 0;
630         offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(dsema), dsema);
631         offset += dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
632 #if USE_MACH_SEM
633         offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
634             dsema->dsema_port);
635 #endif
636         offset += snprintf(&buf[offset], bufsiz - offset,
637             "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
638         return offset;
639 }
640
#ifdef __BLOCKS__
void
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db)
{
	// Block-based wrapper: copy the block to the heap, then delegate to
	// the function variant with a trampoline that runs and releases it.
	dispatch_block_t heap_copy = _dispatch_Block_copy(db);
	dispatch_group_async_f(dg, dq, heap_copy, _dispatch_call_block_and_release);
}
#endif
648
649 DISPATCH_NOINLINE
650 void
651 dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *))
652 {
653         dispatch_continuation_t dc;
654
655         _dispatch_retain(dg);
656         dispatch_group_enter(dg);
657
658         dc = _dispatch_continuation_alloc_cacheonly() ?: _dispatch_continuation_alloc_from_heap();
659
660         dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT|DISPATCH_OBJ_GROUP_BIT);
661         dc->dc_func = func;
662         dc->dc_ctxt = ctxt;
663         dc->dc_group = dg;
664
665         _dispatch_queue_push(dq, dc);
666 }