/*
 *
 * Copyright 2015-2016 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/surface/completion_queue.h"

#include <inttypes.h>

#include <string>
#include <vector>

#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"

#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/event_string.h"

grpc_core::TraceFlag grpc_trace_operation_failures(false, "op_failure");
grpc_core::DebugOnlyTraceFlag grpc_trace_pending_tags(false, "pending_tags");
grpc_core::DebugOnlyTraceFlag grpc_trace_cq_refcount(false, "cq_refcount");

namespace {

// Specifies a cq thread local cache. The first event that occurs on a thread
// with a cq cache will go into that cache, and will only be returned on the
// thread that initialized the cache.
// NOTE: Only one event will ever be cached.
GPR_TLS_DECL(g_cached_event);
GPR_TLS_DECL(g_cached_cq);
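
// A minimal usage sketch of this cache (illustrative only; the two functions
// referenced here are the ones defined later in this file):
//
//   grpc_completion_queue_thread_local_cache_init(cq);
//   /* ... an op completes on this thread and lands in the cache ... */
//   void* tag;
//   int ok;
//   if (grpc_completion_queue_thread_local_cache_flush(cq, &tag, &ok)) {
//     /* 'tag' and 'ok' describe the single cached completion */
//   }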

struct plucker {
  grpc_pollset_worker** worker;
  void* tag;
};
struct cq_poller_vtable {
  bool can_get_pollset;
  bool can_listen;
  size_t (*size)(void);
  void (*init)(grpc_pollset* pollset, gpr_mu** mu);
  grpc_error* (*kick)(grpc_pollset* pollset,
                      grpc_pollset_worker* specific_worker);
  grpc_error* (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker,
                      grpc_millis deadline);
  void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure);
  void (*destroy)(grpc_pollset* pollset);
};

typedef struct non_polling_worker {
  gpr_cv cv;
  bool kicked;
  struct non_polling_worker* next;
  struct non_polling_worker* prev;
} non_polling_worker;

struct non_polling_poller {
  gpr_mu mu;
  bool kicked_without_poller;
  non_polling_worker* root;
  grpc_closure* shutdown;
};

size_t non_polling_poller_size(void) { return sizeof(non_polling_poller); }

void non_polling_poller_init(grpc_pollset* pollset, gpr_mu** mu) {
  non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
  gpr_mu_init(&npp->mu);
  *mu = &npp->mu;
}

void non_polling_poller_destroy(grpc_pollset* pollset) {
  non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
  gpr_mu_destroy(&npp->mu);
}

grpc_error* non_polling_poller_work(grpc_pollset* pollset,
                                    grpc_pollset_worker** worker,
                                    grpc_millis deadline) {
  non_polling_poller* npp = reinterpret_cast<non_polling_poller*>(pollset);
  if (npp->shutdown) return GRPC_ERROR_NONE;
  if (npp->kicked_without_poller) {
    npp->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }
  non_polling_worker w;
  gpr_cv_init(&w.cv);
  if (worker != nullptr) *worker = reinterpret_cast<grpc_pollset_worker*>(&w);
  if (npp->root == nullptr) {
    npp->root = w.next = w.prev = &w;
  } else {
    w.next = npp->root;
    w.prev = w.next->prev;
    w.next->prev = w.prev->next = &w;
  }
  w.kicked = false;
  gpr_timespec deadline_ts =
      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC);
  while (!npp->shutdown && !w.kicked &&
         !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts)) {
  }
  grpc_core::ExecCtx::Get()->InvalidateNow();
  if (&w == npp->root) {
    npp->root = w.next;
    if (&w == npp->root) {
      if (npp->shutdown) {
        grpc_core::ExecCtx::Run(DEBUG_LOCATION, npp->shutdown, GRPC_ERROR_NONE);
      }
      npp->root = nullptr;
    }
  }
  w.next->prev = w.prev;
  w.prev->next = w.next;
  gpr_cv_destroy(&w.cv);
  if (worker != nullptr) *worker = nullptr;
  return GRPC_ERROR_NONE;
}

grpc_error* non_polling_poller_kick(grpc_pollset* pollset,
                                    grpc_pollset_worker* specific_worker) {
  non_polling_poller* p = reinterpret_cast<non_polling_poller*>(pollset);
  if (specific_worker == nullptr) {
    specific_worker = reinterpret_cast<grpc_pollset_worker*>(p->root);
  }
  if (specific_worker != nullptr) {
    non_polling_worker* w =
        reinterpret_cast<non_polling_worker*>(specific_worker);
    if (!w->kicked) {
      w->kicked = true;
      gpr_cv_signal(&w->cv);
    }
  } else {
    p->kicked_without_poller = true;
  }
  return GRPC_ERROR_NONE;
}

void non_polling_poller_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  non_polling_poller* p = reinterpret_cast<non_polling_poller*>(pollset);
  GPR_ASSERT(closure != nullptr);
  p->shutdown = closure;
  if (p->root == nullptr) {
    grpc_core::ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_NONE);
  } else {
    non_polling_worker* w = p->root;
    do {
      gpr_cv_signal(&w->cv);
      w = w->next;
    } while (w != p->root);
  }
}

const cq_poller_vtable g_poller_vtable_by_poller_type[] = {
    /* GRPC_CQ_DEFAULT_POLLING */
    {true, true, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick,
     grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy},
    /* GRPC_CQ_NON_LISTENING */
    {true, false, grpc_pollset_size, grpc_pollset_init, grpc_pollset_kick,
     grpc_pollset_work, grpc_pollset_shutdown, grpc_pollset_destroy},
    /* GRPC_CQ_NON_POLLING */
    {false, false, non_polling_poller_size, non_polling_poller_init,
     non_polling_poller_kick, non_polling_poller_work,
     non_polling_poller_shutdown, non_polling_poller_destroy},
};
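
// The table is indexed by grpc_cq_polling_type; a sketch of the lookup done
// in grpc_completion_queue_create_internal below:
//
//   const cq_poller_vtable* poller_vtable =
//       &g_poller_vtable_by_poller_type[polling_type];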

}  // namespace

struct cq_vtable {
  grpc_cq_completion_type cq_completion_type;
  size_t data_size;
  void (*init)(void* data,
               grpc_experimental_completion_queue_functor* shutdown_callback);
  void (*shutdown)(grpc_completion_queue* cq);
  void (*destroy)(void* data);
  bool (*begin_op)(grpc_completion_queue* cq, void* tag);
  void (*end_op)(grpc_completion_queue* cq, void* tag, grpc_error* error,
                 void (*done)(void* done_arg, grpc_cq_completion* storage),
                 void* done_arg, grpc_cq_completion* storage, bool internal);
  grpc_event (*next)(grpc_completion_queue* cq, gpr_timespec deadline,
                     void* reserved);
  grpc_event (*pluck)(grpc_completion_queue* cq, void* tag,
                      gpr_timespec deadline, void* reserved);
};

namespace {

/* Queue that holds the cq_completion_events. Internally uses
 * MultiProducerSingleConsumerQueue (a lock-free multi-producer
 * single-consumer queue). It uses a queue_lock to support multiple consumers.
 * Only used in completion queues whose completion_type is GRPC_CQ_NEXT */
class CqEventQueue {
 public:
  CqEventQueue() = default;
  ~CqEventQueue() = default;

  /* Note: The counter is not incremented/decremented atomically with push/pop.
   * The count is only eventually consistent */
  intptr_t num_items() const {
    return num_queue_items_.Load(grpc_core::MemoryOrder::RELAXED);
  }

  bool Push(grpc_cq_completion* c);
  grpc_cq_completion* Pop();

 private:
  /* Spinlock to serialize consumers, i.e. Pop() operations */
  gpr_spinlock queue_lock_ = GPR_SPINLOCK_INITIALIZER;

  grpc_core::MultiProducerSingleConsumerQueue queue_;

  /* A lazy counter of the number of items in the queue. This is NOT atomically
     incremented/decremented along with push/pop operations and hence is only
     eventually consistent */
  grpc_core::Atomic<intptr_t> num_queue_items_{0};
};
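
// Sketch of the producer/consumer contract, as assumed from the call sites in
// this file: Push() returns true when the queue was previously empty, which
// cq_end_op_for_next uses to decide whether to kick the poller, and Pop() may
// transiently return nullptr even when items remain, so callers re-check
// num_items() before treating the queue as drained.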

struct cq_next_data {
  ~cq_next_data() {
    GPR_ASSERT(queue.num_items() == 0);

    if (pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 0) {
      gpr_log(GPR_ERROR, "Destroying CQ without draining it fully.");
    }
  }

  /** Completed events for completion-queues of type GRPC_CQ_NEXT */
  CqEventQueue queue;

  /** Counter of how many things have ever been queued on this completion
      queue; useful for avoiding locks to check the queue */
  grpc_core::Atomic<intptr_t> things_queued_ever{0};

  /** Number of outstanding events (+1 if not shut down).
      The initial count is dropped by grpc_completion_queue_shutdown. */
  grpc_core::Atomic<intptr_t> pending_events{1};

  /** false initially, true once shutdown has been initiated */
  bool shutdown_called = false;
};
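
// A sketch of the pending_events lifecycle that the code below relies on
// (descriptive only, no new behavior):
//   - starts at 1; the extra count stands in for "not yet shut down"
//   - cq_begin_op_for_next(): IncrementIfNonzero() (fails once the count is 0)
//   - cq_end_op_for_next():   FetchSub(1); whoever reaches 0 finishes shutdown
//   - grpc_completion_queue_shutdown() drops the initial count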

struct cq_pluck_data {
  cq_pluck_data() {
    completed_tail = &completed_head;
    completed_head.next = reinterpret_cast<uintptr_t>(completed_tail);
  }

  ~cq_pluck_data() {
    GPR_ASSERT(completed_head.next ==
               reinterpret_cast<uintptr_t>(&completed_head));

    if (pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 0) {
      gpr_log(GPR_ERROR, "Destroying CQ without draining it fully.");
    }
  }

  /** Completed events for completion-queues of type GRPC_CQ_PLUCK */
  grpc_cq_completion completed_head;
  grpc_cq_completion* completed_tail;

  /** Number of pending events (+1 if we're not shut down).
      The initial count is dropped by grpc_completion_queue_shutdown. */
  grpc_core::Atomic<intptr_t> pending_events{1};

  /** Counter of how many things have ever been queued on this completion
      queue; useful for avoiding locks to check the queue */
  grpc_core::Atomic<intptr_t> things_queued_ever{0};

  /** false initially, true once we have completed shutting down */
  /* TODO: (sreek) This is not needed since (shutdown == true) if and only if
   * (pending_events == 0). So consider removing this in the future and using
   * pending_events instead */
  grpc_core::Atomic<bool> shutdown{false};

  /** false initially, true once shutdown has been initiated */
  bool shutdown_called = false;

  int num_pluckers = 0;
  plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
};

struct cq_callback_data {
  explicit cq_callback_data(
      grpc_experimental_completion_queue_functor* shutdown_callback)
      : shutdown_callback(shutdown_callback) {}

  ~cq_callback_data() {
    if (pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 0) {
      gpr_log(GPR_ERROR, "Destroying CQ without draining it fully.");
    }
  }

  /** No actual completed events queue, unlike other types */

  /** Number of pending events (+1 if we're not shut down).
      The initial count is dropped by grpc_completion_queue_shutdown. */
  grpc_core::Atomic<intptr_t> pending_events{1};

  /** false initially, true once shutdown has been initiated */
  bool shutdown_called = false;

  /** A callback that gets invoked when the CQ completes shutdown */
  grpc_experimental_completion_queue_functor* shutdown_callback;
};

}  // namespace

/* Completion queue structure */
struct grpc_completion_queue {
  /** Once owning_refs drops to zero, we will destroy the cq */
  grpc_core::RefCount owning_refs;

  gpr_mu* mu;

  const cq_vtable* vtable;
  const cq_poller_vtable* poller_vtable;

#ifndef NDEBUG
  void** outstanding_tags;
  size_t outstanding_tag_count;
  size_t outstanding_tag_capacity;
#endif

  grpc_closure pollset_shutdown_done;
  int num_polls;
};

/* Forward declarations */
static void cq_finish_shutdown_next(grpc_completion_queue* cq);
static void cq_finish_shutdown_pluck(grpc_completion_queue* cq);
static void cq_finish_shutdown_callback(grpc_completion_queue* cq);
static void cq_shutdown_next(grpc_completion_queue* cq);
static void cq_shutdown_pluck(grpc_completion_queue* cq);
static void cq_shutdown_callback(grpc_completion_queue* cq);

static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag);
static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag);
static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* tag);

// A cq_end_op function is called when an operation on a given CQ with
// a given tag has completed. The storage argument is a reference to the
// space reserved for this completion as it is placed into the corresponding
// queue. The done argument is a callback that will be invoked when it is
// safe to free up that storage. The storage MUST NOT be freed until the
// done callback is invoked.
static void cq_end_op_for_next(
    grpc_completion_queue* cq, void* tag, grpc_error* error,
    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
    grpc_cq_completion* storage, bool internal);

static void cq_end_op_for_pluck(
    grpc_completion_queue* cq, void* tag, grpc_error* error,
    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
    grpc_cq_completion* storage, bool internal);

static void cq_end_op_for_callback(
    grpc_completion_queue* cq, void* tag, grpc_error* error,
    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
    grpc_cq_completion* storage, bool internal);
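
// For illustration, a hypothetical 'done' callback that satisfies the
// contract above (the name and the free scheme are invented; real callers
// supply their own storage management):
//
//   static void example_done(void* done_arg, grpc_cq_completion* storage) {
//     /* Safe point to release 'storage'; it is off every queue by now. */
//     gpr_free(storage);
//   }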

static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
                          void* reserved);

static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
                           gpr_timespec deadline, void* reserved);

// Note that cq_init_next and cq_init_pluck do not use the shutdown_callback
static void cq_init_next(
    void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
static void cq_init_pluck(
    void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
static void cq_init_callback(
    void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
static void cq_destroy_next(void* data);
static void cq_destroy_pluck(void* data);
static void cq_destroy_callback(void* data);

/* Completion queue vtables based on the completion-type */
static const cq_vtable g_cq_vtable[] = {
    /* GRPC_CQ_NEXT */
    {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
     cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next,
     nullptr},
    /* GRPC_CQ_PLUCK */
    {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
     cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, nullptr,
     cq_pluck},
    /* GRPC_CQ_CALLBACK */
    {GRPC_CQ_CALLBACK, sizeof(cq_callback_data), cq_init_callback,
     cq_shutdown_callback, cq_destroy_callback, cq_begin_op_for_callback,
     cq_end_op_for_callback, nullptr, nullptr},
};

#define DATA_FROM_CQ(cq) ((void*)((cq) + 1))
#define POLLSET_FROM_CQ(cq) \
  ((grpc_pollset*)((cq)->vtable->data_size + (char*)DATA_FROM_CQ(cq)))
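
/* Memory layout assumed by the two macros above (it is the single gpr_zalloc
   block built in grpc_completion_queue_create_internal below):

     [grpc_completion_queue][per-completion-type data][pollset]
     ^cq                    ^DATA_FROM_CQ(cq)         ^POLLSET_FROM_CQ(cq)
*/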

grpc_core::TraceFlag grpc_cq_pluck_trace(false, "queue_pluck");

#define GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, event)     \
  do {                                                   \
    if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) &&       \
        (GRPC_TRACE_FLAG_ENABLED(grpc_cq_pluck_trace) || \
         (event)->type != GRPC_QUEUE_TIMEOUT)) {         \
      gpr_log(GPR_INFO, "RETURN_EVENT[%p]: %s", cq,      \
              grpc_event_string(event).c_str());         \
    }                                                    \
  } while (0)

static void on_pollset_shutdown_done(void* cq, grpc_error* error);

void grpc_cq_global_init() {
  gpr_tls_init(&g_cached_event);
  gpr_tls_init(&g_cached_cq);
}

void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
  if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == nullptr) {
    gpr_tls_set(&g_cached_event, (intptr_t)0);
    gpr_tls_set(&g_cached_cq, (intptr_t)cq);
  }
}

int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
                                                   void** tag, int* ok) {
  grpc_cq_completion* storage =
      (grpc_cq_completion*)gpr_tls_get(&g_cached_event);
  int ret = 0;
  if (storage != nullptr &&
      (grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
    *tag = storage->tag;
    grpc_core::ExecCtx exec_ctx;
    *ok = (storage->next & static_cast<uintptr_t>(1)) == 1;
    storage->done(storage->done_arg, storage);
    ret = 1;
    cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
    if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
      GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
      gpr_mu_lock(cq->mu);
      cq_finish_shutdown_next(cq);
      gpr_mu_unlock(cq->mu);
      GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
    }
  }
  gpr_tls_set(&g_cached_event, (intptr_t)0);
  gpr_tls_set(&g_cached_cq, (intptr_t)0);

  return ret;
}

bool CqEventQueue::Push(grpc_cq_completion* c) {
  queue_.Push(
      reinterpret_cast<grpc_core::MultiProducerSingleConsumerQueue::Node*>(c));
  return num_queue_items_.FetchAdd(1, grpc_core::MemoryOrder::RELAXED) == 0;
}

grpc_cq_completion* CqEventQueue::Pop() {
  grpc_cq_completion* c = nullptr;

  if (gpr_spinlock_trylock(&queue_lock_)) {
    GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES();

    bool is_empty = false;
    c = reinterpret_cast<grpc_cq_completion*>(queue_.PopAndCheckEnd(&is_empty));
    gpr_spinlock_unlock(&queue_lock_);

    if (c == nullptr && !is_empty) {
      GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES();
    }
  } else {
    GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES();
  }

  if (c) {
    num_queue_items_.FetchSub(1, grpc_core::MemoryOrder::RELAXED);
  }

  return c;
}
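
// Design note: Pop() uses a trylock rather than a blocking lock so a consumer
// never parks behind another consumer; a failed trylock, or a transient
// nullptr from PopAndCheckEnd, simply surfaces as "no event yet" and the
// caller polls again (see the 0-timeout re-poll in cq_next below).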

grpc_completion_queue* grpc_completion_queue_create_internal(
    grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type,
    grpc_experimental_completion_queue_functor* shutdown_callback) {
  GPR_TIMER_SCOPE("grpc_completion_queue_create_internal", 0);

  grpc_completion_queue* cq;

  GRPC_API_TRACE(
      "grpc_completion_queue_create_internal(completion_type=%d, "
      "polling_type=%d)",
      2, (completion_type, polling_type));

  const cq_vtable* vtable = &g_cq_vtable[completion_type];
  const cq_poller_vtable* poller_vtable =
      &g_poller_vtable_by_poller_type[polling_type];

  grpc_core::ExecCtx exec_ctx;
  GRPC_STATS_INC_CQS_CREATED();

  cq = static_cast<grpc_completion_queue*>(
      gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size +
                 poller_vtable->size()));

  cq->vtable = vtable;
  cq->poller_vtable = poller_vtable;

  /* One for destroy(), one for pollset_shutdown */
  new (&cq->owning_refs) grpc_core::RefCount(2);

  poller_vtable->init(POLLSET_FROM_CQ(cq), &cq->mu);
  vtable->init(DATA_FROM_CQ(cq), shutdown_callback);

  GRPC_CLOSURE_INIT(&cq->pollset_shutdown_done, on_pollset_shutdown_done, cq,
                    grpc_schedule_on_exec_ctx);
  return cq;
}

static void cq_init_next(
    void* data,
    grpc_experimental_completion_queue_functor* /*shutdown_callback*/) {
  new (data) cq_next_data();
}

static void cq_destroy_next(void* data) {
  cq_next_data* cqd = static_cast<cq_next_data*>(data);
  cqd->~cq_next_data();
}

static void cq_init_pluck(
    void* data,
    grpc_experimental_completion_queue_functor* /*shutdown_callback*/) {
  new (data) cq_pluck_data();
}

static void cq_destroy_pluck(void* data) {
  cq_pluck_data* cqd = static_cast<cq_pluck_data*>(data);
  cqd->~cq_pluck_data();
}

static void cq_init_callback(
    void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
  new (data) cq_callback_data(shutdown_callback);
}

static void cq_destroy_callback(void* data) {
  cq_callback_data* cqd = static_cast<cq_callback_data*>(data);
  cqd->~cq_callback_data();
}

grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cq) {
  return cq->vtable->cq_completion_type;
}

int grpc_get_cq_poll_num(grpc_completion_queue* cq) {
  int cur_num_polls;
  gpr_mu_lock(cq->mu);
  cur_num_polls = cq->num_polls;
  gpr_mu_unlock(cq->mu);
  return cur_num_polls;
}

#ifndef NDEBUG
void grpc_cq_internal_ref(grpc_completion_queue* cq, const char* reason,
                          const char* file, int line) {
  grpc_core::DebugLocation debug_location(file, line);
#else
void grpc_cq_internal_ref(grpc_completion_queue* cq) {
  grpc_core::DebugLocation debug_location;
  const char* reason = nullptr;
#endif
  cq->owning_refs.Ref(debug_location, reason);
}

static void on_pollset_shutdown_done(void* arg, grpc_error* /*error*/) {
  grpc_completion_queue* cq = static_cast<grpc_completion_queue*>(arg);
  GRPC_CQ_INTERNAL_UNREF(cq, "pollset_destroy");
}

#ifndef NDEBUG
void grpc_cq_internal_unref(grpc_completion_queue* cq, const char* reason,
                            const char* file, int line) {
  grpc_core::DebugLocation debug_location(file, line);
#else
void grpc_cq_internal_unref(grpc_completion_queue* cq) {
  grpc_core::DebugLocation debug_location;
  const char* reason = nullptr;
#endif
  if (GPR_UNLIKELY(cq->owning_refs.Unref(debug_location, reason))) {
    cq->vtable->destroy(DATA_FROM_CQ(cq));
    cq->poller_vtable->destroy(POLLSET_FROM_CQ(cq));
#ifndef NDEBUG
    gpr_free(cq->outstanding_tags);
#endif
    gpr_free(cq);
  }
}

#ifndef NDEBUG
static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {
  int found = 0;
  if (lock_cq) {
    gpr_mu_lock(cq->mu);
  }

  for (int i = 0; i < static_cast<int>(cq->outstanding_tag_count); i++) {
    if (cq->outstanding_tags[i] == tag) {
      cq->outstanding_tag_count--;
      GPR_SWAP(void*, cq->outstanding_tags[i],
               cq->outstanding_tags[cq->outstanding_tag_count]);
      found = 1;
      break;
    }
  }

  if (lock_cq) {
    gpr_mu_unlock(cq->mu);
  }

  GPR_ASSERT(found);
}
#else
static void cq_check_tag(grpc_completion_queue* /*cq*/, void* /*tag*/,
                         bool /*lock_cq*/) {}
#endif

static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* /*tag*/) {
  cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
  return cqd->pending_events.IncrementIfNonzero();
}

static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* /*tag*/) {
  cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
  return cqd->pending_events.IncrementIfNonzero();
}

static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* /*tag*/) {
  cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
  return cqd->pending_events.IncrementIfNonzero();
}
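
// All three begin_op variants rely on the same idea: IncrementIfNonzero() is
// a conditional ref on pending_events, so a new op can only be started while
// the queue still holds its initial count; once shutdown has driven the count
// to zero, begin_op fails and callers must not post to this cq.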

bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) {
#ifndef NDEBUG
  gpr_mu_lock(cq->mu);
  if (cq->outstanding_tag_count == cq->outstanding_tag_capacity) {
    cq->outstanding_tag_capacity = GPR_MAX(4, 2 * cq->outstanding_tag_capacity);
    cq->outstanding_tags = static_cast<void**>(gpr_realloc(
        cq->outstanding_tags,
        sizeof(*cq->outstanding_tags) * cq->outstanding_tag_capacity));
  }
  cq->outstanding_tags[cq->outstanding_tag_count++] = tag;
  gpr_mu_unlock(cq->mu);
#endif
  return cq->vtable->begin_op(cq, tag);
}

/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
 * completion type of GRPC_CQ_NEXT) */
static void cq_end_op_for_next(
    grpc_completion_queue* cq, void* tag, grpc_error* error,
    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
    grpc_cq_completion* storage, bool /*internal*/) {
  GPR_TIMER_SCOPE("cq_end_op_for_next", 0);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
      (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
       error != GRPC_ERROR_NONE)) {
    const char* errmsg = grpc_error_string(error);
    GRPC_API_TRACE(
        "cq_end_op_for_next(cq=%p, tag=%p, error=%s, "
        "done=%p, done_arg=%p, storage=%p)",
        6, (cq, tag, errmsg, done, done_arg, storage));
    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
        error != GRPC_ERROR_NONE) {
      gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
    }
  }
  cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
  int is_success = (error == GRPC_ERROR_NONE);

  storage->tag = tag;
  storage->done = done;
  storage->done_arg = done_arg;
  storage->next = static_cast<uintptr_t>(is_success);

  cq_check_tag(cq, tag, true); /* Used in debug builds only */

  if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq &&
      (grpc_cq_completion*)gpr_tls_get(&g_cached_event) == nullptr) {
    gpr_tls_set(&g_cached_event, (intptr_t)storage);
  } else {
    /* Add the completion to the queue */
    bool is_first = cqd->queue.Push(storage);
    cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
    /* Since we do not hold the cq lock here, it is important to do an
       'acquire' load (instead of a 'no_barrier' load) to match the release
       store (done via pending_events.FetchSub(1, ACQ_REL)) in
       cq_shutdown_next */
    if (cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) != 1) {
      /* Only kick if this is the first item queued */
      if (is_first) {
        gpr_mu_lock(cq->mu);
        grpc_error* kick_error =
            cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr);
        gpr_mu_unlock(cq->mu);

        if (kick_error != GRPC_ERROR_NONE) {
          const char* msg = grpc_error_string(kick_error);
          gpr_log(GPR_ERROR, "Kick failed: %s", msg);
          GRPC_ERROR_UNREF(kick_error);
        }
      }
      if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) ==
          1) {
        GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
        gpr_mu_lock(cq->mu);
        cq_finish_shutdown_next(cq);
        gpr_mu_unlock(cq->mu);
        GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
      }
    } else {
      GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
      cqd->pending_events.Store(0, grpc_core::MemoryOrder::RELEASE);
      gpr_mu_lock(cq->mu);
      cq_finish_shutdown_next(cq);
      gpr_mu_unlock(cq->mu);
      GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
    }
  }

  GRPC_ERROR_UNREF(error);
}

/* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
 * completion type of GRPC_CQ_PLUCK) */
static void cq_end_op_for_pluck(
    grpc_completion_queue* cq, void* tag, grpc_error* error,
    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
    grpc_cq_completion* storage, bool /*internal*/) {
  GPR_TIMER_SCOPE("cq_end_op_for_pluck", 0);

  cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
  int is_success = (error == GRPC_ERROR_NONE);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
      (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
       error != GRPC_ERROR_NONE)) {
    const char* errmsg = grpc_error_string(error);
    GRPC_API_TRACE(
        "cq_end_op_for_pluck(cq=%p, tag=%p, error=%s, "
        "done=%p, done_arg=%p, storage=%p)",
        6, (cq, tag, errmsg, done, done_arg, storage));
    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
        error != GRPC_ERROR_NONE) {
      gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
    }
  }

  storage->tag = tag;
  storage->done = done;
  storage->done_arg = done_arg;
  storage->next =
      ((uintptr_t)&cqd->completed_head) | (static_cast<uintptr_t>(is_success));

  gpr_mu_lock(cq->mu);
  cq_check_tag(cq, tag, false); /* Used in debug builds only */

  /* Add to the list of completions */
  cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
  cqd->completed_tail->next =
      ((uintptr_t)storage) | (1u & cqd->completed_tail->next);
  cqd->completed_tail = storage;
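
  /* Note on the encoding above: 'next' doubles as a tagged pointer. Bit 0
     carries the success flag and the remaining bits point at the following
     completion, which is why readers mask with ~1 to walk the list and with 1
     to recover success (see cq_pluck and ExecCtxPluck below). */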

  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
    cq_finish_shutdown_pluck(cq);
    gpr_mu_unlock(cq->mu);
  } else {
    grpc_pollset_worker* pluck_worker = nullptr;
    for (int i = 0; i < cqd->num_pluckers; i++) {
      if (cqd->pluckers[i].tag == tag) {
        pluck_worker = *cqd->pluckers[i].worker;
        break;
      }
    }

    grpc_error* kick_error =
        cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);

    gpr_mu_unlock(cq->mu);

    if (kick_error != GRPC_ERROR_NONE) {
      const char* msg = grpc_error_string(kick_error);
      gpr_log(GPR_ERROR, "Kick failed: %s", msg);
      GRPC_ERROR_UNREF(kick_error);
    }
  }

  GRPC_ERROR_UNREF(error);
}

static void functor_callback(void* arg, grpc_error* error) {
  auto* functor = static_cast<grpc_experimental_completion_queue_functor*>(arg);
  functor->functor_run(functor, error == GRPC_ERROR_NONE);
}

/* Complete an event on a completion queue of type GRPC_CQ_CALLBACK */
static void cq_end_op_for_callback(
    grpc_completion_queue* cq, void* tag, grpc_error* error,
    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
    grpc_cq_completion* storage, bool internal) {
  GPR_TIMER_SCOPE("cq_end_op_for_callback", 0);

  cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
      (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
       error != GRPC_ERROR_NONE)) {
    const char* errmsg = grpc_error_string(error);
    GRPC_API_TRACE(
        "cq_end_op_for_callback(cq=%p, tag=%p, error=%s, "
        "done=%p, done_arg=%p, storage=%p)",
        6, (cq, tag, errmsg, done, done_arg, storage));
    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
        error != GRPC_ERROR_NONE) {
      gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
    }
  }

  // The callback-based CQ isn't really a queue at all and thus has no need
  // for reserved storage. Invoke the done callback right away to release it.
  done(done_arg, storage);

  cq_check_tag(cq, tag, true); /* Used in debug builds only */

  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
    cq_finish_shutdown_callback(cq);
  }

  // If possible, schedule the callback onto an existing thread-local
  // ApplicationCallbackExecCtx, which is a work queue. This is possible for:
  // 1. The callback is internally-generated and there is an ACEC available
  // 2. The callback is marked inlineable and there is an ACEC available
  // 3. We are already running in a background poller thread (which always has
  //    an ACEC available at the base of the stack).
  auto* functor = static_cast<grpc_experimental_completion_queue_functor*>(tag);
  if (((internal || functor->inlineable) &&
       grpc_core::ApplicationCallbackExecCtx::Available()) ||
      grpc_iomgr_is_any_background_poller_thread()) {
    grpc_core::ApplicationCallbackExecCtx::Enqueue(functor,
                                                   (error == GRPC_ERROR_NONE));
    GRPC_ERROR_UNREF(error);
    return;
  }

  // Schedule the callback on a closure if not internal or triggered
  // from a background poller thread.
  grpc_core::Executor::Run(
      GRPC_CLOSURE_CREATE(functor_callback, functor, nullptr), error);
}

void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error,
                    void (*done)(void* done_arg, grpc_cq_completion* storage),
                    void* done_arg, grpc_cq_completion* storage,
                    bool internal) {
  cq->vtable->end_op(cq, tag, error, done, done_arg, storage, internal);
}

struct cq_is_finished_arg {
  gpr_atm last_seen_things_queued_ever;
  grpc_completion_queue* cq;
  grpc_millis deadline;
  grpc_cq_completion* stolen_completion;
  void* tag; /* for pluck */
  bool first_loop;
};

class ExecCtxNext : public grpc_core::ExecCtx {
 public:
  ExecCtxNext(void* arg) : ExecCtx(0), check_ready_to_finish_arg_(arg) {}

  bool CheckReadyToFinish() override {
    cq_is_finished_arg* a =
        static_cast<cq_is_finished_arg*>(check_ready_to_finish_arg_);
    grpc_completion_queue* cq = a->cq;
    cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
    GPR_ASSERT(a->stolen_completion == nullptr);

    intptr_t current_last_seen_things_queued_ever =
        cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);

    if (current_last_seen_things_queued_ever !=
        a->last_seen_things_queued_ever) {
      a->last_seen_things_queued_ever =
          cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);

      /* Pop a cq_completion from the queue. Pop() returns NULL if the queue
         is empty, and might return NULL in some cases even if the queue is
         not empty; that is ok and doesn't affect correctness (it might affect
         the tail latencies a bit). */
      a->stolen_completion = cqd->queue.Pop();
      if (a->stolen_completion != nullptr) {
        return true;
      }
    }
    return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now();
  }

 private:
  void* check_ready_to_finish_arg_;
};

#ifndef NDEBUG
static void dump_pending_tags(grpc_completion_queue* cq) {
  if (!GRPC_TRACE_FLAG_ENABLED(grpc_trace_pending_tags)) return;
  std::vector<std::string> parts;
  parts.push_back("PENDING TAGS:");
  gpr_mu_lock(cq->mu);
  for (size_t i = 0; i < cq->outstanding_tag_count; i++) {
    parts.push_back(absl::StrFormat(" %p", cq->outstanding_tags[i]));
  }
  gpr_mu_unlock(cq->mu);
  gpr_log(GPR_DEBUG, "%s", absl::StrJoin(parts, "").c_str());
}
#else
static void dump_pending_tags(grpc_completion_queue* /*cq*/) {}
#endif

static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
                          void* reserved) {
  GPR_TIMER_SCOPE("grpc_completion_queue_next", 0);

  grpc_event ret;
  cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);

  GRPC_API_TRACE(
      "grpc_completion_queue_next("
      "cq=%p, "
      "deadline=gpr_timespec { tv_sec: %" PRId64
      ", tv_nsec: %d, clock_type: %d }, "
      "reserved=%p)",
      5,
      (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
       reserved));
  GPR_ASSERT(!reserved);

  dump_pending_tags(cq);

  GRPC_CQ_INTERNAL_REF(cq, "next");

  grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
  cq_is_finished_arg is_finished_arg = {
      cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED),
      cq,
      deadline_millis,
      nullptr,
      nullptr,
      true};
  ExecCtxNext exec_ctx(&is_finished_arg);
  for (;;) {
    grpc_millis iteration_deadline = deadline_millis;

    if (is_finished_arg.stolen_completion != nullptr) {
      grpc_cq_completion* c = is_finished_arg.stolen_completion;
      is_finished_arg.stolen_completion = nullptr;
      ret.type = GRPC_OP_COMPLETE;
      ret.success = c->next & 1u;
      ret.tag = c->tag;
      c->done(c->done_arg, c);
      break;
    }

    grpc_cq_completion* c = cqd->queue.Pop();

    if (c != nullptr) {
      ret.type = GRPC_OP_COMPLETE;
      ret.success = c->next & 1u;
      ret.tag = c->tag;
      c->done(c->done_arg, c);
      break;
    } else {
      /* If c == NULL it means either the queue is empty OR it is in a
         transient inconsistent state. If it is the latter, we should do a
         0-timeout poll so that the thread comes back quickly from poll to
         make a second attempt at popping. Not doing this can potentially
         deadlock this thread forever (if the deadline is infinity). */
      if (cqd->queue.num_items() > 0) {
        iteration_deadline = 0;
      }
    }

    if (cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) == 0) {
      /* Before returning, check if the queue has any items left over (since
         MultiProducerSingleConsumerQueue::Pop() can sometimes return NULL
         even if the queue is not empty). If so, keep retrying but do not
         return GRPC_QUEUE_SHUTDOWN. */
      if (cqd->queue.num_items() > 0) {
        /* Go to the beginning of the loop. No point doing a poll because
           (cq->shutdown == true) is only possible when there is no pending
           work (i.e. cq->pending_events == 0) and any outstanding completion
           events should have already been queued on this cq. */
        continue;
      }

      ret.type = GRPC_QUEUE_SHUTDOWN;
      ret.success = 0;
      break;
    }

    if (!is_finished_arg.first_loop &&
        grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
      ret.type = GRPC_QUEUE_TIMEOUT;
      ret.success = 0;
      dump_pending_tags(cq);
      break;
    }

    /* The main polling work happens in grpc_pollset_work */
    gpr_mu_lock(cq->mu);
    cq->num_polls++;
    grpc_error* err = cq->poller_vtable->work(POLLSET_FROM_CQ(cq), nullptr,
                                              iteration_deadline);
    gpr_mu_unlock(cq->mu);

    if (err != GRPC_ERROR_NONE) {
      const char* msg = grpc_error_string(err);
      gpr_log(GPR_ERROR, "Completion queue next failed: %s", msg);

      GRPC_ERROR_UNREF(err);
      ret.type = GRPC_QUEUE_TIMEOUT;
      ret.success = 0;
      dump_pending_tags(cq);
      break;
    }
    is_finished_arg.first_loop = false;
  }

  if (cqd->queue.num_items() > 0 &&
      cqd->pending_events.Load(grpc_core::MemoryOrder::ACQUIRE) > 0) {
    gpr_mu_lock(cq->mu);
    cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr);
    gpr_mu_unlock(cq->mu);
  }

  GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret);
  GRPC_CQ_INTERNAL_UNREF(cq, "next");

  GPR_ASSERT(is_finished_arg.stolen_completion == nullptr);

  return ret;
}

/* Finishes the completion queue shutdown. This means that there are no more
   completion events / tags expected from the completion queue.
   - Must be called under completion queue lock
   - Must be called only once in completion queue's lifetime
   - grpc_completion_queue_shutdown() MUST have been called before calling
     this function */
static void cq_finish_shutdown_next(grpc_completion_queue* cq) {
  cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);

  GPR_ASSERT(cqd->shutdown_called);
  GPR_ASSERT(cqd->pending_events.Load(grpc_core::MemoryOrder::RELAXED) == 0);

  cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
}

static void cq_shutdown_next(grpc_completion_queue* cq) {
  cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);

  /* Need an extra ref for cq here because:
   * We call cq_finish_shutdown_next() below, which would call pollset
   * shutdown. Pollset shutdown decrements the cq ref count which can
   * potentially destroy the cq (if that happens to be the last ref).
   * Creating an extra ref here prevents the cq from getting destroyed while
   * this function is still active */
  GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
  gpr_mu_lock(cq->mu);
  if (cqd->shutdown_called) {
    gpr_mu_unlock(cq->mu);
    GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
    return;
  }
  cqd->shutdown_called = true;
  /* Doing an acq/release FetchSub here to match with
   * cq_begin_op_for_next and cq_end_op_for_next, which read/write on this
   * counter without necessarily holding a lock on cq */
  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
    cq_finish_shutdown_next(cq);
  }
  gpr_mu_unlock(cq->mu);
  GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
}

grpc_event grpc_completion_queue_next(grpc_completion_queue* cq,
                                      gpr_timespec deadline, void* reserved) {
  return cq->vtable->next(cq, deadline, reserved);
}

static int add_plucker(grpc_completion_queue* cq, void* tag,
                       grpc_pollset_worker** worker) {
  cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
  if (cqd->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
    return 0;
  }
  cqd->pluckers[cqd->num_pluckers].tag = tag;
  cqd->pluckers[cqd->num_pluckers].worker = worker;
  cqd->num_pluckers++;
  return 1;
}

static void del_plucker(grpc_completion_queue* cq, void* tag,
                        grpc_pollset_worker** worker) {
  cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
  for (int i = 0; i < cqd->num_pluckers; i++) {
    if (cqd->pluckers[i].tag == tag && cqd->pluckers[i].worker == worker) {
      cqd->num_pluckers--;
      GPR_SWAP(plucker, cqd->pluckers[i], cqd->pluckers[cqd->num_pluckers]);
      return;
    }
  }
  GPR_UNREACHABLE_CODE(return);
}

class ExecCtxPluck : public grpc_core::ExecCtx {
 public:
  ExecCtxPluck(void* arg) : ExecCtx(0), check_ready_to_finish_arg_(arg) {}

  bool CheckReadyToFinish() override {
    cq_is_finished_arg* a =
        static_cast<cq_is_finished_arg*>(check_ready_to_finish_arg_);
    grpc_completion_queue* cq = a->cq;
    cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);

    GPR_ASSERT(a->stolen_completion == nullptr);
    gpr_atm current_last_seen_things_queued_ever =
        cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
    if (current_last_seen_things_queued_ever !=
        a->last_seen_things_queued_ever) {
      gpr_mu_lock(cq->mu);
      a->last_seen_things_queued_ever =
          cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
      grpc_cq_completion* c;
      grpc_cq_completion* prev = &cqd->completed_head;
      while ((c = (grpc_cq_completion*)(prev->next &
                                        ~static_cast<uintptr_t>(1))) !=
             &cqd->completed_head) {
        if (c->tag == a->tag) {
          prev->next = (prev->next & static_cast<uintptr_t>(1)) |
                       (c->next & ~static_cast<uintptr_t>(1));
          if (c == cqd->completed_tail) {
            cqd->completed_tail = prev;
          }
          gpr_mu_unlock(cq->mu);
          a->stolen_completion = c;
          return true;
        }
        prev = c;
      }
      gpr_mu_unlock(cq->mu);
    }
    return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now();
  }

 private:
  void* check_ready_to_finish_arg_;
};

static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
                           gpr_timespec deadline, void* reserved) {
  GPR_TIMER_SCOPE("grpc_completion_queue_pluck", 0);

  grpc_event ret;
  grpc_cq_completion* c;
  grpc_cq_completion* prev;
  grpc_pollset_worker* worker = nullptr;
  cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_cq_pluck_trace)) {
    GRPC_API_TRACE(
        "grpc_completion_queue_pluck("
        "cq=%p, tag=%p, "
        "deadline=gpr_timespec { tv_sec: %" PRId64
        ", tv_nsec: %d, clock_type: %d }, "
        "reserved=%p)",
        6,
        (cq, tag, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
         reserved));
  }
  GPR_ASSERT(!reserved);

  dump_pending_tags(cq);

  GRPC_CQ_INTERNAL_REF(cq, "pluck");
  gpr_mu_lock(cq->mu);
  grpc_millis deadline_millis = grpc_timespec_to_millis_round_up(deadline);
  cq_is_finished_arg is_finished_arg = {
      cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED),
      cq,
      deadline_millis,
      nullptr,
      tag,
      true};
  ExecCtxPluck exec_ctx(&is_finished_arg);
  for (;;) {
    if (is_finished_arg.stolen_completion != nullptr) {
      gpr_mu_unlock(cq->mu);
      c = is_finished_arg.stolen_completion;
      is_finished_arg.stolen_completion = nullptr;
      ret.type = GRPC_OP_COMPLETE;
      ret.success = c->next & 1u;
      ret.tag = c->tag;
      c->done(c->done_arg, c);
      break;
    }
    prev = &cqd->completed_head;
    while (
        (c = (grpc_cq_completion*)(prev->next & ~static_cast<uintptr_t>(1))) !=
        &cqd->completed_head) {
      if (c->tag == tag) {
        prev->next = (prev->next & static_cast<uintptr_t>(1)) |
                     (c->next & ~static_cast<uintptr_t>(1));
        if (c == cqd->completed_tail) {
          cqd->completed_tail = prev;
        }
        gpr_mu_unlock(cq->mu);
        ret.type = GRPC_OP_COMPLETE;
        ret.success = c->next & 1u;
        ret.tag = c->tag;
        c->done(c->done_arg, c);
        goto done;
      }
      prev = c;
    }
    if (cqd->shutdown.Load(grpc_core::MemoryOrder::RELAXED)) {
      gpr_mu_unlock(cq->mu);
      ret.type = GRPC_QUEUE_SHUTDOWN;
      ret.success = 0;
      break;
    }
    if (!add_plucker(cq, tag, &worker)) {
      gpr_log(GPR_DEBUG,
              "Too many outstanding grpc_completion_queue_pluck calls: maximum "
              "is %d",
              GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
      gpr_mu_unlock(cq->mu);
      /* TODO(ctiller): should we use a different result here */
      ret.type = GRPC_QUEUE_TIMEOUT;
      ret.success = 0;
      dump_pending_tags(cq);
      break;
    }
    if (!is_finished_arg.first_loop &&
        grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
      del_plucker(cq, tag, &worker);
      gpr_mu_unlock(cq->mu);
      ret.type = GRPC_QUEUE_TIMEOUT;
      ret.success = 0;
      dump_pending_tags(cq);
      break;
    }
    cq->num_polls++;
    grpc_error* err =
        cq->poller_vtable->work(POLLSET_FROM_CQ(cq), &worker, deadline_millis);
    if (err != GRPC_ERROR_NONE) {
      del_plucker(cq, tag, &worker);
      gpr_mu_unlock(cq->mu);
      const char* msg = grpc_error_string(err);
      gpr_log(GPR_ERROR, "Completion queue pluck failed: %s", msg);

      GRPC_ERROR_UNREF(err);
      ret.type = GRPC_QUEUE_TIMEOUT;
      ret.success = 0;
      dump_pending_tags(cq);
      break;
    }
    is_finished_arg.first_loop = false;
    del_plucker(cq, tag, &worker);
  }
done:
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret);
  GRPC_CQ_INTERNAL_UNREF(cq, "pluck");

  GPR_ASSERT(is_finished_arg.stolen_completion == nullptr);

  return ret;
}

grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag,
                                       gpr_timespec deadline, void* reserved) {
  return cq->vtable->pluck(cq, tag, deadline, reserved);
}

static void cq_finish_shutdown_pluck(grpc_completion_queue* cq) {
  cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);

  GPR_ASSERT(cqd->shutdown_called);
  GPR_ASSERT(!cqd->shutdown.Load(grpc_core::MemoryOrder::RELAXED));
  cqd->shutdown.Store(true, grpc_core::MemoryOrder::RELAXED);

  cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
}

/* NOTE: This function is almost exactly identical to cq_shutdown_next() but
 * merging them is a bit tricky and probably not worth it */
static void cq_shutdown_pluck(grpc_completion_queue* cq) {
  cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);

  /* Need an extra ref for cq here because:
   * We call cq_finish_shutdown_pluck() below, which would call pollset
   * shutdown. Pollset shutdown decrements the cq ref count which can
   * potentially destroy the cq (if that happens to be the last ref).
   * Creating an extra ref here prevents the cq from getting destroyed while
   * this function is still active */
  GRPC_CQ_INTERNAL_REF(cq, "shutting_down (pluck cq)");
  gpr_mu_lock(cq->mu);
  if (cqd->shutdown_called) {
    gpr_mu_unlock(cq->mu);
    GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)");
    return;
  }
  cqd->shutdown_called = true;
  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
    cq_finish_shutdown_pluck(cq);
  }
  gpr_mu_unlock(cq->mu);
  GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)");
}

static void cq_finish_shutdown_callback(grpc_completion_queue* cq) {
  cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
  auto* callback = cqd->shutdown_callback;

  GPR_ASSERT(cqd->shutdown_called);

  cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
  if (grpc_iomgr_is_any_background_poller_thread()) {
    grpc_core::ApplicationCallbackExecCtx::Enqueue(callback, true);
    return;
  }

  // Schedule the shutdown callback on a closure if we are not running in a
  // background poller thread.
  grpc_core::Executor::Run(
      GRPC_CLOSURE_CREATE(functor_callback, callback, nullptr),
      GRPC_ERROR_NONE);
}

static void cq_shutdown_callback(grpc_completion_queue* cq) {
  cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);

  /* Need an extra ref for cq here because:
   * We call cq_finish_shutdown_callback() below, which calls pollset shutdown.
   * Pollset shutdown decrements the cq ref count which can potentially destroy
   * the cq (if that happens to be the last ref).
   * Creating an extra ref here prevents the cq from getting destroyed while
   * this function is still active */
  GRPC_CQ_INTERNAL_REF(cq, "shutting_down (callback cq)");
  gpr_mu_lock(cq->mu);
  if (cqd->shutdown_called) {
    gpr_mu_unlock(cq->mu);
    GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
    return;
  }
  cqd->shutdown_called = true;
  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
    gpr_mu_unlock(cq->mu);
    cq_finish_shutdown_callback(cq);
  } else {
    gpr_mu_unlock(cq->mu);
  }
  GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
}

/* Shutdown simply drops a ref that we reserved at creation time; if we drop
   to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
  GPR_TIMER_SCOPE("grpc_completion_queue_shutdown", 0);
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq));
  cq->vtable->shutdown(cq);
}

void grpc_completion_queue_destroy(grpc_completion_queue* cq) {
  GPR_TIMER_SCOPE("grpc_completion_queue_destroy", 0);
  GRPC_API_TRACE("grpc_completion_queue_destroy(cq=%p)", 1, (cq));
  grpc_completion_queue_shutdown(cq);

  grpc_core::ExecCtx exec_ctx;
  GRPC_CQ_INTERNAL_UNREF(cq, "destroy");
}

grpc_pollset* grpc_cq_pollset(grpc_completion_queue* cq) {
  return cq->poller_vtable->can_get_pollset ? POLLSET_FROM_CQ(cq) : nullptr;
}

bool grpc_cq_can_listen(grpc_completion_queue* cq) {
  return cq->poller_vtable->can_listen;
}