/*
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL_CREATE1

#include "src/core/lib/iomgr/ev_epollex_linux.h"
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"

#include <grpc/support/alloc.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"

// debug aid: create workers on the heap (allows asan to spot
// use-after-destruction)
//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1

#define MAX_EPOLL_EVENTS 100
#define MAX_FDS_IN_CACHE 32
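// MAX_EPOLL_EVENTS bounds the events[] buffer in struct pollable and thus the
// batch size of a single epoll_wait() call; see pollable_epoll below.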

grpc_core::DebugOnlyTraceFlag grpc_trace_pollable_refcount(
    false, "pollable_refcount");

/*******************************************************************************
 * pollable Declarations
 */

typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;

typedef struct pollable pollable;

/// A pollable is something that can be polled: it has an epoll set to poll on,
/// and a wakeup fd for kicks.
/// There are three broad types:
///  - PO_EMPTY - the empty pollable, used before file descriptors are added to
///               a pollset
///  - PO_FD    - a pollable containing only one FD - used to optimize single-fd
///               pollsets (which are common with synchronous api usage)
///  - PO_MULTI - a pollable containing many fds
struct pollable {
  pollable_type type;  // immutable
  grpc_core::RefCount refs;

  int epfd;
  grpc_wakeup_fd wakeup;

  // The following are relevant only for type PO_FD
  grpc_fd* owner_fd;       // Set to the owner_fd if the type is PO_FD
  gpr_mu owner_orphan_mu;  // Synchronizes access to owner_orphaned field
  bool owner_orphaned;     // Is the owner fd orphaned?

  grpc_pollset_set* pollset_set;
  pollable* next;
  pollable* prev;

  gpr_mu mu;
  grpc_pollset_worker* root_worker;

  int event_cursor;
  int event_count;
  struct epoll_event events[MAX_EPOLL_EVENTS];
};

static const char* pollable_type_string(pollable_type t) {
  switch (t) {
    case PO_MULTI:
      return "pollset";
    case PO_FD:
      return "fd";
    case PO_EMPTY:
      return "empty";
  }
  return "<invalid>";
}

static std::string pollable_desc(pollable* p) {
  return absl::StrFormat("type=%s epfd=%d wakeup=%d",
                         pollable_type_string(p->type), p->epfd,
                         p->wakeup.read_fd);
}

/// Shared empty pollable - used by pollset to poll on until the first fd is
/// added
static pollable* g_empty_pollable;

static grpc_error* pollable_create(pollable_type type, pollable** p);

static pollable* pollable_ref(pollable* p,
                              const grpc_core::DebugLocation& dbg_loc,
                              const char* reason) {
  p->refs.Ref(dbg_loc, reason);
  return p;
}

static void pollable_unref(pollable* p, const grpc_core::DebugLocation& dbg_loc,
                           const char* reason) {
  if (p == nullptr) return;
  if (GPR_UNLIKELY(p->refs.Unref(dbg_loc, reason))) {
    GRPC_FD_TRACE("pollable_unref: Closing epfd: %d", p->epfd);
    close(p->epfd);
    grpc_wakeup_fd_destroy(&p->wakeup);
    gpr_mu_destroy(&p->owner_orphan_mu);
    gpr_mu_destroy(&p->mu);
    gpr_free(p);
  }
}

#define POLLABLE_REF(p, r) pollable_ref((p), DEBUG_LOCATION, (r))
#define POLLABLE_UNREF(p, r) pollable_unref((p), DEBUG_LOCATION, (r))

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  grpc_fd(int fd, const char* name, bool track_err)
      : fd(fd), track_err(track_err) {
    gpr_mu_init(&orphan_mu);
    gpr_mu_init(&pollable_mu);
    read_closure.InitEvent();
    write_closure.InitEvent();
    error_closure.InitEvent();

    std::string fd_name = absl::StrCat(name, " fd=", fd);
    grpc_iomgr_register_object(&iomgr_object, fd_name.c_str());
#ifndef NDEBUG
    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
      gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, this, fd_name.c_str());
    }
#endif
  }

  // This is really the dtor, but the poller threads waking up from
  // epoll_wait() may access the (read|write|error)_closure after destruction.
  // Since the object will be added to the free pool, this behavior is
  // not going to cause issues, except spurious events if the FD is reused
  // while the race happens.
  void destroy() {
    grpc_iomgr_unregister_object(&iomgr_object);

    POLLABLE_UNREF(pollable_obj, "fd_pollable");

    // To clear out the allocations of pollset_fds, we need to swap its
    // contents with a newly-constructed (and soon to be destructed) local
    // variable of its same type. This is because InlinedVector::clear is _not_
    // guaranteed to actually free up allocations and this is important since
    // this object doesn't have a conventional destructor.
    absl::InlinedVector<int, 1> pollset_fds_tmp;
    pollset_fds_tmp.swap(pollset_fds);

    gpr_mu_destroy(&pollable_mu);
    gpr_mu_destroy(&orphan_mu);

    read_closure.DestroyEvent();
    write_closure.DestroyEvent();
    error_closure.DestroyEvent();

    invalidate();
  }

  /* Since an fd is never really destroyed (i.e. gpr_free() is not called), it
   * is easy to run into hard-to-debug cases where fd fields are accessed even
   * after calling fd_destroy(). The following invalidates fd fields to make
   * catching such issues easier. */
  void invalidate() {
    fd = -1;
    gpr_atm_no_barrier_store(&refst, -1);
    memset(&orphan_mu, -1, sizeof(orphan_mu));
    memset(&pollable_mu, -1, sizeof(pollable_mu));
    pollable_obj = nullptr;
    on_done_closure = nullptr;
    memset(&iomgr_object, -1, sizeof(iomgr_object));
    track_err = false;
  }

  int fd;

  // refst format:
  //   bit 0    : 1=Active / 0=Orphaned
  //   bits 1-n : refcount
  // Ref/Unref by two to avoid altering the orphaned bit
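  // e.g. refst == 5 (0b101) is an active fd with a refcount of 2, while
  // refst == 4 (0b100) is an orphaned fd that still has two refs keeping it
  // out of the freelist.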
  gpr_atm refst = 1;

  gpr_mu orphan_mu;

  // Protects pollable_obj and pollset_fds.
  gpr_mu pollable_mu;
  absl::InlinedVector<int, 1> pollset_fds;  // Used in PO_MULTI.
  pollable* pollable_obj = nullptr;         // Used in PO_FD.

  grpc_core::LockfreeEvent read_closure;
  grpc_core::LockfreeEvent write_closure;
  grpc_core::LockfreeEvent error_closure;

  struct grpc_fd* freelist_next = nullptr;
  grpc_closure* on_done_closure = nullptr;

  grpc_iomgr_object iomgr_object;

  // Do we need to track EPOLLERR events separately?
  bool track_err;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef struct pwlink {
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
} pwlink;
typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks;

struct grpc_pollset_worker {
  bool kicked;
  bool initialized_cv;
  gpr_cv cv;
  // debug aid: which thread started this worker
  long originator = 0;
  grpc_pollset* pollset;
  pollable* pollable_obj;

  pwlink links[PWLINK_COUNT];
};

struct grpc_pollset {
  gpr_mu mu;
  gpr_atm worker_count;
  gpr_atm active_pollable_type;
  pollable* active_pollable;
  bool kicked_without_poller;
  grpc_closure* shutdown_closure;
  bool already_shutdown;
  grpc_pollset_worker* root_worker;
  int containing_pollset_set_count;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  grpc_core::RefCount refs;
  gpr_mu mu;
  grpc_pollset_set* parent;

  size_t pollset_count;
  size_t pollset_capacity;
  grpc_pollset** pollsets;

  size_t fd_count;
  size_t fd_capacity;
  grpc_fd** fds;
};

/*******************************************************************************
 * Helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                   int line) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
  }
#else
#define REF_BY(fd, n, reason) \
  do {                        \
    ref_by(fd, n);            \
    (void)(reason);           \
  } while (0)
#define UNREF_BY(fd, n, reason) \
  do {                          \
    unref_by(fd, n);            \
    (void)(reason);             \
  } while (0)
static void ref_by(grpc_fd* fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

/* Uninitialize and add to the freelist */
static void fd_destroy(void* arg, grpc_error* /*error*/) {
  grpc_fd* fd = static_cast<grpc_fd*>(arg);
  fd->destroy();

  /* Add the fd to the freelist */
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

#ifndef NDEBUG
static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                     int line) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
  }
#else
static void unref_by(grpc_fd* fd, int n) {
#endif
  gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    grpc_core::ExecCtx::Run(
        DEBUG_LOCATION,
        GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
        GRPC_ERROR_NONE);
  } else {
    GPR_ASSERT(old > n);
  }
}

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  // TODO(guantaol): We don't have a reasonable explanation about this
  // lock()/unlock() pattern. It can be a valid barrier if there is at most one
  // pending lock() at this point. Otherwise, there is still a possibility of
  // use-after-free race. Need to reason about the code and/or clean it up.
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
  }

  return new (new_fd) grpc_fd(fd, name, track_err);
}
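
// Returns the wrapped OS fd, or -1 once the grpc_fd has been orphaned: bit 0
// of refst is the "active" bit described in the refst comment above.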
static int fd_wrapped_fd(grpc_fd* fd) {
  int ret_fd = fd->fd;
  return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
}

static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      const char* reason) {
  bool is_fd_closed = false;

  gpr_mu_lock(&fd->orphan_mu);

  // Get the fd->pollable_obj and set the owner_orphaned on that pollable to
  // true so that the pollable will no longer access its owner_fd field.
  gpr_mu_lock(&fd->pollable_mu);
  pollable* pollable_obj = fd->pollable_obj;

  if (pollable_obj) {
    gpr_mu_lock(&pollable_obj->owner_orphan_mu);
    pollable_obj->owner_orphaned = true;
  }

  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != nullptr) {
    // Remove the FD from all epoll sets, before releasing it.
    // Otherwise, we will receive epoll events after we release the FD.
    struct epoll_event ev_fd;
    memset(&ev_fd, 0, sizeof(ev_fd));
    if (pollable_obj != nullptr) {  // For PO_FD.
      epoll_ctl(pollable_obj->epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
    }
    for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {  // For PO_MULTI.
      const int epfd = fd->pollset_fds[i];
      epoll_ctl(epfd, EPOLL_CTL_DEL, fd->fd, &ev_fd);
    }
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
    is_fd_closed = true;
  }

  // TODO(sreek): handle fd removal (where is_fd_closed=false)
  if (!is_fd_closed) {
    GRPC_FD_TRACE("epoll_fd %p (%d) was orphaned but not closed.", fd, fd->fd);
  }

  /* Remove the active status but keep the fd referenced. We want this grpc_fd
     struct to stay alive (and not be added to the freelist) until the end of
     this function. */
  REF_BY(fd, 1, reason);

  grpc_core::ExecCtx::Run(DEBUG_LOCATION, fd->on_done_closure, GRPC_ERROR_NONE);

  if (pollable_obj) {
    gpr_mu_unlock(&pollable_obj->owner_orphan_mu);
  }

  gpr_mu_unlock(&fd->pollable_mu);
  gpr_mu_unlock(&fd->orphan_mu);

  UNREF_BY(fd, 2, reason); /* Drop the reference */
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure.IsShutdown();
}

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  if (fd->read_closure.SetShutdown(GRPC_ERROR_REF(why))) {
    if (shutdown(fd->fd, SHUT_RDWR)) {
      if (errno != ENOTCONN) {
        gpr_log(GPR_ERROR, "Error shutting down fd %d. errno: %d",
                grpc_fd_wrapped_fd(fd), errno);
      }
    }
    fd->write_closure.SetShutdown(GRPC_ERROR_REF(why));
    fd->error_closure.SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure.NotifyOn(closure);
}

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure.NotifyOn(closure);
}

static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
  fd->error_closure.NotifyOn(closure);
}

static bool fd_has_pollset(grpc_fd* fd, grpc_pollset* pollset) {
  const int epfd = pollset->active_pollable->epfd;
  grpc_core::MutexLock lock(&fd->pollable_mu);
  for (size_t i = 0; i < fd->pollset_fds.size(); ++i) {
    if (fd->pollset_fds[i] == epfd) {
      return true;
    }
  }
  return false;
}

static void fd_add_pollset(grpc_fd* fd, grpc_pollset* pollset) {
  const int epfd = pollset->active_pollable->epfd;
  grpc_core::MutexLock lock(&fd->pollable_mu);
  fd->pollset_fds.push_back(epfd);
}
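
// fd_has_pollset/fd_add_pollset record the epoll fds of every PO_MULTI pollset
// an fd has been added to; fd_orphan later walks this list to EPOLL_CTL_DEL
// the fd from those sets before the descriptor is released.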

/*******************************************************************************
 * Pollable Definitions
 */

static grpc_error* pollable_create(pollable_type type, pollable** p) {
  *p = nullptr;

  int epfd = epoll_create1(EPOLL_CLOEXEC);
  if (epfd == -1) {
    return GRPC_OS_ERROR(errno, "epoll_create1");
  }
  GRPC_FD_TRACE("Pollable_create: created epfd: %d (type: %d)", epfd, type);
  *p = static_cast<pollable*>(gpr_malloc(sizeof(**p)));
  grpc_error* err = grpc_wakeup_fd_init(&(*p)->wakeup);
  if (err != GRPC_ERROR_NONE) {
    GRPC_FD_TRACE(
        "Pollable_create: closed epfd: %d (type: %d). wakeupfd_init error",
        epfd, type);
    close(epfd);
    gpr_free(*p);
    *p = nullptr;
    return err;
  }
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
  ev.data.ptr = (void*)(1 | (intptr_t) & (*p)->wakeup);
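  // Bit 0 of data.ptr is set above to mark this as the wakeup fd;
  // pollable_process_events uses that bit to tell kicks apart from ordinary
  // grpc_fd events.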
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
    err = GRPC_OS_ERROR(errno, "epoll_ctl");
    GRPC_FD_TRACE(
        "Pollable_create: closed epfd: %d (type: %d). epoll_ctl error", epfd,
        type);
    close(epfd);
    grpc_wakeup_fd_destroy(&(*p)->wakeup);
    gpr_free(*p);
    *p = nullptr;
    return err;
  }

  (*p)->type = type;
  (*p)->epfd = epfd;
  new (&(*p)->refs) grpc_core::RefCount(1, &grpc_trace_pollable_refcount);
  gpr_mu_init(&(*p)->mu);
  (*p)->owner_fd = nullptr;
  gpr_mu_init(&(*p)->owner_orphan_mu);
  (*p)->owner_orphaned = false;
  (*p)->pollset_set = nullptr;
  (*p)->next = (*p)->prev = *p;
  (*p)->root_worker = nullptr;
  (*p)->event_cursor = 0;
  (*p)->event_count = 0;
  return GRPC_ERROR_NONE;
}

static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollable_add_fd";
  const int epfd = p->epfd;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
  }

  struct epoll_event ev_fd;
  ev_fd.events =
      static_cast<uint32_t>(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
  /* Use the second least significant bit of ev_fd.data.ptr to store track_err
   * to avoid synchronization issues when accessing it after receiving an
   * event. Accessing fd would be a data race there because the fd might have
   * been returned to the free list at that point. */
  ev_fd.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(fd) |
                                           (fd->track_err ? 2 : 0));
  GRPC_STATS_INC_SYSCALL_EPOLL_CTL();
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
    switch (errno) {
      case EEXIST:
        break;
      default:
        append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
    }
  }

  return error;
}

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* Global state management */
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  return pollable_create(PO_EMPTY, &g_empty_pollable);
}

static void pollset_global_shutdown(void) {
  POLLABLE_UNREF(g_empty_pollable, "g_empty_pollable");
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}

/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
            "rw=%p (target:NULL) cpsc=%d (target:0)",
            pollset, pollset->active_pollable, pollset->shutdown_closure,
            pollset->root_worker, pollset->containing_pollset_set_count);
  }
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->containing_pollset_set_count == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    grpc_core::ExecCtx::Run(DEBUG_LOCATION, pollset->shutdown_closure,
                            GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
    pollset->already_shutdown = true;
  }
}

/* pollset->mu must be held before calling this function,
 * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
 * held */
static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("kick_one_worker", 0);
  pollable* p = specific_worker->pollable_obj;
  grpc_core::MutexLock lock(&p->mu);
  GPR_ASSERT(specific_worker != nullptr);
  if (specific_worker->kicked) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
    }
    GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
    return GRPC_ERROR_NONE;
  }
  if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
    }
    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  }
  if (specific_worker == p->root_worker) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
    }
    specific_worker->kicked = true;
    grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
    return error;
  }
  if (specific_worker->initialized_cv) {
    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
      gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
    }
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
  // we can get here during end_worker after removing specific_worker from the
  // pollable list but before removing it from the pollset list
  return GRPC_ERROR_NONE;
}

static grpc_error* pollset_kick(grpc_pollset* pollset,
                                grpc_pollset_worker* specific_worker) {
  GPR_TIMER_SCOPE("pollset_kick", 0);
  GRPC_STATS_INC_POLLSET_KICK();
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
            pollset, specific_worker,
            (void*)gpr_tls_get(&g_current_thread_pollset),
            (void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
  }
  if (specific_worker == nullptr) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      if (pollset->root_worker == nullptr) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
        }
        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      } else {
        // We've been asked to kick a poller, but we haven't been told which one
        // -- any will do.
        // We look at the pollset worker list because:
        // 1. the pollable list may include workers from other pollers, so we'd
        //    need to do an O(N) search
        // 2. we'd additionally need to take the pollable lock, which we've so
        //    far avoided
        // Now, we would prefer to wake a poller in cv_wait, and not in
        // epoll_wait (since the latter would imply the need to do an additional
        // wakeup)
        // We know that if a worker is at the root of a pollable, it's (likely)
        // also the root of a pollset, and we know that if a worker is NOT at
        // the root of a pollset, it's (likely) not at the root of a pollable,
        // so we take our chances and choose the SECOND worker enqueued against
        // the pollset as a worker that's likely to be in cv_wait
        return kick_one_worker(
            pollset->root_worker->links[PWLINK_POLLSET].next);
      }
    } else {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
      }
      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
      return GRPC_ERROR_NONE;
    }
  } else {
    return kick_one_worker(specific_worker);
  }
}

static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  const char* err_desc = "pollset_kick_all";
  grpc_pollset_worker* w = pollset->root_worker;
  if (w != nullptr) {
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      append_error(&error, kick_one_worker(w), err_desc);
      w = w->links[PWLINK_POLLSET].next;
    } while (w != pollset->root_worker);
  }
  return error;
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  gpr_atm_no_barrier_store(&pollset->worker_count, 0);
  gpr_atm_no_barrier_store(&pollset->active_pollable_type, PO_EMPTY);
  pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
  pollset->kicked_without_poller = false;
  pollset->shutdown_closure = nullptr;
  pollset->already_shutdown = false;
  pollset->root_worker = nullptr;
  pollset->containing_pollset_set_count = 0;
  *mu = &pollset->mu;
}

static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return static_cast<int>(delta);
  }
}

static void fd_become_readable(grpc_fd* fd) { fd->read_closure.SetReady(); }

static void fd_become_writable(grpc_fd* fd) { fd->write_closure.SetReady(); }

static void fd_has_errors(grpc_fd* fd) { fd->error_closure.SetReady(); }
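
// These SetReady() calls are what eventually fire the closures registered via
// fd_notify_on_read/write/error; pollable_process_events invokes them for the
// events returned by epoll_wait.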

/* Get the pollable_obj attached to this fd. If none is attached, create a new
 * pollable object (of type PO_FD), attach it to the fd and return it.
 *
 * Note that if a pollable object is already attached to the fd, it may be of
 * either PO_FD or PO_MULTI type */
static grpc_error* get_fd_pollable(grpc_fd* fd, pollable** p) {
  gpr_mu_lock(&fd->pollable_mu);
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "get_fd_pollable";
  if (fd->pollable_obj == nullptr) {
    if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj),
                     err_desc)) {
      fd->pollable_obj->owner_fd = fd;
      if (!append_error(&error, pollable_add_fd(fd->pollable_obj, fd),
                        err_desc)) {
        POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
        fd->pollable_obj = nullptr;
      }
    }
  }
  if (error == GRPC_ERROR_NONE) {
    GPR_ASSERT(fd->pollable_obj != nullptr);
    *p = POLLABLE_REF(fd->pollable_obj, "pollset");
  } else {
    GPR_ASSERT(fd->pollable_obj == nullptr);
    *p = nullptr;
  }
  gpr_mu_unlock(&fd->pollable_mu);
  return error;
}

/* pollset->mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_SCOPE("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);
}

static grpc_error* pollable_process_events(grpc_pollset* pollset,
                                           pollable* pollable_obj, bool drain) {
  GPR_TIMER_SCOPE("pollable_process_events", 0);
  static const char* err_desc = "pollset_process_events";
  // Use a simple heuristic to determine how many fd events to process
  // per loop iteration. (events/workers)
  int handle_count = 1;
  int worker_count = gpr_atm_no_barrier_load(&pollset->worker_count);
  GPR_ASSERT(worker_count > 0);
  handle_count =
      (pollable_obj->event_count - pollable_obj->event_cursor) / worker_count;
  if (handle_count == 0) {
    handle_count = 1;
  }
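  // For example, with 8 buffered events and 2 active workers each call handles
  // up to 4 events; the floor of 1 keeps a worker making progress even when
  // there are fewer events than workers.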
  grpc_error* error = GRPC_ERROR_NONE;
  for (int i = 0; (drain || i < handle_count) &&
                  pollable_obj->event_cursor != pollable_obj->event_count;
       i++) {
    int n = pollable_obj->event_cursor++;
    struct epoll_event* ev = &pollable_obj->events[n];
    void* data_ptr = ev->data.ptr;
    if (1 & (intptr_t)data_ptr) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
      }
      append_error(&error,
                   grpc_wakeup_fd_consume_wakeup(
                       (grpc_wakeup_fd*)((~static_cast<intptr_t>(1)) &
                                         (intptr_t)data_ptr)),
                   err_desc);
    } else {
      grpc_fd* fd =
          reinterpret_cast<grpc_fd*>(reinterpret_cast<intptr_t>(data_ptr) & ~2);
      bool track_err = reinterpret_cast<intptr_t>(data_ptr) & 2;
      bool cancel = (ev->events & EPOLLHUP) != 0;
      bool error = (ev->events & EPOLLERR) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;
      bool err_fallback = error && !track_err;

      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO,
                "PS:%p got fd %p: cancel=%d read=%d "
                "write=%d",
                pollset, fd, cancel, read_ev, write_ev);
      }
      if (error && !err_fallback) {
        fd_has_errors(fd);
      }
      if (read_ev || cancel || err_fallback) {
        fd_become_readable(fd);
      }
      if (write_ev || cancel || err_fallback) {
        fd_become_writable(fd);
      }
    }
  }

  return error;
}

/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_pollset* pollset) {
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollable_epoll", 0);
  int timeout = poll_deadline_to_millis_timeout(deadline);

  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p,
            pollable_desc(p).c_str(), timeout);
  }

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
  }

  p->event_cursor = 0;
  p->event_count = r;

  return GRPC_ERROR_NONE;
}
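
// Events returned by epoll_wait stay buffered in p->events; workers consume
// them incrementally via event_cursor/event_count in pollable_process_events,
// so one wait can feed several pollset_work calls.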

/* Return true if first in list */
static bool worker_insert(grpc_pollset_worker** root_worker,
                          grpc_pollset_worker* worker, pwlinks link) {
  if (*root_worker == nullptr) {
    *root_worker = worker;
    worker->links[link].next = worker->links[link].prev = worker;
    return true;
  } else {
    worker->links[link].next = *root_worker;
    worker->links[link].prev = worker->links[link].next->links[link].prev;
    worker->links[link].next->links[link].prev = worker;
    worker->links[link].prev->links[link].next = worker;
    return false;
  }
}
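
// Workers hang off two circular doubly-linked lists (one per pwlink): the
// pollset list and the pollable list. For the pollable list, the root worker
// is the one that actually calls epoll_wait; the others block on their
// condition variables until they are kicked or become the new root (see
// begin_worker/end_worker).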

/* returns the new root IFF the root changed */
typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset_worker** root_worker,
                                          grpc_pollset_worker* worker,
                                          pwlinks link) {
  if (worker == *root_worker) {
    if (worker == worker->links[link].next) {
      *root_worker = nullptr;
      return WRR_EMPTIED;
    } else {
      *root_worker = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
      return WRR_NEW_ROOT;
    }
  } else {
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
    return WRR_REMOVED;
  }
}

/* Return true if this thread should poll */
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
  bool do_poll =
      (pollset->shutdown_closure == nullptr && !pollset->already_shutdown);
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, 1);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;
  worker->pollset = pollset;
  worker->pollable_obj =
      POLLABLE_REF(pollset->active_pollable, "pollset_worker");
  worker_insert(&pollset->root_worker, worker, PWLINK_POLLSET);
  gpr_mu_lock(&worker->pollable_obj->mu);
  if (!worker_insert(&worker->pollable_obj->root_worker, worker,
                     PWLINK_POLLABLE)) {
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    gpr_mu_unlock(&pollset->mu);
    if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace) &&
        worker->pollable_obj->root_worker != worker) {
      gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset,
              worker->pollable_obj, worker,
              poll_deadline_to_millis_timeout(deadline));
    }
    while (do_poll && worker->pollable_obj->root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (worker->kicked) {
        if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
          gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace) &&
                 worker->pollable_obj->root_worker != worker) {
        gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
                worker->pollable_obj, worker);
      }
    }
    grpc_core::ExecCtx::Get()->InvalidateNow();
  } else {
    gpr_mu_unlock(&pollset->mu);
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);

  return do_poll;
}

static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** /*worker_hdl*/) {
  GPR_TIMER_SCOPE("end_worker", 0);
  gpr_mu_lock(&pollset->mu);
  gpr_mu_lock(&worker->pollable_obj->mu);
  switch (worker_remove(&worker->pollable_obj->root_worker, worker,
                        PWLINK_POLLABLE)) {
    case WRR_NEW_ROOT: {
      // wakeup new poller
      grpc_pollset_worker* new_root = worker->pollable_obj->root_worker;
      GPR_ASSERT(new_root->initialized_cv);
      gpr_cv_signal(&new_root->cv);
      break;
    }
    case WRR_EMPTIED:
      if (pollset->active_pollable != worker->pollable_obj) {
        // pollable no longer being polled: flush events
        pollable_process_events(pollset, worker->pollable_obj, true);
      }
      break;
    case WRR_REMOVED:
      break;
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);
  POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
  if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
      WRR_EMPTIED) {
    pollset_maybe_finish_shutdown(pollset);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  gpr_atm_no_barrier_fetch_add(&pollset->worker_count, -1);
}

#ifndef NDEBUG
static long sys_gettid(void) { return syscall(__NR_gettid); }
#endif

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock
   and ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_pollset* pollset,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollset_work", 0);
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  grpc_pollset_worker* worker =
      (grpc_pollset_worker*)gpr_malloc(sizeof(*worker));
#define WORKER_PTR (worker)
#else
  grpc_pollset_worker worker;
#define WORKER_PTR (&worker)
#endif
#ifndef NDEBUG
  WORKER_PTR->originator = sys_gettid();
#endif
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
            " kwp=%d pollable=%p",
            pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
            deadline, pollset->kicked_without_poller, pollset->active_pollable);
  }
  static const char* err_desc = "pollset_work";
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
  } else {
    if (begin_worker(pollset, WORKER_PTR, worker_hdl, deadline)) {
      gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
      gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
      if (WORKER_PTR->pollable_obj->event_cursor ==
          WORKER_PTR->pollable_obj->event_count) {
        append_error(&error, pollable_epoll(WORKER_PTR->pollable_obj, deadline),
                     err_desc);
      }
      append_error(
          &error,
          pollable_process_events(pollset, WORKER_PTR->pollable_obj, false),
          err_desc);
      grpc_core::ExecCtx::Get()->Flush();
      gpr_tls_set(&g_current_thread_pollset, 0);
      gpr_tls_set(&g_current_thread_worker, 0);
    }
    end_worker(pollset, WORKER_PTR, worker_hdl);
  }
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  gpr_free(worker);
#endif
  return error;
}

static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
    grpc_pollset* pollset, grpc_fd* fd) {
  static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
  grpc_error* error = GRPC_ERROR_NONE;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO,
            "PS:%p add fd %p (%d); transition pollable from empty to fd",
            pollset, fd, fd->fd);
  }
  append_error(&error, pollset_kick_all(pollset), err_desc);
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  append_error(&error, get_fd_pollable(fd, &pollset->active_pollable),
               err_desc);
  return error;
}

static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
    grpc_pollset* pollset, grpc_fd* and_add_fd) {
  static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
  grpc_error* error = GRPC_ERROR_NONE;
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(
        GPR_INFO,
        "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
        pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
        pollset->active_pollable->owner_fd);
  }
  append_error(&error, pollset_kick_all(pollset), err_desc);
  grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = nullptr;
  if (append_error(&error, pollable_create(PO_MULTI, &pollset->active_pollable),
                   err_desc)) {
    append_error(&error, pollable_add_fd(pollset->active_pollable, initial_fd),
                 err_desc);
    if (and_add_fd != nullptr) {
      append_error(&error,
                   pollable_add_fd(pollset->active_pollable, and_add_fd),
                   err_desc);
    }
  }
  return error;
}

/* expects pollsets locked, flag whether fd is locked or not */
static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      /* empty pollable --> single fd pollable */
      error = pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
        error =
            pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
      } else {
        /* fd --> multipoller */
        error =
            pollset_transition_pollable_from_fd_to_multi_locked(pollset, fd);
      }
      gpr_mu_unlock(&po_at_start->owner_orphan_mu);
      break;
    case PO_MULTI:
      error = pollable_add_fd(pollset->active_pollable, fd);
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
  } else {
    gpr_atm_rel_store(&pollset->active_pollable_type,
                      pollset->active_pollable->type);
    POLLABLE_UNREF(po_at_start, "pollset_add_fd");
  }
  return error;
}

static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
                                                   pollable** pollable_obj) {
  grpc_error* error = GRPC_ERROR_NONE;
  pollable* po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      POLLABLE_UNREF(pollset->active_pollable, "pollset");
      error = pollable_create(PO_MULTI, &pollset->active_pollable);
      /* Any workers currently polling on this pollset must now be woken up so
       * that they can pick up the new active_pollable */
      if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
        gpr_log(GPR_INFO,
                "PS:%p active pollable transition from empty to multi",
                pollset);
      }
      static const char* err_desc =
          "pollset_as_multipollable_locked: empty -> multi";
      append_error(&error, pollset_kick_all(pollset), err_desc);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_orphan_mu);
      if (po_at_start->owner_orphaned) {
        // Unlock before Unref'ing the pollable
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
        POLLABLE_UNREF(pollset->active_pollable, "pollset");
        error = pollable_create(PO_MULTI, &pollset->active_pollable);
      } else {
        error = pollset_transition_pollable_from_fd_to_multi_locked(pollset,
                                                                    nullptr);
        gpr_mu_unlock(&po_at_start->owner_orphan_mu);
      }
      break;
    case PO_MULTI:
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
    *pollable_obj = nullptr;
  } else {
    gpr_atm_rel_store(&pollset->active_pollable_type,
                      pollset->active_pollable->type);
    *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
    POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
  }
  return error;
}

static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_add_fd", 0);

  // We never transition from PO_MULTI to other modes (i.e., PO_FD or PO_EMPTY)
  // and, thus, it is safe to simply store and check whether the FD has already
  // been added to the active pollable previously.
  if (gpr_atm_acq_load(&pollset->active_pollable_type) == PO_MULTI &&
      fd_has_pollset(fd, pollset)) {
    return;
  }

  grpc_core::MutexLock lock(&pollset->mu);
  grpc_error* error = pollset_add_fd_locked(pollset, fd);

  // If we are in PO_MULTI mode, we should update the pollsets of the FD.
  if (gpr_atm_no_barrier_load(&pollset->active_pollable_type) == PO_MULTI) {
    fd_add_pollset(fd, pollset);
  }

  GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}

/*******************************************************************************
 * Pollset-set Definitions
 */
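
// pss_lock_adam walks up the parent chain to the root ("adam") pollset_set,
// locking as it goes, and returns that root with its mutex held. Pollset-sets
// that have been merged into another simply forward to their parent, so all
// mutations happen on the root.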
static grpc_pollset_set* pss_lock_adam(grpc_pollset_set* pss) {
  gpr_mu_lock(&pss->mu);
  while (pss->parent != nullptr) {
    gpr_mu_unlock(&pss->mu);
    pss = pss->parent;
    gpr_mu_lock(&pss->mu);
  }
  return pss;
}

static grpc_pollset_set* pollset_set_create(void) {
  grpc_pollset_set* pss =
      static_cast<grpc_pollset_set*>(gpr_zalloc(sizeof(*pss)));
  gpr_mu_init(&pss->mu);
  new (&pss->refs) grpc_core::RefCount();
  return pss;
}

static void pollset_set_unref(grpc_pollset_set* pss) {
  if (pss == nullptr) return;
  if (GPR_LIKELY(!pss->refs.Unref())) return;
  pollset_set_unref(pss->parent);
  gpr_mu_destroy(&pss->mu);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    gpr_mu_lock(&pss->pollsets[i]->mu);
    if (0 == --pss->pollsets[i]->containing_pollset_set_count) {
      pollset_maybe_finish_shutdown(pss->pollsets[i]);
    }
    gpr_mu_unlock(&pss->pollsets[i]->mu);
  }
  for (size_t i = 0; i < pss->fd_count; i++) {
    UNREF_BY(pss->fds[i], 2, "pollset_set");
  }
  gpr_free(pss->pollsets);
  gpr_free(pss->fds);
  gpr_free(pss);
}

static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_fd";
  pss = pss_lock_adam(pss);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    append_error(&error, pollable_add_fd(pss->pollsets[i]->active_pollable, fd),
                 err_desc);
  }
  if (pss->fd_count == pss->fd_capacity) {
    pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
    pss->fds = static_cast<grpc_fd**>(
        gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds)));
  }
  REF_BY(fd, 2, "pollset_set");
  pss->fds[pss->fd_count++] = fd;
  gpr_mu_unlock(&pss->mu);

  GRPC_LOG_IF_ERROR(err_desc, error);
}

static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
  GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->fd_count; i++) {
    if (pss->fds[i] == fd) {
      UNREF_BY(fd, 2, "pollset_set");
      break;
    }
  }
  GPR_ASSERT(i != pss->fd_count);
  for (; i < pss->fd_count - 1; i++) {
    pss->fds[i] = pss->fds[i + 1];
  }
  pss->fd_count--;
  gpr_mu_unlock(&pss->mu);
}

static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->pollset_count; i++) {
    if (pss->pollsets[i] == ps) {
      break;
    }
  }
  GPR_ASSERT(i != pss->pollset_count);
  for (; i < pss->pollset_count - 1; i++) {
    pss->pollsets[i] = pss->pollsets[i + 1];
  }
  pss->pollset_count--;
  gpr_mu_unlock(&pss->mu);
  gpr_mu_lock(&ps->mu);
  if (0 == --ps->containing_pollset_set_count) {
    pollset_maybe_finish_shutdown(ps);
  }
  gpr_mu_unlock(&ps->mu);
}

// add all fds to pollables, and output a new array of unorphaned out_fds
// assumes pollsets are multipollable
static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
                                       grpc_pollset** pollsets,
                                       size_t pollset_count,
                                       const char* err_desc, grpc_fd** out_fds,
                                       size_t* out_fd_count) {
  GPR_TIMER_SCOPE("add_fds_to_pollsets", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  for (size_t i = 0; i < fd_count; i++) {
    gpr_mu_lock(&fds[i]->orphan_mu);
    if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
      gpr_mu_unlock(&fds[i]->orphan_mu);
      UNREF_BY(fds[i], 2, "pollset_set");
    } else {
      for (size_t j = 0; j < pollset_count; j++) {
        append_error(&error,
                     pollable_add_fd(pollsets[j]->active_pollable, fds[i]),
                     err_desc);
      }
      gpr_mu_unlock(&fds[i]->orphan_mu);
      out_fds[(*out_fd_count)++] = fds[i];
    }
  }
  return error;
}

static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_pollset";
  pollable* pollable_obj = nullptr;
  gpr_mu_lock(&ps->mu);
  if (!GRPC_LOG_IF_ERROR(err_desc,
                         pollset_as_multipollable_locked(ps, &pollable_obj))) {
    GPR_ASSERT(pollable_obj == nullptr);
    gpr_mu_unlock(&ps->mu);
    return;
  }
  ps->containing_pollset_set_count++;
  gpr_mu_unlock(&ps->mu);
  pss = pss_lock_adam(pss);
  size_t initial_fd_count = pss->fd_count;
  pss->fd_count = 0;
  append_error(&error,
               add_fds_to_pollsets(pss->fds, initial_fd_count, &ps, 1, err_desc,
                                   pss->fds, &pss->fd_count),
               err_desc);
  if (pss->pollset_count == pss->pollset_capacity) {
    pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
    pss->pollsets = static_cast<grpc_pollset**>(gpr_realloc(
        pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets)));
  }
  pss->pollsets[pss->pollset_count++] = ps;
  gpr_mu_unlock(&pss->mu);
  POLLABLE_UNREF(pollable_obj, "pollset_set");

  GRPC_LOG_IF_ERROR(err_desc, error);
}

static void pollset_set_add_pollset_set(grpc_pollset_set* a,
                                        grpc_pollset_set* b) {
  GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
  }
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_set_add_fd";
  for (;;) {
    if (a == b) {
      // pollset ancestors are the same: nothing to do
      return;
    }
    if (a > b) {
      GPR_SWAP(grpc_pollset_set*, a, b);
    }
    gpr_mu* a_mu = &a->mu;
    gpr_mu* b_mu = &b->mu;
    gpr_mu_lock(a_mu);
    gpr_mu_lock(b_mu);
    if (a->parent != nullptr) {
      a = a->parent;
    } else if (b->parent != nullptr) {
      b = b->parent;
    } else {
      break;  // exit loop, both pollsets locked
    }
    gpr_mu_unlock(a_mu);
    gpr_mu_unlock(b_mu);
  }
  // try to do the least copying possible
  // TODO(sreek): there's probably a better heuristic here
  const size_t a_size = a->fd_count + a->pollset_count;
  const size_t b_size = b->fd_count + b->pollset_count;
  if (b_size > a_size) {
    GPR_SWAP(grpc_pollset_set*, a, b);
  }
  if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
    gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
  }
  a->refs.Ref();
  b->parent = a;
  if (a->fd_capacity < a->fd_count + b->fd_count) {
    a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
    a->fds = static_cast<grpc_fd**>(
        gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds)));
  }
  size_t initial_a_fd_count = a->fd_count;
  a->fd_count = 0;
  append_error(
      &error,
      add_fds_to_pollsets(a->fds, initial_a_fd_count, b->pollsets,
                          b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
      err_desc);
  append_error(
      &error,
      add_fds_to_pollsets(b->fds, b->fd_count, a->pollsets, a->pollset_count,
                          "merge_b2a", a->fds, &a->fd_count),
      err_desc);
  if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
    a->pollset_capacity =
        GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
    a->pollsets = static_cast<grpc_pollset**>(
        gpr_realloc(a->pollsets, a->pollset_capacity * sizeof(*a->pollsets)));
  }
  if (b->pollset_count > 0) {
    memcpy(a->pollsets + a->pollset_count, b->pollsets,
           b->pollset_count * sizeof(*b->pollsets));
  }
  a->pollset_count += b->pollset_count;
  gpr_free(b->fds);
  gpr_free(b->pollsets);
  b->fds = nullptr;
  b->pollsets = nullptr;
  b->fd_count = b->fd_capacity = b->pollset_count = b->pollset_capacity = 0;
  gpr_mu_unlock(&a->mu);
  gpr_mu_unlock(&b->mu);
}

static void pollset_set_del_pollset_set(grpc_pollset_set* /*bag*/,
                                        grpc_pollset_set* /*item*/) {}

/*******************************************************************************
 * Event engine binding
 */

static bool is_any_background_poller_thread(void) { return false; }

static void shutdown_background_closure(void) {}

static bool add_closure_to_background_poller(grpc_closure* /*closure*/,
                                             grpc_error* /*error*/) {
  return false;
}

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}
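
// The vtable below wires these static functions into iomgr's event-engine
// interface; grpc_init_epollex_linux returns it when epollex can be used.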

static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset),
    true,
    false,

    fd_create,
    fd_wrapped_fd,
    fd_orphan,
    fd_shutdown,
    fd_notify_on_read,
    fd_notify_on_write,
    fd_notify_on_error,
    fd_become_readable,
    fd_become_writable,
    fd_has_errors,
    fd_is_shutdown,

    pollset_init,
    pollset_shutdown,
    pollset_destroy,
    pollset_work,
    pollset_kick,
    pollset_add_fd,

    pollset_set_create,
    pollset_set_unref,  // destroy ==> unref 1 public ref
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,

    pollset_set_add_fd,
    pollset_set_del_fd,

    is_any_background_poller_thread,
    shutdown_background_closure,
    shutdown_engine,
    add_closure_to_background_poller,
};

const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool /*explicitly_requested*/) {
  if (!grpc_has_wakeup_fd()) {
    gpr_log(GPR_ERROR, "Skipping epollex because of no wakeup fd.");
    return nullptr;
  }

  if (!grpc_is_epollexclusive_available()) {
    gpr_log(GPR_INFO, "Skipping epollex because it is not supported.");
    return nullptr;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    pollset_global_shutdown();
    fd_global_shutdown();
    return nullptr;
  }

  return &vtable;
}

#else /* defined(GRPC_LINUX_EPOLL_CREATE1) */

#if defined(GRPC_POSIX_SOCKET_EV_EPOLLEX)
#include "src/core/lib/iomgr/ev_epollex_linux.h"
/* If GRPC_LINUX_EPOLL_CREATE1 is not defined, it means
   epoll_create1 is not available. Return NULL */
const grpc_event_engine_vtable* grpc_init_epollex_linux(
    bool /*explicitly_requested*/) {
  return nullptr;
}
#endif /* defined(GRPC_POSIX_SOCKET_EV_EPOLLEX) */

#endif /* !defined(GRPC_LINUX_EPOLL_CREATE1) */