/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/executor.h"

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"

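// Each Executor owns a small pool of worker threads, each with its own closure
// queue (ThreadState). Enqueue() picks a queue and ThreadMain() drains it;
// worker threads are started lazily, up to max_threads_.
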
#define MAX_DEPTH 2

#define EXECUTOR_TRACE(format, ...)                       \
  if (executor_trace.enabled()) {                         \
    gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__);   \
  }

#define EXECUTOR_TRACE0(str)            \
  if (executor_trace.enabled()) {       \
    gpr_log(GPR_INFO, "EXECUTOR " str); \
  }

namespace grpc_core {
namespace {

// Thread-local pointer to the ThreadState of the executor thread we are
// currently running on (nullptr when not on an executor thread).
GPR_TLS_DECL(g_this_thread_state);

Executor* executors[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)];

void default_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, true /* is_short */);
}

void default_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, false /* is_short */);
}

void resolver_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, true /* is_short */);
}

void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, false /* is_short */);
}

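// One closure-scheduler vtable per (executor type, job type) pair. The
// schedulers_ table below wraps these vtables so callers can obtain a
// grpc_closure_scheduler through Executor::Scheduler().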
const grpc_closure_scheduler_vtable
    vtables_[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)]
            [static_cast<size_t>(ExecutorJobType::NUM_JOB_TYPES)] = {
                {{&default_enqueue_short, &default_enqueue_short,
                  "def-ex-short"},
                 {&default_enqueue_long, &default_enqueue_long, "def-ex-long"}},
                {{&resolver_enqueue_short, &resolver_enqueue_short,
                  "res-ex-short"},
                 {&resolver_enqueue_long, &resolver_enqueue_long,
                  "res-ex-long"}}};

grpc_closure_scheduler
    schedulers_[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)]
               [static_cast<size_t>(ExecutorJobType::NUM_JOB_TYPES)] = {
                   {{&vtables_[static_cast<size_t>(ExecutorType::DEFAULT)]
                              [static_cast<size_t>(ExecutorJobType::SHORT)]},
                    {&vtables_[static_cast<size_t>(ExecutorType::DEFAULT)]
                              [static_cast<size_t>(ExecutorJobType::LONG)]}},
                   {{&vtables_[static_cast<size_t>(ExecutorType::RESOLVER)]
                              [static_cast<size_t>(ExecutorJobType::SHORT)]},
                    {&vtables_[static_cast<size_t>(ExecutorType::RESOLVER)]
                              [static_cast<size_t>(ExecutorJobType::LONG)]}}};

}  // namespace

TraceFlag executor_trace(false, "executor");

Executor::Executor(const char* name) : name_(name) {
  adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
  gpr_atm_rel_store(&num_threads_, 0);
  max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
}

void Executor::Init() { SetThreading(true); }

size_t Executor::RunClosures(const char* executor_name,
                             grpc_closure_list list) {
  size_t n = 0;

  // In the executor, the ExecCtx for the thread is declared in the executor
  // thread itself, but this is the point where we could start seeing
  // application-level callbacks. No need to create a new ExecCtx, though,
  // since there already is one and it is flushed (but not destructed) in this
  // function itself.
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx(
      GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  grpc_closure* c = list.head;
  while (c != nullptr) {
    grpc_closure* next = c->next_data.next;
    grpc_error* error = c->error_data.error;
#ifndef NDEBUG
    EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
                   c->file_created, c->line_created);
    c->scheduled = false;
#else
    EXECUTOR_TRACE("(%s) run %p", executor_name, c);
#endif
    c->cb(c->cb_arg, error);
    GRPC_ERROR_UNREF(error);
    c = next;
    n++;
    grpc_core::ExecCtx::Get()->Flush();
  }

  return n;
}

bool Executor::IsThreaded() const {
  return gpr_atm_acq_load(&num_threads_) > 0;
}

void Executor::SetThreading(bool threading) {
  gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
  EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);

  if (threading) {
    if (curr_num_threads > 0) {
      EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads > 0", name_);
      return;
    }

    GPR_ASSERT(num_threads_ == 0);
    gpr_atm_rel_store(&num_threads_, 1);
    gpr_tls_init(&g_this_thread_state);
    thd_state_ = static_cast<ThreadState*>(
        gpr_zalloc(sizeof(ThreadState) * max_threads_));

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_init(&thd_state_[i].mu);
      gpr_cv_init(&thd_state_[i].cv);
      thd_state_[i].id = i;
      thd_state_[i].name = name_;
      thd_state_[i].thd = grpc_core::Thread();
      thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
    }

    thd_state_[0].thd =
        grpc_core::Thread(name_, &Executor::ThreadMain, &thd_state_[0]);
    thd_state_[0].thd.Start();
  } else {  // !threading
    if (curr_num_threads == 0) {
      EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
      return;
    }

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_lock(&thd_state_[i].mu);
      thd_state_[i].shutdown = true;
      gpr_cv_signal(&thd_state_[i].cv);
      gpr_mu_unlock(&thd_state_[i].mu);
    }

    /* Ensure no thread is adding a new thread. Once this is past, then no
     * thread will try to add a new one either (since shutdown is true) */
    gpr_spinlock_lock(&adding_thread_lock_);
    gpr_spinlock_unlock(&adding_thread_lock_);

    curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
    for (gpr_atm i = 0; i < curr_num_threads; i++) {
      thd_state_[i].thd.Join();
      EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
                     i + 1, curr_num_threads);
    }

    gpr_atm_rel_store(&num_threads_, 0);
    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_destroy(&thd_state_[i].mu);
      gpr_cv_destroy(&thd_state_[i].cv);
      RunClosures(thd_state_[i].name, thd_state_[i].elems);
    }

    gpr_free(thd_state_);
    gpr_tls_destroy(&g_this_thread_state);

    // grpc_iomgr_shutdown_background_closure() will close all the registered
    // fds in the background poller, and wait for all pending closures to
    // finish. Thus, never call Executor::SetThreading(false) in the middle of
    // an application.
    // TODO(guantaol): create another method to finish all the pending closures
    // registered in the background poller by grpc_core::Executor.
    grpc_iomgr_shutdown_background_closure();
  }

  EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
}

void Executor::Shutdown() { SetThreading(false); }

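// Worker-thread loop: wait until closures are queued on this ThreadState (or
// shutdown is requested), swap the queue out under the lock, run the closures
// outside the lock, and report how many were run so the next iteration can
// decrement this thread's queue depth.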
void Executor::ThreadMain(void* arg) {
  ThreadState* ts = static_cast<ThreadState*>(arg);
  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(ts));

  grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  size_t subtract_depth = 0;
  for (;;) {
    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
                   ts->name, ts->id, subtract_depth);

    gpr_mu_lock(&ts->mu);
    ts->depth -= subtract_depth;
    // Wait for closures to be enqueued or for the executor to be shutdown
    while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
      ts->queued_long_job = false;
      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
    }

    if (ts->shutdown) {
      EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
      gpr_mu_unlock(&ts->mu);
      break;
    }

    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
    grpc_closure_list closures = ts->elems;
    ts->elems = GRPC_CLOSURE_LIST_INIT;
    gpr_mu_unlock(&ts->mu);

    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);

    grpc_core::ExecCtx::Get()->InvalidateNow();
    subtract_depth = RunClosures(ts->name, closures);
  }
}

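// Enqueue strategy: if the executor is not threaded, run the closure on the
// caller's ExecCtx; otherwise prefer the background poller, then the current
// executor thread (if any) or a hashed thread state, rotating past any thread
// that already has a long job queued. Exceeding MAX_DEPTH on a queue is a
// hint to spawn another worker thread (up to max_threads_).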
void Executor::Enqueue(grpc_closure* closure, grpc_error* error,
                       bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
  }

  do {
    retry_push = false;
    size_t cur_thread_count =
        static_cast<size_t>(gpr_atm_acq_load(&num_threads_));

    // If the number of threads is zero (i.e. either the executor is not
    // threaded or is already shutdown), then queue the closure on the exec
    // context itself
    if (cur_thread_count == 0) {
#ifndef NDEBUG
      EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
                     closure->file_created, closure->line_created);
#else
      EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
#endif
      grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
                               closure, error);
      return;
    }

    if (grpc_iomgr_add_closure_to_background_poller(closure, error)) {
      return;
    }

    ThreadState* ts = (ThreadState*)gpr_tls_get(&g_this_thread_state);
    if (ts == nullptr) {
      // Not running on an executor thread; pick a thread state by hashing the
      // current ExecCtx pointer
      ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
                                        cur_thread_count)];
    } else {
      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
    }

    ThreadState* orig_ts = ts;
    bool try_new_thread = false;

    for (;;) {
#ifndef NDEBUG
      EXECUTOR_TRACE(
          "(%s) try to schedule %p (%s) (created %s:%d) to thread "
          "%" PRIdPTR,
          name_, closure, is_short ? "short" : "long", closure->file_created,
          closure->line_created, ts->id);
#else
      EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
                     closure, is_short ? "short" : "long", ts->id);
#endif

      gpr_mu_lock(&ts->mu);
      if (ts->queued_long_job) {
        // if there's a long job queued, we never queue anything else to this
        // queue (since long jobs can take 'infinite' time and we need to
        // guarantee no starvation). Spin through queues and try again
        gpr_mu_unlock(&ts->mu);
        size_t idx = ts->id;
        ts = &thd_state_[(idx + 1) % cur_thread_count];
        if (ts == orig_ts) {
          // We cycled through all the threads. Retry enqueue again by
          // creating a new thread.
          //
          // TODO (sreek): There is a potential issue here. We are
          // unconditionally setting try_new_thread to true here. What if the
          // executor is shutdown OR if cur_thread_count is already equal to
          // max_threads?
          // (Fortunately, this is not an issue yet (as of July 2018) because
          // there is only one instance of a long job in gRPC and hence we
          // will not hit this code path)
          retry_push = true;
          try_new_thread = true;
          break;
        }

        continue;  // Try the next thread-state
      }

      // == Found the thread state (i.e. thread) to enqueue this closure! ==

      // Also, if this thread has been waiting for closures, wake it up.
      // - If grpc_closure_list_empty() is true and the Executor is not
      //   shutdown, it means that the thread must be waiting in ThreadMain()
      // - Note that gpr_cv_signal() won't immediately wakeup the thread. That
      //   happens after we release the mutex &ts->mu a few lines below
      if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
        gpr_cv_signal(&ts->cv);
      }

      grpc_closure_list_append(&ts->elems, closure, error);

      // If we already queued more than MAX_DEPTH number of closures on this
      // thread, use this as a hint to create more threads
      ts->depth++;
      try_new_thread = ts->depth > MAX_DEPTH &&
                       cur_thread_count < max_threads_ && !ts->shutdown;

      ts->queued_long_job = !is_short;

      gpr_mu_unlock(&ts->mu);
      break;
    }

    if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
      cur_thread_count = static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
      if (cur_thread_count < max_threads_) {
        // Increment num_threads (safe to do a store instead of a cas because
        // we always increment num_threads under the 'adding_thread_lock')
        gpr_atm_rel_store(&num_threads_, cur_thread_count + 1);

        thd_state_[cur_thread_count].thd = grpc_core::Thread(
            name_, &Executor::ThreadMain, &thd_state_[cur_thread_count]);
        thd_state_[cur_thread_count].thd.Start();
      }
      gpr_spinlock_unlock(&adding_thread_lock_);
    }

    if (retry_push) {
      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
    }
  } while (retry_push);
}

// Executor::InitAll() and Executor::ShutdownAll() functions are called in the
// grpc_init() and grpc_shutdown() code paths which are protected by a global
// mutex. So it is okay to assume that these functions are thread-safe
void Executor::InitAll() {
  EXECUTOR_TRACE0("Executor::InitAll() enter");

  // Return if Executor::InitAll() is already called earlier
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] != nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] !=
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)] =
      grpc_core::New<Executor>("default-executor");
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] =
      grpc_core::New<Executor>("resolver-executor");

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Init();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Init();

  EXECUTOR_TRACE0("Executor::InitAll() done");
}

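// Illustrative use of the schedulers returned below (the real call sites live
// elsewhere in the tree; this sketch assumes the GRPC_CLOSURE_INIT /
// GRPC_CLOSURE_SCHED closure API, with a hypothetical callback cb_fn):
//   GRPC_CLOSURE_INIT(&closure, cb_fn, cb_arg,
//                     grpc_core::Executor::Scheduler(
//                         grpc_core::ExecutorJobType::SHORT));
//   GRPC_CLOSURE_SCHED(&closure, GRPC_ERROR_NONE);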
grpc_closure_scheduler* Executor::Scheduler(ExecutorType executor_type,
                                            ExecutorJobType job_type) {
  return &schedulers_[static_cast<size_t>(executor_type)]
                     [static_cast<size_t>(job_type)];
}

grpc_closure_scheduler* Executor::Scheduler(ExecutorJobType job_type) {
  return Executor::Scheduler(ExecutorType::DEFAULT, job_type);
}

void Executor::ShutdownAll() {
  EXECUTOR_TRACE0("Executor::ShutdownAll() enter");

  // Return if Executor::ShutdownAll() is already called earlier
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] == nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] ==
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Shutdown();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Shutdown();

  // Delete the executor objects.
  //
  // NOTE: It is important to call Shutdown() on all executors first before
  // calling Delete() because it is possible for one executor (that is not
  // shutdown yet) to call Enqueue() on a different executor which is already
  // shutdown. This is legal and in such cases, the Enqueue() operation
  // effectively "fails" and enqueues that closure on the calling thread's
  // exec_ctx.
  //
  // By ensuring that all executors are shutdown first, we are also ensuring
  // that no thread is active across all executors.
  grpc_core::Delete<Executor>(
      executors[static_cast<size_t>(ExecutorType::DEFAULT)]);
  grpc_core::Delete<Executor>(
      executors[static_cast<size_t>(ExecutorType::RESOLVER)]);
  executors[static_cast<size_t>(ExecutorType::DEFAULT)] = nullptr;
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] = nullptr;

  EXECUTOR_TRACE0("Executor::ShutdownAll() done");
}

bool Executor::IsThreaded(ExecutorType executor_type) {
  GPR_ASSERT(executor_type < ExecutorType::NUM_EXECUTORS);
  return executors[static_cast<size_t>(executor_type)]->IsThreaded();
}

bool Executor::IsThreadedDefault() {
  return Executor::IsThreaded(ExecutorType::DEFAULT);
}

469 EXECUTOR_TRACE("Executor::SetThreadingAll(%d) called", enable);
470 for (size_t i = 0; i < static_cast<size_t>(ExecutorType::NUM_EXECUTORS);
472 executors[i]->SetThreading(enable);
476 void Executor::SetThreadingDefault(bool enable) {
477 EXECUTOR_TRACE("Executor::SetThreadingDefault(%d) called", enable);
478 executors[static_cast<size_t>(ExecutorType::DEFAULT)]->SetThreading(enable);
481 } // namespace grpc_core