// src/core/lib/iomgr/executor.cc (imported upstream gRPC 1.21.0)
/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/executor.h"

#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"

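// Per-thread queue depth beyond which Enqueue() treats the backlog as a hint
// to spawn an additional worker thread (see Enqueue() below).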
#define MAX_DEPTH 2

#define EXECUTOR_TRACE(format, ...)                       \
  do {                                                    \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) {        \
      gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
    }                                                     \
  } while (0)

#define EXECUTOR_TRACE0(str)                       \
  do {                                             \
    if (GRPC_TRACE_FLAG_ENABLED(executor_trace)) { \
      gpr_log(GPR_INFO, "EXECUTOR " str);          \
    }                                              \
  } while (0)
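
// Both macros log only when the "executor" trace flag is enabled (e.g. by
// running with the environment variable GRPC_TRACE=executor).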

namespace grpc_core {
namespace {

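// Holds the ThreadState of the executor thread that is currently running;
// null on threads that are not executor threads. Set in ThreadMain(), read
// in Enqueue().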
GPR_TLS_DECL(g_this_thread_state);

Executor* executors[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)];

void default_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, true /* is_short */);
}

void default_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Enqueue(
      closure, error, false /* is_short */);
}

void resolver_enqueue_short(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, true /* is_short */);
}

void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) {
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Enqueue(
      closure, error, false /* is_short */);
}

const grpc_closure_scheduler_vtable
    vtables_[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)]
            [static_cast<size_t>(ExecutorJobType::NUM_JOB_TYPES)] = {
                {{&default_enqueue_short, &default_enqueue_short,
                  "def-ex-short"},
                 {&default_enqueue_long, &default_enqueue_long, "def-ex-long"}},
                {{&resolver_enqueue_short, &resolver_enqueue_short,
                  "res-ex-short"},
                 {&resolver_enqueue_long, &resolver_enqueue_long,
                  "res-ex-long"}}};

grpc_closure_scheduler
    schedulers_[static_cast<size_t>(ExecutorType::NUM_EXECUTORS)]
               [static_cast<size_t>(ExecutorJobType::NUM_JOB_TYPES)] = {
                   {{&vtables_[static_cast<size_t>(ExecutorType::DEFAULT)]
                              [static_cast<size_t>(ExecutorJobType::SHORT)]},
                    {&vtables_[static_cast<size_t>(ExecutorType::DEFAULT)]
                              [static_cast<size_t>(ExecutorJobType::LONG)]}},
                   {{&vtables_[static_cast<size_t>(ExecutorType::RESOLVER)]
                              [static_cast<size_t>(ExecutorJobType::SHORT)]},
                    {&vtables_[static_cast<size_t>(ExecutorType::RESOLVER)]
                              [static_cast<size_t>(ExecutorJobType::LONG)]}}};
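
// vtables_ and schedulers_ form a 2x2 matrix indexed by [executor type]
// [job type] (DEFAULT/RESOLVER on one axis, SHORT/LONG on the other). Each
// vtable entry forwards to Enqueue() on the corresponding executor with the
// matching is_short flag; Executor::Scheduler() below hands out pointers
// into schedulers_.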

}  // namespace

TraceFlag executor_trace(false, "executor");

Executor::Executor(const char* name) : name_(name) {
  adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
  gpr_atm_rel_store(&num_threads_, 0);
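  // Scale the pool with the machine: allow up to twice the number of cores,
  // but always at least one thread.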
  max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
}

void Executor::Init() { SetThreading(true); }

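// Runs every closure in |list| on the calling thread, flushing the thread's
// ExecCtx after each one, and returns the number of closures executed. The
// count is used by ThreadMain() to shrink the thread's recorded queue depth
// once the drained closures have run.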
size_t Executor::RunClosures(const char* executor_name,
                             grpc_closure_list list) {
  size_t n = 0;

  // In the executor, the ExecCtx for the thread is declared in the executor
  // thread itself, but this is the point where we could start seeing
  // application-level callbacks. No need to create a new ExecCtx, though,
  // since there already is one and it is flushed (but not destructed) in this
  // function itself.
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx(
      GRPC_APP_CALLBACK_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  grpc_closure* c = list.head;
  while (c != nullptr) {
    grpc_closure* next = c->next_data.next;
    grpc_error* error = c->error_data.error;
#ifndef NDEBUG
    EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
                   c->file_created, c->line_created);
    c->scheduled = false;
#else
    EXECUTOR_TRACE("(%s) run %p", executor_name, c);
#endif
    c->cb(c->cb_arg, error);
    GRPC_ERROR_UNREF(error);
    c = next;
    n++;
    grpc_core::ExecCtx::Get()->Flush();
  }

  return n;
}

bool Executor::IsThreaded() const {
  return gpr_atm_acq_load(&num_threads_) > 0;
}

void Executor::SetThreading(bool threading) {
  gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
  EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);

  if (threading) {
    if (curr_num_threads > 0) {
      EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads > 0", name_);
      return;
    }

    GPR_ASSERT(num_threads_ == 0);
    gpr_atm_rel_store(&num_threads_, 1);
    gpr_tls_init(&g_this_thread_state);
    thd_state_ = static_cast<ThreadState*>(
        gpr_zalloc(sizeof(ThreadState) * max_threads_));

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_init(&thd_state_[i].mu);
      gpr_cv_init(&thd_state_[i].cv);
      thd_state_[i].id = i;
      thd_state_[i].name = name_;
      thd_state_[i].thd = grpc_core::Thread();
      thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT;
    }

    thd_state_[0].thd =
        grpc_core::Thread(name_, &Executor::ThreadMain, &thd_state_[0]);
    thd_state_[0].thd.Start();
  } else {  // !threading
    if (curr_num_threads == 0) {
      EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
      return;
    }

    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_lock(&thd_state_[i].mu);
      thd_state_[i].shutdown = true;
      gpr_cv_signal(&thd_state_[i].cv);
      gpr_mu_unlock(&thd_state_[i].mu);
    }

    /* Ensure no thread is adding a new thread. Once this is past, then no
     * thread will try to add a new one either (since shutdown is true) */
    gpr_spinlock_lock(&adding_thread_lock_);
    gpr_spinlock_unlock(&adding_thread_lock_);

    curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
    for (gpr_atm i = 0; i < curr_num_threads; i++) {
      thd_state_[i].thd.Join();
      EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
                     i + 1, curr_num_threads);
    }

    gpr_atm_rel_store(&num_threads_, 0);
    for (size_t i = 0; i < max_threads_; i++) {
      gpr_mu_destroy(&thd_state_[i].mu);
      gpr_cv_destroy(&thd_state_[i].cv);
      RunClosures(thd_state_[i].name, thd_state_[i].elems);
    }

    gpr_free(thd_state_);
    gpr_tls_destroy(&g_this_thread_state);

    // grpc_iomgr_shutdown_background_closure() will close all the registered
    // fds in the background poller and wait for all pending closures to
    // finish. Thus, never call Executor::SetThreading(false) in the middle of
    // an application.
    // TODO(guantaol): create another method to finish all the pending closures
    // registered in the background poller by grpc_core::Executor.
    grpc_iomgr_shutdown_background_closure();
  }

  EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
}

void Executor::Shutdown() { SetThreading(false); }

void Executor::ThreadMain(void* arg) {
  ThreadState* ts = static_cast<ThreadState*>(arg);
  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(ts));

  grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

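  // Number of closures run on the previous loop iteration; subtracted from
  // ts->depth (under ts->mu) so that depth only counts closures that are
  // still queued.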
  size_t subtract_depth = 0;
  for (;;) {
    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
                   ts->name, ts->id, subtract_depth);

    gpr_mu_lock(&ts->mu);
    ts->depth -= subtract_depth;
    // Wait for closures to be enqueued or for the executor to be shut down
    while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
      ts->queued_long_job = false;
      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
    }

    if (ts->shutdown) {
      EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
      gpr_mu_unlock(&ts->mu);
      break;
    }

    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
    grpc_closure_list closures = ts->elems;
    ts->elems = GRPC_CLOSURE_LIST_INIT;
    gpr_mu_unlock(&ts->mu);

    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);

    grpc_core::ExecCtx::Get()->InvalidateNow();
    subtract_depth = RunClosures(ts->name, closures);
  }
}

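// Enqueue tries the following, in order: (1) if the executor has no threads
// (not threaded, or already shut down), append the closure to the caller's
// ExecCtx; (2) hand the closure to the background poller if it accepts it;
// (3) push onto a thread's queue, round-robining past threads that already
// have a long job queued; (4) if the chosen queue looks backed up, also try
// to spawn a new thread (up to max_threads_).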
void Executor::Enqueue(grpc_closure* closure, grpc_error* error,
                       bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
  }

  do {
    retry_push = false;
    size_t cur_thread_count =
        static_cast<size_t>(gpr_atm_acq_load(&num_threads_));

    // If the number of threads is zero (i.e., the executor is either not
    // threaded or already shut down), queue the closure on the exec context
    // itself
    if (cur_thread_count == 0) {
#ifndef NDEBUG
      EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
                     closure->file_created, closure->line_created);
#else
      EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
#endif
      grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
                               closure, error);
      return;
    }

    if (grpc_iomgr_add_closure_to_background_poller(closure, error)) {
      return;
    }

    ThreadState* ts =
        reinterpret_cast<ThreadState*>(gpr_tls_get(&g_this_thread_state));
    if (ts == nullptr) {
      ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
                                        cur_thread_count)];
    } else {
      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
    }

    ThreadState* orig_ts = ts;
    bool try_new_thread = false;

    for (;;) {
#ifndef NDEBUG
      EXECUTOR_TRACE(
          "(%s) try to schedule %p (%s) (created %s:%d) to thread "
          "%" PRIdPTR,
          name_, closure, is_short ? "short" : "long", closure->file_created,
          closure->line_created, ts->id);
#else
      EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
                     closure, is_short ? "short" : "long", ts->id);
#endif

      gpr_mu_lock(&ts->mu);
      if (ts->queued_long_job) {
        // If there's a long job queued, we never queue anything else to this
        // queue (since long jobs can take 'infinite' time and we need to
        // guarantee no starvation). Spin through the other queues and try
        // again
        gpr_mu_unlock(&ts->mu);
        size_t idx = ts->id;
        ts = &thd_state_[(idx + 1) % cur_thread_count];
        if (ts == orig_ts) {
          // We cycled through all the threads. Retry the enqueue by creating
          // a new thread
          //
          // TODO (sreek): There is a potential issue here. We are
          // unconditionally setting try_new_thread to true here. What if the
          // executor is shutdown OR if cur_thread_count is already equal to
          // max_threads?
          // (Fortunately, this is not an issue yet (as of July 2018) because
          // there is only one instance of a long job in gRPC and hence we
          // will not hit this code path)
          retry_push = true;
          try_new_thread = true;
          break;
        }

        continue;  // Try the next thread-state
      }

      // == Found the thread state (i.e., thread) to enqueue this closure! ==

      // Also, if this thread has been waiting for closures, wake it up.
      // - If grpc_closure_list_empty() is true and the Executor is not
      //   shutdown, it means that the thread must be waiting in ThreadMain()
      // - Note that gpr_cv_signal() won't immediately wake up the thread. That
      //   happens after we release the mutex &ts->mu a few lines below
      if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
        gpr_cv_signal(&ts->cv);
      }

      grpc_closure_list_append(&ts->elems, closure, error);

      // If we have already queued more than MAX_DEPTH closures on this
      // thread, use this as a hint to create more threads
      ts->depth++;
      try_new_thread = ts->depth > MAX_DEPTH &&
                       cur_thread_count < max_threads_ && !ts->shutdown;

      ts->queued_long_job = !is_short;

      gpr_mu_unlock(&ts->mu);
      break;
    }

    if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
      cur_thread_count = static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
      if (cur_thread_count < max_threads_) {
        // Increment num_threads (safe to do a store instead of a CAS because
        // we always increment num_threads under the 'adding_thread_lock')
        gpr_atm_rel_store(&num_threads_, cur_thread_count + 1);

        thd_state_[cur_thread_count].thd = grpc_core::Thread(
            name_, &Executor::ThreadMain, &thd_state_[cur_thread_count]);
        thd_state_[cur_thread_count].thd.Start();
      }
      gpr_spinlock_unlock(&adding_thread_lock_);
    }

    if (retry_push) {
      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
    }
  } while (retry_push);
}

// Executor::InitAll() and Executor::ShutdownAll() are called in the
// grpc_init() and grpc_shutdown() code paths, which are protected by a
// global mutex, so it is okay to assume that these functions are thread-safe.
void Executor::InitAll() {
  EXECUTOR_TRACE0("Executor::InitAll() enter");

  // Return if Executor::InitAll() has already been called
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] != nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] !=
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)] =
      grpc_core::New<Executor>("default-executor");
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] =
      grpc_core::New<Executor>("resolver-executor");

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Init();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Init();

  EXECUTOR_TRACE0("Executor::InitAll() done");
}

grpc_closure_scheduler* Executor::Scheduler(ExecutorType executor_type,
                                            ExecutorJobType job_type) {
  return &schedulers_[static_cast<size_t>(executor_type)]
                     [static_cast<size_t>(job_type)];
}

grpc_closure_scheduler* Executor::Scheduler(ExecutorJobType job_type) {
  return Executor::Scheduler(ExecutorType::DEFAULT, job_type);
}
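
// A minimal sketch of how a caller typically uses these schedulers (the
// closure and callback below are hypothetical; GRPC_CLOSURE_INIT and
// GRPC_CLOSURE_SCHED are the closure macros used throughout this codebase):
//
//   static void on_done(void* arg, grpc_error* error) { /* ... */ }
//
//   grpc_closure done;
//   GRPC_CLOSURE_INIT(&done, on_done, /*cb_arg=*/nullptr,
//                     Executor::Scheduler(ExecutorJobType::SHORT));
//   GRPC_CLOSURE_SCHED(&done, GRPC_ERROR_NONE);
//
// Scheduling through the returned scheduler lands in Enqueue() above on the
// DEFAULT executor with is_short == true.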

void Executor::ShutdownAll() {
  EXECUTOR_TRACE0("Executor::ShutdownAll() enter");

  // Return if Executor::ShutdownAll() has already been called
  if (executors[static_cast<size_t>(ExecutorType::DEFAULT)] == nullptr) {
    GPR_ASSERT(executors[static_cast<size_t>(ExecutorType::RESOLVER)] ==
               nullptr);
    return;
  }

  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->Shutdown();
  executors[static_cast<size_t>(ExecutorType::RESOLVER)]->Shutdown();

  // Delete the executor objects.
  //
  // NOTE: It is important to call Shutdown() on all executors first before
  // calling Delete() because it is possible for one executor (that is not
  // shut down yet) to call Enqueue() on a different executor which is already
  // shut down. This is legal and in such cases, the Enqueue() operation
  // effectively "fails" and enqueues that closure on the calling thread's
  // exec_ctx.
  //
  // By ensuring that all executors are shut down first, we are also ensuring
  // that no thread is active across all executors.

  grpc_core::Delete<Executor>(
      executors[static_cast<size_t>(ExecutorType::DEFAULT)]);
  grpc_core::Delete<Executor>(
      executors[static_cast<size_t>(ExecutorType::RESOLVER)]);
  executors[static_cast<size_t>(ExecutorType::DEFAULT)] = nullptr;
  executors[static_cast<size_t>(ExecutorType::RESOLVER)] = nullptr;

  EXECUTOR_TRACE0("Executor::ShutdownAll() done");
}

bool Executor::IsThreaded(ExecutorType executor_type) {
  GPR_ASSERT(executor_type < ExecutorType::NUM_EXECUTORS);
  return executors[static_cast<size_t>(executor_type)]->IsThreaded();
}

bool Executor::IsThreadedDefault() {
  return Executor::IsThreaded(ExecutorType::DEFAULT);
}

void Executor::SetThreadingAll(bool enable) {
  EXECUTOR_TRACE("Executor::SetThreadingAll(%d) called", enable);
  for (size_t i = 0; i < static_cast<size_t>(ExecutorType::NUM_EXECUTORS);
       i++) {
    executors[i]->SetThreading(enable);
  }
}

void Executor::SetThreadingDefault(bool enable) {
  EXECUTOR_TRACE("Executor::SetThreadingDefault(%d) called", enable);
  executors[static_cast<size_t>(ExecutorType::DEFAULT)]->SetThreading(enable);
}

}  // namespace grpc_core