3 * Copyright 2015 gRPC authors.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
19 #include <grpc/support/port_platform.h>
25 #include "absl/strings/str_cat.h"
27 #include <grpc/support/alloc.h>
28 #include <grpc/support/cpu.h>
29 #include <grpc/support/log.h>
30 #include <grpc/support/sync.h>
32 #include "src/core/lib/debug/trace.h"
33 #include "src/core/lib/gpr/spinlock.h"
34 #include "src/core/lib/gpr/tls.h"
35 #include "src/core/lib/gpr/useful.h"
36 #include "src/core/lib/iomgr/exec_ctx.h"
37 #include "src/core/lib/iomgr/port.h"
38 #include "src/core/lib/iomgr/time_averaged_stats.h"
39 #include "src/core/lib/iomgr/timer.h"
40 #include "src/core/lib/iomgr/timer_heap.h"
/* Heap-index sentinel meaning "this timer lives in the shard's unordered
   list, not in its heap". */
42 #define INVALID_HEAP_INDEX 0xffffffffu
/* Weight used for each shard's time-averaged deadline statistics. */
44 #define ADD_DEADLINE_SCALE 0.33
/* Clamp bounds (in seconds) for the recomputed queue window width;
   refill_heap() converts the chosen width to millis (* 1000.0). */
45 #define MIN_QUEUE_WINDOW_DURATION 0.01
46 #define MAX_QUEUE_WINDOW_DURATION 1
/* Run-time trace flags gating the gpr_log() diagnostics in this file. */
48 grpc_core::TraceFlag grpc_timer_trace(false, "timer");
49 grpc_core::TraceFlag grpc_timer_check_trace(false, "timer_check");
51 /* A "timer shard". Contains a 'heap' and a 'list' of timers. All timers with
52 * deadlines earlier than 'queue_deadline_cap' are maintained in the heap and
53 * others are maintained in the list (unordered). This helps to keep the number
54 * of elements in the heap low.
56 * The 'queue_deadline_cap' gets recomputed periodically based on the timer
57 * stats maintained in 'stats' and the relevant timers are then moved from the
/* NOTE(review): the struct opener (e.g. "typedef struct timer_shard {"), the
   per-shard mutex, and the heap/list members are not visible in this
   extraction -- later code uses shard->mu, shard->heap and shard->list. */
/* Time-averaged stats on deadlines added to this shard; drives the
   queue_deadline_cap window computed in refill_heap(). */
62 grpc_time_averaged_stats stats;
63 /* All and only timers with deadlines < this will be in the heap. */
64 grpc_millis queue_deadline_cap;
65 /* The deadline of the next timer due in this shard. */
66 grpc_millis min_deadline;
67 /* Index of this timer_shard in the g_shard_queue. */
68 uint32_t shard_queue_index;
69 /* This holds all timers with deadlines < queue_deadline_cap. Timers in this
70 list have the top bit of their deadline set to 0. */
72 /* This holds timers whose deadline is >= queue_deadline_cap. */
/* Number of entries in g_shards / g_shard_queue (set in timer_list_init()). */
75 static size_t g_num_shards;
77 /* Array of timer shards. Whenever a timer (grpc_timer *) is added, its address
78 * is hashed to select the timer shard to add the timer to */
79 static timer_shard* g_shards;
81 /* Maintains a sorted list of timer shards (sorted by their min_deadline, i.e
82 * the deadline of the next timer in each shard).
83 * Access to this is protected by g_shared_mutables.mu */
84 static timer_shard** g_shard_queue;
88 /* == DEBUG ONLY: hash table for duplicate timer detection == */
/* NOTE(review): this table is presumably compiled only in debug builds --
   the guarding #ifdef is not visible in this extraction (see the
   INIT_TIMER_HASH_TABLE() macro shims further down). */
90 #define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */
92 static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */
/* Bucketed singly-linked lists (linked via grpc_timer::hash_table_next) of
   currently-scheduled timers, used to catch double-adds and bad cancels. */
93 static grpc_timer* g_timer_ht[NUM_HASH_BUCKETS] = {nullptr};
/* Initializes one mutex per hash bucket. Called via INIT_TIMER_HASH_TABLE()
   from timer_list_init(). */
95 static void init_timer_ht() {
96 for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
97 gpr_mu_init(&g_hash_mu[i]);
/* Destroys the per-bucket mutexes. Called via DESTROY_TIMER_HASH_TABLE()
   from timer_list_shutdown(). */
101 static void destroy_timer_ht() {
102 for (int i = 0; i < NUM_HASH_BUCKETS; i++) {
103 gpr_mu_destroy(&g_hash_mu[i]);
/* Returns true iff \a t is currently linked into the duplicate-detection
   hash table. The bucket is chosen by hashing the timer's address. */
107 static bool is_in_ht(grpc_timer* t) {
108 size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
110 gpr_mu_lock(&g_hash_mu[i]);
/* Walk the bucket's singly-linked list looking for t. */
111 grpc_timer* p = g_timer_ht[i];
112 while (p != nullptr && p != t) {
113 p = p->hash_table_next;
115 gpr_mu_unlock(&g_hash_mu[i]);
/* NOTE(review): the concluding "return p == t;" is not visible in this
   extraction -- confirm against the full file. */
/* Adds \a t to the duplicate-detection hash table. If the same grpc_timer is
   already present (scheduled twice without an intervening fire/cancel), logs
   the closure's creation and schedule sites. */
120 static void add_to_ht(grpc_timer* t) {
121 GPR_ASSERT(!t->hash_table_next);
122 size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
124 gpr_mu_lock(&g_hash_mu[i]);
/* Scan the bucket to detect a duplicate add. */
125 grpc_timer* p = g_timer_ht[i];
126 while (p != nullptr && p != t) {
127 p = p->hash_table_next;
/* Duplicate found: report where the closure was created/scheduled. */
131 grpc_closure* c = t->closure;
133 "** Duplicate timer (%p) being added. Closure: (%p), created at: "
134 "(%s:%d), scheduled at: (%s:%d) **",
135 t, c, c->file_created, c->line_created, c->file_initiated,
140 /* Timer not present in the bucket. Insert at head of the list */
141 t->hash_table_next = g_timer_ht[i];
/* NOTE(review): the matching "g_timer_ht[i] = t;" head-insert line is not
   visible in this extraction. */
143 gpr_mu_unlock(&g_hash_mu[i]);
/* Removes \a t from the duplicate-detection hash table, logging loudly if
   the timer was never added (i.e. a remove of an unscheduled timer). */
146 static void remove_from_ht(grpc_timer* t) {
147 size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS);
148 bool removed = false;
150 gpr_mu_lock(&g_hash_mu[i]);
/* Case 1: t is at the head of its bucket. */
151 if (g_timer_ht[i] == t) {
152 g_timer_ht[i] = g_timer_ht[i]->hash_table_next;
154 } else if (g_timer_ht[i] != nullptr) {
/* Case 2: find t's predecessor and unlink t after it. */
155 grpc_timer* p = g_timer_ht[i];
156 while (p->hash_table_next != nullptr && p->hash_table_next != t) {
157 p = p->hash_table_next;
160 if (p->hash_table_next == t) {
161 p->hash_table_next = t->hash_table_next;
165 gpr_mu_unlock(&g_hash_mu[i]);
/* Not found in the table: report the closure's creation/schedule sites. */
168 grpc_closure* c = t->closure;
170 "** Removing timer (%p) that is not added to hash table. Closure "
171 "(%p), created at: (%s:%d), scheduled at: (%s:%d) **",
172 t, c, c->file_created, c->line_created, c->file_initiated,
/* Clear the link so a later re-add passes the GPR_ASSERT in add_to_ht(). */
177 t->hash_table_next = nullptr;
180 /* If a timer is added to a timer shard (either heap or a list), it must
181 * be pending. A timer is added to hash table only-if it is added to the
183 * Therefore, if timer->pending is false, it cannot be in hash table */
184 static void validate_non_pending_timer(grpc_timer* t) {
185 if (!t->pending && is_in_ht(t)) {
/* Invariant violated: a non-pending timer is still in the hash table. */
186 grpc_closure* c = t->closure;
188 "** gpr_timer_cancel() called on a non-pending timer (%p) which "
189 "is in the hash table. Closure: (%p), created at: (%s:%d), "
190 "scheduled at: (%s:%d) **",
191 t, c, c->file_created, c->line_created, c->file_initiated,
/* Debug builds: route the hash-table hooks to the real implementations. */
197 #define INIT_TIMER_HASH_TABLE() init_timer_ht()
198 #define DESTROY_TIMER_HASH_TABLE() destroy_timer_ht()
199 #define ADD_TO_HASH_TABLE(t) add_to_ht((t))
200 #define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t))
201 #define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t))
/* Non-debug builds: the hooks compile away to nothing. NOTE(review): the
   #else separating the two macro groups is not visible in this extraction. */
205 #define INIT_TIMER_HASH_TABLE()
206 #define DESTROY_TIMER_HASH_TABLE()
207 #define ADD_TO_HASH_TABLE(t)
208 #define REMOVE_FROM_HASH_TABLE(t)
209 #define VALIDATE_NON_PENDING_TIMER(t)
213 /* Thread local variable that stores the deadline of the next timer the thread
214 * has last-seen. This is an optimization to prevent the thread from checking
215 * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock,
216 * an expensive operation) */
/* A value of 0 forces the next timer_check() on this thread to consult the
   shared minimum (see timer_consume_kick()). */
217 static GPR_THREAD_LOCAL(grpc_millis) g_last_seen_min_timer;
219 struct shared_mutables {
220 /* The deadline of the next timer due across all timer shards */
221 grpc_millis min_timer;
222 /* Allow only one run_some_expired_timers at once */
223 gpr_spinlock checker_mu;
225 /* Protects g_shard_queue (and the shared_mutables struct itself) */
/* NOTE(review): the gpr_mu member ("mu") and the "initialized" flag used by
   the rest of this file are not visible in this extraction. */
/* Cache-line aligned, presumably to avoid false sharing with neighbors. */
227 } GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE);
229 static struct shared_mutables g_shared_mutables;
/* Adds two grpc_millis values, clamping at GRPC_MILLIS_INF_FUTURE rather
   than overflowing. */
231 static grpc_millis saturating_add(grpc_millis a, grpc_millis b) {
232 if (a > GRPC_MILLIS_INF_FUTURE - b) {
233 return GRPC_MILLIS_INF_FUTURE;
/* Forward declaration; defined below. */
238 static grpc_timer_check_result run_some_expired_timers(grpc_millis now,
240 grpc_error_handle error);
/* A shard's min_deadline: the top of its heap, or queue_deadline_cap + 1
   when the heap is empty (an empty shard sorts just past the window it has
   already covered, so it is revisited once the cap passes). */
242 static grpc_millis compute_min_deadline(timer_shard* shard) {
243 return grpc_timer_heap_is_empty(&shard->heap)
244 ? saturating_add(shard->queue_deadline_cap, 1)
245 : grpc_timer_heap_top(&shard->heap)->deadline;
/* Initializes the timer subsystem: sizes the shard array from the CPU count
   (clamped to [1, 32]), initializes the shared mutable state, each shard,
   and the debug-only duplicate-detection hash table. */
248 static void timer_list_init() {
251 g_num_shards = GPR_CLAMP(2 * gpr_cpu_num_cores(), 1, 32);
/* NOTE(review): the "g_shards =" left-hand side of this assignment is on a
   line not visible in this extraction. */
253 static_cast<timer_shard*>(gpr_zalloc(g_num_shards * sizeof(*g_shards)));
254 g_shard_queue = static_cast<timer_shard**>(
255 gpr_zalloc(g_num_shards * sizeof(*g_shard_queue)));
257 g_shared_mutables.initialized = true;
258 g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
259 gpr_mu_init(&g_shared_mutables.mu);
260 g_shared_mutables.min_timer = grpc_core::ExecCtx::Get()->Now();
/* 0 forces the first timer_check() on each thread to consult shared state. */
262 g_last_seen_min_timer = 0;
264 for (i = 0; i < g_num_shards; i++) {
265 timer_shard* shard = &g_shards[i];
266 gpr_mu_init(&shard->mu);
267 grpc_time_averaged_stats_init(&shard->stats, 1.0 / ADD_DEADLINE_SCALE, 0.1,
269 shard->queue_deadline_cap = g_shared_mutables.min_timer;
270 shard->shard_queue_index = i;
271 grpc_timer_heap_init(&shard->heap);
/* Empty doubly-linked list: the sentinel points at itself. */
272 shard->list.next = shard->list.prev = &shard->list;
273 shard->min_deadline = compute_min_deadline(shard);
274 g_shard_queue[i] = shard;
277 INIT_TIMER_HASH_TABLE();
/* Tears the timer system down: fires every remaining timer with a shutdown
   error, then destroys shard and shared state. */
280 static void timer_list_shutdown() {
/* Passing INF_FUTURE as "now" makes every pending timer look expired, so
   each one runs with the shutdown error below. */
282 run_some_expired_timers(
283 GRPC_MILLIS_INF_FUTURE, nullptr,
284 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
285 for (i = 0; i < g_num_shards; i++) {
286 timer_shard* shard = &g_shards[i];
287 gpr_mu_destroy(&shard->mu);
288 grpc_timer_heap_destroy(&shard->heap);
290 gpr_mu_destroy(&g_shared_mutables.mu);
/* NOTE(review): g_shards is not freed in the visible lines (only
   g_shard_queue) -- confirm against the full file. */
292 gpr_free(g_shard_queue);
293 g_shared_mutables.initialized = false;
295 DESTROY_TIMER_HASH_TABLE();
298 /* returns true if the first element in the list */
/* NOTE(review): the comment above looks stale -- the function returns void.
   Appends \a timer at the tail of the sentinel-headed doubly-linked \a head
   list; the "timer->next = head;" line is not visible in this extraction. */
299 static void list_join(grpc_timer* head, grpc_timer* timer) {
301 timer->prev = head->prev;
302 timer->next->prev = timer->prev->next = timer;
/* Unlinks \a timer from its doubly-linked list in O(1). */
305 static void list_remove(grpc_timer* timer) {
306 timer->next->prev = timer->prev;
307 timer->prev->next = timer->next;
/* Swaps g_shard_queue[i] with g_shard_queue[i+1] and fixes up both shards'
   stored shard_queue_index. Helper for note_deadline_change()'s bubbling. */
310 static void swap_adjacent_shards_in_queue(uint32_t first_shard_queue_index) {
312 temp = g_shard_queue[first_shard_queue_index];
313 g_shard_queue[first_shard_queue_index] =
314 g_shard_queue[first_shard_queue_index + 1];
315 g_shard_queue[first_shard_queue_index + 1] = temp;
/* Keep each shard's back-pointer into the queue consistent. */
316 g_shard_queue[first_shard_queue_index]->shard_queue_index =
317 first_shard_queue_index;
318 g_shard_queue[first_shard_queue_index + 1]->shard_queue_index =
319 first_shard_queue_index + 1;
/* Restores g_shard_queue's min_deadline ordering after \a shard's
   min_deadline changed, by bubbling the shard forward or backward.
   REQUIRES: g_shared_mutables.mu held (it protects g_shard_queue). */
322 static void note_deadline_change(timer_shard* shard) {
/* Bubble toward the front while earlier than the predecessor. */
323 while (shard->shard_queue_index > 0 &&
324 shard->min_deadline <
325 g_shard_queue[shard->shard_queue_index - 1]->min_deadline) {
326 swap_adjacent_shards_in_queue(shard->shard_queue_index - 1);
/* Bubble toward the back while later than the successor. */
328 while (shard->shard_queue_index < g_num_shards - 1 &&
329 shard->min_deadline >
330 g_shard_queue[shard->shard_queue_index + 1]->min_deadline) {
331 swap_adjacent_shards_in_queue(shard->shard_queue_index);
335 void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; }
/* Schedules \a closure to run when \a deadline (millis) is reached.
   The timer is hashed to a shard; past-due or pre-init timers are handed to
   the ExecCtx immediately. If the new timer becomes the earliest in its
   shard, the global shard queue and min_timer are updated under
   g_shared_mutables.mu. */
337 static void timer_init(grpc_timer* timer, grpc_millis deadline,
338 grpc_closure* closure) {
339 int is_first_timer = 0;
/* Shard selection: hash of the timer's address. */
340 timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
341 timer->closure = closure;
342 timer->deadline = deadline;
345 timer->hash_table_next = nullptr;
348 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
349 gpr_log(GPR_INFO, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]",
350 timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
/* Timer system not up yet: fail the closure immediately. */
354 if (!g_shared_mutables.initialized) {
355 timer->pending = false;
356 grpc_core::ExecCtx::Run(
357 DEBUG_LOCATION, timer->closure,
358 GRPC_ERROR_CREATE_FROM_STATIC_STRING(
359 "Attempt to create timer before initialization"));
363 gpr_mu_lock(&shard->mu);
364 timer->pending = true;
365 grpc_millis now = grpc_core::ExecCtx::Get()->Now();
/* Already due: run it now rather than queueing it. */
366 if (deadline <= now) {
367 timer->pending = false;
368 grpc_core::ExecCtx::Run(DEBUG_LOCATION, timer->closure, GRPC_ERROR_NONE);
369 gpr_mu_unlock(&shard->mu);
/* Feed the relative deadline (in seconds) into the shard's stats. */
374 grpc_time_averaged_stats_add_sample(
375 &shard->stats, static_cast<double>(deadline - now) / 1000.0);
377 ADD_TO_HASH_TABLE(timer);
/* Inside the current window -> heap; otherwise -> unordered list. */
379 if (deadline < shard->queue_deadline_cap) {
380 is_first_timer = grpc_timer_heap_add(&shard->heap, timer);
382 timer->heap_index = INVALID_HEAP_INDEX;
383 list_join(&shard->list, timer);
385 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
387 " .. add to shard %d with queue_deadline_cap=%" PRId64
388 " => is_first_timer=%s",
389 static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
390 is_first_timer ? "true" : "false");
392 gpr_mu_unlock(&shard->mu);
394 /* Deadline may have decreased, we need to adjust the main queue. Note
395 that there is a potential racy unlocked region here. There could be a
396 reordering of multiple grpc_timer_init calls, at this point, but the < test
397 below should ensure that we err on the side of caution. There could
398 also be a race with grpc_timer_check, which might beat us to the lock. In
399 that case, it is possible that the timer that we added will have already
400 run by the time we hold the lock, but that too is a safe error.
401 Finally, it's possible that the grpc_timer_check that intervened failed to
402 trigger the new timer because the min_deadline hadn't yet been reduced.
403 In that case, the timer will simply have to wait for the next
405 if (is_first_timer) {
406 gpr_mu_lock(&g_shared_mutables.mu);
407 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
408 gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRId64,
409 shard->min_deadline);
411 if (deadline < shard->min_deadline) {
412 grpc_millis old_min_deadline = g_shard_queue[0]->min_deadline;
413 shard->min_deadline = deadline;
414 note_deadline_change(shard);
/* New global minimum: publish it for lock-free readers. NOTE(review): the
   two stores below are alternate 64-bit/32-bit preprocessor branches; the
   #if/#else directives are not visible in this extraction. */
415 if (shard->shard_queue_index == 0 && deadline < old_min_deadline) {
417 // TODO(sreek): Using c-style cast here. static_cast<> gives an error
418 // (on mac platforms complaining that gpr_atm* is (long *) while
419 // (&g_shared_mutables.min_timer) is a (long long *). The cast should be
420 // safe since we know that both are pointer types and 64-bit wide.
421 gpr_atm_no_barrier_store((gpr_atm*)(&g_shared_mutables.min_timer),
424 // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit
425 // types (like grpc_millis). So all reads and writes to the
426 // g_shared_mutables.min_timer variable are done under g_shared_mutables.mu
427 g_shared_mutables.min_timer = deadline;
432 gpr_mu_unlock(&g_shared_mutables.mu);
/* Invalidates this thread's cached minimum so its next timer_check() goes
   back to the shared state. */
436 static void timer_consume_kick(void) {
437 /* Force re-evaluation of last seen min */
438 g_last_seen_min_timer = 0;
/* Cancels \a timer. If still pending, removes it from its shard (heap or
   list) and runs its closure with GRPC_ERROR_CANCELLED; otherwise validates
   (debug builds) that the non-pending timer is not in the hash table. */
441 static void timer_cancel(grpc_timer* timer) {
442 if (!g_shared_mutables.initialized) {
443 /* must have already been cancelled, also the shard mutex is invalid */
/* Same address-hash shard selection as timer_init(). */
447 timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
448 gpr_mu_lock(&shard->mu);
449 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
450 gpr_log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer,
451 timer->pending ? "true" : "false");
454 if (timer->pending) {
455 REMOVE_FROM_HASH_TABLE(timer);
457 grpc_core::ExecCtx::Run(DEBUG_LOCATION, timer->closure,
458 GRPC_ERROR_CANCELLED);
459 timer->pending = false;
/* INVALID_HEAP_INDEX => the timer sits in the shard's unordered list.
   NOTE(review): the list_remove() call for that branch is not visible in
   this extraction. */
460 if (timer->heap_index == INVALID_HEAP_INDEX) {
463 grpc_timer_heap_remove(&shard->heap, timer);
466 VALIDATE_NON_PENDING_TIMER(timer);
468 gpr_mu_unlock(&shard->mu);
471 /* Rebalances the timer shard by computing a new 'queue_deadline_cap' and moving
472 all relevant timers in shard->list (i.e timers with deadlines earlier than
473 'queue_deadline_cap') into shard->heap.
474 Returns 'true' if shard->heap has at least ONE element
475 REQUIRES: shard->mu locked */
476 static bool refill_heap(timer_shard* shard, grpc_millis now) {
477 /* Compute the new queue window width and bound by the limits: */
478 double computed_deadline_delta =
479 grpc_time_averaged_stats_update_average(&shard->stats) *
/* Clamp the window to [MIN, MAX]_QUEUE_WINDOW_DURATION seconds. */
481 double deadline_delta =
482 GPR_CLAMP(computed_deadline_delta, MIN_QUEUE_WINDOW_DURATION,
483 MAX_QUEUE_WINDOW_DURATION);
484 grpc_timer *timer, *next;
486 /* Compute the new cap and put all timers under it into the queue: */
/* The cap only moves forward; the seconds window is converted to millis. */
487 shard->queue_deadline_cap =
488 saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
489 static_cast<grpc_millis>(deadline_delta * 1000.0));
491 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
492 gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRId64,
493 static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
/* Walk the unordered list, promoting newly in-window timers to the heap.
   NOTE(review): the "next = timer->next;" save and the list_remove() call
   are not visible in this extraction. */
495 for (timer = shard->list.next; timer != &shard->list; timer = next) {
498 if (timer->deadline < shard->queue_deadline_cap) {
499 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
500 gpr_log(GPR_INFO, " .. add timer with deadline %" PRId64 " to heap",
504 grpc_timer_heap_add(&shard->heap, timer);
507 return !grpc_timer_heap_is_empty(&shard->heap);
510 /* This pops the next non-cancelled timer with deadline <= now from the
511 queue, or returns NULL if there isn't one.
512 REQUIRES: shard->mu locked */
/* NOTE(review): the enclosing retry loop (after a refill) and the concluding
   "return timer;" are not visible in this extraction. */
513 static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
516 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
517 gpr_log(GPR_INFO, " .. shard[%d]: heap_empty=%s",
518 static_cast<int>(shard - g_shards),
519 grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
/* Heap exhausted: nothing due unless the cap has passed and a refill from
   the unordered list produces heap entries. */
521 if (grpc_timer_heap_is_empty(&shard->heap)) {
522 if (now < shard->queue_deadline_cap) return nullptr;
523 if (!refill_heap(shard, now)) return nullptr;
525 timer = grpc_timer_heap_top(&shard->heap);
526 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
528 " .. check top timer deadline=%" PRId64 " now=%" PRId64,
529 timer->deadline, now);
/* Earliest timer not yet due => nothing further to pop. */
531 if (timer->deadline > now) return nullptr;
532 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
533 gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRId64 "ms late", timer,
534 now - timer->deadline);
536 timer->pending = false;
537 grpc_timer_heap_pop(&shard->heap);
542 /* REQUIRES: shard->mu unlocked */
/* Pops every timer in \a shard due at or before \a now, scheduling each
   closure with a ref on \a error; writes the shard's new min_deadline and
   returns the number popped (counter increment not visible here). */
543 static size_t pop_timers(timer_shard* shard, grpc_millis now,
544 grpc_millis* new_min_deadline,
545 grpc_error_handle error) {
548 gpr_mu_lock(&shard->mu);
549 while ((timer = pop_one(shard, now))) {
550 REMOVE_FROM_HASH_TABLE(timer);
551 grpc_core::ExecCtx::Run(DEBUG_LOCATION, timer->closure,
552 GRPC_ERROR_REF(error));
/* Recompute after draining so the caller can re-sort the shard queue. */
555 *new_min_deadline = compute_min_deadline(shard);
556 gpr_mu_unlock(&shard->mu);
557 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
558 gpr_log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR,
559 static_cast<int>(shard - g_shards), n);
/* Core timer sweep: if \a now has reached the published global minimum and
   no other thread is already sweeping (checker_mu spinlock), repeatedly
   drains the earliest shard (g_shard_queue[0]) via pop_timers() until its
   min_deadline passes \a now, re-sorting the queue and republishing the new
   global minimum. Returns FIRED / CHECKED_AND_EMPTY / NOT_CHECKED; \a next,
   if non-null, receives the earliest known upcoming deadline.
   NOTE(review): the two min_timer reads below are alternate 64-bit (atomic)
   and 32-bit (mutex-protected) preprocessor branches; the #if/#else
   directives are not visible in this extraction. */
564 static grpc_timer_check_result run_some_expired_timers(
565 grpc_millis now, grpc_millis* next, grpc_error_handle error) {
566 grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;
569 // TODO(sreek): Using c-style cast here. static_cast<> gives an error (on
570 // mac platforms complaining that gpr_atm* is (long *) while
571 // (&g_shared_mutables.min_timer) is a (long long *). The cast should be
572 // safe since we know that both are pointer types and 64-bit wide
573 grpc_millis min_timer = static_cast<grpc_millis>(
574 gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer)));
576 // On 32-bit systems, gpr_atm_no_barrier_load does not work on 64-bit types
577 // (like grpc_millis). So all reads and writes to g_shared_mutables.min_timer
578 // are done under g_shared_mutables.mu
579 gpr_mu_lock(&g_shared_mutables.mu);
580 grpc_millis min_timer = g_shared_mutables.min_timer;
581 gpr_mu_unlock(&g_shared_mutables.mu);
/* Refresh this thread's cached minimum. */
583 g_last_seen_min_timer = min_timer;
/* Nothing due yet: report the next deadline and bail. */
585 if (now < min_timer) {
586 if (next != nullptr) *next = GPR_MIN(*next, min_timer);
587 return GRPC_TIMERS_CHECKED_AND_EMPTY;
/* Only one sweeper at a time; losers return NOT_CHECKED. */
590 if (gpr_spinlock_trylock(&g_shared_mutables.checker_mu)) {
591 gpr_mu_lock(&g_shared_mutables.mu);
592 result = GRPC_TIMERS_CHECKED_AND_EMPTY;
594 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
595 gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRId64,
596 static_cast<int>(g_shard_queue[0] - g_shards),
597 g_shard_queue[0]->min_deadline);
/* Drain while the earliest shard is due. The "== now" case is excluded at
   INF_FUTURE so shutdown's sentinel does not loop forever. */
600 while (g_shard_queue[0]->min_deadline < now ||
601 (now != GRPC_MILLIS_INF_FUTURE &&
602 g_shard_queue[0]->min_deadline == now)) {
603 grpc_millis new_min_deadline;
605 /* For efficiency, we pop as many available timers as we can from the
606 shard. This may violate perfect timer deadline ordering, but that
607 shouldn't be a big deal because we don't make ordering guarantees. */
608 if (pop_timers(g_shard_queue[0], now, &new_min_deadline, error) > 0) {
609 result = GRPC_TIMERS_FIRED;
612 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
615 ", shard[%d]->min_deadline %" PRId64 " --> %" PRId64
617 result, static_cast<int>(g_shard_queue[0] - g_shards),
618 g_shard_queue[0]->min_deadline, new_min_deadline, now);
621 /* An grpc_timer_init() on the shard could intervene here, adding a new
622 timer that is earlier than new_min_deadline. However,
623 grpc_timer_init() will block on the mutex before it can call
624 set_min_deadline, so this one will complete first and then the Addtimer
625 will reduce the min_deadline (perhaps unnecessarily). */
626 g_shard_queue[0]->min_deadline = new_min_deadline;
627 note_deadline_change(g_shard_queue[0]);
631 *next = GPR_MIN(*next, g_shard_queue[0]->min_deadline);
/* Republish the new global minimum (same 64/32-bit split as above). */
635 // TODO(sreek): Using c-style cast here. static_cast<> gives an error (on
636 // mac platforms complaining that gpr_atm* is (long *) while
637 // (&g_shared_mutables.min_timer) is a (long long *). The cast should be
638 // safe since we know that both are pointer types and 64-bit wide
639 gpr_atm_no_barrier_store((gpr_atm*)(&g_shared_mutables.min_timer),
640 g_shard_queue[0]->min_deadline);
642 // On 32-bit systems, gpr_atm_no_barrier_store does not work on 64-bit
643 // types (like grpc_millis). So all reads and writes to
644 // g_shared_mutables.min_timer are done under g_shared_mutables.mu
645 g_shared_mutables.min_timer = g_shard_queue[0]->min_deadline;
647 gpr_mu_unlock(&g_shared_mutables.mu);
648 gpr_spinlock_unlock(&g_shared_mutables.checker_mu);
/* Balance the refs handed out by pop_timers(). */
651 GRPC_ERROR_UNREF(error);
/* Public check entry point: consults the thread-local cached minimum first
   to skip the shared-state path when nothing can be due, then delegates to
   run_some_expired_timers(). \a next, if non-null, is lowered to the
   earliest upcoming deadline. now == INF_FUTURE signals shutdown and fires
   remaining timers with a shutdown error. */
656 static grpc_timer_check_result timer_check(grpc_millis* next) {
658 grpc_millis now = grpc_core::ExecCtx::Get()->Now();
660 /* fetch from a thread-local first: this avoids contention on a globally
661 mutable cacheline in the common case */
662 grpc_millis min_timer = g_last_seen_min_timer;
/* Fast path: cached minimum says nothing is due on this thread yet. */
664 if (now < min_timer) {
665 if (next != nullptr) {
666 *next = GPR_MIN(*next, min_timer);
668 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
669 gpr_log(GPR_INFO, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64,
672 return GRPC_TIMERS_CHECKED_AND_EMPTY;
/* Normal checks pass GRPC_ERROR_NONE; shutdown passes a static error.
   (The GRPC_ERROR_NONE arm of this conditional is not visible here.) */
675 grpc_error_handle shutdown_error =
676 now != GRPC_MILLIS_INF_FUTURE
678 : GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutting down timer system");
/* Tracing only: the two gpr_log calls below are alternate preprocessor
   branches (with/without the atomic global read). */
681 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
682 std::string next_str;
683 if (next == nullptr) {
686 next_str = absl::StrCat(*next);
690 "TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64
691 " glob_min=%" PRId64,
692 now, next_str.c_str(), min_timer,
693 static_cast<grpc_millis>(gpr_atm_no_barrier_load(
694 (gpr_atm*)(&g_shared_mutables.min_timer))));
696 gpr_log(GPR_INFO, "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64,
697 now, next_str.c_str(), min_timer);
/* Slow path: actually sweep for expired timers. */
701 grpc_timer_check_result r =
702 run_some_expired_timers(now, next, shutdown_error);
704 if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
705 std::string next_str;
706 if (next == nullptr) {
709 next_str = absl::StrCat(*next);
711 gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str.c_str());
/* Exported vtable wiring this generic implementation into the iomgr timer
   abstraction. */
716 grpc_timer_vtable grpc_generic_timer_vtable = {
717 timer_init, timer_cancel, timer_check,
718 timer_list_init, timer_list_shutdown, timer_consume_kick};