1 /*
2  * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 #include "event2/event-config.h"
28 #include "evconfig-private.h"
29
30 #ifdef _WIN32
31 #include <winsock2.h>
32 #define WIN32_LEAN_AND_MEAN
33 #include <windows.h>
34 #undef WIN32_LEAN_AND_MEAN
35 #endif
36 #include <sys/types.h>
37 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
38 #include <sys/time.h>
39 #endif
40 #include <sys/queue.h>
41 #ifdef EVENT__HAVE_SYS_SOCKET_H
42 #include <sys/socket.h>
43 #endif
44 #include <stdio.h>
45 #include <stdlib.h>
46 #ifdef EVENT__HAVE_UNISTD_H
47 #include <unistd.h>
48 #endif
49 #include <ctype.h>
50 #include <errno.h>
51 #include <signal.h>
52 #include <string.h>
53 #include <time.h>
54 #include <limits.h>
55 #ifdef EVENT__HAVE_FCNTL_H
56 #include <fcntl.h>
57 #endif
58
59 #include "event2/event.h"
60 #include "event2/event_struct.h"
61 #include "event2/event_compat.h"
62 #include "event-internal.h"
63 #include "defer-internal.h"
64 #include "evthread-internal.h"
65 #include "event2/thread.h"
66 #include "event2/util.h"
67 #include "log-internal.h"
68 #include "evmap-internal.h"
69 #include "iocp-internal.h"
70 #include "changelist-internal.h"
71 #define HT_NO_CACHE_HASH_VALUES
72 #include "ht-internal.h"
73 #include "util-internal.h"
74
75
76 #ifdef EVENT__HAVE_WORKING_KQUEUE
77 #include "kqueue-internal.h"
78 #endif
79
80 #ifdef EVENT__HAVE_EVENT_PORTS
81 extern const struct eventop evportops;
82 #endif
83 #ifdef EVENT__HAVE_SELECT
84 extern const struct eventop selectops;
85 #endif
86 #ifdef EVENT__HAVE_POLL
87 extern const struct eventop pollops;
88 #endif
89 #ifdef EVENT__HAVE_EPOLL
90 extern const struct eventop epollops;
91 #endif
92 #ifdef EVENT__HAVE_WORKING_KQUEUE
93 extern const struct eventop kqops;
94 #endif
95 #ifdef EVENT__HAVE_DEVPOLL
96 extern const struct eventop devpollops;
97 #endif
98 #ifdef _WIN32
99 extern const struct eventop win32ops;
100 #endif
101
102 /* Array of backends in order of preference. */
103 static const struct eventop *eventops[] = {
104 #ifdef EVENT__HAVE_EVENT_PORTS
105         &evportops,
106 #endif
107 #ifdef EVENT__HAVE_WORKING_KQUEUE
108         &kqops,
109 #endif
110 #ifdef EVENT__HAVE_EPOLL
111         &epollops,
112 #endif
113 #ifdef EVENT__HAVE_DEVPOLL
114         &devpollops,
115 #endif
116 #ifdef EVENT__HAVE_POLL
117         &pollops,
118 #endif
119 #ifdef EVENT__HAVE_SELECT
120         &selectops,
121 #endif
122 #ifdef _WIN32
123         &win32ops,
124 #endif
125         NULL
126 };
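
/*
 * Minimal usage sketch (illustrative only): an application can steer which
 * backend from the preference list above is chosen by configuring an
 * event_config before creating the base.  The backend name "select" and the
 * EV_FEATURE_O1 requirement below are just example choices.
 *
 *     #include <event2/event.h>
 *
 *     struct event_base *make_base_without_select(void)
 *     {
 *         struct event_config *cfg = event_config_new();
 *         struct event_base *base = NULL;
 *         if (!cfg)
 *             return NULL;
 *         event_config_avoid_method(cfg, "select");          // skip select
 *         event_config_require_features(cfg, EV_FEATURE_O1); // want O(1) add/del
 *         base = event_base_new_with_config(cfg);            // NULL if nothing fits
 *         event_config_free(cfg);
 *         return base;
 *     }
 */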
127
128 /* Global state; deprecated */
129 EVENT2_EXPORT_SYMBOL
130 struct event_base *event_global_current_base_ = NULL;
131 #define current_base event_global_current_base_
132
133 /* Global state */
134
135 static void *event_self_cbarg_ptr_ = NULL;
136
137 /* Prototypes */
138 static void     event_queue_insert_active(struct event_base *, struct event_callback *);
139 static void     event_queue_insert_active_later(struct event_base *, struct event_callback *);
140 static void     event_queue_insert_timeout(struct event_base *, struct event *);
141 static void     event_queue_insert_inserted(struct event_base *, struct event *);
142 static void     event_queue_remove_active(struct event_base *, struct event_callback *);
143 static void     event_queue_remove_active_later(struct event_base *, struct event_callback *);
144 static void     event_queue_remove_timeout(struct event_base *, struct event *);
145 static void     event_queue_remove_inserted(struct event_base *, struct event *);
146 static void event_queue_make_later_events_active(struct event_base *base);
147
148 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
149 static int event_del_(struct event *ev, int blocking);
150
151 #ifdef USE_REINSERT_TIMEOUT
152 /* This code seems buggy; only turn it on if we find out what the trouble is. */
153 static void     event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
154 #endif
155
156 static int      event_haveevents(struct event_base *);
157
158 static int      event_process_active(struct event_base *);
159
160 static int      timeout_next(struct event_base *, struct timeval **);
161 static void     timeout_process(struct event_base *);
162
163 static inline void      event_signal_closure(struct event_base *, struct event *ev);
164 static inline void      event_persist_closure(struct event_base *, struct event *ev);
165
166 static int      evthread_notify_base(struct event_base *base);
167
168 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
169     struct event *ev);
170
171 #ifndef EVENT__DISABLE_DEBUG_MODE
172 /* These functions implement a hashtable of which 'struct event *' structures
173  * have been setup or added.  We don't want to trust the content of the struct
174  * event itself, since we're trying to work through cases where an event gets
175  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
176  */
177
178 struct event_debug_entry {
179         HT_ENTRY(event_debug_entry) node;
180         const struct event *ptr;
181         unsigned added : 1;
182 };
183
184 static inline unsigned
185 hash_debug_entry(const struct event_debug_entry *e)
186 {
187         /* We need to do this silliness to convince compilers that we
188          * honestly mean to cast e->ptr to an integer, and discard any
189          * part of it that doesn't fit in an unsigned.
190          */
191         unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
192         /* Our hashtable implementation is pretty sensitive to low bits,
193          * and every struct event is over 64 bytes in size, so we can
194          * just say >>6. */
195         return (u >> 6);
196 }
197
198 static inline int
199 eq_debug_entry(const struct event_debug_entry *a,
200     const struct event_debug_entry *b)
201 {
202         return a->ptr == b->ptr;
203 }
204
205 int event_debug_mode_on_ = 0;
206
207
208 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
209 /**
210  * @brief Debug-mode flag that is set whenever a structure that must be
211  *        shared across threads is created (if thread support is enabled).
212  *
213  *        When evthreads are initialized, this variable is checked; if it is
214  *        set to something other than zero, the evthread setup functions
215  *        were called out of order.
216  *
217  *        See: "Locks and threading" in the documentation.
218  */
219 int event_debug_created_threadable_ctx_ = 0;
220 #endif
221
222 /* Set if it's too late to enable event_debug_mode. */
223 static int event_debug_mode_too_late = 0;
224 #ifndef EVENT__DISABLE_THREAD_SUPPORT
225 static void *event_debug_map_lock_ = NULL;
226 #endif
227 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
228         HT_INITIALIZER();
229
230 HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
231     eq_debug_entry)
232 HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
233     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
234
235 /* record that ev is now setup (that is, ready for an add) */
236 static void event_debug_note_setup_(const struct event *ev)
237 {
238         struct event_debug_entry *dent, find;
239
240         if (!event_debug_mode_on_)
241                 goto out;
242
243         find.ptr = ev;
244         EVLOCK_LOCK(event_debug_map_lock_, 0);
245         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
246         if (dent) {
247                 dent->added = 0;
248         } else {
249                 dent = mm_malloc(sizeof(*dent));
250                 if (!dent)
251                         event_err(1,
252                             "Out of memory in debugging code");
253                 dent->ptr = ev;
254                 dent->added = 0;
255                 HT_INSERT(event_debug_map, &global_debug_map, dent);
256         }
257         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
258
259 out:
260         event_debug_mode_too_late = 1;
261 }
262 /* record that ev is no longer setup */
263 static void event_debug_note_teardown_(const struct event *ev)
264 {
265         struct event_debug_entry *dent, find;
266
267         if (!event_debug_mode_on_)
268                 goto out;
269
270         find.ptr = ev;
271         EVLOCK_LOCK(event_debug_map_lock_, 0);
272         dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
273         if (dent)
274                 mm_free(dent);
275         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
276
277 out:
278         event_debug_mode_too_late = 1;
279 }
280 /* record that ev is now added */
281 static void event_debug_note_add_(const struct event *ev)
282 {
283         struct event_debug_entry *dent,find;
284
285         if (!event_debug_mode_on_)
286                 goto out;
287
288         find.ptr = ev;
289         EVLOCK_LOCK(event_debug_map_lock_, 0);
290         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
291         if (dent) {
292                 dent->added = 1;
293         } else {
294                 event_errx(EVENT_ERR_ABORT_,
295                     "%s: noting an add on a non-setup event %p"
296                     " (events: 0x%x, fd: "EV_SOCK_FMT
297                     ", flags: 0x%x)",
298                     __func__, ev, ev->ev_events,
299                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
300         }
301         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
302
303 out:
304         event_debug_mode_too_late = 1;
305 }
306 /* record that ev is no longer added */
307 static void event_debug_note_del_(const struct event *ev)
308 {
309         struct event_debug_entry *dent, find;
310
311         if (!event_debug_mode_on_)
312                 goto out;
313
314         find.ptr = ev;
315         EVLOCK_LOCK(event_debug_map_lock_, 0);
316         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
317         if (dent) {
318                 dent->added = 0;
319         } else {
320                 event_errx(EVENT_ERR_ABORT_,
321                     "%s: noting a del on a non-setup event %p"
322                     " (events: 0x%x, fd: "EV_SOCK_FMT
323                     ", flags: 0x%x)",
324                     __func__, ev, ev->ev_events,
325                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
326         }
327         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
328
329 out:
330         event_debug_mode_too_late = 1;
331 }
332 /* assert that ev is setup (i.e., okay to add or inspect) */
333 static void event_debug_assert_is_setup_(const struct event *ev)
334 {
335         struct event_debug_entry *dent, find;
336
337         if (!event_debug_mode_on_)
338                 return;
339
340         find.ptr = ev;
341         EVLOCK_LOCK(event_debug_map_lock_, 0);
342         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
343         if (!dent) {
344                 event_errx(EVENT_ERR_ABORT_,
345                     "%s called on a non-initialized event %p"
346                     " (events: 0x%x, fd: "EV_SOCK_FMT
347                     ", flags: 0x%x)",
348                     __func__, ev, ev->ev_events,
349                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
350         }
351         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
352 }
353 /* assert that ev is not added (i.e., okay to tear down or set up again) */
354 static void event_debug_assert_not_added_(const struct event *ev)
355 {
356         struct event_debug_entry *dent, find;
357
358         if (!event_debug_mode_on_)
359                 return;
360
361         find.ptr = ev;
362         EVLOCK_LOCK(event_debug_map_lock_, 0);
363         dent = HT_FIND(event_debug_map, &global_debug_map, &find);
364         if (dent && dent->added) {
365                 event_errx(EVENT_ERR_ABORT_,
366                     "%s called on an already added event %p"
367                     " (events: 0x%x, fd: "EV_SOCK_FMT", "
368                     "flags: 0x%x)",
369                     __func__, ev, ev->ev_events,
370                     EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
371         }
372         EVLOCK_UNLOCK(event_debug_map_lock_, 0);
373 }
374 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
375 {
376         if (!event_debug_mode_on_)
377                 return;
378         if (fd < 0)
379                 return;
380
381 #ifndef _WIN32
382         {
383                 int flags;
384                 if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
385                         EVUTIL_ASSERT(flags & O_NONBLOCK);
386                 }
387         }
388 #endif
389 }
390 #else
391 static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
392 static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
393 static void event_debug_note_add_(const struct event *ev) { (void)ev; }
394 static void event_debug_note_del_(const struct event *ev) { (void)ev; }
395 static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
396 static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
397 static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
398 #endif
399
400 #define EVENT_BASE_ASSERT_LOCKED(base)          \
401         EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
402
403 /* How often (in seconds) do we check for changes in wall clock time relative
404  * to monotonic time?  Set this to -1 for 'never.' */
405 #define CLOCK_SYNC_INTERVAL 5
406
407 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
408  * on 'base'.  If there is a cached time, return it.  Otherwise, use
409  * clock_gettime or gettimeofday as appropriate to find out the right time.
410  * Return 0 on success, -1 on failure.
411  */
412 static int
413 gettime(struct event_base *base, struct timeval *tp)
414 {
415         EVENT_BASE_ASSERT_LOCKED(base);
416
417         if (base->tv_cache.tv_sec) {
418                 *tp = base->tv_cache;
419                 return (0);
420         }
421
422         if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
423                 return -1;
424         }
425
426         if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
427             < tp->tv_sec) {
428                 struct timeval tv;
429                 evutil_gettimeofday(&tv,NULL);
430                 evutil_timersub(&tv, tp, &base->tv_clock_diff);
431                 base->last_updated_clock_diff = tp->tv_sec;
432         }
433
434         return 0;
435 }
436
437 int
438 event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
439 {
440         int r;
441         if (!base) {
442                 base = current_base;
443                 if (!current_base)
444                         return evutil_gettimeofday(tv, NULL);
445         }
446
447         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
448         if (base->tv_cache.tv_sec == 0) {
449                 r = evutil_gettimeofday(tv, NULL);
450         } else {
451                 evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
452                 r = 0;
453         }
454         EVBASE_RELEASE_LOCK(base, th_base_lock);
455         return r;
456 }
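
/*
 * Typical use, sketched under the assumption that the callback received its
 * event_base through the callback argument: inside a running loop iteration
 * this returns the cached wall-clock time without a system call, and outside
 * the loop it falls back to evutil_gettimeofday().
 *
 *     static void on_read(evutil_socket_t fd, short what, void *arg)
 *     {
 *         struct event_base *base = arg;
 *         struct timeval now;
 *         event_base_gettimeofday_cached(base, &now);
 *         // ... use 'now' for timestamps, rate limiting, etc. ...
 *     }
 */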
457
458 /** Make 'base' have no current cached time. */
459 static inline void
460 clear_time_cache(struct event_base *base)
461 {
462         base->tv_cache.tv_sec = 0;
463 }
464
465 /** Replace the cached time in 'base' with the current time. */
466 static inline void
467 update_time_cache(struct event_base *base)
468 {
469         base->tv_cache.tv_sec = 0;
470         if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
471             gettime(base, &base->tv_cache);
472 }
473
474 int
475 event_base_update_cache_time(struct event_base *base)
476 {
477
478         if (!base) {
479                 base = current_base;
480                 if (!current_base)
481                         return -1;
482         }
483
484         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
485         if (base->running_loop)
486                 update_time_cache(base);
487         EVBASE_RELEASE_LOCK(base, th_base_lock);
488         return 0;
489 }
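
/*
 * Sketch of when this helps (heavy_work() stands in for the caller's own slow
 * code): a long-running callback can re-sample the clock so that later reads
 * of the cached time in the same loop iteration are not stale.
 *
 *     heavy_work();                        // takes, say, hundreds of ms
 *     event_base_update_cache_time(base);  // refresh the base's cached time
 */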
490
491 static inline struct event *
492 event_callback_to_event(struct event_callback *evcb)
493 {
494         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
495         return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
496 }
497
498 static inline struct event_callback *
499 event_to_event_callback(struct event *ev)
500 {
501         return &ev->ev_evcallback;
502 }
503
504 struct event_base *
505 event_init(void)
506 {
507         struct event_base *base = event_base_new_with_config(NULL);
508
509         if (base == NULL) {
510                 event_errx(1, "%s: Unable to construct event_base", __func__);
511                 return NULL;
512         }
513
514         current_base = base;
515
516         return (base);
517 }
518
519 struct event_base *
520 event_base_new(void)
521 {
522         struct event_base *base = NULL;
523         struct event_config *cfg = event_config_new();
524         if (cfg) {
525                 base = event_base_new_with_config(cfg);
526                 event_config_free(cfg);
527         }
528         return base;
529 }
530
531 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
532  * avoid. */
533 static int
534 event_config_is_avoided_method(const struct event_config *cfg,
535     const char *method)
536 {
537         struct event_config_entry *entry;
538
539         TAILQ_FOREACH(entry, &cfg->entries, next) {
540                 if (entry->avoid_method != NULL &&
541                     strcmp(entry->avoid_method, method) == 0)
542                         return (1);
543         }
544
545         return (0);
546 }
547
548 /** Return true iff 'method' is disabled according to the environment. */
549 static int
550 event_is_method_disabled(const char *name)
551 {
552         char environment[64];
553         int i;
554
555         evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
556         for (i = 8; environment[i] != '\0'; ++i)
557                 environment[i] = EVUTIL_TOUPPER_(environment[i]);
558         /* Note that evutil_getenv_() ignores the environment entirely if
559          * we're setuid */
560         return (evutil_getenv_(environment) != NULL);
561 }
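
/*
 * Because of this check, a backend can also be disabled from outside the
 * program, without touching any event_config.  A sketch (epoll is just an
 * example; the variable name is "EVENT_NO" plus the uppercased method name):
 *
 *     $ EVENT_NOEPOLL=1 ./my_server            # shell: fall back to poll/select
 *
 *     setenv("EVENT_NOEPOLL", "1", 1);         // or from C (POSIX), before the
 *     struct event_base *base = event_base_new();  // first base is created
 */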
562
563 int
564 event_base_get_features(const struct event_base *base)
565 {
566         return base->evsel->features;
567 }
568
569 void
570 event_enable_debug_mode(void)
571 {
572 #ifndef EVENT__DISABLE_DEBUG_MODE
573         if (event_debug_mode_on_)
574                 event_errx(1, "%s was called twice!", __func__);
575         if (event_debug_mode_too_late)
576                 event_errx(1, "%s must be called *before* creating any events "
577                     "or event_bases",__func__);
578
579         event_debug_mode_on_ = 1;
580
581         HT_INIT(event_debug_map, &global_debug_map);
582 #endif
583 }
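
/*
 * Usage sketch: debug mode has to be switched on before anything else in
 * libevent runs (the checks above abort otherwise), so it normally goes at
 * the very top of main():
 *
 *     int main(void)
 *     {
 *         event_enable_debug_mode();   // must precede any event_base/event
 *         struct event_base *base = event_base_new();
 *         event_base_dispatch(base);
 *         event_base_free(base);
 *         return 0;
 *     }
 */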
584
585 void
586 event_disable_debug_mode(void)
587 {
588 #ifndef EVENT__DISABLE_DEBUG_MODE
589         struct event_debug_entry **ent, *victim;
590
591         EVLOCK_LOCK(event_debug_map_lock_, 0);
592         for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
593                 victim = *ent;
594                 ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
595                 mm_free(victim);
596         }
597         HT_CLEAR(event_debug_map, &global_debug_map);
598         EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
599
600         event_debug_mode_on_  = 0;
601 #endif
602 }
603
604 struct event_base *
605 event_base_new_with_config(const struct event_config *cfg)
606 {
607         int i;
608         struct event_base *base;
609         int should_check_environment;
610
611 #ifndef EVENT__DISABLE_DEBUG_MODE
612         event_debug_mode_too_late = 1;
613 #endif
614
615         if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
616                 event_warn("%s: calloc", __func__);
617                 return NULL;
618         }
619
620         if (cfg)
621                 base->flags = cfg->flags;
622
623         should_check_environment =
624             !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
625
626         {
627                 struct timeval tmp;
628                 int precise_time =
629                     cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
630                 int flags;
631                 if (should_check_environment && !precise_time) {
632                         precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
633                         if (precise_time) {
634                                 base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
635                         }
636                 }
637                 flags = precise_time ? EV_MONOT_PRECISE : 0;
638                 evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
639
640                 gettime(base, &tmp);
641         }
642
643         min_heap_ctor_(&base->timeheap);
644
645         base->sig.ev_signal_pair[0] = -1;
646         base->sig.ev_signal_pair[1] = -1;
647         base->th_notify_fd[0] = -1;
648         base->th_notify_fd[1] = -1;
649
650         TAILQ_INIT(&base->active_later_queue);
651
652         evmap_io_initmap_(&base->io);
653         evmap_signal_initmap_(&base->sigmap);
654         event_changelist_init_(&base->changelist);
655
656         base->evbase = NULL;
657
658         if (cfg) {
659                 memcpy(&base->max_dispatch_time,
660                     &cfg->max_dispatch_interval, sizeof(struct timeval));
661                 base->limit_callbacks_after_prio =
662                     cfg->limit_callbacks_after_prio;
663         } else {
664                 base->max_dispatch_time.tv_sec = -1;
665                 base->limit_callbacks_after_prio = 1;
666         }
667         if (cfg && cfg->max_dispatch_callbacks >= 0) {
668                 base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
669         } else {
670                 base->max_dispatch_callbacks = INT_MAX;
671         }
672         if (base->max_dispatch_callbacks == INT_MAX &&
673             base->max_dispatch_time.tv_sec == -1)
674                 base->limit_callbacks_after_prio = INT_MAX;
675
676         for (i = 0; eventops[i] && !base->evbase; i++) {
677                 if (cfg != NULL) {
678                         /* determine if this backend should be avoided */
679                         if (event_config_is_avoided_method(cfg,
680                                 eventops[i]->name))
681                                 continue;
682                         if ((eventops[i]->features & cfg->require_features)
683                             != cfg->require_features)
684                                 continue;
685                 }
686
687                 /* also obey the environment variables */
688                 if (should_check_environment &&
689                     event_is_method_disabled(eventops[i]->name))
690                         continue;
691
692                 base->evsel = eventops[i];
693
694                 base->evbase = base->evsel->init(base);
695         }
696
697         if (base->evbase == NULL) {
698                 event_warnx("%s: no event mechanism available",
699                     __func__);
700                 base->evsel = NULL;
701                 event_base_free(base);
702                 return NULL;
703         }
704
705         if (evutil_getenv_("EVENT_SHOW_METHOD"))
706                 event_msgx("libevent using: %s", base->evsel->name);
707
708         /* allocate a single active event queue */
709         if (event_base_priority_init(base, 1) < 0) {
710                 event_base_free(base);
711                 return NULL;
712         }
713
714         /* prepare for threading */
715
716 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
717         event_debug_created_threadable_ctx_ = 1;
718 #endif
719
720 #ifndef EVENT__DISABLE_THREAD_SUPPORT
721         if (EVTHREAD_LOCKING_ENABLED() &&
722             (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
723                 int r;
724                 EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
725                 EVTHREAD_ALLOC_COND(base->current_event_cond);
726                 r = evthread_make_base_notifiable(base);
727                 if (r<0) {
728                         event_warnx("%s: Unable to make base notifiable.", __func__);
729                         event_base_free(base);
730                         return NULL;
731                 }
732         }
733 #endif
734
735 #ifdef _WIN32
736         if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
737                 event_base_start_iocp_(base, cfg->n_cpus_hint);
738 #endif
739
740         return (base);
741 }
742
743 int
744 event_base_start_iocp_(struct event_base *base, int n_cpus)
745 {
746 #ifdef _WIN32
747         if (base->iocp)
748                 return 0;
749         base->iocp = event_iocp_port_launch_(n_cpus);
750         if (!base->iocp) {
751                 event_warnx("%s: Couldn't launch IOCP", __func__);
752                 return -1;
753         }
754         return 0;
755 #else
756         return -1;
757 #endif
758 }
759
760 void
761 event_base_stop_iocp_(struct event_base *base)
762 {
763 #ifdef _WIN32
764         int rv;
765
766         if (!base->iocp)
767                 return;
768         rv = event_iocp_shutdown_(base->iocp, -1);
769         EVUTIL_ASSERT(rv >= 0);
770         base->iocp = NULL;
771 #endif
772 }
773
774 static int
775 event_base_cancel_single_callback_(struct event_base *base,
776     struct event_callback *evcb,
777     int run_finalizers)
778 {
779         int result = 0;
780
781         if (evcb->evcb_flags & EVLIST_INIT) {
782                 struct event *ev = event_callback_to_event(evcb);
783                 if (!(ev->ev_flags & EVLIST_INTERNAL)) {
784                         event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
785                         result = 1;
786                 }
787         } else {
788                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
789                 event_callback_cancel_nolock_(base, evcb, 1);
790                 EVBASE_RELEASE_LOCK(base, th_base_lock);
791                 result = 1;
792         }
793
794         if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
795                 switch (evcb->evcb_closure) {
796                 case EV_CLOSURE_EVENT_FINALIZE:
797                 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
798                         struct event *ev = event_callback_to_event(evcb);
799                         ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
800                         if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
801                                 mm_free(ev);
802                         break;
803                 }
804                 case EV_CLOSURE_CB_FINALIZE:
805                         evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
806                         break;
807                 default:
808                         break;
809                 }
810         }
811         return result;
812 }
813
814 static int event_base_free_queues_(struct event_base *base, int run_finalizers)
815 {
816         int deleted = 0, i;
817
818         for (i = 0; i < base->nactivequeues; ++i) {
819                 struct event_callback *evcb, *next;
820                 for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
821                         next = TAILQ_NEXT(evcb, evcb_active_next);
822                         deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
823                         evcb = next;
824                 }
825         }
826
827         {
828                 struct event_callback *evcb;
829                 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
830                         deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
831                 }
832         }
833
834         return deleted;
835 }
836
837 static void
838 event_base_free_(struct event_base *base, int run_finalizers)
839 {
840         int i;
841         size_t n_deleted=0;
842         struct event *ev;
843         /* XXXX grab the lock? If there is contention when one thread frees
844          * the base, then the contending thread will be very sad soon. */
845
846         /* event_base_free(NULL) is how to free the current_base if we
847          * made it with event_init and forgot to hold a reference to it. */
848         if (base == NULL && current_base)
849                 base = current_base;
850         /* Don't actually free NULL. */
851         if (base == NULL) {
852                 event_warnx("%s: no base to free", __func__);
853                 return;
854         }
855         /* XXX(niels) - check for internal events first */
856
857 #ifdef _WIN32
858         event_base_stop_iocp_(base);
859 #endif
860
861         /* Clean up the thread-notification fds, if we have them. */
862         if (base->th_notify_fd[0] != -1) {
863                 event_del(&base->th_notify);
864                 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
865                 if (base->th_notify_fd[1] != -1)
866                         EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
867                 base->th_notify_fd[0] = -1;
868                 base->th_notify_fd[1] = -1;
869                 event_debug_unassign(&base->th_notify);
870         }
871
872         /* Delete all non-internal events. */
873         evmap_delete_all_(base);
874
875         while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
876                 event_del(ev);
877                 ++n_deleted;
878         }
879         for (i = 0; i < base->n_common_timeouts; ++i) {
880                 struct common_timeout_list *ctl =
881                     base->common_timeout_queues[i];
882                 event_del(&ctl->timeout_event); /* Internal; doesn't count */
883                 event_debug_unassign(&ctl->timeout_event);
884                 for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
885                         struct event *next = TAILQ_NEXT(ev,
886                             ev_timeout_pos.ev_next_with_common_timeout);
887                         if (!(ev->ev_flags & EVLIST_INTERNAL)) {
888                                 event_del(ev);
889                                 ++n_deleted;
890                         }
891                         ev = next;
892                 }
893                 mm_free(ctl);
894         }
895         if (base->common_timeout_queues)
896                 mm_free(base->common_timeout_queues);
897
898         for (;;) {
899                 /* A finalizer callback can register yet another finalizer; if that
900                  * finalizer ends up in active_later_queue, it can be moved into
901                  * activequeues, leaving events in activequeues after this function
902                  * returns, which is not what we want (we even have an assertion for
903                  * this).  So keep looping until nothing is left to cancel.
904                  *
905                  * A simple case is a bufferevent with an underlying one (i.e. filters).
906                  */
907                 int i = event_base_free_queues_(base, run_finalizers);
908                 event_debug(("%s: %d events freed", __func__, i));
909                 if (!i) {
910                         break;
911                 }
912                 n_deleted += i;
913         }
914
915         if (n_deleted)
916                 event_debug(("%s: "EV_SIZE_FMT" events were still set in base",
917                         __func__, n_deleted));
918
919         while (LIST_FIRST(&base->once_events)) {
920                 struct event_once *eonce = LIST_FIRST(&base->once_events);
921                 LIST_REMOVE(eonce, next_once);
922                 mm_free(eonce);
923         }
924
925         if (base->evsel != NULL && base->evsel->dealloc != NULL)
926                 base->evsel->dealloc(base);
927
928         for (i = 0; i < base->nactivequeues; ++i)
929                 EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
930
931         EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
932         min_heap_dtor_(&base->timeheap);
933
934         mm_free(base->activequeues);
935
936         evmap_io_clear_(&base->io);
937         evmap_signal_clear_(&base->sigmap);
938         event_changelist_freemem_(&base->changelist);
939
940         EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
941         EVTHREAD_FREE_COND(base->current_event_cond);
942
943         /* If we're freeing current_base, there won't be a current_base. */
944         if (base == current_base)
945                 current_base = NULL;
946         mm_free(base);
947 }
948
949 void
950 event_base_free_nofinalize(struct event_base *base)
951 {
952         event_base_free_(base, 0);
953 }
954
955 void
956 event_base_free(struct event_base *base)
957 {
958         event_base_free_(base, 1);
959 }
960
961 /* Fake eventop; used to disable the backend temporarily inside event_reinit
962  * so that we can call event_del() on an event without telling the backend.
963  */
964 static int
965 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
966     short events, void *fdinfo)
967 {
968         return 0;
969 }
970 const struct eventop nil_eventop = {
971         "nil",
972         NULL, /* init: unused. */
973         NULL, /* add: unused. */
974         nil_backend_del, /* del: used, so needs to be killed. */
975         NULL, /* dispatch: unused. */
976         NULL, /* dealloc: unused. */
977         0, 0, 0
978 };
979
980 /* reinitialize the event base after a fork */
981 int
982 event_reinit(struct event_base *base)
983 {
984         const struct eventop *evsel;
985         int res = 0;
986         int was_notifiable = 0;
987         int had_signal_added = 0;
988
989         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
990
991         evsel = base->evsel;
992
993         /* check if this event mechanism requires reinit on the backend */
994         if (evsel->need_reinit) {
995                 /* We're going to call event_del() on our notify events (the
996                  * ones that tell about signals and wakeup events).  But we
997                  * don't actually want to tell the backend to change its
998                  * state, since it might still share some resource (a kqueue,
999                  * an epoll fd) with the parent process, and we don't want to
1000                  * delete the fds from _that_ backend, we temporarily stub out
1001                  * delete the fds from _that_ backend.  So we temporarily stub out
1002                  */
1003                 base->evsel = &nil_eventop;
1004         }
1005
1006         /* We need to re-create a new signal-notification fd and a new
1007          * thread-notification fd.  Otherwise, we'll still share those with
1008          * the parent process, which would make any notification sent to them
1009          * get received by one or both of the event loops, more or less at
1010          * random.
1011          */
1012         if (base->sig.ev_signal_added) {
1013                 event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
1014                 event_debug_unassign(&base->sig.ev_signal);
1015                 memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
1016                 had_signal_added = 1;
1017                 base->sig.ev_signal_added = 0;
1018         }
1019         if (base->sig.ev_signal_pair[0] != -1)
1020                 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
1021         if (base->sig.ev_signal_pair[1] != -1)
1022                 EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
1023         if (base->th_notify_fn != NULL) {
1024                 was_notifiable = 1;
1025                 base->th_notify_fn = NULL;
1026         }
1027         if (base->th_notify_fd[0] != -1) {
1028                 event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
1029                 EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
1030                 if (base->th_notify_fd[1] != -1)
1031                         EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
1032                 base->th_notify_fd[0] = -1;
1033                 base->th_notify_fd[1] = -1;
1034                 event_debug_unassign(&base->th_notify);
1035         }
1036
1037         /* Replace the original evsel. */
1038         base->evsel = evsel;
1039
1040         if (evsel->need_reinit) {
1041                 /* Reconstruct the backend through brute-force, so that we do
1042                  * not share any structures with the parent process. For some
1043                  * backends, this is necessary: epoll and kqueue, for
1044                  * instance, have events associated with a kernel
1045                  * structure. If we didn't reinitialize, we'd share that
1046                  * structure with the parent process, and any changes made by
1047                  * the parent would affect our backend's behavior (and vice
1048                  * versa).
1049                  */
1050                 if (base->evsel->dealloc != NULL)
1051                         base->evsel->dealloc(base);
1052                 base->evbase = evsel->init(base);
1053                 if (base->evbase == NULL) {
1054                         event_errx(1,
1055                            "%s: could not reinitialize event mechanism",
1056                            __func__);
1057                         res = -1;
1058                         goto done;
1059                 }
1060
1061                 /* Empty out the changelist (if any): we are starting from a
1062                  * blank slate. */
1063                 event_changelist_freemem_(&base->changelist);
1064
1065                 /* Tell the event maps to re-inform the backend about all
1066                  * pending events. This will make the signal notification
1067                  * event get re-created if necessary. */
1068                 if (evmap_reinit_(base) < 0)
1069                         res = -1;
1070         } else {
1071                 res = evsig_init_(base);
1072                 if (res == 0 && had_signal_added) {
1073                         res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
1074                         if (res == 0)
1075                                 base->sig.ev_signal_added = 1;
1076                 }
1077         }
1078
1079         /* If we were notifiable before, and nothing just exploded, become
1080          * notifiable again. */
1081         if (was_notifiable && res == 0)
1082                 res = evthread_make_base_notifiable_nolock_(base);
1083
1084 done:
1085         EVBASE_RELEASE_LOCK(base, th_base_lock);
1086         return (res);
1087 }
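
/*
 * Usage sketch: after fork(), the child must reinitialize an inherited base
 * before using it, so that it stops sharing kernel state (epoll fd, kqueue,
 * notification pipes) with the parent.
 *
 *     pid_t pid = fork();
 *     if (pid == 0) {
 *         if (event_reinit(base) < 0)      // 'base' was created pre-fork
 *             exit(1);                     // base may be unusable in the child
 *         event_base_dispatch(base);       // child now runs its own loop
 *     }
 */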
1088
1089 /* Get the monotonic time for this event_base's timer */
1090 int
1091 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
1092 {
1093   int rv = -1;
1094
1095   if (base && tv) {
1096     EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1097     rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
1098     EVBASE_RELEASE_LOCK(base, th_base_lock);
1099   }
1100
1101   return rv;
1102 }
1103
1104 const char **
1105 event_get_supported_methods(void)
1106 {
1107         static const char **methods = NULL;
1108         const struct eventop **method;
1109         const char **tmp;
1110         int i = 0, k;
1111
1112         /* count all methods */
1113         for (method = &eventops[0]; *method != NULL; ++method) {
1114                 ++i;
1115         }
1116
1117         /* allocate one more than we need for the NULL pointer */
1118         tmp = mm_calloc((i + 1), sizeof(char *));
1119         if (tmp == NULL)
1120                 return (NULL);
1121
1122         /* populate the array with the supported methods */
1123         for (k = 0, i = 0; eventops[k] != NULL; ++k) {
1124                 tmp[i++] = eventops[k]->name;
1125         }
1126         tmp[i] = NULL;
1127
1128         if (methods != NULL)
1129                 mm_free((char**)methods);
1130
1131         methods = tmp;
1132
1133         return (methods);
1134 }
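
/*
 * Sketch: the returned array is NULL-terminated and lists every backend
 * compiled into this build (it does not account for runtime avoidance):
 *
 *     const char **methods = event_get_supported_methods();
 *     for (int i = 0; methods && methods[i]; ++i)
 *         printf("available backend: %s\n", methods[i]);
 */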
1135
1136 struct event_config *
1137 event_config_new(void)
1138 {
1139         struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
1140
1141         if (cfg == NULL)
1142                 return (NULL);
1143
1144         TAILQ_INIT(&cfg->entries);
1145         cfg->max_dispatch_interval.tv_sec = -1;
1146         cfg->max_dispatch_callbacks = INT_MAX;
1147         cfg->limit_callbacks_after_prio = 1;
1148
1149         return (cfg);
1150 }
1151
1152 static void
1153 event_config_entry_free(struct event_config_entry *entry)
1154 {
1155         if (entry->avoid_method != NULL)
1156                 mm_free((char *)entry->avoid_method);
1157         mm_free(entry);
1158 }
1159
1160 void
1161 event_config_free(struct event_config *cfg)
1162 {
1163         struct event_config_entry *entry;
1164
1165         while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
1166                 TAILQ_REMOVE(&cfg->entries, entry, next);
1167                 event_config_entry_free(entry);
1168         }
1169         mm_free(cfg);
1170 }
1171
1172 int
1173 event_config_set_flag(struct event_config *cfg, int flag)
1174 {
1175         if (!cfg)
1176                 return -1;
1177         cfg->flags |= flag;
1178         return 0;
1179 }
1180
1181 int
1182 event_config_avoid_method(struct event_config *cfg, const char *method)
1183 {
1184         struct event_config_entry *entry = mm_malloc(sizeof(*entry));
1185         if (entry == NULL)
1186                 return (-1);
1187
1188         if ((entry->avoid_method = mm_strdup(method)) == NULL) {
1189                 mm_free(entry);
1190                 return (-1);
1191         }
1192
1193         TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
1194
1195         return (0);
1196 }
1197
1198 int
1199 event_config_require_features(struct event_config *cfg,
1200     int features)
1201 {
1202         if (!cfg)
1203                 return (-1);
1204         cfg->require_features = features;
1205         return (0);
1206 }
1207
1208 int
1209 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
1210 {
1211         if (!cfg)
1212                 return (-1);
1213         cfg->n_cpus_hint = cpus;
1214         return (0);
1215 }
1216
1217 int
1218 event_config_set_max_dispatch_interval(struct event_config *cfg,
1219     const struct timeval *max_interval, int max_callbacks, int min_priority)
1220 {
1221         if (max_interval)
1222                 memcpy(&cfg->max_dispatch_interval, max_interval,
1223                     sizeof(struct timeval));
1224         else
1225                 cfg->max_dispatch_interval.tv_sec = -1;
1226         cfg->max_dispatch_callbacks =
1227             max_callbacks >= 0 ? max_callbacks : INT_MAX;
1228         if (min_priority < 0)
1229                 min_priority = 0;
1230         cfg->limit_callbacks_after_prio = min_priority;
1231         return (0);
1232 }
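
/*
 * Sketch of a latency-oriented configuration (the 10 ms / 16-callback limits
 * are arbitrary example values): stop dispatching and re-poll the backend once
 * the interval or callback budget is spent, but never interrupt priority-0
 * callbacks.
 *
 *     struct event_config *cfg = event_config_new();
 *     struct timeval max_block = { 0, 10000 };   // 10 ms
 *     event_config_set_max_dispatch_interval(cfg, &max_block, 16, 1);
 *     struct event_base *base = event_base_new_with_config(cfg);
 *     event_config_free(cfg);
 */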
1233
1234 int
1235 event_priority_init(int npriorities)
1236 {
1237         return event_base_priority_init(current_base, npriorities);
1238 }
1239
1240 int
1241 event_base_priority_init(struct event_base *base, int npriorities)
1242 {
1243         int i, r;
1244         r = -1;
1245
1246         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1247
1248         if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
1249             || npriorities >= EVENT_MAX_PRIORITIES)
1250                 goto err;
1251
1252         if (npriorities == base->nactivequeues)
1253                 goto ok;
1254
1255         if (base->nactivequeues) {
1256                 mm_free(base->activequeues);
1257                 base->nactivequeues = 0;
1258         }
1259
1260         /* Allocate our priority queues */
1261         base->activequeues = (struct evcallback_list *)
1262           mm_calloc(npriorities, sizeof(struct evcallback_list));
1263         if (base->activequeues == NULL) {
1264                 event_warn("%s: calloc", __func__);
1265                 goto err;
1266         }
1267         base->nactivequeues = npriorities;
1268
1269         for (i = 0; i < base->nactivequeues; ++i) {
1270                 TAILQ_INIT(&base->activequeues[i]);
1271         }
1272
1273 ok:
1274         r = 0;
1275 err:
1276         EVBASE_RELEASE_LOCK(base, th_base_lock);
1277         return (r);
1278 }
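
/*
 * Sketch ('fd' and 'cb' are the caller's descriptor and callback): priorities
 * must be set up while no events are active, and each event then chooses its
 * queue with event_priority_set(); lower numbers run first.
 *
 *     event_base_priority_init(base, 2);   // queue 0 = urgent, queue 1 = normal
 *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, cb, NULL);
 *     event_priority_set(ev, 0);           // must happen before event_add()
 *     event_add(ev, NULL);
 */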
1279
1280 int
1281 event_base_get_npriorities(struct event_base *base)
1282 {
1283
1284         int n;
1285         if (base == NULL)
1286                 base = current_base;
1287
1288         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1289         n = base->nactivequeues;
1290         EVBASE_RELEASE_LOCK(base, th_base_lock);
1291         return (n);
1292 }
1293
1294 int
1295 event_base_get_num_events(struct event_base *base, unsigned int type)
1296 {
1297         int r = 0;
1298
1299         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1300
1301         if (type & EVENT_BASE_COUNT_ACTIVE)
1302                 r += base->event_count_active;
1303
1304         if (type & EVENT_BASE_COUNT_VIRTUAL)
1305                 r += base->virtual_event_count;
1306
1307         if (type & EVENT_BASE_COUNT_ADDED)
1308                 r += base->event_count;
1309
1310         EVBASE_RELEASE_LOCK(base, th_base_lock);
1311
1312         return r;
1313 }
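
/*
 * Sketch of a simple stats probe using the counter types handled above:
 *
 *     int active = event_base_get_num_events(base, EVENT_BASE_COUNT_ACTIVE);
 *     int added  = event_base_get_num_events(base, EVENT_BASE_COUNT_ADDED);
 *     printf("callbacks pending: %d, events added: %d\n", active, added);
 */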
1314
1315 int
1316 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
1317 {
1318         int r = 0;
1319
1320         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1321
1322         if (type & EVENT_BASE_COUNT_ACTIVE) {
1323                 r += base->event_count_active_max;
1324                 if (clear)
1325                         base->event_count_active_max = 0;
1326         }
1327
1328         if (type & EVENT_BASE_COUNT_VIRTUAL) {
1329                 r += base->virtual_event_count_max;
1330                 if (clear)
1331                         base->virtual_event_count_max = 0;
1332         }
1333
1334         if (type & EVENT_BASE_COUNT_ADDED) {
1335                 r += base->event_count_max;
1336                 if (clear)
1337                         base->event_count_max = 0;
1338         }
1339
1340         EVBASE_RELEASE_LOCK(base, th_base_lock);
1341
1342         return r;
1343 }
1344
1345 /* Returns true iff we're currently watching any events. */
1346 static int
1347 event_haveevents(struct event_base *base)
1348 {
1349         /* Caller must hold th_base_lock */
1350         return (base->virtual_event_count > 0 || base->event_count > 0);
1351 }
1352
1353 /* "closure" function called when processing active signal events */
1354 static inline void
1355 event_signal_closure(struct event_base *base, struct event *ev)
1356 {
1357         short ncalls;
1358         int should_break;
1359
1360         /* Allows deletes to work */
1361         ncalls = ev->ev_ncalls;
1362         if (ncalls != 0)
1363                 ev->ev_pncalls = &ncalls;
1364         EVBASE_RELEASE_LOCK(base, th_base_lock);
1365         while (ncalls) {
1366                 ncalls--;
1367                 ev->ev_ncalls = ncalls;
1368                 if (ncalls == 0)
1369                         ev->ev_pncalls = NULL;
1370                 (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
1371
1372                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1373                 should_break = base->event_break;
1374                 EVBASE_RELEASE_LOCK(base, th_base_lock);
1375
1376                 if (should_break) {
1377                         if (ncalls != 0)
1378                                 ev->ev_pncalls = NULL;
1379                         return;
1380                 }
1381         }
1382 }
1383
1384 /* Common timeouts are special timeouts that are handled as queues rather than
1385  * in the minheap.  This is more efficient than the minheap if we happen to
1386  * know that we're going to get several thousands of timeout events all with
1387  * the same timeout value.
1388  *
1389  * Since all our timeout handling code assumes timevals can be copied,
1390  * assigned, etc., we can't use a "magic pointer" to encode these common
1391  * timeouts.  Searching through a list to see if every timeout is common could
1392  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
1393  * is 32 bits long, but only uses 20 of those bits (since it can never be over
1394  * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
1395  * of index into the event_base's array of common timeouts.
1396  */
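
/*
 * Usage sketch (assuming 'base' and 'timeout_cb' are the caller's): the public
 * entry point for this mechanism is event_base_init_common_timeout(), defined
 * below.  Adding many events with the returned flagged timeval keeps them in
 * one per-duration queue instead of the minheap.
 *
 *     struct timeval ten_sec = { 10, 0 };
 *     const struct timeval *common =
 *         event_base_init_common_timeout(base, &ten_sec);
 *     struct event *ev = evtimer_new(base, timeout_cb, NULL);
 *     event_add(ev, common ? common : &ten_sec);  // fall back if it failed
 */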
1397
1398 #define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
1399 #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
1400 #define COMMON_TIMEOUT_IDX_SHIFT 20
1401 #define COMMON_TIMEOUT_MASK     0xf0000000
1402 #define COMMON_TIMEOUT_MAGIC    0x50000000
1403
1404 #define COMMON_TIMEOUT_IDX(tv) \
1405         (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
1406
1407 /** Return true iff 'tv' is a common timeout in 'base' */
1408 static inline int
1409 is_common_timeout(const struct timeval *tv,
1410     const struct event_base *base)
1411 {
1412         int idx;
1413         if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1414                 return 0;
1415         idx = COMMON_TIMEOUT_IDX(tv);
1416         return idx < base->n_common_timeouts;
1417 }
1418
1419 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
1420  * one is a common timeout. */
1421 static inline int
1422 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1423 {
1424         return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1425             (tv2->tv_usec & ~MICROSECONDS_MASK);
1426 }
1427
1428 /** Requires that 'tv' is a common timeout.  Return the corresponding
1429  * common_timeout_list. */
1430 static inline struct common_timeout_list *
1431 get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1432 {
1433         return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1434 }
1435
1436 #if 0
1437 static inline int
1438 common_timeout_ok(const struct timeval *tv,
1439     struct event_base *base)
1440 {
1441         const struct timeval *expect =
1442             &get_common_timeout_list(base, tv)->duration;
1443         return tv->tv_sec == expect->tv_sec &&
1444             tv->tv_usec == expect->tv_usec;
1445 }
1446 #endif
1447
1448 /* Add the timeout for the first event in the given common timeout list to the
1449  * event_base's minheap. */
1450 static void
1451 common_timeout_schedule(struct common_timeout_list *ctl,
1452     const struct timeval *now, struct event *head)
1453 {
1454         struct timeval timeout = head->ev_timeout;
1455         timeout.tv_usec &= MICROSECONDS_MASK;
1456         event_add_nolock_(&ctl->timeout_event, &timeout, 1);
1457 }
1458
1459 /* Callback: invoked when the timeout for a common timeout queue triggers.
1460  * This means that (at least) the first event in that queue should be run,
1461  * and the timeout should be rescheduled if there are more events. */
1462 static void
1463 common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1464 {
1465         struct timeval now;
1466         struct common_timeout_list *ctl = arg;
1467         struct event_base *base = ctl->base;
1468         struct event *ev = NULL;
1469         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1470         gettime(base, &now);
1471         while (1) {
1472                 ev = TAILQ_FIRST(&ctl->events);
1473                 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1474                     (ev->ev_timeout.tv_sec == now.tv_sec &&
1475                         (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1476                         break;
1477                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1478                 event_active_nolock_(ev, EV_TIMEOUT, 1);
1479         }
1480         if (ev)
1481                 common_timeout_schedule(ctl, &now, ev);
1482         EVBASE_RELEASE_LOCK(base, th_base_lock);
1483 }
1484
1485 #define MAX_COMMON_TIMEOUTS 256
1486
1487 const struct timeval *
1488 event_base_init_common_timeout(struct event_base *base,
1489     const struct timeval *duration)
1490 {
1491         int i;
1492         struct timeval tv;
1493         const struct timeval *result=NULL;
1494         struct common_timeout_list *new_ctl;
1495
1496         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1497         if (duration->tv_usec > 1000000) {
1498                 memcpy(&tv, duration, sizeof(struct timeval));
1499                 if (is_common_timeout(duration, base))
1500                         tv.tv_usec &= MICROSECONDS_MASK;
1501                 tv.tv_sec += tv.tv_usec / 1000000;
1502                 tv.tv_usec %= 1000000;
1503                 duration = &tv;
1504         }
1505         for (i = 0; i < base->n_common_timeouts; ++i) {
1506                 const struct common_timeout_list *ctl =
1507                     base->common_timeout_queues[i];
1508                 if (duration->tv_sec == ctl->duration.tv_sec &&
1509                     duration->tv_usec ==
1510                     (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1511                         EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1512                         result = &ctl->duration;
1513                         goto done;
1514                 }
1515         }
1516         if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1517                 event_warnx("%s: Too many common timeouts already in use; "
1518                     "we only support %d per event_base", __func__,
1519                     MAX_COMMON_TIMEOUTS);
1520                 goto done;
1521         }
1522         if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1523                 int n = base->n_common_timeouts < 16 ? 16 :
1524                     base->n_common_timeouts*2;
1525                 struct common_timeout_list **newqueues =
1526                     mm_realloc(base->common_timeout_queues,
1527                         n*sizeof(struct common_timeout_queue *));
1528                 if (!newqueues) {
1529                         event_warn("%s: realloc",__func__);
1530                         goto done;
1531                 }
1532                 base->n_common_timeouts_allocated = n;
1533                 base->common_timeout_queues = newqueues;
1534         }
1535         new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1536         if (!new_ctl) {
1537                 event_warn("%s: calloc",__func__);
1538                 goto done;
1539         }
1540         TAILQ_INIT(&new_ctl->events);
1541         new_ctl->duration.tv_sec = duration->tv_sec;
1542         new_ctl->duration.tv_usec =
1543             duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1544             (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1545         evtimer_assign(&new_ctl->timeout_event, base,
1546             common_timeout_callback, new_ctl);
1547         new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1548         event_priority_set(&new_ctl->timeout_event, 0);
1549         new_ctl->base = base;
1550         base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1551         result = &new_ctl->duration;
1552
1553 done:
1554         if (result)
1555                 EVUTIL_ASSERT(is_common_timeout(result, base));
1556
1557         EVBASE_RELEASE_LOCK(base, th_base_lock);
1558         return result;
1559 }
1560
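/* Usage sketch (illustrative only, not part of libevent): an application
 * that creates very many timers with the same duration can ask for a
 * "common timeout" once and pass the returned timeval to event_add() for
 * each such timer; those timers are then kept in an O(1) queue instead of
 * the main timeout heap.  The variable names below are placeholders.
 *
 *     struct timeval tv = { 60, 0 };
 *     const struct timeval *common;
 *
 *     common = event_base_init_common_timeout(base, &tv);
 *     if (common == NULL)
 *             common = &tv;          // fall back to an ordinary timeout
 *     event_add(timer_ev, common);   // repeat for many similar timers
 */
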
1561 /* Closure function invoked when we're activating a persistent event. */
1562 static inline void
1563 event_persist_closure(struct event_base *base, struct event *ev)
1564 {
1565         void (*evcb_callback)(evutil_socket_t, short, void *);
1566
1567         // Other fields of *ev that must be saved before executing the callback
1568         evutil_socket_t evcb_fd;
1569         short evcb_res;
1570         void *evcb_arg;
1571
1572         /* reschedule the persistent event if we have a timeout. */
1573         if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1574                 /* If there was a timeout, we want it to run at an interval of
1575                  * ev_io_timeout after the last time it was _scheduled_ for,
1576                  * not ev_io_timeout after _now_.  If it fired for another
1577                  * reason, though, the timeout ought to start ticking _now_. */
1578                 struct timeval run_at, relative_to, delay, now;
1579                 ev_uint32_t usec_mask = 0;
1580                 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1581                         &ev->ev_io_timeout));
1582                 gettime(base, &now);
1583                 if (is_common_timeout(&ev->ev_timeout, base)) {
1584                         delay = ev->ev_io_timeout;
1585                         usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1586                         delay.tv_usec &= MICROSECONDS_MASK;
1587                         if (ev->ev_res & EV_TIMEOUT) {
1588                                 relative_to = ev->ev_timeout;
1589                                 relative_to.tv_usec &= MICROSECONDS_MASK;
1590                         } else {
1591                                 relative_to = now;
1592                         }
1593                 } else {
1594                         delay = ev->ev_io_timeout;
1595                         if (ev->ev_res & EV_TIMEOUT) {
1596                                 relative_to = ev->ev_timeout;
1597                         } else {
1598                                 relative_to = now;
1599                         }
1600                 }
1601                 evutil_timeradd(&relative_to, &delay, &run_at);
1602                 if (evutil_timercmp(&run_at, &now, <)) {
1603                         /* Looks like we missed at least one invocation due to
1604                          * a clock jump, not running the event loop for a
1605                          * while, really slow callbacks, or
1606                          * something. Reschedule relative to now.
1607                          */
1608                         evutil_timeradd(&now, &delay, &run_at);
1609                 }
1610                 run_at.tv_usec |= usec_mask;
1611                 event_add_nolock_(ev, &run_at, 1);
1612         }
1613
1614         // Save our callback before we release the lock
1615         evcb_callback = ev->ev_callback;
1616         evcb_fd = ev->ev_fd;
1617         evcb_res = ev->ev_res;
1618         evcb_arg = ev->ev_arg;
1619
1620         // Release the lock
1621         EVBASE_RELEASE_LOCK(base, th_base_lock);
1622
1623         // Execute the callback
1624         (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1625 }
1626
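/* Illustrative sketch of the behaviour implemented above (identifiers are
 * placeholders): a persistent event with a timeout is re-added relative to
 * the time it was last scheduled for, so it fires at a steady interval even
 * when callbacks run late.
 *
 *     static void tick_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             // runs roughly every 2 seconds for as long as it stays added
 *     }
 *
 *     struct timeval two_sec = { 2, 0 };
 *     struct event *tick = event_new(base, -1, EV_PERSIST, tick_cb, NULL);
 *     event_add(tick, &two_sec);
 */
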
1627 /*
1628   Helper for event_process_active to process all the events in a single queue,
1629   releasing the lock as we go.  This function requires that the lock be held
1630   when it's invoked.  Returns -1 if we get a signal or an event_break that
1631   means we should stop processing any active events now.  Otherwise returns
1632   the number of non-internal event_callbacks that we processed.
1633 */
1634 static int
1635 event_process_active_single_queue(struct event_base *base,
1636     struct evcallback_list *activeq,
1637     int max_to_process, const struct timeval *endtime)
1638 {
1639         struct event_callback *evcb;
1640         int count = 0;
1641
1642         EVUTIL_ASSERT(activeq != NULL);
1643
1644         for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1645                 struct event *ev=NULL;
1646                 if (evcb->evcb_flags & EVLIST_INIT) {
1647                         ev = event_callback_to_event(evcb);
1648
1649                         if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1650                                 event_queue_remove_active(base, evcb);
1651                         else
1652                                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1653                         event_debug((
1654                             "event_process_active: event: %p, %s%s%scall %p",
1655                             ev,
1656                             ev->ev_res & EV_READ ? "EV_READ " : " ",
1657                             ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1658                             ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1659                             ev->ev_callback));
1660                 } else {
1661                         event_queue_remove_active(base, evcb);
1662                         event_debug(("event_process_active: event_callback %p, "
1663                                 "closure %d, call %p",
1664                                 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1665                 }
1666
1667                 if (!(evcb->evcb_flags & EVLIST_INTERNAL))
1668                         ++count;
1669
1670
1671                 base->current_event = evcb;
1672 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1673                 base->current_event_waiters = 0;
1674 #endif
1675
1676                 switch (evcb->evcb_closure) {
1677                 case EV_CLOSURE_EVENT_SIGNAL:
1678                         EVUTIL_ASSERT(ev != NULL);
1679                         event_signal_closure(base, ev);
1680                         break;
1681                 case EV_CLOSURE_EVENT_PERSIST:
1682                         EVUTIL_ASSERT(ev != NULL);
1683                         event_persist_closure(base, ev);
1684                         break;
1685                 case EV_CLOSURE_EVENT: {
1686                         void (*evcb_callback)(evutil_socket_t, short, void *);
1687                         short res;
1688                         EVUTIL_ASSERT(ev != NULL);
1689                         evcb_callback = *ev->ev_callback;
1690                         res = ev->ev_res;
1691                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1692                         evcb_callback(ev->ev_fd, res, ev->ev_arg);
1693                 }
1694                 break;
1695                 case EV_CLOSURE_CB_SELF: {
1696                         void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1697                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1698                         evcb_selfcb(evcb, evcb->evcb_arg);
1699                 }
1700                 break;
1701                 case EV_CLOSURE_EVENT_FINALIZE:
1702                 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1703                         void (*evcb_evfinalize)(struct event *, void *);
1704                         int evcb_closure = evcb->evcb_closure;
1705                         EVUTIL_ASSERT(ev != NULL);
1706                         base->current_event = NULL;
1707                         evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1708                         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1709                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1710                         evcb_evfinalize(ev, ev->ev_arg);
1711                         event_debug_note_teardown_(ev);
1712                         if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1713                                 mm_free(ev);
1714                 }
1715                 break;
1716                 case EV_CLOSURE_CB_FINALIZE: {
1717                         void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1718                         base->current_event = NULL;
1719                         EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1720                         EVBASE_RELEASE_LOCK(base, th_base_lock);
1721                         evcb_cbfinalize(evcb, evcb->evcb_arg);
1722                 }
1723                 break;
1724                 default:
1725                         EVUTIL_ASSERT(0);
1726                 }
1727
1728                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1729                 base->current_event = NULL;
1730 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1731                 if (base->current_event_waiters) {
1732                         base->current_event_waiters = 0;
1733                         EVTHREAD_COND_BROADCAST(base->current_event_cond);
1734                 }
1735 #endif
1736
1737                 if (base->event_break)
1738                         return -1;
1739                 if (count >= max_to_process)
1740                         return count;
1741                 if (count && endtime) {
1742                         struct timeval now;
1743                         update_time_cache(base);
1744                         gettime(base, &now);
1745                         if (evutil_timercmp(&now, endtime, >=))
1746                                 return count;
1747                 }
1748                 if (base->event_continue)
1749                         break;
1750         }
1751         return count;
1752 }
1753
1754 /*
1755  * Active events are stored in priority queues.  Numerically lower
1756  * priorities are always processed before higher ones, so events at a
1757  * low-numbered priority can starve events at higher-numbered priorities.
1758  */
1759
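/* A minimal sketch of how an application uses priorities under this scheme
 * (identifiers are placeholders):
 *
 *     event_base_priority_init(base, 2);    // queues 0 and 1
 *     event_priority_set(urgent_ev, 0);     // dispatched before...
 *     event_priority_set(bulk_ev, 1);       // ...events in this queue
 *
 * Priorities must be assigned before the events become active.
 */
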
1760 static int
1761 event_process_active(struct event_base *base)
1762 {
1763         /* Caller must hold th_base_lock */
1764         struct evcallback_list *activeq = NULL;
1765         int i, c = 0;
1766         const struct timeval *endtime;
1767         struct timeval tv;
1768         const int maxcb = base->max_dispatch_callbacks;
1769         const int limit_after_prio = base->limit_callbacks_after_prio;
1770         if (base->max_dispatch_time.tv_sec >= 0) {
1771                 update_time_cache(base);
1772                 gettime(base, &tv);
1773                 evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1774                 endtime = &tv;
1775         } else {
1776                 endtime = NULL;
1777         }
1778
1779         for (i = 0; i < base->nactivequeues; ++i) {
1780                 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1781                         base->event_running_priority = i;
1782                         activeq = &base->activequeues[i];
1783                         if (i < limit_after_prio)
1784                                 c = event_process_active_single_queue(base, activeq,
1785                                     INT_MAX, NULL);
1786                         else
1787                                 c = event_process_active_single_queue(base, activeq,
1788                                     maxcb, endtime);
1789                         if (c < 0) {
1790                                 goto done;
1791                         } else if (c > 0)
1792                                 break; /* Processed a real event; do not
1793                                         * consider lower-priority events */
1794                         /* If we get here, all of the events we processed
1795                          * were internal.  Continue. */
1796                 }
1797         }
1798
1799 done:
1800         base->event_running_priority = -1;
1801
1802         return c;
1803 }
1804
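/* The max_dispatch_time, max_dispatch_callbacks and
 * limit_callbacks_after_prio fields consulted above are normally set through
 * the event_config API.  A hedged sketch (the values are arbitrary):
 *
 *     struct event_config *cfg = event_config_new();
 *     struct timeval max_time = { 0, 50000 };   // 50 msec
 *     // For events of priority 1 and above, go back and recheck for
 *     // higher-priority activity after at most 16 callbacks or 50 msec.
 *     event_config_set_max_dispatch_interval(cfg, &max_time, 16, 1);
 *     struct event_base *base = event_base_new_with_config(cfg);
 *     event_config_free(cfg);
 */
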
1805 /*
1806  * Wait continuously for events.  We exit only if no events are left.
1807  */
1808
1809 int
1810 event_dispatch(void)
1811 {
1812         return (event_loop(0));
1813 }
1814
1815 int
1816 event_base_dispatch(struct event_base *event_base)
1817 {
1818         return (event_base_loop(event_base, 0));
1819 }
1820
1821 const char *
1822 event_base_get_method(const struct event_base *base)
1823 {
1824         EVUTIL_ASSERT(base);
1825         return (base->evsel->name);
1826 }
1827
1828 /** Callback: used to implement event_base_loopexit by telling the event_base
1829  * that it's time to exit its loop. */
1830 static void
1831 event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1832 {
1833         struct event_base *base = arg;
1834         base->event_gotterm = 1;
1835 }
1836
1837 int
1838 event_loopexit(const struct timeval *tv)
1839 {
1840         return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1841                     current_base, tv));
1842 }
1843
1844 int
1845 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1846 {
1847         return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1848                     event_base, tv));
1849 }
1850
1851 int
1852 event_loopbreak(void)
1853 {
1854         return (event_base_loopbreak(current_base));
1855 }
1856
1857 int
1858 event_base_loopbreak(struct event_base *event_base)
1859 {
1860         int r = 0;
1861         if (event_base == NULL)
1862                 return (-1);
1863
1864         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1865         event_base->event_break = 1;
1866
1867         if (EVBASE_NEED_NOTIFY(event_base)) {
1868                 r = evthread_notify_base(event_base);
1869         } else {
1870                 r = (0);
1871         }
1872         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1873         return r;
1874 }
1875
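/* Illustrative contrast between the two mechanisms (sketch only):
 * loopexit lets the current round of active callbacks finish, optionally
 * after a delay, while loopbreak makes the loop return as soon as the
 * callback that is currently running has returned.
 *
 *     struct timeval two_sec = { 2, 0 };
 *     event_base_loopexit(base, &two_sec);   // stop about two seconds from now
 *     ...
 *     event_base_loopbreak(base);            // stop right after the current
 *                                            // callback returns
 */
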
1876 int
1877 event_base_loopcontinue(struct event_base *event_base)
1878 {
1879         int r = 0;
1880         if (event_base == NULL)
1881                 return (-1);
1882
1883         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1884         event_base->event_continue = 1;
1885
1886         if (EVBASE_NEED_NOTIFY(event_base)) {
1887                 r = evthread_notify_base(event_base);
1888         } else {
1889                 r = (0);
1890         }
1891         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1892         return r;
1893 }
1894
1895 int
1896 event_base_got_break(struct event_base *event_base)
1897 {
1898         int res;
1899         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1900         res = event_base->event_break;
1901         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1902         return res;
1903 }
1904
1905 int
1906 event_base_got_exit(struct event_base *event_base)
1907 {
1908         int res;
1909         EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1910         res = event_base->event_gotterm;
1911         EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1912         return res;
1913 }
1914
1915 /* not thread safe */
1916
1917 int
1918 event_loop(int flags)
1919 {
1920         return event_base_loop(current_base, flags);
1921 }
1922
1923 int
1924 event_base_loop(struct event_base *base, int flags)
1925 {
1926         const struct eventop *evsel = base->evsel;
1927         struct timeval tv;
1928         struct timeval *tv_p;
1929         int res, done, retval = 0;
1930
1931         /* Grab the lock.  We will release it inside evsel.dispatch, and again
1932          * as we invoke user callbacks. */
1933         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1934
1935         if (base->running_loop) {
1936                 event_warnx("%s: reentrant invocation.  Only one event_base_loop"
1937                     " can run on each event_base at once.", __func__);
1938                 EVBASE_RELEASE_LOCK(base, th_base_lock);
1939                 return -1;
1940         }
1941
1942         base->running_loop = 1;
1943
1944         clear_time_cache(base);
1945
1946         if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
1947                 evsig_set_base_(base);
1948
1949         done = 0;
1950
1951 #ifndef EVENT__DISABLE_THREAD_SUPPORT
1952         base->th_owner_id = EVTHREAD_GET_ID();
1953 #endif
1954
1955         base->event_gotterm = base->event_break = 0;
1956
1957         while (!done) {
1958                 base->event_continue = 0;
1959                 base->n_deferreds_queued = 0;
1960
1961                 /* Terminate the loop if we have been asked to */
1962                 if (base->event_gotterm) {
1963                         break;
1964                 }
1965
1966                 if (base->event_break) {
1967                         break;
1968                 }
1969
1970                 tv_p = &tv;
1971                 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1972                         timeout_next(base, &tv_p);
1973                 } else {
1974                         /*
1975                          * if we have active events, we just poll new events
1976                          * without waiting.
1977                          */
1978                         evutil_timerclear(&tv);
1979                 }
1980
1981                 /* If we have no events, we just exit */
1982                 if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1983                     !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
1984                         event_debug(("%s: no events registered.", __func__));
1985                         retval = 1;
1986                         goto done;
1987                 }
1988
1989                 event_queue_make_later_events_active(base);
1990
1991                 clear_time_cache(base);
1992
1993                 res = evsel->dispatch(base, tv_p);
1994
1995                 if (res == -1) {
1996                         event_debug(("%s: dispatch returned unsuccessfully.",
1997                                 __func__));
1998                         retval = -1;
1999                         goto done;
2000                 }
2001
2002                 update_time_cache(base);
2003
2004                 timeout_process(base);
2005
2006                 if (N_ACTIVE_CALLBACKS(base)) {
2007                         int n = event_process_active(base);
2008                         if ((flags & EVLOOP_ONCE)
2009                             && N_ACTIVE_CALLBACKS(base) == 0
2010                             && n != 0)
2011                                 done = 1;
2012                 } else if (flags & EVLOOP_NONBLOCK)
2013                         done = 1;
2014         }
2015         event_debug(("%s: asked to terminate loop.", __func__));
2016
2017 done:
2018         clear_time_cache(base);
2019         base->running_loop = 0;
2020
2021         EVBASE_RELEASE_LOCK(base, th_base_lock);
2022
2023         return (retval);
2024 }
2025
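/* Hedged usage sketch for the flags handled above:
 *
 *     // Poll once without blocking, e.g. from some other main loop.
 *     event_base_loop(base, EVLOOP_NONBLOCK);
 *
 *     // Keep running even while no events are pending; another thread
 *     // will add events later and eventually call event_base_loopexit()
 *     // or event_base_loopbreak() to stop us.
 *     event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);
 */
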
2026 /* One-time callback to implement event_base_once: invokes the user callback,
2027  * then deletes the allocated storage */
2028 static void
2029 event_once_cb(evutil_socket_t fd, short events, void *arg)
2030 {
2031         struct event_once *eonce = arg;
2032
2033         (*eonce->cb)(fd, events, eonce->arg);
2034         EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
2035         LIST_REMOVE(eonce, next_once);
2036         EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
2037         event_debug_unassign(&eonce->ev);
2038         mm_free(eonce);
2039 }
2040
2041 /* Not threadsafe; schedules an event to fire only once. */
2042 int
2043 event_once(evutil_socket_t fd, short events,
2044     void (*callback)(evutil_socket_t, short, void *),
2045     void *arg, const struct timeval *tv)
2046 {
2047         return event_base_once(current_base, fd, events, callback, arg, tv);
2048 }
2049
2050 /* Schedules an event once */
2051 int
2052 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2053     void (*callback)(evutil_socket_t, short, void *),
2054     void *arg, const struct timeval *tv)
2055 {
2056         struct event_once *eonce;
2057         int res = 0;
2058         int activate = 0;
2059
2060         /* We cannot support signals that just fire once, or persistent
2061          * events. */
2062         if (events & (EV_SIGNAL|EV_PERSIST))
2063                 return (-1);
2064
2065         if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2066                 return (-1);
2067
2068         eonce->cb = callback;
2069         eonce->arg = arg;
2070
2071         if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
2072                 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
2073
2074                 if (tv == NULL || ! evutil_timerisset(tv)) {
2075                         /* If the event is going to become active immediately,
2076                          * don't put it on the timeout queue.  This is one
2077                          * idiom for scheduling a callback, so let's make
2078                          * it fast (and order-preserving). */
2079                         activate = 1;
2080                 }
2081         } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2082                 events &= EV_READ|EV_WRITE|EV_CLOSED;
2083
2084                 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2085         } else {
2086                 /* Bad event combination */
2087                 mm_free(eonce);
2088                 return (-1);
2089         }
2090
2091         if (res == 0) {
2092                 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2093                 if (activate)
2094                         event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2095                 else
2096                         res = event_add_nolock_(&eonce->ev, tv, 0);
2097
2098                 if (res != 0) {
                        /* don't return with th_base_lock still held */
                        EVBASE_RELEASE_LOCK(base, th_base_lock);
2099                         mm_free(eonce);
2100                         return (res);
2101                 } else {
2102                         LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2103                 }
2104                 EVBASE_RELEASE_LOCK(base, th_base_lock);
2105         }
2106
2107         return (0);
2108 }
2109
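/* Usage sketch (callback name is a placeholder): schedule a callback that
 * fires exactly once; libevent frees the internal storage after it runs.
 *
 *     static void greet_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             puts("timer fired once");
 *     }
 *
 *     struct timeval one_sec = { 1, 0 };
 *     event_base_once(base, -1, EV_TIMEOUT, greet_cb, NULL, &one_sec);
 */
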
2110 int
2111 event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2112 {
2113         if (!base)
2114                 base = current_base;
2115         if (arg == &event_self_cbarg_ptr_)
2116                 arg = ev;
2117
2118         if (!(events & EV_SIGNAL))
2119                 event_debug_assert_socket_nonblocking_(fd);
2120         event_debug_assert_not_added_(ev);
2121
2122         ev->ev_base = base;
2123
2124         ev->ev_callback = callback;
2125         ev->ev_arg = arg;
2126         ev->ev_fd = fd;
2127         ev->ev_events = events;
2128         ev->ev_res = 0;
2129         ev->ev_flags = EVLIST_INIT;
2130         ev->ev_ncalls = 0;
2131         ev->ev_pncalls = NULL;
2132
2133         if (events & EV_SIGNAL) {
2134                 if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
2135                         event_warnx("%s: EV_SIGNAL is not compatible with "
2136                             "EV_READ, EV_WRITE or EV_CLOSED", __func__);
2137                         return -1;
2138                 }
2139                 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
2140         } else {
2141                 if (events & EV_PERSIST) {
2142                         evutil_timerclear(&ev->ev_io_timeout);
2143                         ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
2144                 } else {
2145                         ev->ev_closure = EV_CLOSURE_EVENT;
2146                 }
2147         }
2148
2149         min_heap_elem_init_(ev);
2150
2151         if (base != NULL) {
2152                 /* by default, we put new events into the middle priority */
2153                 ev->ev_pri = base->nactivequeues / 2;
2154         }
2155
2156         event_debug_note_setup_(ev);
2157
2158         return 0;
2159 }
2160
2161 int
2162 event_base_set(struct event_base *base, struct event *ev)
2163 {
2164         /* Only events that have not yet been added may be assigned to a different base */
2165         if (ev->ev_flags != EVLIST_INIT)
2166                 return (-1);
2167
2168         event_debug_assert_is_setup_(ev);
2169
2170         ev->ev_base = base;
2171         ev->ev_pri = base->nactivequeues/2;
2172
2173         return (0);
2174 }
2175
2176 void
2177 event_set(struct event *ev, evutil_socket_t fd, short events,
2178           void (*callback)(evutil_socket_t, short, void *), void *arg)
2179 {
2180         int r;
2181         r = event_assign(ev, current_base, fd, events, callback, arg);
2182         EVUTIL_ASSERT(r == 0);
2183 }
2184
2185 void *
2186 event_self_cbarg(void)
2187 {
2188         return &event_self_cbarg_ptr_;
2189 }
2190
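/* Sketch of the usual pairing (identifiers are placeholders): passing
 * event_self_cbarg() makes the callback receive its own struct event *,
 * which is convenient for events that delete or free themselves.
 *
 *     static void read_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *             struct event *self = arg;   // the event itself
 *             // ... consume data, then tear the event down ...
 *             event_free(self);
 *     }
 *
 *     struct event *ev = event_new(base, fd, EV_READ, read_cb,
 *         event_self_cbarg());
 *     event_add(ev, NULL);
 */
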
2191 struct event *
2192 event_base_get_running_event(struct event_base *base)
2193 {
2194         struct event *ev = NULL;
2195         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2196         if (EVBASE_IN_THREAD(base)) {
2197                 struct event_callback *evcb = base->current_event;
2198                 if (evcb->evcb_flags & EVLIST_INIT)
2199                         ev = event_callback_to_event(evcb);
2200         }
2201         EVBASE_RELEASE_LOCK(base, th_base_lock);
2202         return ev;
2203 }
2204
2205 struct event *
2206 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2207 {
2208         struct event *ev;
2209         ev = mm_malloc(sizeof(struct event));
2210         if (ev == NULL)
2211                 return (NULL);
2212         if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2213                 mm_free(ev);
2214                 return (NULL);
2215         }
2216
2217         return (ev);
2218 }
2219
2220 void
2221 event_free(struct event *ev)
2222 {
2223         /* This assertion is disabled so that events which have already been
2224          * finalized remain a valid target for event_free(). */
2225         // event_debug_assert_is_setup_(ev);
2226
2227         /* make sure that this event won't be coming back to haunt us. */
2228         event_del(ev);
2229         event_debug_note_teardown_(ev);
2230         mm_free(ev);
2232 }
2233
2234 void
2235 event_debug_unassign(struct event *ev)
2236 {
2237         event_debug_assert_not_added_(ev);
2238         event_debug_note_teardown_(ev);
2239
2240         ev->ev_flags &= ~EVLIST_INIT;
2241 }
2242
2243 #define EVENT_FINALIZE_FREE_ 0x10000
2244 static int
2245 event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2246 {
2247         ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2248             EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2249
2250         event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2251         ev->ev_closure = closure;
2252         ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2253         event_active_nolock_(ev, EV_FINALIZE, 1);
2254         ev->ev_flags |= EVLIST_FINALIZING;
2255         return 0;
2256 }
2257
2258 static int
2259 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2260 {
2261         int r;
2262         struct event_base *base = ev->ev_base;
2263         if (EVUTIL_FAILURE_CHECK(!base)) {
2264                 event_warnx("%s: event has no event_base set.", __func__);
2265                 return -1;
2266         }
2267
2268         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2269         r = event_finalize_nolock_(base, flags, ev, cb);
2270         EVBASE_RELEASE_LOCK(base, th_base_lock);
2271         return r;
2272 }
2273
2274 int
2275 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2276 {
2277         return event_finalize_impl_(flags, ev, cb);
2278 }
2279
2280 int
2281 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2282 {
2283         return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2284 }
2285
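/* Hedged sketch: when another thread may currently be running the event's
 * callback, event_free_finalize() is a non-blocking alternative to
 * event_del() + event_free().  The finalizer runs only after the callback
 * has finished, and the FREE variant then releases the struct event itself.
 * The names below are placeholders.
 *
 *     static void on_gone(struct event *ev, void *arg)
 *     {
 *             free(arg);   // now safe: the callback can no longer run
 *     }
 *
 *     event_free_finalize(0, ev, on_gone);   // also frees ev afterwards
 */
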
2286 void
2287 event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2288 {
2289         struct event *ev = NULL;
2290         if (evcb->evcb_flags & EVLIST_INIT) {
2291                 ev = event_callback_to_event(evcb);
2292                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2293         } else {
2294                 event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2295         }
2296
2297         evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2298         evcb->evcb_cb_union.evcb_cbfinalize = cb;
2299         event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2300         evcb->evcb_flags |= EVLIST_FINALIZING;
2301 }
2302
2303 void
2304 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2305 {
2306         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2307         event_callback_finalize_nolock_(base, flags, evcb, cb);
2308         EVBASE_RELEASE_LOCK(base, th_base_lock);
2309 }
2310
2311 /** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
2312  * callback will be invoked on *one of them*, after they have *all* been
2313  * finalized. */
2314 int
2315 event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2316 {
2317         int n_pending = 0, i;
2318
2319         if (base == NULL)
2320                 base = current_base;
2321
2322         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2323
2324         event_debug(("%s: %d events finalizing", __func__, n_cbs));
2325
2326         /* At most one can be currently executing; the rest we just
2327          * cancel... But we always make sure that the finalize callback
2328          * runs. */
2329         for (i = 0; i < n_cbs; ++i) {
2330                 struct event_callback *evcb = evcbs[i];
2331                 if (evcb == base->current_event) {
2332                         event_callback_finalize_nolock_(base, 0, evcb, cb);
2333                         ++n_pending;
2334                 } else {
2335                         event_callback_cancel_nolock_(base, evcb, 0);
2336                 }
2337         }
2338
2339         if (n_pending == 0) {
2340                 /* Just do the first one. */
2341                 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2342         }
2343
2344         EVBASE_RELEASE_LOCK(base, th_base_lock);
2345         return 0;
2346 }
2347
2348 /*
2349  * Sets the priority of an event: if the event is already active,
2350  * changing its priority will fail.
2351  */
2352
2353 int
2354 event_priority_set(struct event *ev, int pri)
2355 {
2356         event_debug_assert_is_setup_(ev);
2357
2358         if (ev->ev_flags & EVLIST_ACTIVE)
2359                 return (-1);
2360         if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2361                 return (-1);
2362
2363         ev->ev_pri = pri;
2364
2365         return (0);
2366 }
2367
2368 /*
2369  * Checks if a specific event is pending or scheduled.
2370  */
2371
2372 int
2373 event_pending(const struct event *ev, short event, struct timeval *tv)
2374 {
2375         int flags = 0;
2376
2377         if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2378                 event_warnx("%s: event has no event_base set.", __func__);
2379                 return 0;
2380         }
2381
2382         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2383         event_debug_assert_is_setup_(ev);
2384
2385         if (ev->ev_flags & EVLIST_INSERTED)
2386                 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2387         if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2388                 flags |= ev->ev_res;
2389         if (ev->ev_flags & EVLIST_TIMEOUT)
2390                 flags |= EV_TIMEOUT;
2391
2392         event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2393
2394         /* See if there is a timeout that we should report */
2395         if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2396                 struct timeval tmp = ev->ev_timeout;
2397                 tmp.tv_usec &= MICROSECONDS_MASK;
2398                 /* correctly remap to real time */
2399                 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2400         }
2401
2402         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2403
2404         return (flags & event);
2405 }
2406
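/* Usage sketch: check whether a timeout is still pending and, if so, learn
 * the wall-clock time at which it will fire ('ev' is a placeholder).
 *
 *     struct timeval expiry;
 *     if (event_pending(ev, EV_TIMEOUT, &expiry)) {
 *             // 'expiry' holds the absolute time of the pending timeout
 *     }
 */
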
2407 int
2408 event_initialized(const struct event *ev)
2409 {
2410         if (!(ev->ev_flags & EVLIST_INIT))
2411                 return 0;
2412
2413         return 1;
2414 }
2415
2416 void
2417 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2418 {
2419         event_debug_assert_is_setup_(event);
2420
2421         if (base_out)
2422                 *base_out = event->ev_base;
2423         if (fd_out)
2424                 *fd_out = event->ev_fd;
2425         if (events_out)
2426                 *events_out = event->ev_events;
2427         if (callback_out)
2428                 *callback_out = event->ev_callback;
2429         if (arg_out)
2430                 *arg_out = event->ev_arg;
2431 }
2432
2433 size_t
2434 event_get_struct_event_size(void)
2435 {
2436         return sizeof(struct event);
2437 }
2438
2439 evutil_socket_t
2440 event_get_fd(const struct event *ev)
2441 {
2442         event_debug_assert_is_setup_(ev);
2443         return ev->ev_fd;
2444 }
2445
2446 struct event_base *
2447 event_get_base(const struct event *ev)
2448 {
2449         event_debug_assert_is_setup_(ev);
2450         return ev->ev_base;
2451 }
2452
2453 short
2454 event_get_events(const struct event *ev)
2455 {
2456         event_debug_assert_is_setup_(ev);
2457         return ev->ev_events;
2458 }
2459
2460 event_callback_fn
2461 event_get_callback(const struct event *ev)
2462 {
2463         event_debug_assert_is_setup_(ev);
2464         return ev->ev_callback;
2465 }
2466
2467 void *
2468 event_get_callback_arg(const struct event *ev)
2469 {
2470         event_debug_assert_is_setup_(ev);
2471         return ev->ev_arg;
2472 }
2473
2474 int
2475 event_get_priority(const struct event *ev)
2476 {
2477         event_debug_assert_is_setup_(ev);
2478         return ev->ev_pri;
2479 }
2480
2481 int
2482 event_add(struct event *ev, const struct timeval *tv)
2483 {
2484         int res;
2485
2486         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2487                 event_warnx("%s: event has no event_base set.", __func__);
2488                 return -1;
2489         }
2490
2491         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2492
2493         res = event_add_nolock_(ev, tv, 0);
2494
2495         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2496
2497         return (res);
2498 }
2499
2500 /* Helper callback: wake an event_base from another thread.  This version
2501  * works by writing a byte to one end of a socketpair, so that the event_base
2502  * listening on the other end will wake up as the corresponding event
2503  * triggers */
2504 static int
2505 evthread_notify_base_default(struct event_base *base)
2506 {
2507         char buf[1];
2508         int r;
2509         buf[0] = (char) 0;
2510 #ifdef _WIN32
2511         r = send(base->th_notify_fd[1], buf, 1, 0);
2512 #else
2513         r = write(base->th_notify_fd[1], buf, 1);
2514 #endif
2515         return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2516 }
2517
2518 #ifdef EVENT__HAVE_EVENTFD
2519 /* Helper callback: wake an event_base from another thread.  This version
2520  * assumes that you have a working eventfd() implementation. */
2521 static int
2522 evthread_notify_base_eventfd(struct event_base *base)
2523 {
2524         ev_uint64_t msg = 1;
2525         int r;
2526         do {
2527                 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2528         } while (r < 0 && errno == EAGAIN);
2529
2530         return (r < 0) ? -1 : 0;
2531 }
2532 #endif
2533
2534
2535 /** Tell the thread currently running the event_loop for base (if any) that it
2536  * needs to stop waiting in its dispatch function (if it is) and process all
2537  * active callbacks. */
2538 static int
2539 evthread_notify_base(struct event_base *base)
2540 {
2541         EVENT_BASE_ASSERT_LOCKED(base);
2542         if (!base->th_notify_fn)
2543                 return -1;
2544         if (base->is_notify_pending)
2545                 return 0;
2546         base->is_notify_pending = 1;
2547         return base->th_notify_fn(base);
2548 }
2549
2550 /* Implementation function to remove a timeout on a currently pending event.
2551  */
2552 int
2553 event_remove_timer_nolock_(struct event *ev)
2554 {
2555         struct event_base *base = ev->ev_base;
2556
2557         EVENT_BASE_ASSERT_LOCKED(base);
2558         event_debug_assert_is_setup_(ev);
2559
2560         event_debug(("event_remove_timer_nolock: event: %p", ev));
2561
2562         /* If it's not pending on a timeout, we don't need to do anything. */
2563         if (ev->ev_flags & EVLIST_TIMEOUT) {
2564                 event_queue_remove_timeout(base, ev);
2565                 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2566         }
2567
2568         return (0);
2569 }
2570
2571 int
2572 event_remove_timer(struct event *ev)
2573 {
2574         int res;
2575
2576         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2577                 event_warnx("%s: event has no event_base set.", __func__);
2578                 return -1;
2579         }
2580
2581         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2582
2583         res = event_remove_timer_nolock_(ev);
2584
2585         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2586
2587         return (res);
2588 }
2589
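/* Sketch (identifiers are placeholders): drop only the timeout from a
 * pending event while leaving its I/O or signal registration in place, for
 * example to cancel a read timeout once a peer has authenticated.
 *
 *     struct timeval ten_sec = { 10, 0 };
 *     event_add(read_ev, &ten_sec);    // EV_READ with a 10 second timeout
 *     ...
 *     event_remove_timer(read_ev);     // keep waiting for EV_READ,
 *                                      // but without the timeout
 */
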
2590 /* Implementation function to add an event.  Works just like event_add,
2591  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
2592  * we treat tv as an absolute time, not as an interval to add to the current
2593  * time */
2594 int
2595 event_add_nolock_(struct event *ev, const struct timeval *tv,
2596     int tv_is_absolute)
2597 {
2598         struct event_base *base = ev->ev_base;
2599         int res = 0;
2600         int notify = 0;
2601
2602         EVENT_BASE_ASSERT_LOCKED(base);
2603         event_debug_assert_is_setup_(ev);
2604
2605         event_debug((
2606                  "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2607                  ev,
2608                  EV_SOCK_ARG(ev->ev_fd),
2609                  ev->ev_events & EV_READ ? "EV_READ " : " ",
2610                  ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2611                  ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2612                  tv ? "EV_TIMEOUT " : " ",
2613                  ev->ev_callback));
2614
2615         EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2616
2617         if (ev->ev_flags & EVLIST_FINALIZING) {
2618                 /* XXXX debug */
2619                 return (-1);
2620         }
2621
2622         /*
2623          * Prepare for the timeout insertion further below; if any step
2624          * fails, we must not change any state.
2625          */
2626         if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2627                 if (min_heap_reserve_(&base->timeheap,
2628                         1 + min_heap_size_(&base->timeheap)) == -1)
2629                         return (-1);  /* ENOMEM == errno */
2630         }
2631
2632         /* If the main thread is currently executing a signal event's
2633          * callback, and we are not the main thread, then we want to wait
2634          * until the callback is done before we mess with the event, or else
2635          * we can race on ev_ncalls and ev_pncalls below. */
2636 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2637         if (base->current_event == event_to_event_callback(ev) &&
2638             (ev->ev_events & EV_SIGNAL)
2639             && !EVBASE_IN_THREAD(base)) {
2640                 ++base->current_event_waiters;
2641                 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2642         }
2643 #endif
2644
2645         if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2646             !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2647                 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2648                         res = evmap_io_add_(base, ev->ev_fd, ev);
2649                 else if (ev->ev_events & EV_SIGNAL)
2650                         res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2651                 if (res != -1)
2652                         event_queue_insert_inserted(base, ev);
2653                 if (res == 1) {
2654                         /* evmap says we need to notify the main thread. */
2655                         notify = 1;
2656                         res = 0;
2657                 }
2658         }
2659
2660         /*
2661          * we should change the timeout state only if the previous event
2662          * addition succeeded.
2663          */
2664         if (res != -1 && tv != NULL) {
2665                 struct timeval now;
2666                 int common_timeout;
2667 #ifdef USE_REINSERT_TIMEOUT
2668                 int was_common;
2669                 int old_timeout_idx;
2670 #endif
2671
2672                 /*
2673                  * for persistent timeout events, we remember the
2674                  * timeout value and re-add the event.
2675                  *
2676                  * If tv_is_absolute, this was already set.
2677                  */
2678                 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2679                         ev->ev_io_timeout = *tv;
2680
2681 #ifndef USE_REINSERT_TIMEOUT
2682                 if (ev->ev_flags & EVLIST_TIMEOUT) {
2683                         event_queue_remove_timeout(base, ev);
2684                 }
2685 #endif
2686
2687                 /* Check if it is active due to a timeout.  Rescheduling
2688                  * this timeout before the callback can be executed
2689                  * removes it from the active list. */
2690                 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2691                     (ev->ev_res & EV_TIMEOUT)) {
2692                         if (ev->ev_events & EV_SIGNAL) {
2693                                 /* See if we are just active executing
2694                                  * this event in a loop
2695                                  */
2696                                 if (ev->ev_ncalls && ev->ev_pncalls) {
2697                                         /* Abort loop */
2698                                         *ev->ev_pncalls = 0;
2699                                 }
2700                         }
2701
2702                         event_queue_remove_active(base, event_to_event_callback(ev));
2703                 }
2704
2705                 gettime(base, &now);
2706
2707                 common_timeout = is_common_timeout(tv, base);
2708 #ifdef USE_REINSERT_TIMEOUT
2709                 was_common = is_common_timeout(&ev->ev_timeout, base);
2710                 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2711 #endif
2712
2713                 if (tv_is_absolute) {
2714                         ev->ev_timeout = *tv;
2715                 } else if (common_timeout) {
2716                         struct timeval tmp = *tv;
2717                         tmp.tv_usec &= MICROSECONDS_MASK;
2718                         evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2719                         ev->ev_timeout.tv_usec |=
2720                             (tv->tv_usec & ~MICROSECONDS_MASK);
2721                 } else {
2722                         evutil_timeradd(&now, tv, &ev->ev_timeout);
2723                 }
2724
2725                 event_debug((
2726                          "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2727                          ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2728
2729 #ifdef USE_REINSERT_TIMEOUT
2730                 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2731 #else
2732                 event_queue_insert_timeout(base, ev);
2733 #endif
2734
2735                 if (common_timeout) {
2736                         struct common_timeout_list *ctl =
2737                             get_common_timeout_list(base, &ev->ev_timeout);
2738                         if (ev == TAILQ_FIRST(&ctl->events)) {
2739                                 common_timeout_schedule(ctl, &now, ev);
2740                         }
2741                 } else {
2742                         struct event* top = NULL;
2743                         /* See if the earliest timeout is now earlier than it
2744                          * was before: if so, we will need to tell the main
2745                          * thread to wake up earlier than it would otherwise.
2746                          * We double check the timeout of the top element to
2747                          * handle time distortions due to system suspension.
2748                          */
2749                         if (min_heap_elt_is_top_(ev))
2750                                 notify = 1;
2751                         else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2752                                          evutil_timercmp(&top->ev_timeout, &now, <))
2753                                 notify = 1;
2754                 }
2755         }
2756
2757         /* if we are not in the right thread, we need to wake up the loop */
2758         if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2759                 evthread_notify_base(base);
2760
2761         event_debug_note_add_(ev);
2762
2763         return (res);
2764 }
2765
2766 static int
2767 event_del_(struct event *ev, int blocking)
2768 {
2769         int res;
2770         struct event_base *base = ev->ev_base;
2771
2772         if (EVUTIL_FAILURE_CHECK(!base)) {
2773                 event_warnx("%s: event has no event_base set.", __func__);
2774                 return -1;
2775         }
2776
2777         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2778         res = event_del_nolock_(ev, blocking);
2779         EVBASE_RELEASE_LOCK(base, th_base_lock);
2780
2781         return (res);
2782 }
2783
2784 int
2785 event_del(struct event *ev)
2786 {
2787         return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2788 }
2789
2790 int
2791 event_del_block(struct event *ev)
2792 {
2793         return event_del_(ev, EVENT_DEL_BLOCK);
2794 }
2795
2796 int
2797 event_del_noblock(struct event *ev)
2798 {
2799         return event_del_(ev, EVENT_DEL_NOBLOCK);
2800 }
2801
2802 /** Helper for event_del: always called with th_base_lock held.
2803  *
2804  * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2805  * EVEN_IF_FINALIZING} values. See those for more information.
2806  */
2807 int
2808 event_del_nolock_(struct event *ev, int blocking)
2809 {
2810         struct event_base *base;
2811         int res = 0, notify = 0;
2812
2813         event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2814                 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2815
2816         /* An event without a base has not been added */
2817         if (ev->ev_base == NULL)
2818                 return (-1);
2819
2820         EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2821
2822         if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2823                 if (ev->ev_flags & EVLIST_FINALIZING) {
2824                         /* XXXX Debug */
2825                         return 0;
2826                 }
2827         }
2828
2829         base = ev->ev_base;
2830
2831         EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2832
2833         /* See if we are just active executing this event in a loop */
2834         if (ev->ev_events & EV_SIGNAL) {
2835                 if (ev->ev_ncalls && ev->ev_pncalls) {
2836                         /* Abort loop */
2837                         *ev->ev_pncalls = 0;
2838                 }
2839         }
2840
2841         if (ev->ev_flags & EVLIST_TIMEOUT) {
2842                 /* NOTE: We never need to notify the main thread because of a
2843                  * deleted timeout event: all that could happen if we don't is
2844                  * that the dispatch loop might wake up too early.  But the
2845                  * point of notifying the main thread _is_ to wake up the
2846                  * dispatch loop early anyway, so we wouldn't gain anything by
2847                  * doing it.
2848                  */
2849                 event_queue_remove_timeout(base, ev);
2850         }
2851
2852         if (ev->ev_flags & EVLIST_ACTIVE)
2853                 event_queue_remove_active(base, event_to_event_callback(ev));
2854         else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2855                 event_queue_remove_active_later(base, event_to_event_callback(ev));
2856
2857         if (ev->ev_flags & EVLIST_INSERTED) {
2858                 event_queue_remove_inserted(base, ev);
2859                 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2860                         res = evmap_io_del_(base, ev->ev_fd, ev);
2861                 else
2862                         res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2863                 if (res == 1) {
2864                         /* evmap says we need to notify the main thread. */
2865                         notify = 1;
2866                         res = 0;
2867                 }
2868                 /* If we do not have events, let's notify event base so it can
2869                  * exit without waiting */
2870                 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2871                         notify = 1;
2872         }
2873
2874         /* if we are not in the right thread, we need to wake up the loop */
2875         if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2876                 evthread_notify_base(base);
2877
2878         event_debug_note_del_(ev);
2879
2880         /* If the main thread is currently executing this event's callback,
2881          * and we are not the main thread, then we want to wait until the
2882          * callback is done before returning. That way, when this function
2883          * returns, it will be safe to free the user-supplied argument.
2884          */
2885 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2886         if (blocking != EVENT_DEL_NOBLOCK &&
2887             base->current_event == event_to_event_callback(ev) &&
2888             !EVBASE_IN_THREAD(base) &&
2889             (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2890                 ++base->current_event_waiters;
2891                 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2892         }
2893 #endif
2894
2895         return (res);
2896 }
2897
2898 void
2899 event_active(struct event *ev, int res, short ncalls)
2900 {
2901         if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2902                 event_warnx("%s: event has no event_base set.", __func__);
2903                 return;
2904         }
2905
2906         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2907
2908         event_debug_assert_is_setup_(ev);
2909
2910         event_active_nolock_(ev, res, ncalls);
2911
2912         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2913 }
2914
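/* Sketch: event_active() forces an event's callback to run, for example from
 * another thread, as if the given result flags had been reported by the
 * backend.  The ncalls argument only matters for signal events.
 *
 *     event_active(ev, EV_READ, 0);    // queue ev's callback with
 *                                      // ev_res == EV_READ
 */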
2915
2916 void
2917 event_active_nolock_(struct event *ev, int res, short ncalls)
2918 {
2919         struct event_base *base;
2920
2921         event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2922                 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2923
2924         base = ev->ev_base;
2925         EVENT_BASE_ASSERT_LOCKED(base);
2926
2927         if (ev->ev_flags & EVLIST_FINALIZING) {
2928                 /* XXXX debug */
2929                 return;
2930         }
2931
2932         switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2933         default:
2934         case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2935                 EVUTIL_ASSERT(0);
2936                 break;
2937         case EVLIST_ACTIVE:
2938                 /* We get different kinds of events, add them together */
2939                 ev->ev_res |= res;
2940                 return;
2941         case EVLIST_ACTIVE_LATER:
2942                 ev->ev_res |= res;
2943                 break;
2944         case 0:
2945                 ev->ev_res = res;
2946                 break;
2947         }
2948
2949         if (ev->ev_pri < base->event_running_priority)
2950                 base->event_continue = 1;
2951
2952         if (ev->ev_events & EV_SIGNAL) {
2953 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2954                 if (base->current_event == event_to_event_callback(ev) &&
2955                     !EVBASE_IN_THREAD(base)) {
2956                         ++base->current_event_waiters;
2957                         EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2958                 }
2959 #endif
2960                 ev->ev_ncalls = ncalls;
2961                 ev->ev_pncalls = NULL;
2962         }
2963
2964         event_callback_activate_nolock_(base, event_to_event_callback(ev));
2965 }
2966
2967 void
2968 event_active_later_(struct event *ev, int res)
2969 {
2970         EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2971         event_active_later_nolock_(ev, res);
2972         EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2973 }
2974
2975 void
2976 event_active_later_nolock_(struct event *ev, int res)
2977 {
2978         struct event_base *base = ev->ev_base;
2979         EVENT_BASE_ASSERT_LOCKED(base);
2980
2981         if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2982                 /* We get different kinds of events, add them together */
2983                 ev->ev_res |= res;
2984                 return;
2985         }
2986
2987         ev->ev_res = res;
2988
2989         event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2990 }
2991
2992 int
2993 event_callback_activate_(struct event_base *base,
2994     struct event_callback *evcb)
2995 {
2996         int r;
2997         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2998         r = event_callback_activate_nolock_(base, evcb);
2999         EVBASE_RELEASE_LOCK(base, th_base_lock);
3000         return r;
3001 }
3002
3003 int
3004 event_callback_activate_nolock_(struct event_base *base,
3005     struct event_callback *evcb)
3006 {
3007         int r = 1;
3008
3009         if (evcb->evcb_flags & EVLIST_FINALIZING)
3010                 return 0;
3011
3012         switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3013         default:
3014                 EVUTIL_ASSERT(0);
3015                 EVUTIL_FALLTHROUGH;
3016         case EVLIST_ACTIVE_LATER:
3017                 event_queue_remove_active_later(base, evcb);
3018                 r = 0;
3019                 break;
3020         case EVLIST_ACTIVE:
3021                 return 0;
3022         case 0:
3023                 break;
3024         }
3025
3026         event_queue_insert_active(base, evcb);
3027
3028         if (EVBASE_NEED_NOTIFY(base))
3029                 evthread_notify_base(base);
3030
3031         return r;
3032 }
3033
3034 int
3035 event_callback_activate_later_nolock_(struct event_base *base,
3036     struct event_callback *evcb)
3037 {
3038         if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3039                 return 0;
3040
3041         event_queue_insert_active_later(base, evcb);
3042         if (EVBASE_NEED_NOTIFY(base))
3043                 evthread_notify_base(base);
3044         return 1;
3045 }
3046
3047 void
3048 event_callback_init_(struct event_base *base,
3049     struct event_callback *cb)
3050 {
3051         memset(cb, 0, sizeof(*cb));
3052         cb->evcb_pri = base->nactivequeues - 1;
3053 }
3054
3055 int
3056 event_callback_cancel_(struct event_base *base,
3057     struct event_callback *evcb)
3058 {
3059         int r;
3060         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3061         r = event_callback_cancel_nolock_(base, evcb, 0);
3062         EVBASE_RELEASE_LOCK(base, th_base_lock);
3063         return r;
3064 }
3065
3066 int
3067 event_callback_cancel_nolock_(struct event_base *base,
3068     struct event_callback *evcb, int even_if_finalizing)
3069 {
3070         if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3071                 return 0;
3072
3073         if (evcb->evcb_flags & EVLIST_INIT)
3074                 return event_del_nolock_(event_callback_to_event(evcb),
3075                     even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3076
3077         switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3078         default:
3079         case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3080                 EVUTIL_ASSERT(0);
3081                 break;
3082         case EVLIST_ACTIVE:
3083                 /* It is currently active; just remove it from the active queue. */
3084                 event_queue_remove_active(base, evcb);
3085                 return 0;
3086         case EVLIST_ACTIVE_LATER:
3087                 event_queue_remove_active_later(base, evcb);
3088                 break;
3089         case 0:
3090                 break;
3091         }
3092
3093         return 0;
3094 }
3095
3096 void
3097 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3098 {
3099         memset(cb, 0, sizeof(*cb));
3100         cb->evcb_cb_union.evcb_selfcb = fn;
3101         cb->evcb_arg = arg;
3102         cb->evcb_pri = priority;
3103         cb->evcb_closure = EV_CLOSURE_CB_SELF;
3104 }
3105
3106 void
3107 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3108 {
3109         cb->evcb_pri = priority;
3110 }
3111
3112 void
3113 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3114 {
3115         if (!base)
3116                 base = current_base;
3117         event_callback_cancel_(base, cb);
3118 }
3119
3120 #define MAX_DEFERREDS_QUEUED 32
3121 int
3122 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3123 {
3124         int r = 1;
3125         if (!base)
3126                 base = current_base;
3127         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3128         if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3129                 r = event_callback_activate_later_nolock_(base, cb);
3130         } else {
3131                 r = event_callback_activate_nolock_(base, cb);
3132                 if (r) {
3133                         ++base->n_deferreds_queued;
3134                 }
3135         }
3136         EVBASE_RELEASE_LOCK(base, th_base_lock);
3137         return r;
3138 }
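/*
 * Illustrative sketch (not part of this file): how internal consumers such as
 * bufferevents queue a deferred callback with the functions above.  Once more
 * than MAX_DEFERREDS_QUEUED self-callbacks are pending, further ones are
 * pushed to the "active later" queue so they cannot starve I/O handling.
 * 'my_deferred_fn' and 'struct my_state' are hypothetical.
 */
#if 0
static void
my_deferred_fn(struct event_callback *cb, void *arg)
{
	struct my_state *st = arg;	/* hypothetical per-object state */
	(void)cb; (void)st;
}

static void
example_schedule_deferred(struct event_base *base, struct my_state *st)
{
	/* In real code the event_callback is embedded in a long-lived object;
	 * it must stay valid until the callback has run or been cancelled. */
	static struct event_callback cb;
	event_deferred_cb_init_(&cb, 0, my_deferred_fn, st);	/* priority 0 */
	event_deferred_cb_schedule_(base, &cb);
}
#endif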
3139
3140 static int
3141 timeout_next(struct event_base *base, struct timeval **tv_p)
3142 {
3143         /* Caller must hold th_base_lock */
3144         struct timeval now;
3145         struct event *ev;
3146         struct timeval *tv = *tv_p;
3147         int res = 0;
3148
3149         ev = min_heap_top_(&base->timeheap);
3150
3151         if (ev == NULL) {
3152                 /* If no time-based events are active, wait for I/O. */
3153                 *tv_p = NULL;
3154                 goto out;
3155         }
3156
3157         if (gettime(base, &now) == -1) {
3158                 res = -1;
3159                 goto out;
3160         }
3161
3162         if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3163                 evutil_timerclear(tv);
3164                 goto out;
3165         }
3166
3167         evutil_timersub(&ev->ev_timeout, &now, tv);
3168
3169         EVUTIL_ASSERT(tv->tv_sec >= 0);
3170         EVUTIL_ASSERT(tv->tv_usec >= 0);
3171         event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3172
3173 out:
3174         return (res);
3175 }
3176
3177 /* Activate every event whose timeout has elapsed. */
3178 static void
3179 timeout_process(struct event_base *base)
3180 {
3181         /* Caller must hold lock. */
3182         struct timeval now;
3183         struct event *ev;
3184
3185         if (min_heap_empty_(&base->timeheap)) {
3186                 return;
3187         }
3188
3189         gettime(base, &now);
3190
3191         while ((ev = min_heap_top_(&base->timeheap))) {
3192                 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3193                         break;
3194
3195                 /* delete this event from all of its queues */
3196                 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3197
3198                 event_debug(("timeout_process: event: %p, call %p",
3199                          ev, ev->ev_callback));
3200                 event_active_nolock_(ev, EV_TIMEOUT, 1);
3201         }
3202 }
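/*
 * Illustrative sketch (not part of this file): how the two functions above
 * fit into the dispatch loop defined earlier in this file.  timeout_next()
 * bounds how long the backend may wait, and timeout_process() activates
 * whatever expired once the backend returns.  This skeleton is a
 * simplification, not the real event_base_loop() code.
 */
#if 0
static void
example_loop_iteration(struct event_base *base)
{
	struct timeval tv, *tv_p = &tv;

	timeout_next(base, &tv_p);		/* tv_p becomes NULL => wait for I/O only */
	base->evsel->dispatch(base, tv_p);	/* wait for I/O or the next timeout */
	timeout_process(base);			/* activate expired timer events */
	/* ... the active queues are then run before the next iteration ... */
}
#endif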
3203
3204 #ifndef MAX
3205 #define MAX(a,b) (((a)>(b))?(a):(b))
3206 #endif
3207
3208 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3209
3210 /* These are a fancy way to spell
3211      if (!(flags & EVLIST_INTERNAL))
3212          base->event_count--/++;
3213 */
3214 #define DECR_EVENT_COUNT(base,flags) \
3215         ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3216 #define INCR_EVENT_COUNT(base,flags) do {                                       \
3217         ((base)->event_count += !((flags) & EVLIST_INTERNAL));                  \
3218         MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);          \
3219 } while (0)
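/*
 * Illustrative sketch (not part of this file): the counters these macros
 * maintain (event_count, event_count_max, event_count_active, ...) are what
 * the public accessors report; this assumes the event_base_get_num_events()
 * and event_base_get_max_events() interface from event2/event.h.
 */
#if 0
static void
example_report_counts(struct event_base *base)
{
	int added  = event_base_get_num_events(base, EVENT_BASE_COUNT_ADDED);
	int active = event_base_get_num_events(base, EVENT_BASE_COUNT_ACTIVE);
	/* A nonzero third argument clears the stored maximum after reading. */
	int max_added = event_base_get_max_events(base, EVENT_BASE_COUNT_ADDED, 0);
	fprintf(stderr, "added=%d active=%d max added=%d\n",
	    added, active, max_added);
}
#endif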
3220
3221 static void
3222 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3223 {
3224         EVENT_BASE_ASSERT_LOCKED(base);
3225         if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3226                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3227                     ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3228                 return;
3229         }
3230         DECR_EVENT_COUNT(base, ev->ev_flags);
3231         ev->ev_flags &= ~EVLIST_INSERTED;
3232 }
3233 static void
3234 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3235 {
3236         EVENT_BASE_ASSERT_LOCKED(base);
3237         if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3238                 event_errx(1, "%s: %p not on queue %x", __func__,
3239                            evcb, EVLIST_ACTIVE);
3240                 return;
3241         }
3242         DECR_EVENT_COUNT(base, evcb->evcb_flags);
3243         evcb->evcb_flags &= ~EVLIST_ACTIVE;
3244         base->event_count_active--;
3245
3246         TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3247             evcb, evcb_active_next);
3248 }
3249 static void
3250 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3251 {
3252         EVENT_BASE_ASSERT_LOCKED(base);
3253         if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3254                 event_errx(1, "%s: %p not on queue %x", __func__,
3255                            evcb, EVLIST_ACTIVE_LATER);
3256                 return;
3257         }
3258         DECR_EVENT_COUNT(base, evcb->evcb_flags);
3259         evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3260         base->event_count_active--;
3261
3262         TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3263 }
3264 static void
3265 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3266 {
3267         EVENT_BASE_ASSERT_LOCKED(base);
3268         if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3269                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3270                     ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3271                 return;
3272         }
3273         DECR_EVENT_COUNT(base, ev->ev_flags);
3274         ev->ev_flags &= ~EVLIST_TIMEOUT;
3275
3276         if (is_common_timeout(&ev->ev_timeout, base)) {
3277                 struct common_timeout_list *ctl =
3278                     get_common_timeout_list(base, &ev->ev_timeout);
3279                 TAILQ_REMOVE(&ctl->events, ev,
3280                     ev_timeout_pos.ev_next_with_common_timeout);
3281         } else {
3282                 min_heap_erase_(&base->timeheap, ev);
3283         }
3284 }
3285
3286 #ifdef USE_REINSERT_TIMEOUT
3287 /* Remove and reinsert 'ev' into the timeout queue. */
3288 static void
3289 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3290     int was_common, int is_common, int old_timeout_idx)
3291 {
3292         struct common_timeout_list *ctl;
3293         if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3294                 event_queue_insert_timeout(base, ev);
3295                 return;
3296         }
3297
3298         switch ((was_common<<1) | is_common) {
3299         case 3: /* Changing from one common timeout to another */
3300                 ctl = base->common_timeout_queues[old_timeout_idx];
3301                 TAILQ_REMOVE(&ctl->events, ev,
3302                     ev_timeout_pos.ev_next_with_common_timeout);
3303                 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3304                 insert_common_timeout_inorder(ctl, ev);
3305                 break;
3306         case 2: /* Was common; is no longer common */
3307                 ctl = base->common_timeout_queues[old_timeout_idx];
3308                 TAILQ_REMOVE(&ctl->events, ev,
3309                     ev_timeout_pos.ev_next_with_common_timeout);
3310                 min_heap_push_(&base->timeheap, ev);
3311                 break;
3312         case 1: /* Wasn't common; has become common. */
3313                 min_heap_erase_(&base->timeheap, ev);
3314                 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3315                 insert_common_timeout_inorder(ctl, ev);
3316                 break;
3317         case 0: /* was in heap; is still on heap. */
3318                 min_heap_adjust_(&base->timeheap, ev);
3319                 break;
3320         default:
3321                 EVUTIL_ASSERT(0); /* unreachable */
3322                 break;
3323         }
3324 }
3325 #endif
3326
3327 /* Add 'ev' to the common timeout list 'ctl'. */
3328 static void
3329 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3330     struct event *ev)
3331 {
3332         struct event *e;
3333         /* By all logic, we should just be able to append 'ev' to the end of
3334          * ctl->events, since the timeout on each 'ev' is set to {the common
3335          * timeout} + {the time when we add the event}, and so the events
3336          * should arrive in order of their timeouts.  But just in case
3337          * there's some wacky threading issue going on, we do a search from
3338          * the end of the list to find the right insertion point.
3339          */
3340         TAILQ_FOREACH_REVERSE(e, &ctl->events,
3341             event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3342                 /* This timercmp is a little sneaky, since both ev and e have
3343                  * magic values in tv_usec.  Fortunately, they ought to have
3344                  * the _same_ magic values in tv_usec.  Let's assert for that.
3345                  */
3346                 EVUTIL_ASSERT(
3347                         is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3348                 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3349                         TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3350                             ev_timeout_pos.ev_next_with_common_timeout);
3351                         return;
3352                 }
3353         }
3354         TAILQ_INSERT_HEAD(&ctl->events, ev,
3355             ev_timeout_pos.ev_next_with_common_timeout);
3356 }
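/*
 * Illustrative sketch (not part of this file): how an application opts into
 * the common-timeout optimization with the public
 * event_base_init_common_timeout().  Events added with the returned timeval
 * end up on a per-duration list like ctl->events above instead of the
 * min-heap.
 */
#if 0
static void
example_common_timeout(struct event_base *base, struct event **evs, int n)
{
	struct timeval ten_sec = { 10, 0 };
	const struct timeval *common;
	int i;

	/* The result is a "magic" timeval tagging events for the shared list. */
	common = event_base_init_common_timeout(base, &ten_sec);
	for (i = 0; i < n; ++i)
		event_add(evs[i], common);
}
#endif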
3357
3358 static void
3359 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3360 {
3361         EVENT_BASE_ASSERT_LOCKED(base);
3362
3363         if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3364                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3365                     ev, EV_SOCK_ARG(ev->ev_fd));
3366                 return;
3367         }
3368
3369         INCR_EVENT_COUNT(base, ev->ev_flags);
3370
3371         ev->ev_flags |= EVLIST_INSERTED;
3372 }
3373
3374 static void
3375 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3376 {
3377         EVENT_BASE_ASSERT_LOCKED(base);
3378
3379         if (evcb->evcb_flags & EVLIST_ACTIVE) {
3380                 /* Double insertion is possible for active events */
3381                 return;
3382         }
3383
3384         INCR_EVENT_COUNT(base, evcb->evcb_flags);
3385
3386         evcb->evcb_flags |= EVLIST_ACTIVE;
3387
3388         base->event_count_active++;
3389         MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3390         EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3391         TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3392             evcb, evcb_active_next);
3393 }
3394
3395 static void
3396 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3397 {
3398         EVENT_BASE_ASSERT_LOCKED(base);
3399         if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3400                 /* Double insertion is possible */
3401                 return;
3402         }
3403
3404         INCR_EVENT_COUNT(base, evcb->evcb_flags);
3405         evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3406         base->event_count_active++;
3407         MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3408         EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3409         TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3410 }
3411
3412 static void
3413 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3414 {
3415         EVENT_BASE_ASSERT_LOCKED(base);
3416
3417         if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3418                 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3419                     ev, EV_SOCK_ARG(ev->ev_fd));
3420                 return;
3421         }
3422
3423         INCR_EVENT_COUNT(base, ev->ev_flags);
3424
3425         ev->ev_flags |= EVLIST_TIMEOUT;
3426
3427         if (is_common_timeout(&ev->ev_timeout, base)) {
3428                 struct common_timeout_list *ctl =
3429                     get_common_timeout_list(base, &ev->ev_timeout);
3430                 insert_common_timeout_inorder(ctl, ev);
3431         } else {
3432                 min_heap_push_(&base->timeheap, ev);
3433         }
3434 }
3435
3436 static void
3437 event_queue_make_later_events_active(struct event_base *base)
3438 {
3439         struct event_callback *evcb;
3440         EVENT_BASE_ASSERT_LOCKED(base);
3441
3442         while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3443                 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3444                 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3445                 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3446                 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3447                 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3448         }
3449 }
3450
3451 /* Functions for debugging */
3452
3453 const char *
3454 event_get_version(void)
3455 {
3456         return (EVENT__VERSION);
3457 }
3458
3459 ev_uint32_t
3460 event_get_version_number(void)
3461 {
3462         return (EVENT__NUMERIC_VERSION);
3463 }
3464
3465 /*
3466  * No thread-safe interface needed - the information should be the same
3467  * for all threads.
3468  */
3469
3470 const char *
3471 event_get_method(void)
3472 {
3473         return (current_base->evsel->name);
3474 }
3475
3476 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3477 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3478 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3479 static void (*mm_free_fn_)(void *p) = NULL;
3480
3481 void *
3482 event_mm_malloc_(size_t sz)
3483 {
3484         if (sz == 0)
3485                 return NULL;
3486
3487         if (mm_malloc_fn_)
3488                 return mm_malloc_fn_(sz);
3489         else
3490                 return malloc(sz);
3491 }
3492
3493 void *
3494 event_mm_calloc_(size_t count, size_t size)
3495 {
3496         if (count == 0 || size == 0)
3497                 return NULL;
3498
3499         if (mm_malloc_fn_) {
3500                 size_t sz = count * size;
3501                 void *p = NULL;
3502                 if (count > EV_SIZE_MAX / size)
3503                         goto error;
3504                 p = mm_malloc_fn_(sz);
3505                 if (p)
3506                         return memset(p, 0, sz);
3507         } else {
3508                 void *p = calloc(count, size);
3509 #ifdef _WIN32
3510                 /* Windows calloc doesn't reliably set ENOMEM */
3511                 if (p == NULL)
3512                         goto error;
3513 #endif
3514                 return p;
3515         }
3516
3517 error:
3518         errno = ENOMEM;
3519         return NULL;
3520 }
3521
3522 char *
3523 event_mm_strdup_(const char *str)
3524 {
3525         if (!str) {
3526                 errno = EINVAL;
3527                 return NULL;
3528         }
3529
3530         if (mm_malloc_fn_) {
3531                 size_t ln = strlen(str);
3532                 void *p = NULL;
3533                 if (ln == EV_SIZE_MAX)
3534                         goto error;
3535                 p = mm_malloc_fn_(ln+1);
3536                 if (p)
3537                         return memcpy(p, str, ln+1);
3538         } else
3539 #ifdef _WIN32
3540                 return _strdup(str);
3541 #else
3542                 return strdup(str);
3543 #endif
3544
3545 error:
3546         errno = ENOMEM;
3547         return NULL;
3548 }
3549
3550 void *
3551 event_mm_realloc_(void *ptr, size_t sz)
3552 {
3553         if (mm_realloc_fn_)
3554                 return mm_realloc_fn_(ptr, sz);
3555         else
3556                 return realloc(ptr, sz);
3557 }
3558
3559 void
3560 event_mm_free_(void *ptr)
3561 {
3562         if (mm_free_fn_)
3563                 mm_free_fn_(ptr);
3564         else
3565                 free(ptr);
3566 }
3567
3568 void
3569 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3570                         void *(*realloc_fn)(void *ptr, size_t sz),
3571                         void (*free_fn)(void *ptr))
3572 {
3573         mm_malloc_fn_ = malloc_fn;
3574         mm_realloc_fn_ = realloc_fn;
3575         mm_free_fn_ = free_fn;
3576 }
3577 #endif
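/*
 * Illustrative sketch (not part of this file): replacing the allocator must
 * happen before any other libevent call, since memory obtained from one
 * malloc must never be handed to another free.  The counting wrappers below
 * are hypothetical.
 */
#if 0
static size_t example_alloc_calls;

static void *example_malloc(size_t sz)           { ++example_alloc_calls; return malloc(sz); }
static void *example_realloc(void *p, size_t sz) { return realloc(p, sz); }
static void  example_free(void *p)               { free(p); }

static void
example_install_allocator(void)
{
	/* Call once, before event_base_new() or any other libevent function. */
	event_set_mem_functions(example_malloc, example_realloc, example_free);
}
#endif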
3578
3579 #ifdef EVENT__HAVE_EVENTFD
3580 static void
3581 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3582 {
3583         ev_uint64_t msg;
3584         ev_ssize_t r;
3585         struct event_base *base = arg;
3586
3587         r = read(fd, (void*) &msg, sizeof(msg));
3588         if (r<0 && errno != EAGAIN) {
3589                 event_sock_warn(fd, "Error reading from eventfd");
3590         }
3591         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3592         base->is_notify_pending = 0;
3593         EVBASE_RELEASE_LOCK(base, th_base_lock);
3594 }
3595 #endif
3596
3597 static void
3598 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3599 {
3600         unsigned char buf[1024];
3601         struct event_base *base = arg;
3602 #ifdef _WIN32
3603         while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3604                 ;
3605 #else
3606         while (read(fd, (char*)buf, sizeof(buf)) > 0)
3607                 ;
3608 #endif
3609
3610         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3611         base->is_notify_pending = 0;
3612         EVBASE_RELEASE_LOCK(base, th_base_lock);
3613 }
3614
3615 int
3616 evthread_make_base_notifiable(struct event_base *base)
3617 {
3618         int r;
3619         if (!base)
3620                 return -1;
3621
3622         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3623         r = evthread_make_base_notifiable_nolock_(base);
3624         EVBASE_RELEASE_LOCK(base, th_base_lock);
3625         return r;
3626 }
3627
3628 static int
3629 evthread_make_base_notifiable_nolock_(struct event_base *base)
3630 {
3631         void (*cb)(evutil_socket_t, short, void *);
3632         int (*notify)(struct event_base *);
3633
3634         if (base->th_notify_fn != NULL) {
3635                 /* The base is already notifiable: we're doing fine. */
3636                 return 0;
3637         }
3638
3639 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3640         if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3641                 base->th_notify_fn = event_kq_notify_base_;
3642                 /* No need to add an event here; the backend can wake
3643                  * itself up just fine. */
3644                 return 0;
3645         }
3646 #endif
3647
3648 #ifdef EVENT__HAVE_EVENTFD
3649         base->th_notify_fd[0] = evutil_eventfd_(0,
3650             EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3651         if (base->th_notify_fd[0] >= 0) {
3652                 base->th_notify_fd[1] = -1;
3653                 notify = evthread_notify_base_eventfd;
3654                 cb = evthread_notify_drain_eventfd;
3655         } else
3656 #endif
3657         if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3658                 notify = evthread_notify_base_default;
3659                 cb = evthread_notify_drain_default;
3660         } else {
3661                 return -1;
3662         }
3663
3664         base->th_notify_fn = notify;
3665
3666         /* prepare an event that we can use for wakeup */
3667         event_assign(&base->th_notify, base, base->th_notify_fd[0],
3668                                  EV_READ|EV_PERSIST, cb, base);
3669
3670         /* we need to mark this as an internal event */
3671         base->th_notify.ev_flags |= EVLIST_INTERNAL;
3672         event_priority_set(&base->th_notify, 0);
3673
3674         return event_add_nolock_(&base->th_notify, NULL, 0);
3675 }
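/*
 * Illustrative sketch (not part of this file): a threaded setup.  Enabling
 * the threading API before creating the base makes it lockable; afterwards,
 * calls such as event_active() or event_add() from other threads use the
 * notification mechanism configured above to wake the loop.
 * evthread_use_pthreads() is declared in event2/thread.h and requires
 * linking against event_pthreads.
 */
#if 0
static void
example_threaded_base(void)
{
	struct event_base *base;

	if (evthread_use_pthreads() < 0)
		return;
	/* Bases created after the threading functions are set are lockable. */
	base = event_base_new();
	/* Usually done implicitly once locking is enabled; shown for clarity. */
	evthread_make_base_notifiable(base);
	/* ... run event_base_dispatch(base) in one thread while others add
	 * and activate events on it ... */
	event_base_free(base);
}
#endif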
3676
3677 int
3678 event_base_foreach_event_nolock_(struct event_base *base,
3679     event_base_foreach_event_cb fn, void *arg)
3680 {
3681         int r, i;
3682         size_t u;
3683         struct event *ev;
3684
3685         /* Start out with all the EVLIST_INSERTED events. */
3686         if ((r = evmap_foreach_event_(base, fn, arg)))
3687                 return r;
3688
3689         /* Okay, now we deal with those events that have timeouts and are in
3690          * the min-heap. */
3691         for (u = 0; u < base->timeheap.n; ++u) {
3692                 ev = base->timeheap.p[u];
3693                 if (ev->ev_flags & EVLIST_INSERTED) {
3694                         /* we already processed this one */
3695                         continue;
3696                 }
3697                 if ((r = fn(base, ev, arg)))
3698                         return r;
3699         }
3700
3701         /* Now for the events in one of the common-timeout
3702          * queues. */
3703         for (i = 0; i < base->n_common_timeouts; ++i) {
3704                 struct common_timeout_list *ctl =
3705                     base->common_timeout_queues[i];
3706                 TAILQ_FOREACH(ev, &ctl->events,
3707                     ev_timeout_pos.ev_next_with_common_timeout) {
3708                         if (ev->ev_flags & EVLIST_INSERTED) {
3709                                 /* we already processed this one */
3710                                 continue;
3711                         }
3712                         if ((r = fn(base, ev, arg)))
3713                                 return r;
3714                 }
3715         }
3716
3717         /* Finally, we deal with all the active events that we haven't
3718          * touched yet. */
3719         for (i = 0; i < base->nactivequeues; ++i) {
3720                 struct event_callback *evcb;
3721                 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3722                         if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3723                                 /* This isn't an event (EVLIST_INIT clear), or
3724                                  * we already processed it (EVLIST_INSERTED or
3725                                  * EVLIST_TIMEOUT set). */
3726                                 continue;
3727                         }
3728                         ev = event_callback_to_event(evcb);
3729                         if ((r = fn(base, ev, arg)))
3730                                 return r;
3731                 }
3732         }
3733
3734         return 0;
3735 }
3736
3737 /* Helper for event_base_dump_events: called on each event in the event base;
3738  * dumps only the inserted events. */
3739 static int
3740 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3741 {
3742         FILE *output = arg;
3743         const char *gloss = (e->ev_events & EV_SIGNAL) ?
3744             "sig" : "fd ";
3745
3746         if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3747                 return 0;
3748
3749         fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3750             (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3751             (e->ev_events&EV_READ)?" Read":"",
3752             (e->ev_events&EV_WRITE)?" Write":"",
3753             (e->ev_events&EV_CLOSED)?" EOF":"",
3754             (e->ev_events&EV_SIGNAL)?" Signal":"",
3755             (e->ev_events&EV_PERSIST)?" Persist":"",
3756             (e->ev_events&EV_ET)?" ET":"",
3757             (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3758         if (e->ev_flags & EVLIST_TIMEOUT) {
3759                 struct timeval tv;
3760                 tv.tv_sec = e->ev_timeout.tv_sec;
3761                 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3762                 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3763                 fprintf(output, " Timeout=%ld.%06d",
3764                     (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3765         }
3766         fputc('\n', output);
3767
3768         return 0;
3769 }
3770
3771 /* Helper for event_base_dump_events: called on each event in the event base;
3772  * dumps only the active events. */
3773 static int
3774 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3775 {
3776         FILE *output = arg;
3777         const char *gloss = (e->ev_events & EV_SIGNAL) ?
3778             "sig" : "fd ";
3779
3780         if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3781                 return 0;
3782
3783         fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3784             (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3785             (e->ev_res&EV_READ)?" Read":"",
3786             (e->ev_res&EV_WRITE)?" Write":"",
3787             (e->ev_res&EV_CLOSED)?" EOF":"",
3788             (e->ev_res&EV_SIGNAL)?" Signal":"",
3789             (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3790             (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3791             (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3792
3793         return 0;
3794 }
3795
3796 int
3797 event_base_foreach_event(struct event_base *base,
3798     event_base_foreach_event_cb fn, void *arg)
3799 {
3800         int r;
3801         if ((!fn) || (!base)) {
3802                 return -1;
3803         }
3804         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3805         r = event_base_foreach_event_nolock_(base, fn, arg);
3806         EVBASE_RELEASE_LOCK(base, th_base_lock);
3807         return r;
3808 }
3809
3810
3811 void
3812 event_base_dump_events(struct event_base *base, FILE *output)
3813 {
3814         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3815         fprintf(output, "Inserted events:\n");
3816         event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3817
3818         fprintf(output, "Active events:\n");
3819         event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3820         EVBASE_RELEASE_LOCK(base, th_base_lock);
3821 }
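/*
 * Illustrative sketch (not part of this file): using the public iteration
 * and dump entry points from application code, e.g. from a debugging hook.
 * The counting callback is hypothetical.
 */
#if 0
static int
example_count_persist_cb(const struct event_base *base, const struct event *e,
    void *arg)
{
	int *count = arg;
	(void)base;
	if (event_get_events(e) & EV_PERSIST)
		++*count;
	return 0;	/* a nonzero return stops the iteration early */
}

static void
example_debug_dump(struct event_base *base)
{
	int persist = 0;
	event_base_foreach_event(base, example_count_persist_cb, &persist);
	fprintf(stderr, "persistent events: %d\n", persist);
	event_base_dump_events(base, stderr);	/* inserted + active events */
}
#endif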
3822
3823 void
3824 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3825 {
3826         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3827
3828         /* Activate any non-timer events. */
3829         if (!(events & EV_TIMEOUT)) {
3830                 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3831         } else {
3832                 /* If we want to activate timer events, loop and activate each event with
3833                  * the same fd in both the timeheap and the common-timeout lists. */
3834                 int i;
3835                 size_t u;
3836                 struct event *ev;
3837
3838                 for (u = 0; u < base->timeheap.n; ++u) {
3839                         ev = base->timeheap.p[u];
3840                         if (ev->ev_fd == fd) {
3841                                 event_active_nolock_(ev, EV_TIMEOUT, 1);
3842                         }
3843                 }
3844
3845                 for (i = 0; i < base->n_common_timeouts; ++i) {
3846                         struct common_timeout_list *ctl = base->common_timeout_queues[i];
3847                         TAILQ_FOREACH(ev, &ctl->events,
3848                                 ev_timeout_pos.ev_next_with_common_timeout) {
3849                                 if (ev->ev_fd == fd) {
3850                                         event_active_nolock_(ev, EV_TIMEOUT, 1);
3851                                 }
3852                         }
3853                 }
3854         }
3855
3856         EVBASE_RELEASE_LOCK(base, th_base_lock);
3857 }
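/*
 * Illustrative sketch (not part of this file): event_base_active_by_fd() and
 * event_base_active_by_signal() let an application simulate readiness, which
 * is useful in tests or when data for an fd arrives via a side channel.
 */
#if 0
static void
example_fake_readiness(struct event_base *base, evutil_socket_t fd)
{
	event_base_active_by_fd(base, fd, EV_READ);	/* all EV_READ events on fd */
	event_base_active_by_fd(base, fd, EV_TIMEOUT);	/* timer events bound to fd */
	event_base_active_by_signal(base, SIGINT);	/* all events for SIGINT */
}
#endif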
3858
3859 void
3860 event_base_active_by_signal(struct event_base *base, int sig)
3861 {
3862         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3863         evmap_signal_active_(base, sig, 1);
3864         EVBASE_RELEASE_LOCK(base, th_base_lock);
3865 }
3866
3867
3868 void
3869 event_base_add_virtual_(struct event_base *base)
3870 {
3871         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3872         base->virtual_event_count++;
3873         MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3874         EVBASE_RELEASE_LOCK(base, th_base_lock);
3875 }
3876
3877 void
3878 event_base_del_virtual_(struct event_base *base)
3879 {
3880         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3881         EVUTIL_ASSERT(base->virtual_event_count > 0);
3882         base->virtual_event_count--;
3883         if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3884                 evthread_notify_base(base);
3885         EVBASE_RELEASE_LOCK(base, th_base_lock);
3886 }
3887
3888 static void
3889 event_free_debug_globals_locks(void)
3890 {
3891 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3892 #ifndef EVENT__DISABLE_DEBUG_MODE
3893         if (event_debug_map_lock_ != NULL) {
3894                 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3895                 event_debug_map_lock_ = NULL;
3896                 evthreadimpl_disable_lock_debugging_();
3897         }
3898 #endif /* EVENT__DISABLE_DEBUG_MODE */
3899 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3900         return;
3901 }
3902
3903 static void
3904 event_free_debug_globals(void)
3905 {
3906         event_free_debug_globals_locks();
3907 }
3908
3909 static void
3910 event_free_evsig_globals(void)
3911 {
3912         evsig_free_globals_();
3913 }
3914
3915 static void
3916 event_free_evutil_globals(void)
3917 {
3918         evutil_free_globals_();
3919 }
3920
3921 static void
3922 event_free_globals(void)
3923 {
3924         event_free_debug_globals();
3925         event_free_evsig_globals();
3926         event_free_evutil_globals();
3927 }
3928
3929 void
3930 libevent_global_shutdown(void)
3931 {
3932         event_disable_debug_mode();
3933         event_free_globals();
3934 }
3935
3936 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3937 int
3938 event_global_setup_locks_(const int enable_locks)
3939 {
3940 #ifndef EVENT__DISABLE_DEBUG_MODE
3941         EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3942 #endif
3943         if (evsig_global_setup_locks_(enable_locks) < 0)
3944                 return -1;
3945         if (evutil_global_setup_locks_(enable_locks) < 0)
3946                 return -1;
3947         if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3948                 return -1;
3949         return 0;
3950 }
3951 #endif
3952
3953 void
3954 event_base_assert_ok_(struct event_base *base)
3955 {
3956         EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3957         event_base_assert_ok_nolock_(base);
3958         EVBASE_RELEASE_LOCK(base, th_base_lock);
3959 }
3960
3961 void
3962 event_base_assert_ok_nolock_(struct event_base *base)
3963 {
3964         int i;
3965         size_t u;
3966         int count;
3967
3968         /* First do checks on the per-fd and per-signal lists */
3969         evmap_check_integrity_(base);
3970
3971         /* Check the heap property */
3972         for (u = 1; u < base->timeheap.n; ++u) {
3973                 size_t parent = (u - 1) / 2;
3974                 struct event *ev, *p_ev;
3975                 ev = base->timeheap.p[u];
3976                 p_ev = base->timeheap.p[parent];
3977                 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3978                 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3979                 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == u);
3980         }
3981
3982         /* Check that the common timeouts are fine */
3983         for (i = 0; i < base->n_common_timeouts; ++i) {
3984                 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3985                 struct event *last=NULL, *ev;
3986
3987                 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3988
3989                 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3990                         if (last)
3991                                 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3992                         EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3993                         EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3994                         EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3995                         last = ev;
3996                 }
3997         }
3998
3999         /* Check the active queues. */
4000         count = 0;
4001         for (i = 0; i < base->nactivequeues; ++i) {
4002                 struct event_callback *evcb;
4003                 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4004                 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4005                         EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4006                         EVUTIL_ASSERT(evcb->evcb_pri == i);
4007                         ++count;
4008                 }
4009         }
4010
4011         {
4012                 struct event_callback *evcb;
4013                 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4014                         EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4015                         ++count;
4016                 }
4017         }
4018         EVUTIL_ASSERT(count == base->event_count_active);
4019 }