 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "event2/event-config.h"
#include "evconfig-private.h"

#define WIN32_LEAN_AND_MEAN
#undef WIN32_LEAN_AND_MEAN

#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#ifdef EVENT__HAVE_UNISTD_H
#ifdef EVENT__HAVE_FCNTL_H

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"

#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
extern const struct eventop win32ops;

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
#ifdef EVENT__HAVE_WORKING_KQUEUE
#ifdef EVENT__HAVE_EPOLL
#ifdef EVENT__HAVE_DEVPOLL
#ifdef EVENT__HAVE_POLL
#ifdef EVENT__HAVE_SELECT

/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

static void *event_self_cbarg_ptr_ = NULL;

static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);

static int event_haveevents(struct event_base *);
static int event_process_active(struct event_base *);
static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer. */
struct event_debug_entry {
    HT_ENTRY(event_debug_entry) node;
    const struct event *ptr;

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
    /* We need to do this silliness to convince compilers that we
     * honestly mean to cast e->ptr to an integer, and discard any
     * part of it that doesn't fit in an unsigned. */
    unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
    /* Our hashtable implementation is pretty sensitive to low bits,
     * and every struct event is over 64 bytes in size, so we can
     * just say >>6. */
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
    return a->ptr == b->ptr;

int event_debug_mode_on_ = 0;

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief Debug-mode variable, set whenever any function or structure that
 * must be shared across threads is created (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable is checked; if it is
 * set to something other than zero, the evthread setup functions were
 * called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)

/* record that ev is now setup (that is, ready for an add) */
static void event_debug_note_setup_(const struct event *ev)
    struct event_debug_entry *dent, find;

    if (!event_debug_mode_on_)

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    dent = HT_FIND(event_debug_map, &global_debug_map, &find);
        dent = mm_malloc(sizeof(*dent));
            "Out of memory in debugging code");
        HT_INSERT(event_debug_map, &global_debug_map, dent);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

    event_debug_mode_too_late = 1;

/* record that ev is no longer setup */
static void event_debug_note_teardown_(const struct event *ev)
    struct event_debug_entry *dent, find;

    if (!event_debug_mode_on_)

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

    event_debug_mode_too_late = 1;
/* record that ev is now added */
static void event_debug_note_add_(const struct event *ev)
    struct event_debug_entry *dent, find;

    if (!event_debug_mode_on_)

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    dent = HT_FIND(event_debug_map, &global_debug_map, &find);
        event_errx(EVENT_ERR_ABORT_,
            "%s: noting an add on a non-setup event %p"
            " (events: 0x%x, fd: "EV_SOCK_FMT
            __func__, ev, ev->ev_events,
            EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

    event_debug_mode_too_late = 1;

/* record that ev is no longer added */
static void event_debug_note_del_(const struct event *ev)
    struct event_debug_entry *dent, find;

    if (!event_debug_mode_on_)

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    dent = HT_FIND(event_debug_map, &global_debug_map, &find);
        event_errx(EVENT_ERR_ABORT_,
            "%s: noting a del on a non-setup event %p"
            " (events: 0x%x, fd: "EV_SOCK_FMT
            __func__, ev, ev->ev_events,
            EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

    event_debug_mode_too_late = 1;

/* assert that ev is setup (i.e., okay to add or inspect) */
static void event_debug_assert_is_setup_(const struct event *ev)
    struct event_debug_entry *dent, find;

    if (!event_debug_mode_on_)

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    dent = HT_FIND(event_debug_map, &global_debug_map, &find);
        event_errx(EVENT_ERR_ABORT_,
            "%s called on a non-initialized event %p"
            " (events: 0x%x, fd: "EV_SOCK_FMT
            __func__, ev, ev->ev_events,
            EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

/* assert that ev is not added (i.e., okay to tear down or set up again) */
static void event_debug_assert_not_added_(const struct event *ev)
    struct event_debug_entry *dent, find;

    if (!event_debug_mode_on_)

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    dent = HT_FIND(event_debug_map, &global_debug_map, &find);
    if (dent && dent->added) {
        event_errx(EVENT_ERR_ABORT_,
            "%s called on an already added event %p"
            " (events: 0x%x, fd: "EV_SOCK_FMT", "
            __func__, ev, ev->ev_events,
            EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
    if (!event_debug_mode_on_)

    if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
        EVUTIL_ASSERT(flags & O_NONBLOCK);

static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }

#define EVENT_BASE_ASSERT_LOCKED(base) \
    EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure. */
gettime(struct event_base *base, struct timeval *tp)
    EVENT_BASE_ASSERT_LOCKED(base);

    if (base->tv_cache.tv_sec) {
        *tp = base->tv_cache;

    if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {

    if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
        evutil_gettimeofday(&tv, NULL);
        evutil_timersub(&tv, tp, &base->tv_clock_diff);
        base->last_updated_clock_diff = tp->tv_sec;

event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
        return evutil_gettimeofday(tv, NULL);

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (base->tv_cache.tv_sec == 0) {
        r = evutil_gettimeofday(tv, NULL);
        evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
    EVBASE_RELEASE_LOCK(base, th_base_lock);

/** Make 'base' have no current cached time. */
clear_time_cache(struct event_base *base)
    base->tv_cache.tv_sec = 0;

/** Replace the cached time in 'base' with the current time. */
update_time_cache(struct event_base *base)
    base->tv_cache.tv_sec = 0;
    if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
        gettime(base, &base->tv_cache);

event_base_update_cache_time(struct event_base *base)
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (base->running_loop)
        update_time_cache(base);
    EVBASE_RELEASE_LOCK(base, th_base_lock);
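
/*
 * Editorial note: the cached-time helpers above exist so that callbacks can
 * ask "what time is it?" cheaply while the loop is running.  A minimal,
 * hypothetical usage sketch (not part of this file; assumes 'base' is
 * currently dispatching):
 *
 *     #include <event2/event.h>
 *     #include <stdio.h>
 *
 *     static void
 *     cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *         struct event_base *base = arg;
 *         struct timeval tv;
 *         // Returns the time cached at the start of this loop iteration
 *         // (or the real time if no cache is set); much cheaper than a
 *         // syscall per callback.
 *         if (event_base_gettimeofday_cached(base, &tv) == 0)
 *             printf("now-ish: %ld.%06ld\n",
 *                 (long)tv.tv_sec, (long)tv.tv_usec);
 *     }
 */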
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
    EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
    return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);

static inline struct event_callback *
event_to_event_callback(struct event *ev)
    return &ev->ev_evcallback;

    struct event_base *base = event_base_new_with_config(NULL);
        event_errx(1, "%s: Unable to construct event_base", __func__);

    struct event_base *base = NULL;
    struct event_config *cfg = event_config_new();
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
event_config_is_avoided_method(const struct event_config *cfg,
    struct event_config_entry *entry;

    TAILQ_FOREACH(entry, &cfg->entries, next) {
        if (entry->avoid_method != NULL &&
            strcmp(entry->avoid_method, method) == 0)

/** Return true iff 'method' is disabled according to the environment. */
event_is_method_disabled(const char *name)
    char environment[64];

    evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
    for (i = 8; environment[i] != '\0'; ++i)
        environment[i] = EVUTIL_TOUPPER_(environment[i]);
    /* Note that evutil_getenv_() ignores the environment entirely if
     * we're setuid */
    return (evutil_getenv_(environment) != NULL);
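
/*
 * Editorial note: this is the mechanism behind the EVENT_NO* environment
 * variables.  A hypothetical sketch (not part of this file) of steering
 * backend selection from the environment before creating a base:
 *
 *     #include <event2/event.h>
 *     #include <stdio.h>
 *     #include <stdlib.h>
 *
 *     int main(void)
 *     {
 *         // Forbid the epoll backend; libevent falls back to the next
 *         // backend in its preference order (e.g. poll).
 *         setenv("EVENT_NOEPOLL", "1", 1);
 *         struct event_base *base = event_base_new();
 *         if (base) {
 *             printf("using: %s\n", event_base_get_method(base));
 *             event_base_free(base);
 *         }
 *         return 0;
 *     }
 */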
event_base_get_features(const struct event_base *base)
    return base->evsel->features;

event_enable_debug_mode(void)
#ifndef EVENT__DISABLE_DEBUG_MODE
    if (event_debug_mode_on_)
        event_errx(1, "%s was called twice!", __func__);
    if (event_debug_mode_too_late)
        event_errx(1, "%s must be called *before* creating any events "
            "or event_bases", __func__);

    event_debug_mode_on_ = 1;

    HT_INIT(event_debug_map, &global_debug_map);

event_disable_debug_mode(void)
#ifndef EVENT__DISABLE_DEBUG_MODE
    struct event_debug_entry **ent, *victim;

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
        ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
    HT_CLEAR(event_debug_map, &global_debug_map);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

    event_debug_mode_on_ = 0;
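
/*
 * Editorial note: debug mode must be switched on before any event or
 * event_base exists; otherwise event_debug_mode_too_late aborts the call.
 * A minimal hypothetical sketch (not part of this file):
 *
 *     #include <event2/event.h>
 *
 *     int main(void)
 *     {
 *         event_enable_debug_mode();  // must precede event_base_new()
 *         struct event_base *base = event_base_new();
 *         // Misuse of non-setup or double-added events now aborts
 *         // loudly instead of corrupting memory.
 *         event_base_free(base);
 *         return 0;
 *     }
 */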
event_base_new_with_config(const struct event_config *cfg)
    struct event_base *base;
    int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
    event_debug_mode_too_late = 1;

    if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
        event_warn("%s: calloc", __func__);

        base->flags = cfg->flags;

    should_check_environment =
        !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

            cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);

        if (should_check_environment && !precise_time) {
            precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
                base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;

        flags = precise_time ? EV_MONOT_PRECISE : 0;
        evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

    min_heap_ctor_(&base->timeheap);

    base->sig.ev_signal_pair[0] = -1;
    base->sig.ev_signal_pair[1] = -1;
    base->th_notify_fd[0] = -1;
    base->th_notify_fd[1] = -1;

    TAILQ_INIT(&base->active_later_queue);

    evmap_io_initmap_(&base->io);
    evmap_signal_initmap_(&base->sigmap);
    event_changelist_init_(&base->changelist);

        memcpy(&base->max_dispatch_time,
            &cfg->max_dispatch_interval, sizeof(struct timeval));
        base->limit_callbacks_after_prio =
            cfg->limit_callbacks_after_prio;
        base->max_dispatch_time.tv_sec = -1;
        base->limit_callbacks_after_prio = 1;
    if (cfg && cfg->max_dispatch_callbacks >= 0) {
        base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
        base->max_dispatch_callbacks = INT_MAX;
    if (base->max_dispatch_callbacks == INT_MAX &&
        base->max_dispatch_time.tv_sec == -1)
        base->limit_callbacks_after_prio = INT_MAX;

    for (i = 0; eventops[i] && !base->evbase; i++) {
        /* determine if this backend should be avoided */
        if (event_config_is_avoided_method(cfg,
        if ((eventops[i]->features & cfg->require_features)
            != cfg->require_features)

        /* also obey the environment variables */
        if (should_check_environment &&
            event_is_method_disabled(eventops[i]->name))

        base->evsel = eventops[i];

        base->evbase = base->evsel->init(base);

    if (base->evbase == NULL) {
        event_warnx("%s: no event mechanism available",
        event_base_free(base);

    if (evutil_getenv_("EVENT_SHOW_METHOD"))
        event_msgx("libevent using: %s", base->evsel->name);

    /* allocate a single active event queue */
    if (event_base_priority_init(base, 1) < 0) {
        event_base_free(base);

    /* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
    event_debug_created_threadable_ctx_ = 1;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
    if (EVTHREAD_LOCKING_ENABLED() &&
        (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
        EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
        EVTHREAD_ALLOC_COND(base->current_event_cond);
        r = evthread_make_base_notifiable(base);
            event_warnx("%s: Unable to make base notifiable.", __func__);
            event_base_free(base);

    if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
        event_base_start_iocp_(base, cfg->n_cpus_hint);
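
/*
 * Editorial note: a hypothetical sketch (not part of this file) of building
 * a base from an event_config, exercising the knobs consulted above:
 *
 *     #include <event2/event.h>
 *
 *     struct event_base *
 *     make_base(void)
 *     {
 *         struct event_base *base = NULL;
 *         struct event_config *cfg = event_config_new();
 *         if (!cfg)
 *             return NULL;
 *         event_config_avoid_method(cfg, "select");
 *         event_config_require_features(cfg, EV_FEATURE_O1);
 *         event_config_set_flag(cfg, EVENT_BASE_FLAG_PRECISE_TIMER);
 *         base = event_base_new_with_config(cfg);  // NULL if nothing fits
 *         event_config_free(cfg);
 *         return base;
 *     }
 */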
event_base_start_iocp_(struct event_base *base, int n_cpus)
    base->iocp = event_iocp_port_launch_(n_cpus);
        event_warnx("%s: Couldn't launch IOCP", __func__);

event_base_stop_iocp_(struct event_base *base)
    rv = event_iocp_shutdown_(base->iocp, -1);
    EVUTIL_ASSERT(rv >= 0);

event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    if (evcb->evcb_flags & EVLIST_INIT) {
        struct event *ev = event_callback_to_event(evcb);
        if (!(ev->ev_flags & EVLIST_INTERNAL)) {
            event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
        EVBASE_ACQUIRE_LOCK(base, th_base_lock);
        event_callback_cancel_nolock_(base, evcb, 1);
        EVBASE_RELEASE_LOCK(base, th_base_lock);

    if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
        switch (evcb->evcb_closure) {
        case EV_CLOSURE_EVENT_FINALIZE:
        case EV_CLOSURE_EVENT_FINALIZE_FREE: {
            struct event *ev = event_callback_to_event(evcb);
            ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
            if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
        case EV_CLOSURE_CB_FINALIZE:
            evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);

static int event_base_free_queues_(struct event_base *base, int run_finalizers)
    for (i = 0; i < base->nactivequeues; ++i) {
        struct event_callback *evcb, *next;
        for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
            next = TAILQ_NEXT(evcb, evcb_active_next);
            deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);

        struct event_callback *evcb;
        while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
            deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);

event_base_free_(struct event_base *base, int run_finalizers)
    /* XXXX grab the lock? If there is contention when one thread frees
     * the base, then the contending thread will be very sad soon. */

    /* event_base_free(NULL) is how to free the current_base if we
     * made it with event_init and forgot to hold a reference to it. */
    if (base == NULL && current_base)
    /* Don't actually free NULL. */
        event_warnx("%s: no base to free", __func__);

    /* XXX(niels) - check for internal events first */
        event_base_stop_iocp_(base);

    /* threading fds if we have them */
    if (base->th_notify_fd[0] != -1) {
        event_del(&base->th_notify);
        EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
        if (base->th_notify_fd[1] != -1)
            EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
        base->th_notify_fd[0] = -1;
        base->th_notify_fd[1] = -1;
        event_debug_unassign(&base->th_notify);

    /* Delete all non-internal events. */
    evmap_delete_all_(base);

    while ((ev = min_heap_top_(&base->timeheap)) != NULL) {

    for (i = 0; i < base->n_common_timeouts; ++i) {
        struct common_timeout_list *ctl =
            base->common_timeout_queues[i];
        event_del(&ctl->timeout_event); /* Internal; doesn't count */
        event_debug_unassign(&ctl->timeout_event);
        for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
            struct event *next = TAILQ_NEXT(ev,
                ev_timeout_pos.ev_next_with_common_timeout);
            if (!(ev->ev_flags & EVLIST_INTERNAL)) {

    if (base->common_timeout_queues)
        mm_free(base->common_timeout_queues);
    /* A finalizer can register yet another finalizer from within itself,
     * and if that new finalizer lands in active_later_queue we can end up
     * adding it to activequeues, leaving events in activequeues after this
     * function returns, which is not what we want (we even have an
     * assertion for this).
     *
     * A simple case is a bufferevent with an underlying bufferevent
     * (i.e. filters). */
        int i = event_base_free_queues_(base, run_finalizers);
        event_debug(("%s: %d events freed", __func__, i));

    event_debug(("%s: %d events were still set in base",
        __func__, n_deleted));

    while (LIST_FIRST(&base->once_events)) {
        struct event_once *eonce = LIST_FIRST(&base->once_events);
        LIST_REMOVE(eonce, next_once);

    if (base->evsel != NULL && base->evsel->dealloc != NULL)
        base->evsel->dealloc(base);

    for (i = 0; i < base->nactivequeues; ++i)
        EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

    EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
    min_heap_dtor_(&base->timeheap);

    mm_free(base->activequeues);

    evmap_io_clear_(&base->io);
    evmap_signal_clear_(&base->sigmap);
    event_changelist_freemem_(&base->changelist);

    EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
    EVTHREAD_FREE_COND(base->current_event_cond);

    /* If we're freeing current_base, there won't be a current_base. */
    if (base == current_base)

event_base_free_nofinalize(struct event_base *base)
    event_base_free_(base, 0);

event_base_free(struct event_base *base)
    event_base_free_(base, 1);

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend. */
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)

const struct eventop nil_eventop = {
    NULL, /* init: unused. */
    NULL, /* add: unused. */
    nil_backend_del, /* del: used, so needs to be killed. */
    NULL, /* dispatch: unused. */
    NULL, /* dealloc: unused. */

/* reinitialize the event base after a fork */
event_reinit(struct event_base *base)
    const struct eventop *evsel;
    int was_notifiable = 0;
    int had_signal_added = 0;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (base->running_loop) {
        event_warnx("%s: forked from the event_loop.", __func__);

    /* check if this event mechanism requires reinit on the backend */
    if (evsel->need_reinit) {
        /* We're going to call event_del() on our notify events (the
         * ones that tell about signals and wakeup events).  But we
         * don't actually want to tell the backend to change its
         * state, since it might still share some resource (a kqueue,
         * an epoll fd) with the parent process, and we don't want to
         * delete the fds from _that_ backend, so we temporarily stub
         * out the evsel with a replacement. */
        base->evsel = &nil_eventop;

    /* We need to re-create the signal-notification fd and the
     * thread-notification fd.  Otherwise, we'll still share those with
     * the parent process, which would make any notification sent to them
     * get received by one or both of the event loops, more or less at
     * random. */
    if (base->sig.ev_signal_added) {
        event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
        event_debug_unassign(&base->sig.ev_signal);
        memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
        had_signal_added = 1;
        base->sig.ev_signal_added = 0;
    if (base->sig.ev_signal_pair[0] != -1)
        EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
    if (base->sig.ev_signal_pair[1] != -1)
        EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
    if (base->th_notify_fn != NULL) {
        base->th_notify_fn = NULL;
    if (base->th_notify_fd[0] != -1) {
        event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
        EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
        if (base->th_notify_fd[1] != -1)
            EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
        base->th_notify_fd[0] = -1;
        base->th_notify_fd[1] = -1;
        event_debug_unassign(&base->th_notify);

    /* Replace the original evsel. */
    base->evsel = evsel;

    if (evsel->need_reinit) {
        /* Reconstruct the backend through brute force, so that we do
         * not share any structures with the parent process.  For some
         * backends, this is necessary: epoll and kqueue, for
         * instance, have events associated with a kernel
         * structure.  If we didn't reinitialize, we'd share that
         * structure with the parent process, and any changes made by
         * the parent would affect our backend's behavior (and vice
         * versa). */
        if (base->evsel->dealloc != NULL)
            base->evsel->dealloc(base);
        base->evbase = evsel->init(base);
        if (base->evbase == NULL) {
                "%s: could not reinitialize event mechanism",
        /* Empty out the changelist (if any): we are starting from a
         * blank slate. */
        event_changelist_freemem_(&base->changelist);

        /* Tell the event maps to re-inform the backend about all
         * pending events.  This will make the signal notification
         * event get re-created if necessary. */
        if (evmap_reinit_(base) < 0)

    res = evsig_init_(base);
    if (res == 0 && had_signal_added) {
        res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
            base->sig.ev_signal_added = 1;

    /* If we were notifiable before, and nothing just exploded, become
     * notifiable again. */
    if (was_notifiable && res == 0)
        res = evthread_make_base_notifiable_nolock_(base);

    EVBASE_RELEASE_LOCK(base, th_base_lock);
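
/*
 * Editorial note: event_reinit() exists for exactly one pattern, shown in
 * this hypothetical sketch (not part of this file): carrying an event_base
 * across fork() in the child process.
 *
 *     #include <event2/event.h>
 *     #include <sys/types.h>
 *     #include <unistd.h>
 *
 *     void
 *     fork_and_continue(struct event_base *base)
 *     {
 *         pid_t pid = fork();
 *         if (pid == 0) {
 *             // Child: backend state (epoll fd, kqueue, signal pipes)
 *             // is stale here; rebuild it before looping again.
 *             if (event_reinit(base) == -1)
 *                 _exit(1);
 *             event_base_dispatch(base);
 *             _exit(0);
 *         }
 *         // Parent continues using 'base' unchanged.
 *     }
 */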
/* Get the monotonic time for this event_base's timer */
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
    EVBASE_RELEASE_LOCK(base, th_base_lock);

event_get_supported_methods(void)
    static const char **methods = NULL;
    const struct eventop **method;

    /* count all methods */
    for (method = &eventops[0]; *method != NULL; ++method) {

    /* allocate one more than we need for the NULL pointer */
    tmp = mm_calloc((i + 1), sizeof(char *));

    /* populate the array with the supported methods */
    for (k = 0, i = 0; eventops[k] != NULL; ++k) {
        tmp[i++] = eventops[k]->name;

    if (methods != NULL)
        mm_free((char **)methods);
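
/*
 * Editorial note: a hypothetical sketch (not part of this file) listing the
 * compiled-in backends at runtime via the function above:
 *
 *     #include <event2/event.h>
 *     #include <stdio.h>
 *
 *     void
 *     print_methods(void)
 *     {
 *         int i;
 *         const char **methods = event_get_supported_methods();
 *         for (i = 0; methods && methods[i] != NULL; ++i)
 *             printf("supported: %s\n", methods[i]);  // NULL-terminated
 *     }
 */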
struct event_config *
event_config_new(void)
    struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

    TAILQ_INIT(&cfg->entries);
    cfg->max_dispatch_interval.tv_sec = -1;
    cfg->max_dispatch_callbacks = INT_MAX;
    cfg->limit_callbacks_after_prio = 1;

event_config_entry_free(struct event_config_entry *entry)
    if (entry->avoid_method != NULL)
        mm_free((char *)entry->avoid_method);

event_config_free(struct event_config *cfg)
    struct event_config_entry *entry;

    while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
        TAILQ_REMOVE(&cfg->entries, entry, next);
        event_config_entry_free(entry);

event_config_set_flag(struct event_config *cfg, int flag)

event_config_avoid_method(struct event_config *cfg, const char *method)
    struct event_config_entry *entry = mm_malloc(sizeof(*entry));

    if ((entry->avoid_method = mm_strdup(method)) == NULL) {

    TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

event_config_require_features(struct event_config *cfg,
    cfg->require_features = features;

event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
    cfg->n_cpus_hint = cpus;

event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
    memcpy(&cfg->max_dispatch_interval, max_interval,
        sizeof(struct timeval));
        cfg->max_dispatch_interval.tv_sec = -1;
    cfg->max_dispatch_callbacks =
        max_callbacks >= 0 ? max_callbacks : INT_MAX;
    if (min_priority < 0)
    cfg->limit_callbacks_after_prio = min_priority;

event_priority_init(int npriorities)
    return event_base_priority_init(current_base, npriorities);

event_base_priority_init(struct event_base *base, int npriorities)
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
        || npriorities >= EVENT_MAX_PRIORITIES)

    if (npriorities == base->nactivequeues)

    if (base->nactivequeues) {
        mm_free(base->activequeues);
        base->nactivequeues = 0;

    /* Allocate our priority queues */
    base->activequeues = (struct evcallback_list *)
        mm_calloc(npriorities, sizeof(struct evcallback_list));
    if (base->activequeues == NULL) {
        event_warn("%s: calloc", __func__);

    base->nactivequeues = npriorities;

    for (i = 0; i < base->nactivequeues; ++i) {
        TAILQ_INIT(&base->activequeues[i]);

    EVBASE_RELEASE_LOCK(base, th_base_lock);
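
/*
 * Editorial note: a hypothetical sketch (not part of this file) of the
 * priority queues in use.  Lower numbers run first; see
 * event_process_active() below.
 *
 *     #include <event2/event.h>
 *
 *     void
 *     setup_priorities(struct event_base *base,
 *         struct event *urgent, struct event *bulk)
 *     {
 *         event_base_priority_init(base, 2);  // priorities 0 and 1
 *         event_priority_set(urgent, 0);      // runs before...
 *         event_priority_set(bulk, 1);        // ...this one
 *     }
 */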
event_base_get_npriorities(struct event_base *base)
        base = current_base;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    n = base->nactivequeues;
    EVBASE_RELEASE_LOCK(base, th_base_lock);

event_base_get_num_events(struct event_base *base, unsigned int type)
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (type & EVENT_BASE_COUNT_ACTIVE)
        r += base->event_count_active;

    if (type & EVENT_BASE_COUNT_VIRTUAL)
        r += base->virtual_event_count;

    if (type & EVENT_BASE_COUNT_ADDED)
        r += base->event_count;

    EVBASE_RELEASE_LOCK(base, th_base_lock);

event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (type & EVENT_BASE_COUNT_ACTIVE) {
        r += base->event_count_active_max;
            base->event_count_active_max = 0;

    if (type & EVENT_BASE_COUNT_VIRTUAL) {
        r += base->virtual_event_count_max;
            base->virtual_event_count_max = 0;

    if (type & EVENT_BASE_COUNT_ADDED) {
        r += base->event_count_max;
            base->event_count_max = 0;

    EVBASE_RELEASE_LOCK(base, th_base_lock);

/* Returns true iff we're currently watching any events. */
event_haveevents(struct event_base *base)
    /* Caller must hold th_base_lock */
    return (base->virtual_event_count > 0 || base->event_count > 0);

/* "closure" function called when processing active signal events */
event_signal_closure(struct event_base *base, struct event *ev)
    /* Allows deletes to work */
    ncalls = ev->ev_ncalls;
        ev->ev_pncalls = &ncalls;
    EVBASE_RELEASE_LOCK(base, th_base_lock);
        ev->ev_ncalls = ncalls;
            ev->ev_pncalls = NULL;
        (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

        EVBASE_ACQUIRE_LOCK(base, th_base_lock);
        should_break = base->event_break;
        EVBASE_RELEASE_LOCK(base, th_base_lock);

            ev->ev_pncalls = NULL;
/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousand timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" values to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */
#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
    (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK) >> COMMON_TIMEOUT_IDX_SHIFT)
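
/*
 * Editorial note: a worked example of the encoding above (illustrative, not
 * part of this file).  tv_usec is laid out as
 *
 *     0x5        0xNN               0xMMMMM
 *     magic (4)  queue index (8)    microseconds (20)
 *
 * so a common timeout registered as the third queue (index 2) with a
 * 500000-microsecond fractional part is stored as
 *
 *     tv_usec = 0x50000000 | (2 << 20) | 500000;   // 0x5027a120
 *
 * COMMON_TIMEOUT_IDX() recovers the 2, and masking with MICROSECONDS_MASK
 * (0x000fffff) recovers the 500000.
 */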
/** Return true iff 'tv' is a common timeout in 'base' */
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
    if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
    idx = COMMON_TIMEOUT_IDX(tv);
    return idx < base->n_common_timeouts;

/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 * one is a common timeout. */
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
    return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
        (tv2->tv_usec & ~MICROSECONDS_MASK);

/** Requires that 'tv' is a common timeout.  Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
    return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];

common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
    const struct timeval *expect =
        &get_common_timeout_list(base, tv)->duration;
    return tv->tv_sec == expect->tv_sec &&
        tv->tv_usec == expect->tv_usec;
/* Add the timeout for the first event in the given common timeout list to
 * the event_base's minheap. */
common_timeout_schedule(struct common_timeout_list *ctl,
    const struct timeval *now, struct event *head)
    struct timeval timeout = head->ev_timeout;
    timeout.tv_usec &= MICROSECONDS_MASK;
    event_add_nolock_(&ctl->timeout_event, &timeout, 1);

/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
    struct common_timeout_list *ctl = arg;
    struct event_base *base = ctl->base;
    struct event *ev = NULL;
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    gettime(base, &now);
        ev = TAILQ_FIRST(&ctl->events);
        if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
            (ev->ev_timeout.tv_sec == now.tv_sec &&
            (ev->ev_timeout.tv_usec & MICROSECONDS_MASK) > now.tv_usec))
        event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
        event_active_nolock_(ev, EV_TIMEOUT, 1);
        common_timeout_schedule(ctl, &now, ev);
    EVBASE_RELEASE_LOCK(base, th_base_lock);

#define MAX_COMMON_TIMEOUTS 256

const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
    const struct timeval *result = NULL;
    struct common_timeout_list *new_ctl;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (duration->tv_usec > 1000000) {
        memcpy(&tv, duration, sizeof(struct timeval));
        if (is_common_timeout(duration, base))
            tv.tv_usec &= MICROSECONDS_MASK;
        tv.tv_sec += tv.tv_usec / 1000000;
        tv.tv_usec %= 1000000;
    for (i = 0; i < base->n_common_timeouts; ++i) {
        const struct common_timeout_list *ctl =
            base->common_timeout_queues[i];
        if (duration->tv_sec == ctl->duration.tv_sec &&
            duration->tv_usec ==
            (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
            EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
            result = &ctl->duration;
    if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
        event_warnx("%s: Too many common timeouts already in use; "
            "we only support %d per event_base", __func__,
            MAX_COMMON_TIMEOUTS);
    if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
        int n = base->n_common_timeouts < 16 ? 16 :
            base->n_common_timeouts * 2;
        struct common_timeout_list **newqueues =
            mm_realloc(base->common_timeout_queues,
                n * sizeof(struct common_timeout_queue *));
            event_warn("%s: realloc", __func__);
        base->n_common_timeouts_allocated = n;
        base->common_timeout_queues = newqueues;
    new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
        event_warn("%s: calloc", __func__);
    TAILQ_INIT(&new_ctl->events);
    new_ctl->duration.tv_sec = duration->tv_sec;
    new_ctl->duration.tv_usec =
        duration->tv_usec | COMMON_TIMEOUT_MAGIC |
        (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
    evtimer_assign(&new_ctl->timeout_event, base,
        common_timeout_callback, new_ctl);
    new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
    event_priority_set(&new_ctl->timeout_event, 0);
    new_ctl->base = base;
    base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
    result = &new_ctl->duration;

    EVUTIL_ASSERT(is_common_timeout(result, base));

    EVBASE_RELEASE_LOCK(base, th_base_lock);
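
/*
 * Editorial note: a hypothetical sketch (not part of this file) of the
 * intended use of event_base_init_common_timeout(): register the shared
 * duration once, then pass the returned timeval to every event_add() that
 * uses it, so thousands of identical timeouts share one queue instead of
 * the minheap.
 *
 *     #include <event2/event.h>
 *
 *     // 'evs' is assumed to be an array of n already-assigned timer events.
 *     void
 *     add_many_10s_timeouts(struct event_base *base,
 *         struct event **evs, int n)
 *     {
 *         int i;
 *         struct timeval tv;
 *         const struct timeval *common;
 *         tv.tv_sec = 10;
 *         tv.tv_usec = 0;
 *         common = event_base_init_common_timeout(base, &tv);
 *         for (i = 0; i < n; ++i)
 *             event_add(evs[i], common);  // queued, not heaped
 *     }
 */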
/* Closure function invoked when we're activating a persistent event. */
event_persist_closure(struct event_base *base, struct event *ev)
    void (*evcb_callback)(evutil_socket_t, short, void *);

    // Other fields of *ev that must be stored before executing
    evutil_socket_t evcb_fd;

    /* reschedule the persistent event if we have a timeout. */
    if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
        /* If there was a timeout, we want it to run at an interval of
         * ev_io_timeout after the last time it was _scheduled_ for,
         * not ev_io_timeout after _now_.  If it fired for another
         * reason, though, the timeout ought to start ticking _now_. */
        struct timeval run_at, relative_to, delay, now;
        ev_uint32_t usec_mask = 0;
        EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
            &ev->ev_io_timeout));
        gettime(base, &now);
        if (is_common_timeout(&ev->ev_timeout, base)) {
            delay = ev->ev_io_timeout;
            usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
            delay.tv_usec &= MICROSECONDS_MASK;
            if (ev->ev_res & EV_TIMEOUT) {
                relative_to = ev->ev_timeout;
                relative_to.tv_usec &= MICROSECONDS_MASK;
            delay = ev->ev_io_timeout;
            if (ev->ev_res & EV_TIMEOUT) {
                relative_to = ev->ev_timeout;
        evutil_timeradd(&relative_to, &delay, &run_at);
        if (evutil_timercmp(&run_at, &now, <)) {
            /* Looks like we missed at least one invocation due to
             * a clock jump, not running the event loop for a
             * while, really slow callbacks, or
             * something.  Reschedule relative to now. */
            evutil_timeradd(&now, &delay, &run_at);
        run_at.tv_usec |= usec_mask;
        event_add_nolock_(ev, &run_at, 1);

    // Save our callback before we release the lock
    evcb_callback = ev->ev_callback;
    evcb_fd = ev->ev_fd;
    evcb_res = ev->ev_res;
    evcb_arg = ev->ev_arg;

    EVBASE_RELEASE_LOCK(base, th_base_lock);

    // Execute the callback
    (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
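
/*
 * Editorial note: the rescheduling above is what gives EV_PERSIST timers
 * their "fixed interval from the last scheduled firing" behavior.  A
 * hypothetical sketch (not part of this file):
 *
 *     #include <event2/event.h>
 *     #include <stdio.h>
 *
 *     static void
 *     tick(evutil_socket_t fd, short what, void *arg)
 *     {
 *         // Fires roughly once per second; the next deadline is computed
 *         // from the previous deadline, not from "now", so slow callbacks
 *         // do not make the period drift.
 *         printf("tick\n");
 *     }
 *
 *     void
 *     add_ticker(struct event_base *base)
 *     {
 *         static struct timeval one_sec = { 1, 0 };
 *         struct event *ev = event_new(base, -1, EV_PERSIST, tick, NULL);
 *         event_add(ev, &one_sec);
 *     }
 */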
/*
  Helper for event_process_active to process all the events in a single queue,
  releasing the lock as we go.  This function requires that the lock be held
  when it's invoked.  Returns -1 if we get a signal or an event_break that
  means we should stop processing any active events now.  Otherwise returns
  the number of non-internal event_callbacks that we processed.
*/
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
    struct event_callback *evcb;

    EVUTIL_ASSERT(activeq != NULL);

    for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
        struct event *ev = NULL;
        if (evcb->evcb_flags & EVLIST_INIT) {
            ev = event_callback_to_event(evcb);

            if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
                event_queue_remove_active(base, evcb);
                event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
                "event_process_active: event: %p, %s%s%scall %p",
                ev->ev_res & EV_READ ? "EV_READ " : " ",
                ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
                ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
            event_queue_remove_active(base, evcb);
            event_debug(("event_process_active: event_callback %p, "
                "closure %d, call %p",
                evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));

        if (!(evcb->evcb_flags & EVLIST_INTERNAL))

        base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
        base->current_event_waiters = 0;

        switch (evcb->evcb_closure) {
        case EV_CLOSURE_EVENT_SIGNAL:
            EVUTIL_ASSERT(ev != NULL);
            event_signal_closure(base, ev);
        case EV_CLOSURE_EVENT_PERSIST:
            EVUTIL_ASSERT(ev != NULL);
            event_persist_closure(base, ev);
        case EV_CLOSURE_EVENT: {
            void (*evcb_callback)(evutil_socket_t, short, void *);
            EVUTIL_ASSERT(ev != NULL);
            evcb_callback = *ev->ev_callback;
            EVBASE_RELEASE_LOCK(base, th_base_lock);
            evcb_callback(ev->ev_fd, res, ev->ev_arg);
        case EV_CLOSURE_CB_SELF: {
            void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
            EVBASE_RELEASE_LOCK(base, th_base_lock);
            evcb_selfcb(evcb, evcb->evcb_arg);
        case EV_CLOSURE_EVENT_FINALIZE:
        case EV_CLOSURE_EVENT_FINALIZE_FREE: {
            void (*evcb_evfinalize)(struct event *, void *);
            int evcb_closure = evcb->evcb_closure;
            EVUTIL_ASSERT(ev != NULL);
            base->current_event = NULL;
            evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
            EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
            EVBASE_RELEASE_LOCK(base, th_base_lock);
            evcb_evfinalize(ev, ev->ev_arg);
            event_debug_note_teardown_(ev);
            if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
        case EV_CLOSURE_CB_FINALIZE: {
            void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
            base->current_event = NULL;
            EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
            EVBASE_RELEASE_LOCK(base, th_base_lock);
            evcb_cbfinalize(evcb, evcb->evcb_arg);

        EVBASE_ACQUIRE_LOCK(base, th_base_lock);
        base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
        if (base->current_event_waiters) {
            base->current_event_waiters = 0;
            EVTHREAD_COND_BROADCAST(base->current_event_cond);

        if (base->event_break)
        if (count >= max_to_process)
        if (count && endtime) {
            update_time_cache(base);
            gettime(base, &now);
            if (evutil_timercmp(&now, endtime, >=))
        if (base->event_continue)
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */
event_process_active(struct event_base *base)
    /* Caller must hold th_base_lock */
    struct evcallback_list *activeq = NULL;
    const struct timeval *endtime;

    const int maxcb = base->max_dispatch_callbacks;
    const int limit_after_prio = base->limit_callbacks_after_prio;
    if (base->max_dispatch_time.tv_sec >= 0) {
        update_time_cache(base);
        evutil_timeradd(&base->max_dispatch_time, &tv, &tv);

    for (i = 0; i < base->nactivequeues; ++i) {
        if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
            base->event_running_priority = i;
            activeq = &base->activequeues[i];
            if (i < limit_after_prio)
                c = event_process_active_single_queue(base, activeq,
                c = event_process_active_single_queue(base, activeq,
                break; /* Processed a real event; do not
                        * consider lower-priority events */
            /* If we get here, all of the events we processed
             * were internal.  Continue. */

    base->event_running_priority = -1;

/*
 * Wait continuously for events.  We exit only if no events are left.
 */
event_dispatch(void)
    return (event_loop(0));

event_base_dispatch(struct event_base *event_base)
    return (event_base_loop(event_base, 0));
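
/*
 * Editorial note: a minimal end-to-end dispatch sketch (not part of this
 * file), tying the pieces above together:
 *
 *     #include <event2/event.h>
 *     #include <stdio.h>
 *
 *     static void
 *     on_timeout(evutil_socket_t fd, short what, void *arg)
 *     {
 *         printf("timeout fired; loop will now run out of events\n");
 *     }
 *
 *     int main(void)
 *     {
 *         struct timeval tv = { 1, 0 };
 *         struct event_base *base = event_base_new();
 *         struct event *ev = evtimer_new(base, on_timeout, NULL);
 *         evtimer_add(ev, &tv);
 *         event_base_dispatch(base);  // returns when no events remain
 *         event_free(ev);
 *         event_base_free(base);
 *         return 0;
 *     }
 */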
event_base_get_method(const struct event_base *base)
    EVUTIL_ASSERT(base);
    return (base->evsel->name);

/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
    struct event_base *base = arg;
    base->event_gotterm = 1;

event_loopexit(const struct timeval *tv)
    return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,

event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
    return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,

event_loopbreak(void)
    return (event_base_loopbreak(current_base));

event_base_loopbreak(struct event_base *event_base)
    if (event_base == NULL)

    EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    event_base->event_break = 1;

    if (EVBASE_NEED_NOTIFY(event_base)) {
        r = evthread_notify_base(event_base);
    EVBASE_RELEASE_LOCK(event_base, th_base_lock);

event_base_loopcontinue(struct event_base *event_base)
    if (event_base == NULL)

    EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    event_base->event_continue = 1;

    if (EVBASE_NEED_NOTIFY(event_base)) {
        r = evthread_notify_base(event_base);
    EVBASE_RELEASE_LOCK(event_base, th_base_lock);

event_base_got_break(struct event_base *event_base)
    EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    res = event_base->event_break;
    EVBASE_RELEASE_LOCK(event_base, th_base_lock);

event_base_got_exit(struct event_base *event_base)
    EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
    res = event_base->event_gotterm;
    EVBASE_RELEASE_LOCK(event_base, th_base_lock);
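
/*
 * Editorial note: the difference between the two exit paths above, in a
 * hypothetical sketch (not part of this file): loopexit finishes the
 * already-active callbacks (optionally after a delay); loopbreak stops as
 * soon as the current callback returns.
 *
 *     #include <event2/event.h>
 *
 *     void
 *     stop_in_two_seconds(struct event_base *base)
 *     {
 *         struct timeval tv = { 2, 0 };
 *         event_base_loopexit(base, &tv);   // graceful, delayed
 *     }
 *
 *     void
 *     stop_right_now(struct event_base *base)
 *     {
 *         event_base_loopbreak(base);       // abrupt, from a callback
 *     }
 */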
/* not thread safe */
event_loop(int flags)
    return event_base_loop(current_base, flags);

event_base_loop(struct event_base *base, int flags)
    const struct eventop *evsel = base->evsel;
    struct timeval *tv_p;
    int res, done, retval = 0;

    /* Grab the lock.  We will release it inside evsel.dispatch, and again
     * as we invoke user callbacks. */
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (base->running_loop) {
        event_warnx("%s: reentrant invocation.  Only one event_base_loop"
            " can run on each event_base at once.", __func__);
        EVBASE_RELEASE_LOCK(base, th_base_lock);

    base->running_loop = 1;

    clear_time_cache(base);

    if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
        evsig_set_base_(base);

#ifndef EVENT__DISABLE_THREAD_SUPPORT
    base->th_owner_id = EVTHREAD_GET_ID();

    base->event_gotterm = base->event_break = 0;

        base->event_continue = 0;
        base->n_deferreds_queued = 0;

        /* Terminate the loop if we have been asked to */
        if (base->event_gotterm) {
        if (base->event_break) {

        if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
            timeout_next(base, &tv_p);
            /* if we have active events, we just poll new events */
            evutil_timerclear(&tv);

        /* If we have no events, we just exit */
        if (0 == (flags & EVLOOP_NO_EXIT_ON_EMPTY) &&
            !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
            event_debug(("%s: no events registered.", __func__));

        event_queue_make_later_events_active(base);

        clear_time_cache(base);

        res = evsel->dispatch(base, tv_p);
            event_debug(("%s: dispatch returned unsuccessfully.",

        update_time_cache(base);

        timeout_process(base);

        if (N_ACTIVE_CALLBACKS(base)) {
            int n = event_process_active(base);
            if ((flags & EVLOOP_ONCE)
                && N_ACTIVE_CALLBACKS(base) == 0
        } else if (flags & EVLOOP_NONBLOCK)

    event_debug(("%s: asked to terminate loop.", __func__));

    clear_time_cache(base);
    base->running_loop = 0;

    EVBASE_RELEASE_LOCK(base, th_base_lock);

/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
event_once_cb(evutil_socket_t fd, short events, void *arg)
    struct event_once *eonce = arg;

    (*eonce->cb)(fd, events, eonce->arg);
    EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
    LIST_REMOVE(eonce, next_once);
    EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
    event_debug_unassign(&eonce->ev);
/* not thread safe; schedules an event to run once */
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
    return event_base_once(current_base, fd, events, callback, arg, tv);

/* Schedules an event once */
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
    struct event_once *eonce;
    /* We cannot support signals that just fire once, or persistent
     * events. */
    if (events & (EV_SIGNAL|EV_PERSIST))
    if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)

    eonce->cb = callback;

    if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
        evtimer_assign(&eonce->ev, base, event_once_cb, eonce);

        if (tv == NULL || !evutil_timerisset(tv)) {
            /* If the event is going to become active immediately,
             * don't put it on the timeout queue.  This is one
             * idiom for scheduling a callback, so let's make
             * it fast (and order-preserving). */
    } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
        events &= EV_READ|EV_WRITE|EV_CLOSED;

        event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
        /* Bad event combination */

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
        event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
        res = event_add_nolock_(&eonce->ev, tv, 0);

    LIST_INSERT_HEAD(&base->once_events, eonce, next_once);

    EVBASE_RELEASE_LOCK(base, th_base_lock);
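
/*
 * Editorial note: a hypothetical sketch (not part of this file) of
 * event_base_once(): fire-and-forget, no struct event to manage, but the
 * callback must eventually run or the internal allocation stays alive until
 * the base is freed.
 *
 *     #include <event2/event.h>
 *     #include <stdio.h>
 *
 *     static void
 *     once_cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *         printf("ran once: %s\n", (const char *)arg);
 *     }
 *
 *     void
 *     schedule_once(struct event_base *base)
 *     {
 *         struct timeval tv = { 0, 500000 };  // 0.5 s from now
 *         event_base_once(base, -1, EV_TIMEOUT, once_cb, "hello", &tv);
 *     }
 */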
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
        base = current_base;
    if (arg == &event_self_cbarg_ptr_)

    if (!(events & EV_SIGNAL))
        event_debug_assert_socket_nonblocking_(fd);
    event_debug_assert_not_added_(ev);

    ev->ev_callback = callback;
    ev->ev_events = events;
    ev->ev_flags = EVLIST_INIT;
    ev->ev_pncalls = NULL;

    if (events & EV_SIGNAL) {
        if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
            event_warnx("%s: EV_SIGNAL is not compatible with "
                "EV_READ, EV_WRITE or EV_CLOSED", __func__);
        ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
        if (events & EV_PERSIST) {
            evutil_timerclear(&ev->ev_io_timeout);
            ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
            ev->ev_closure = EV_CLOSURE_EVENT;

    min_heap_elem_init_(ev);

    /* by default, we put new events into the middle priority */
    ev->ev_pri = base->nactivequeues / 2;

    event_debug_note_setup_(ev);

event_base_set(struct event_base *base, struct event *ev)
    /* Only innocent events may be assigned to a different base */
    if (ev->ev_flags != EVLIST_INIT)

    event_debug_assert_is_setup_(ev);

    ev->ev_pri = base->nactivequeues / 2;

event_set(struct event *ev, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *), void *arg)
    r = event_assign(ev, current_base, fd, events, callback, arg);
    EVUTIL_ASSERT(r == 0);

event_self_cbarg(void)
    return &event_self_cbarg_ptr_;
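
/*
 * Editorial note: event_self_cbarg() solves a chicken-and-egg problem: the
 * callback wants a pointer to the very event being created.  A hypothetical
 * sketch (not part of this file):
 *
 *     #include <event2/event.h>
 *
 *     static void
 *     cb(evutil_socket_t fd, short what, void *arg)
 *     {
 *         struct event *self = arg;  // the event itself
 *         event_del(self);
 *     }
 *
 *     struct event *
 *     make_self_referential_timer(struct event_base *base)
 *     {
 *         // The sentinel is replaced by the new event's own address
 *         // inside event_assign()/event_new().
 *         return event_new(base, -1, EV_PERSIST, cb, event_self_cbarg());
 *     }
 */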
event_base_get_running_event(struct event_base *base)
    struct event *ev = NULL;
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (EVBASE_IN_THREAD(base)) {
        struct event_callback *evcb = base->current_event;
        if (evcb->evcb_flags & EVLIST_INIT)
            ev = event_callback_to_event(evcb);
    EVBASE_RELEASE_LOCK(base, th_base_lock);

event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
    ev = mm_malloc(sizeof(struct event));
    if (event_assign(ev, base, fd, events, cb, arg) < 0) {

event_free(struct event *ev)
    /* This is disabled so that events which have been finalized remain a
     * valid target for event_free(). */
    // event_debug_assert_is_setup_(ev);
    /* make sure that this event won't be coming back to haunt us. */
    event_debug_note_teardown_(ev);

event_debug_unassign(struct event *ev)
    event_debug_assert_not_added_(ev);
    event_debug_note_teardown_(ev);

    ev->ev_flags &= ~EVLIST_INIT;
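
/*
 * Editorial note: the usual heap-allocated event lifecycle, as a
 * hypothetical sketch (not part of this file):
 *
 *     #include <event2/event.h>
 *
 *     static void
 *     on_read(evutil_socket_t fd, short what, void *arg)
 *     {
 *         // ... read from fd ...
 *     }
 *
 *     void
 *     watch_fd(struct event_base *base, evutil_socket_t fd)
 *     {
 *         struct event *ev;
 *         ev = event_new(base, fd, EV_READ|EV_PERSIST, on_read, NULL);
 *         event_add(ev, NULL);       // no timeout
 *         // ... later, when done:
 *         // event_free(ev);         // event_del() is implied
 *     }
 */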
#define EVENT_FINALIZE_FREE_ 0x10000
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
        EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

    event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
    ev->ev_closure = closure;
    ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
    event_active_nolock_(ev, EV_FINALIZE, 1);
    ev->ev_flags |= EVLIST_FINALIZING;

event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    struct event_base *base = ev->ev_base;
    if (EVUTIL_FAILURE_CHECK(!base)) {
        event_warnx("%s: event has no event_base set.", __func__);

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    r = event_finalize_nolock_(base, flags, ev, cb);
    EVBASE_RELEASE_LOCK(base, th_base_lock);

event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    return event_finalize_impl_(flags, ev, cb);

event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
    return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
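
/*
 * Editorial note: finalizers give a safe teardown point when another thread
 * may still be running the event's callback.  A hypothetical sketch (not
 * part of this file; assumes 'c->ev' was created with 'c' as its callback
 * argument):
 *
 *     #include <event2/event.h>
 *     #include <stdlib.h>
 *
 *     struct conn { struct event *ev; };
 *
 *     static void
 *     conn_finalize(struct event *ev, void *arg)
 *     {
 *         // Runs once the event can no longer be executing anywhere;
 *         // the event itself is freed for us by the FREE variant.
 *         free(arg);
 *     }
 *
 *     void
 *     conn_close(struct conn *c)
 *     {
 *         event_free_finalize(0, c->ev, conn_finalize);
 *     }
 */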
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
    struct event *ev = NULL;
    if (evcb->evcb_flags & EVLIST_INIT) {
        ev = event_callback_to_event(evcb);
        event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
        event_callback_cancel_nolock_(base, evcb, 0); /* XXX can this fail? */

    evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
    evcb->evcb_cb_union.evcb_cbfinalize = cb;
    event_callback_activate_nolock_(base, evcb); /* XXX can this really fail? */
    evcb->evcb_flags |= EVLIST_FINALIZING;

event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    event_callback_finalize_nolock_(base, flags, evcb, cb);
    EVBASE_RELEASE_LOCK(base, th_base_lock);
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * finalized. */
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
    int n_pending = 0, i;

        base = current_base;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    event_debug(("%s: %d events finalizing", __func__, n_cbs));
    /* At most one can be currently executing; the rest we just
     * cancel... But we always make sure that the finalize callback
     * gets called. */
    for (i = 0; i < n_cbs; ++i) {
        struct event_callback *evcb = evcbs[i];
        if (evcb == base->current_event) {
            event_callback_finalize_nolock_(base, 0, evcb, cb);
            event_callback_cancel_nolock_(base, evcb, 0);

    if (n_pending == 0) {
        /* Just do the first one. */
        event_callback_finalize_nolock_(base, 0, evcbs[0], cb);

    EVBASE_RELEASE_LOCK(base, th_base_lock);
/*
 * Sets the priority of an event; if the event is already active,
 * changing the priority will fail.
 */
2359 event_priority_set(struct event *ev, int pri)
2361 event_debug_assert_is_setup_(ev);
2363 if (ev->ev_flags & EVLIST_ACTIVE)
2365 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
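/*
 * Illustrative sketch (assumes 'fd' and 'cb' exist): priorities only have
 * an effect after the base has been given more than one active queue, and
 * must be assigned while the event is not active.
 *
 *     #include <event2/event.h>
 *
 *     struct event_base *base = event_base_new();
 *     event_base_priority_init(base, 2);  // queue 0 = high, queue 1 = low
 *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, cb, NULL);
 *     event_priority_set(ev, 0);          // before the event becomes active
 *     event_add(ev, NULL);
 */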
2374 * Checks if a specific event is pending or scheduled.
2378 event_pending(const struct event *ev, short event, struct timeval *tv)
2382 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2383 event_warnx("%s: event has no event_base set.", __func__);
2387 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2388 event_debug_assert_is_setup_(ev);
2390 if (ev->ev_flags & EVLIST_INSERTED)
2391 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2392 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2393 flags |= ev->ev_res;
2394 if (ev->ev_flags & EVLIST_TIMEOUT)
2395 flags |= EV_TIMEOUT;
2397 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
2399 /* See if there is a timeout that we should report */
2400 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2401 struct timeval tmp = ev->ev_timeout;
2402 tmp.tv_usec &= MICROSECONDS_MASK;
2403 /* correctly remap to real time */
2404 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
2407 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2409 return (flags & event);
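/*
 * Illustrative sketch: event_pending() both tests whether 'ev' is
 * scheduled and, if EV_TIMEOUT is included in the query and 'tv' is
 * non-NULL, reports when the timeout will fire, remapped to the real
 * clock via tv_clock_diff as above.
 *
 *     struct timeval when;
 *     if (event_pending(ev, EV_READ | EV_TIMEOUT, &when))
 *         printf("pending; timeout at %ld.%06ld\n",
 *             (long)when.tv_sec, (long)when.tv_usec);
 */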
2413 event_initialized(const struct event *ev)
2415 if (!(ev->ev_flags & EVLIST_INIT))
2422 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2424 event_debug_assert_is_setup_(event);
2427 *base_out = event->ev_base;
2429 *fd_out = event->ev_fd;
2431 *events_out = event->ev_events;
2433 *callback_out = event->ev_callback;
2435 *arg_out = event->ev_arg;
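/*
 * Illustrative sketch ('new_fd' is hypothetical): event_get_assignment()
 * makes it easy to re-create an event with one field changed, e.g. to
 * move it onto a different fd.
 *
 *     struct event_base *b;
 *     evutil_socket_t fd;
 *     short what;
 *     event_callback_fn fn;
 *     void *arg;
 *
 *     event_get_assignment(ev, &b, &fd, &what, &fn, &arg);
 *     event_del(ev);
 *     event_assign(ev, b, new_fd, what, fn, arg);
 */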
2439 event_get_struct_event_size(void)
2441 return sizeof(struct event);
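/*
 * Illustrative sketch: because the size is exported at run time, a caller
 * can allocate event storage without compiling against struct event's
 * layout ('base', 'fd', 'cb', and 'arg' are assumed to exist).
 *
 *     struct event *ev = malloc(event_get_struct_event_size());
 *     event_assign(ev, base, fd, EV_READ, cb, arg);
 */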
2445 event_get_fd(const struct event *ev)
2447 event_debug_assert_is_setup_(ev);
2452 event_get_base(const struct event *ev)
2454 event_debug_assert_is_setup_(ev);
2459 event_get_events(const struct event *ev)
2461 event_debug_assert_is_setup_(ev);
2462 return ev->ev_events;
2466 event_get_callback(const struct event *ev)
2468 event_debug_assert_is_setup_(ev);
2469 return ev->ev_callback;
2473 event_get_callback_arg(const struct event *ev)
2475 event_debug_assert_is_setup_(ev);
2480 event_get_priority(const struct event *ev)
2482 event_debug_assert_is_setup_(ev);
2487 event_add(struct event *ev, const struct timeval *tv)
2491 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2492 event_warnx("%s: event has no event_base set.", __func__);
2496 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2498 res = event_add_nolock_(ev, tv, 0);
2500 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2505 /* Helper callback: wake an event_base from another thread. This version
2506 * works by writing a byte to one end of a socketpair, so that the event_base
2507 * listening on the other end will wake up as the corresponding event
2508 * triggers */
2510 evthread_notify_base_default(struct event_base *base)
2516 r = send(base->th_notify_fd[1], buf, 1, 0);
2518 r = write(base->th_notify_fd[1], buf, 1);
2520 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
2523 #ifdef EVENT__HAVE_EVENTFD
2524 /* Helper callback: wake an event_base from another thread. This version
2525 * assumes that you have a working eventfd() implementation. */
2527 evthread_notify_base_eventfd(struct event_base *base)
2529 ev_uint64_t msg = 1;
2532 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2533 } while (r < 0 && errno == EAGAIN);
2535 return (r < 0) ? -1 : 0;
2540 /** Tell the thread currently running the event_loop for base (if any) that it
2541 * needs to stop waiting in its dispatch function (if it is) and process all
2542 * active callbacks. */
2544 evthread_notify_base(struct event_base *base)
2546 EVENT_BASE_ASSERT_LOCKED(base);
2547 if (!base->th_notify_fn)
2549 if (base->is_notify_pending)
2551 base->is_notify_pending = 1;
2552 return base->th_notify_fn(base);
2555 /* Implementation function to remove a timeout on a currently pending event. */
2558 event_remove_timer_nolock_(struct event *ev)
2560 struct event_base *base = ev->ev_base;
2562 EVENT_BASE_ASSERT_LOCKED(base);
2563 event_debug_assert_is_setup_(ev);
2565 event_debug(("event_remove_timer_nolock: event: %p", ev));
2567 /* If it's not pending on a timeout, we don't need to do anything. */
2568 if (ev->ev_flags & EVLIST_TIMEOUT) {
2569 event_queue_remove_timeout(base, ev);
2570 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2577 event_remove_timer(struct event *ev)
2581 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2582 event_warnx("%s: event has no event_base set.", __func__);
2586 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2588 res = event_remove_timer_nolock_(ev);
2590 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
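/*
 * Illustrative sketch ('five' is hypothetical): event_remove_timer()
 * strips only the timeout, leaving any I/O or signal registration intact.
 *
 *     struct timeval five = { 5, 0 };
 *     event_add(ev, &five);     // pending on EV_READ with a 5s timeout
 *     // ... later, give up on the deadline but keep waiting for data:
 *     event_remove_timer(ev);   // still pending on EV_READ, no timeout
 */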
2595 /* Implementation function to add an event. Works just like event_add,
2596 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2597 * we treat tv as an absolute time, not as an interval to add to the current
2598 * time */
2600 event_add_nolock_(struct event *ev, const struct timeval *tv,
2603 struct event_base *base = ev->ev_base;
2607 EVENT_BASE_ASSERT_LOCKED(base);
2608 event_debug_assert_is_setup_(ev);
2611 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
2613 EV_SOCK_ARG(ev->ev_fd),
2614 ev->ev_events & EV_READ ? "EV_READ " : " ",
2615 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
2616 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
2617 tv ? "EV_TIMEOUT " : " ",
2620 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2622 if (ev->ev_flags & EVLIST_FINALIZING) {
2628 * prepare for timeout insertion further below; if we get a
2629 * failure on any step, we should not change any state.
2631 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
2632 if (min_heap_reserve_(&base->timeheap,
2633 1 + min_heap_size_(&base->timeheap)) == -1)
2634 return (-1); /* ENOMEM == errno */
2637 /* If the main thread is currently executing a signal event's
2638 * callback, and we are not the main thread, then we want to wait
2639 * until the callback is done before we mess with the event, or else
2640 * we can race on ev_ncalls and ev_pncalls below. */
2641 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2642 if (base->current_event == event_to_event_callback(ev) &&
2643 (ev->ev_events & EV_SIGNAL)
2644 && !EVBASE_IN_THREAD(base)) {
2645 ++base->current_event_waiters;
2646 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2650 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2651 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2652 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2653 res = evmap_io_add_(base, ev->ev_fd, ev);
2654 else if (ev->ev_events & EV_SIGNAL)
2655 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
2657 event_queue_insert_inserted(base, ev);
2659 /* evmap says we need to notify the main thread. */
2666 * we should change the timeout state only if the previous event
2667 * addition succeeded.
2669 if (res != -1 && tv != NULL) {
2672 #ifdef USE_REINSERT_TIMEOUT
2674 int old_timeout_idx;
2678 * for persistent timeout events, we remember the
2679 * timeout value and re-add the event.
2681 * If tv_is_absolute, this was already set.
2683 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
2684 ev->ev_io_timeout = *tv;
2686 #ifndef USE_REINSERT_TIMEOUT
2687 if (ev->ev_flags & EVLIST_TIMEOUT) {
2688 event_queue_remove_timeout(base, ev);
2692 /* Check if it is active due to a timeout. Rescheduling
2693 * this timeout before the callback can be executed
2694 * removes it from the active list. */
2695 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2696 (ev->ev_res & EV_TIMEOUT)) {
2697 if (ev->ev_events & EV_SIGNAL) {
2698 /* See if we are just actively executing
2699 * this event in a loop */
2701 if (ev->ev_ncalls && ev->ev_pncalls) {
2703 *ev->ev_pncalls = 0;
2707 event_queue_remove_active(base, event_to_event_callback(ev));
2710 gettime(base, &now);
2712 common_timeout = is_common_timeout(tv, base);
2713 #ifdef USE_REINSERT_TIMEOUT
2714 was_common = is_common_timeout(&ev->ev_timeout, base);
2715 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2718 if (tv_is_absolute) {
2719 ev->ev_timeout = *tv;
2720 } else if (common_timeout) {
2721 struct timeval tmp = *tv;
2722 tmp.tv_usec &= MICROSECONDS_MASK;
2723 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2724 ev->ev_timeout.tv_usec |=
2725 (tv->tv_usec & ~MICROSECONDS_MASK);
2727 evutil_timeradd(&now, tv, &ev->ev_timeout);
2731 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2732 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
2734 #ifdef USE_REINSERT_TIMEOUT
2735 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2737 event_queue_insert_timeout(base, ev);
2740 if (common_timeout) {
2741 struct common_timeout_list *ctl =
2742 get_common_timeout_list(base, &ev->ev_timeout);
2743 if (ev == TAILQ_FIRST(&ctl->events)) {
2744 common_timeout_schedule(ctl, &now, ev);
2747 struct event* top = NULL;
2748 /* See if the earliest timeout is now earlier than it
2749 * was before: if so, we will need to tell the main
2750 * thread to wake up earlier than it would otherwise.
2751 * We double check the timeout of the top element to
2752 * handle time distortions due to system suspension.
2754 if (min_heap_elt_is_top_(ev))
2756 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2757 evutil_timercmp(&top->ev_timeout, &now, <))
2762 /* if we are not in the right thread, we need to wake up the loop */
2763 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2764 evthread_notify_base(base);
2766 event_debug_note_add_(ev);
2772 event_del_(struct event *ev, int blocking)
2775 struct event_base *base = ev->ev_base;
2777 if (EVUTIL_FAILURE_CHECK(!base)) {
2778 event_warnx("%s: event has no event_base set.", __func__);
2782 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2783 res = event_del_nolock_(ev, blocking);
2784 EVBASE_RELEASE_LOCK(base, th_base_lock);
2790 event_del(struct event *ev)
2792 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2796 event_del_block(struct event *ev)
2798 return event_del_(ev, EVENT_DEL_BLOCK);
2802 event_del_noblock(struct event *ev)
2804 return event_del_(ev, EVENT_DEL_NOBLOCK);
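/*
 * Illustrative sketch ('cb_arg' is hypothetical): the variants matter
 * when another thread may be running the event's callback at the moment
 * of deletion.
 *
 *     event_del_block(ev);    // waits for a running callback to finish;
 *     free(cb_arg);           // ... so the callback argument is now safe
 *
 *     event_del_noblock(ev);  // never waits; do not free callback state
 *                             // until the loop thread is known to be done
 */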
2807 /** Helper for event_del: always called with th_base_lock held.
2809 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2810 * EVEN_IF_FINALIZING} values. See those for more information.
2813 event_del_nolock_(struct event *ev, int blocking)
2815 struct event_base *base;
2816 int res = 0, notify = 0;
2818 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2819 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2821 /* An event without a base has not been added */
2822 if (ev->ev_base == NULL)
2825 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2827 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2828 if (ev->ev_flags & EVLIST_FINALIZING) {
2836 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2838 /* See if we are just actively executing this event in a loop */
2839 if (ev->ev_events & EV_SIGNAL) {
2840 if (ev->ev_ncalls && ev->ev_pncalls) {
2842 *ev->ev_pncalls = 0;
2846 if (ev->ev_flags & EVLIST_TIMEOUT) {
2847 /* NOTE: We never need to notify the main thread because of a
2848 * deleted timeout event: all that could happen if we don't is
2849 * that the dispatch loop might wake up too early. But the
2850 * point of notifying the main thread _is_ to wake up the
2851 * dispatch loop early anyway, so we wouldn't gain anything by
2852 * doing it. */
2854 event_queue_remove_timeout(base, ev);
2857 if (ev->ev_flags & EVLIST_ACTIVE)
2858 event_queue_remove_active(base, event_to_event_callback(ev));
2859 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2860 event_queue_remove_active_later(base, event_to_event_callback(ev));
2862 if (ev->ev_flags & EVLIST_INSERTED) {
2863 event_queue_remove_inserted(base, ev);
2864 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2865 res = evmap_io_del_(base, ev->ev_fd, ev);
2867 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
2869 /* evmap says we need to notify the main thread. */
2873 /* If we do not have events, let's notify the event base so it can
2874 * exit without waiting */
2875 if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
2879 /* if we are not in the right thread, we need to wake up the loop */
2880 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2881 evthread_notify_base(base);
2883 event_debug_note_del_(ev);
2885 /* If the main thread is currently executing this event's callback,
2886 * and we are not the main thread, then we want to wait until the
2887 * callback is done before returning. That way, when this function
2888 * returns, it will be safe to free the user-supplied argument.
2890 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2891 if (blocking != EVENT_DEL_NOBLOCK &&
2892 base->current_event == event_to_event_callback(ev) &&
2893 !EVBASE_IN_THREAD(base) &&
2894 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
2895 ++base->current_event_waiters;
2896 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2904 event_active(struct event *ev, int res, short ncalls)
2906 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2907 event_warnx("%s: event has no event_base set.", __func__);
2911 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2913 event_debug_assert_is_setup_(ev);
2915 event_active_nolock_(ev, res, ncalls);
2917 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
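/*
 * Illustrative sketch: event_active() may be called from any thread to
 * force an event's callback to run, whether or not the event is pending;
 * 'res' is passed through to the callback as its 'what' argument.
 *
 *     // In a worker thread, once the job tied to 'ev' has finished:
 *     event_active(ev, EV_READ, 1);
 */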
2922 event_active_nolock_(struct event *ev, int res, short ncalls)
2924 struct event_base *base;
2926 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2927 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2930 EVENT_BASE_ASSERT_LOCKED(base);
2932 if (ev->ev_flags & EVLIST_FINALIZING) {
2937 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2939 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2943 /* We get different kinds of events, add them together */
2946 case EVLIST_ACTIVE_LATER:
2954 if (ev->ev_pri < base->event_running_priority)
2955 base->event_continue = 1;
2957 if (ev->ev_events & EV_SIGNAL) {
2958 #ifndef EVENT__DISABLE_THREAD_SUPPORT
2959 if (base->current_event == event_to_event_callback(ev) &&
2960 !EVBASE_IN_THREAD(base)) {
2961 ++base->current_event_waiters;
2962 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2965 ev->ev_ncalls = ncalls;
2966 ev->ev_pncalls = NULL;
2969 event_callback_activate_nolock_(base, event_to_event_callback(ev));
2973 event_active_later_(struct event *ev, int res)
2975 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2976 event_active_later_nolock_(ev, res);
2977 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2981 event_active_later_nolock_(struct event *ev, int res)
2983 struct event_base *base = ev->ev_base;
2984 EVENT_BASE_ASSERT_LOCKED(base);
2986 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2987 /* We get different kinds of events, add them together */
2994 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2998 event_callback_activate_(struct event_base *base,
2999 struct event_callback *evcb)
3002 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3003 r = event_callback_activate_nolock_(base, evcb);
3004 EVBASE_RELEASE_LOCK(base, th_base_lock);
3009 event_callback_activate_nolock_(struct event_base *base,
3010 struct event_callback *evcb)
3014 if (evcb->evcb_flags & EVLIST_FINALIZING)
3017 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
3021 case EVLIST_ACTIVE_LATER:
3022 event_queue_remove_active_later(base, evcb);
3031 event_queue_insert_active(base, evcb);
3033 if (EVBASE_NEED_NOTIFY(base))
3034 evthread_notify_base(base);
3040 event_callback_activate_later_nolock_(struct event_base *base,
3041 struct event_callback *evcb)
3043 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
3046 event_queue_insert_active_later(base, evcb);
3047 if (EVBASE_NEED_NOTIFY(base))
3048 evthread_notify_base(base);
3053 event_callback_init_(struct event_base *base,
3054 struct event_callback *cb)
3056 memset(cb, 0, sizeof(*cb));
3057 cb->evcb_pri = base->nactivequeues - 1;
3061 event_callback_cancel_(struct event_base *base,
3062 struct event_callback *evcb)
3065 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3066 r = event_callback_cancel_nolock_(base, evcb, 0);
3067 EVBASE_RELEASE_LOCK(base, th_base_lock);
3072 event_callback_cancel_nolock_(struct event_base *base,
3073 struct event_callback *evcb, int even_if_finalizing)
3075 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3078 if (evcb->evcb_flags & EVLIST_INIT)
3079 return event_del_nolock_(event_callback_to_event(evcb),
3080 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3082 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3084 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3088 /* We get different kinds of events, add them together */
3089 event_queue_remove_active(base, evcb);
3091 case EVLIST_ACTIVE_LATER:
3092 event_queue_remove_active_later(base, evcb);
3102 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
3104 memset(cb, 0, sizeof(*cb));
3105 cb->evcb_cb_union.evcb_selfcb = fn;
3107 cb->evcb_pri = priority;
3108 cb->evcb_closure = EV_CLOSURE_CB_SELF;
3112 event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
3114 cb->evcb_pri = priority;
3118 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3121 base = current_base;
3122 event_callback_cancel_(base, cb);
3125 #define MAX_DEFERREDS_QUEUED 32
3127 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3131 base = current_base;
3132 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3133 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3134 r = event_callback_activate_later_nolock_(base, cb);
3136 r = event_callback_activate_nolock_(base, cb);
3138 ++base->n_deferreds_queued;
3141 EVBASE_RELEASE_LOCK(base, th_base_lock);
3146 timeout_next(struct event_base *base, struct timeval **tv_p)
3148 /* Caller must hold th_base_lock */
3151 struct timeval *tv = *tv_p;
3154 ev = min_heap_top_(&base->timeheap);
3157 /* if no time-based events are active, wait for I/O */
3162 if (gettime(base, &now) == -1) {
3167 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3168 evutil_timerclear(tv);
3172 evutil_timersub(&ev->ev_timeout, &now, tv);
3174 EVUTIL_ASSERT(tv->tv_sec >= 0);
3175 EVUTIL_ASSERT(tv->tv_usec >= 0);
3176 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
3182 /* Activate every event whose timeout has elapsed. */
3184 timeout_process(struct event_base *base)
3186 /* Caller must hold lock. */
3190 if (min_heap_empty_(&base->timeheap)) {
3194 gettime(base, &now);
3196 while ((ev = min_heap_top_(&base->timeheap))) {
3197 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3200 /* delete this event from the I/O queues */
3201 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
3203 event_debug(("timeout_process: event: %p, call %p",
3204 ev, ev->ev_callback));
3205 event_active_nolock_(ev, EV_TIMEOUT, 1);
3210 #define MAX(a,b) (((a)>(b))?(a):(b))
3213 #define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3215 /* These are a fancy way to spell
3216 if (~flags & EVLIST_INTERNAL)
3217 base->event_count--/++;
3219 #define DECR_EVENT_COUNT(base,flags) \
3220 ((base)->event_count -= !((flags) & EVLIST_INTERNAL))
3221 #define INCR_EVENT_COUNT(base,flags) do { \
3222 ((base)->event_count += !((flags) & EVLIST_INTERNAL)); \
3223 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3227 event_queue_remove_inserted(struct event_base *base, struct event *ev)
3229 EVENT_BASE_ASSERT_LOCKED(base);
3230 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
3231 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3232 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3235 DECR_EVENT_COUNT(base, ev->ev_flags);
3236 ev->ev_flags &= ~EVLIST_INSERTED;
3239 event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3241 EVENT_BASE_ASSERT_LOCKED(base);
3242 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3243 event_errx(1, "%s: %p not on queue %x", __func__,
3244 evcb, EVLIST_ACTIVE);
3247 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3248 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3249 base->event_count_active--;
3251 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3252 evcb, evcb_active_next);
3255 event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3257 EVENT_BASE_ASSERT_LOCKED(base);
3258 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3259 event_errx(1, "%s: %p not on queue %x", __func__,
3260 evcb, EVLIST_ACTIVE_LATER);
3263 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3264 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3265 base->event_count_active--;
3267 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3270 event_queue_remove_timeout(struct event_base *base, struct event *ev)
3272 EVENT_BASE_ASSERT_LOCKED(base);
3273 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3274 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3275 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3278 DECR_EVENT_COUNT(base, ev->ev_flags);
3279 ev->ev_flags &= ~EVLIST_TIMEOUT;
3281 if (is_common_timeout(&ev->ev_timeout, base)) {
3282 struct common_timeout_list *ctl =
3283 get_common_timeout_list(base, &ev->ev_timeout);
3284 TAILQ_REMOVE(&ctl->events, ev,
3285 ev_timeout_pos.ev_next_with_common_timeout);
3287 min_heap_erase_(&base->timeheap, ev);
3291 #ifdef USE_REINSERT_TIMEOUT
3292 /* Remove and reinsert 'ev' into the timeout queue. */
3294 event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3295 int was_common, int is_common, int old_timeout_idx)
3297 struct common_timeout_list *ctl;
3298 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3299 event_queue_insert_timeout(base, ev);
3303 switch ((was_common<<1) | is_common) {
3304 case 3: /* Changing from one common timeout to another */
3305 ctl = base->common_timeout_queues[old_timeout_idx];
3306 TAILQ_REMOVE(&ctl->events, ev,
3307 ev_timeout_pos.ev_next_with_common_timeout);
3308 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3309 insert_common_timeout_inorder(ctl, ev);
3311 case 2: /* Was common; is no longer common */
3312 ctl = base->common_timeout_queues[old_timeout_idx];
3313 TAILQ_REMOVE(&ctl->events, ev,
3314 ev_timeout_pos.ev_next_with_common_timeout);
3315 min_heap_push_(&base->timeheap, ev);
3317 case 1: /* Wasn't common; has become common. */
3318 min_heap_erase_(&base->timeheap, ev);
3319 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3320 insert_common_timeout_inorder(ctl, ev);
3322 case 0: /* was in heap; is still on heap. */
3323 min_heap_adjust_(&base->timeheap, ev);
3326 EVUTIL_ASSERT(0); /* unreachable */
3332 /* Add 'ev' to the common timeout list in 'ctl'. */
3334 insert_common_timeout_inorder(struct common_timeout_list *ctl,
3338 /* By all logic, we should just be able to append 'ev' to the end of
3339 * ctl->events, since the timeout on each 'ev' is set to {the common
3340 * timeout} + {the time when we add the event}, and so the events
3341 * should arrive in order of their timeouts. But just in case
3342 * there's some wacky threading issue going on, we do a search from
3343 * the end of 'ctl->events' to find the right insertion point.
3345 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3346 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3347 /* This timercmp is a little sneaky, since both ev and e have
3348 * magic values in tv_usec. Fortunately, they ought to have
3349 * the _same_ magic values in tv_usec. Let's assert for that.
3352 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3353 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3354 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3355 ev_timeout_pos.ev_next_with_common_timeout);
3359 TAILQ_INSERT_HEAD(&ctl->events, ev,
3360 ev_timeout_pos.ev_next_with_common_timeout);
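/*
 * Illustrative sketch: applications opt into the common-timeout queues
 * via event_base_init_common_timeout(), which returns a timeval carrying
 * the magic tv_usec bits that is_common_timeout() recognizes above.
 *
 *     struct timeval tv = { 5, 0 };
 *     const struct timeval *common =
 *         event_base_init_common_timeout(base, &tv);
 *     event_add(ev1, common);  // many 5s timers now share one ordered list
 *     event_add(ev2, common);
 */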
3364 event_queue_insert_inserted(struct event_base *base, struct event *ev)
3366 EVENT_BASE_ASSERT_LOCKED(base);
3368 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3369 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3370 ev, EV_SOCK_ARG(ev->ev_fd));
3374 INCR_EVENT_COUNT(base, ev->ev_flags);
3376 ev->ev_flags |= EVLIST_INSERTED;
3380 event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3382 EVENT_BASE_ASSERT_LOCKED(base);
3384 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3385 /* Double insertion is possible for active events */
3389 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3391 evcb->evcb_flags |= EVLIST_ACTIVE;
3393 base->event_count_active++;
3394 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3395 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3396 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3397 evcb, evcb_active_next);
3401 event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3403 EVENT_BASE_ASSERT_LOCKED(base);
3404 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3405 /* Double insertion is possible */
3409 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3410 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3411 base->event_count_active++;
3412 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3413 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3414 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3418 event_queue_insert_timeout(struct event_base *base, struct event *ev)
3420 EVENT_BASE_ASSERT_LOCKED(base);
3422 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3423 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3424 ev, EV_SOCK_ARG(ev->ev_fd));
3428 INCR_EVENT_COUNT(base, ev->ev_flags);
3430 ev->ev_flags |= EVLIST_TIMEOUT;
3432 if (is_common_timeout(&ev->ev_timeout, base)) {
3433 struct common_timeout_list *ctl =
3434 get_common_timeout_list(base, &ev->ev_timeout);
3435 insert_common_timeout_inorder(ctl, ev);
3437 min_heap_push_(&base->timeheap, ev);
3442 event_queue_make_later_events_active(struct event_base *base)
3444 struct event_callback *evcb;
3445 EVENT_BASE_ASSERT_LOCKED(base);
3447 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3448 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3449 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3450 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3451 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3452 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
3456 /* Functions for debugging */
3459 event_get_version(void)
3461 return (EVENT__VERSION);
3465 event_get_version_number(void)
3467 return (EVENT__NUMERIC_VERSION);
3471 * No thread-safe interface needed - the information should be the same
3472 * for all threads. */
3476 event_get_method(void)
3478 return (current_base->evsel->name);
3481 #ifndef EVENT__DISABLE_MM_REPLACEMENT
3482 static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3483 static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3484 static void (*mm_free_fn_)(void *p) = NULL;
3487 event_mm_malloc_(size_t sz)
3493 return mm_malloc_fn_(sz);
3499 event_mm_calloc_(size_t count, size_t size)
3501 if (count == 0 || size == 0)
3504 if (mm_malloc_fn_) {
3505 size_t sz = count * size;
3507 if (count > EV_SIZE_MAX / size)
3509 p = mm_malloc_fn_(sz);
3511 return memset(p, 0, sz);
3513 void *p = calloc(count, size);
3515 /* Windows calloc doesn't reliably set ENOMEM */
3528 event_mm_strdup_(const char *str)
3535 if (mm_malloc_fn_) {
3536 size_t ln = strlen(str);
3538 if (ln == EV_SIZE_MAX)
3540 p = mm_malloc_fn_(ln+1);
3542 return memcpy(p, str, ln+1);
3545 return _strdup(str);
3556 event_mm_realloc_(void *ptr, size_t sz)
3559 return mm_realloc_fn_(ptr, sz);
3561 return realloc(ptr, sz);
3565 event_mm_free_(void *ptr)
3574 event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3575 void *(*realloc_fn)(void *ptr, size_t sz),
3576 void (*free_fn)(void *ptr))
3578 mm_malloc_fn_ = malloc_fn;
3579 mm_realloc_fn_ = realloc_fn;
3580 mm_free_fn_ = free_fn;
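/*
 * Illustrative sketch (the counting wrapper is hypothetical): replacement
 * allocators must be installed before any other libevent call and must
 * behave like their libc counterparts.
 *
 *     #include <stdlib.h>
 *
 *     static size_t total_allocated;
 *     static void *count_malloc(size_t sz) {
 *         total_allocated += sz;  // crude: ignores realloc/free
 *         return malloc(sz);
 *     }
 *     static void *count_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *     static void count_free(void *p) { free(p); }
 *
 *     // At program start, before creating any event_base:
 *     //     event_set_mem_functions(count_malloc, count_realloc, count_free);
 */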
3584 #ifdef EVENT__HAVE_EVENTFD
3586 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3590 struct event_base *base = arg;
3592 r = read(fd, (void*) &msg, sizeof(msg));
3593 if (r<0 && errno != EAGAIN) {
3594 event_sock_warn(fd, "Error reading from eventfd");
3596 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3597 base->is_notify_pending = 0;
3598 EVBASE_RELEASE_LOCK(base, th_base_lock);
3603 evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3605 unsigned char buf[1024];
3606 struct event_base *base = arg;
3608 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3611 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3615 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3616 base->is_notify_pending = 0;
3617 EVBASE_RELEASE_LOCK(base, th_base_lock);
3621 evthread_make_base_notifiable(struct event_base *base)
3627 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3628 r = evthread_make_base_notifiable_nolock_(base);
3629 EVBASE_RELEASE_LOCK(base, th_base_lock);
3634 evthread_make_base_notifiable_nolock_(struct event_base *base)
3636 void (*cb)(evutil_socket_t, short, void *);
3637 int (*notify)(struct event_base *);
3639 if (base->th_notify_fn != NULL) {
3640 /* The base is already notifiable: we're doing fine. */
3644 #if defined(EVENT__HAVE_WORKING_KQUEUE)
3645 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3646 base->th_notify_fn = event_kq_notify_base_;
3647 /* No need to add an event here; the backend can wake
3648 * itself up just fine. */
3653 #ifdef EVENT__HAVE_EVENTFD
3654 base->th_notify_fd[0] = evutil_eventfd_(0,
3655 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
3656 if (base->th_notify_fd[0] >= 0) {
3657 base->th_notify_fd[1] = -1;
3658 notify = evthread_notify_base_eventfd;
3659 cb = evthread_notify_drain_eventfd;
3662 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3663 notify = evthread_notify_base_default;
3664 cb = evthread_notify_drain_default;
3669 base->th_notify_fn = notify;
3671 /* prepare an event that we can use for wakeup */
3672 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3673 EV_READ|EV_PERSIST, cb, base);
3676 /* we need to mark this as an internal event */
3676 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3677 event_priority_set(&base->th_notify, 0);
3679 return event_add_nolock_(&base->th_notify, NULL, 0);
3683 event_base_foreach_event_nolock_(struct event_base *base,
3684 event_base_foreach_event_cb fn, void *arg)
3690 /* Start out with all the EVLIST_INSERTED events. */
3691 if ((r = evmap_foreach_event_(base, fn, arg)))
3694 /* Okay, now we deal with those events that have timeouts and are in
3695 * the min heap. */
3696 for (u = 0; u < base->timeheap.n; ++u) {
3697 ev = base->timeheap.p[u];
3698 if (ev->ev_flags & EVLIST_INSERTED) {
3699 /* we already processed this one */
3702 if ((r = fn(base, ev, arg)))
3706 /* Now for the events in one of the timeout queues. */
3708 for (i = 0; i < base->n_common_timeouts; ++i) {
3709 struct common_timeout_list *ctl =
3710 base->common_timeout_queues[i];
3711 TAILQ_FOREACH(ev, &ctl->events,
3712 ev_timeout_pos.ev_next_with_common_timeout) {
3713 if (ev->ev_flags & EVLIST_INSERTED) {
3714 /* we already processed this one */
3717 if ((r = fn(base, ev, arg)))
3722 /* Finally, we deal with all the active events that we haven't touched
3723 * yet. */
3724 for (i = 0; i < base->nactivequeues; ++i) {
3725 struct event_callback *evcb;
3726 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3727 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3728 /* This isn't an event (evlist_init clear), or
3729 * we already processed it. (inserted or
3730 * timeout set) */
3733 ev = event_callback_to_event(evcb);
3734 if ((r = fn(base, ev, arg)))
3742 /* Helper for event_base_dump_events: called on each event in the event base;
3743 * dumps only the inserted events. */
3745 dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3748 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3751 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3754 fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
3755 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3756 (e->ev_events&EV_READ)?" Read":"",
3757 (e->ev_events&EV_WRITE)?" Write":"",
3758 (e->ev_events&EV_CLOSED)?" EOF":"",
3759 (e->ev_events&EV_SIGNAL)?" Signal":"",
3760 (e->ev_events&EV_PERSIST)?" Persist":"",
3761 (e->ev_events&EV_ET)?" ET":"",
3762 (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3763 if (e->ev_flags & EVLIST_TIMEOUT) {
3765 tv.tv_sec = e->ev_timeout.tv_sec;
3766 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3767 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3768 fprintf(output, " Timeout=%ld.%06d",
3769 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3771 fputc('\n', output);
3776 /* Helper for event_base_dump_events: called on each event in the event base;
3777 * dumps only the active events. */
3779 dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3782 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3785 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3788 fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3789 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3790 (e->ev_res&EV_READ)?" Read":"",
3791 (e->ev_res&EV_WRITE)?" Write":"",
3792 (e->ev_res&EV_CLOSED)?" EOF":"",
3793 (e->ev_res&EV_SIGNAL)?" Signal":"",
3794 (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3795 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3796 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3802 event_base_foreach_event(struct event_base *base,
3803 event_base_foreach_event_cb fn, void *arg)
3806 if ((!fn) || (!base)) {
3809 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3810 r = event_base_foreach_event_nolock_(base, fn, arg);
3811 EVBASE_RELEASE_LOCK(base, th_base_lock);
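/*
 * Illustrative sketch: the callback must not add or delete events; a
 * non-zero return stops the walk and becomes the function's result.
 *
 *     static int
 *     count_readers(const struct event_base *b, const struct event *e, void *p)
 *     {
 *         if (event_get_events(e) & EV_READ)
 *             ++*(int *)p;
 *         return 0;  // keep iterating
 *     }
 *
 *     // int n = 0;
 *     // event_base_foreach_event(base, count_readers, &n);
 */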
3817 event_base_dump_events(struct event_base *base, FILE *output)
3819 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3820 fprintf(output, "Inserted events:\n");
3821 event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3823 fprintf(output, "Active events:\n");
3824 event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
3825 EVBASE_RELEASE_LOCK(base, th_base_lock);
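/*
 * Illustrative sketch ('dump_cb' is hypothetical): a handy debugging aid
 * is to dump the tables from a signal event.
 *
 *     static void
 *     dump_cb(evutil_socket_t sig, short what, void *arg)
 *     {
 *         event_base_dump_events((struct event_base *)arg, stderr);
 *     }
 *     // struct event *e = evsignal_new(base, SIGUSR1, dump_cb, base);
 */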
3829 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3831 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3833 /* Activate any non-timer events */
3834 if (!(events & EV_TIMEOUT)) {
3835 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3837 /* If we want to activate timer events, loop and activate each event with
3838 * the same fd in both the timeheap and common timeouts list */
3843 for (u = 0; u < base->timeheap.n; ++u) {
3844 ev = base->timeheap.p[u];
3845 if (ev->ev_fd == fd) {
3846 event_active_nolock_(ev, EV_TIMEOUT, 1);
3850 for (i = 0; i < base->n_common_timeouts; ++i) {
3851 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3852 TAILQ_FOREACH(ev, &ctl->events,
3853 ev_timeout_pos.ev_next_with_common_timeout) {
3854 if (ev->ev_fd == fd) {
3855 event_active_nolock_(ev, EV_TIMEOUT, 1);
3861 EVBASE_RELEASE_LOCK(base, th_base_lock);
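/*
 * Illustrative sketch: this is useful for simulating readiness, e.g. in
 * tests, without touching the underlying descriptor.
 *
 *     // Run the callbacks of every event watching 'sock' as if it had
 *     // just become readable:
 *     event_base_active_by_fd(base, sock, EV_READ);
 */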
3865 event_base_active_by_signal(struct event_base *base, int sig)
3867 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3868 evmap_signal_active_(base, sig, 1);
3869 EVBASE_RELEASE_LOCK(base, th_base_lock);
3874 event_base_add_virtual_(struct event_base *base)
3876 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3877 base->virtual_event_count++;
3878 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3879 EVBASE_RELEASE_LOCK(base, th_base_lock);
3883 event_base_del_virtual_(struct event_base *base)
3885 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3886 EVUTIL_ASSERT(base->virtual_event_count > 0);
3887 base->virtual_event_count--;
3888 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3889 evthread_notify_base(base);
3890 EVBASE_RELEASE_LOCK(base, th_base_lock);
3894 event_free_debug_globals_locks(void)
3896 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3897 #ifndef EVENT__DISABLE_DEBUG_MODE
3898 if (event_debug_map_lock_ != NULL) {
3899 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3900 event_debug_map_lock_ = NULL;
3901 evthreadimpl_disable_lock_debugging_();
3903 #endif /* EVENT__DISABLE_DEBUG_MODE */
3904 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
3909 event_free_debug_globals(void)
3911 event_free_debug_globals_locks();
3915 event_free_evsig_globals(void)
3917 evsig_free_globals_();
3921 event_free_evutil_globals(void)
3923 evutil_free_globals_();
3927 event_free_globals(void)
3929 event_free_debug_globals();
3930 event_free_evsig_globals();
3931 event_free_evutil_globals();
3935 libevent_global_shutdown(void)
3937 event_disable_debug_mode();
3938 event_free_globals();
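/*
 * Illustrative sketch: call this once, after all bases and events have
 * been freed, typically right before exit, so leak checkers see a clean
 * heap; no libevent function may be used afterwards.
 *
 *     event_base_free(base);
 *     libevent_global_shutdown();
 *     return 0;
 */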
3941 #ifndef EVENT__DISABLE_THREAD_SUPPORT
3943 event_global_setup_locks_(const int enable_locks)
3945 #ifndef EVENT__DISABLE_DEBUG_MODE
3946 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
3948 if (evsig_global_setup_locks_(enable_locks) < 0)
3950 if (evutil_global_setup_locks_(enable_locks) < 0)
3952 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3959 event_base_assert_ok_(struct event_base *base)
3961 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3962 event_base_assert_ok_nolock_(base);
3963 EVBASE_RELEASE_LOCK(base, th_base_lock);
3967 event_base_assert_ok_nolock_(struct event_base *base)
3972 /* First do checks on the per-fd and per-signal lists */
3973 evmap_check_integrity_(base);
3975 /* Check the heap property */
3976 for (i = 1; i < (int)base->timeheap.n; ++i) {
3977 int parent = (i - 1) / 2;
3978 struct event *ev, *p_ev;
3979 ev = base->timeheap.p[i];
3980 p_ev = base->timeheap.p[parent];
3981 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3982 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3983 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3986 /* Check that the common timeouts are fine */
3987 for (i = 0; i < base->n_common_timeouts; ++i) {
3988 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3989 struct event *last=NULL, *ev;
3991 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3993 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3995 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
3996 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
3997 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3998 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
4003 /* Check the active queues. */
4005 for (i = 0; i < base->nactivequeues; ++i) {
4006 struct event_callback *evcb;
4007 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
4008 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
4009 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
4010 EVUTIL_ASSERT(evcb->evcb_pri == i);
4016 struct event_callback *evcb;
4017 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
4018 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
4022 EVUTIL_ASSERT(count == base->event_count_active);